repo_name stringlengths 5–114 | repo_url stringlengths 24–133 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | directory_id stringlengths 40–40 | branch_name stringclasses 209 values | visit_date timestamp[ns] | revision_date timestamp[ns] | committer_date timestamp[ns] | github_id int64 9.83k–683M ⌀ | star_events_count int64 0–22.6k | fork_events_count int64 0–4.15k | gha_license_id stringclasses 17 values | gha_created_at timestamp[ns] | gha_updated_at timestamp[ns] | gha_pushed_at timestamp[ns] | gha_language stringclasses 115 values | files listlengths 1–13.2k | num_files int64 1–13.2k |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abattat/simple_word_counter | https://github.com/abattat/simple_word_counter | 9959b6baf378116260575c453867643586c72e82 | ef6ee5418d7cabd0dd591c416c7b1f83c0c82f5e | f0a3d4df2d02e9fe9aaab6df1ecb0e3d7f31401e | refs/heads/master | 2020-03-23T21:01:53.784635 | 2018-07-23T23:30:41 | 2018-07-23T23:30:41 | 142,077,295 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6841723322868347,
"alphanum_fraction": 0.6924007534980774,
"avg_line_length": 32.5934944152832,
"blob_id": "a5f25a72f86ea4c3a92da15d35e03bc875c63c7b",
"content_id": "a6ca1df10cbc79fd99946414f211ad4c999ac9e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4132,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 123,
"path": "/wordcountv1.0.py",
"repo_name": "abattat/simple_word_counter",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# Author : Andrew Battat\n# Date : 7-23-18\n\n'''\nDescription:\n This program accepts a text file and returns the number of occurences of each word in the file\n\n The programm is called like such:\n ./wordcount.py {--count | --topcount} input_file [output_file]\n\n The user must give the program one of two count options:\n count:\n prints a sorted list of every word found in the text file along with each word's number of occurences\n topcount:\n prints the 20 most common words found in the text file along with each word's number of occurences\n\n The user must give the program an input file, which is the text file the program reads from\n\n The user has the option of giving the program an output_file\n If an output file is given, the program will output to that file\n If no output file is given, the program will output to stdout\n\n The program will output as such:\n word1 word1_count\n word2 word2_count\n ...\n'''\n\nimport sys\nimport re\n\n'''\nreturns_sorted_lowercase_word_list is a helper-function that takes in a text file and returns a sorted list of all the words that appear in the text file\nnotes: Punctuation (except 's) have been filtered out; text is converted to lowercase\n'''\ndef returns_sorted_lowercase_word_list(filename):\n input_file = open(filename, 'r')\n text = re.sub('--', ' ', input_file.read())\n text = text.lower()\n sorted_lowercase_word_list = sorted(re.findall(r'([a-zA-Z]+(?:\\'[a-zA-Z]+)*)', text))\n input_file.close()\n return sorted_lowercase_word_list\n # finds all the words that match 1 or more alphas followed by 0 or more (' followed by 1 or more alphas) and returns a sorted list out of them\n\n\n'''\nreturns_word_count_dict is a function that takes in a text file and returns word_count_dict\nword_count_dict is a dictionary that pairs each word with its count--the word's number of occurences in the text file\n'''\ndef returns_word_count_dict(filename):\n sorted_lowercase_word_list = returns_sorted_lowercase_word_list(filename)\n\n word_count_dict = {} # maps each word to its count\n for word in sorted_lowercase_word_list:\n if word not in word_count_dict: # word not found in dict -> set count to 1\n word_count_dict[word] = 1\n else: # word found in dict -> add 1 to count\n word_count_dict[word] += 1\n return word_count_dict\n\n\n'''\nprint_words is a function that takes in a text file and returns a string content\ncontent contains all the unique word in the text file along with each word's number of occurences\n'''\ndef print_words(filename):\n content = ''\n word_count_dict = returns_word_count_dict(filename)\n for word in word_count_dict.keys():\n content += word + ' ' + str(word_count_dict[word]) + '\\n'\n return content\n\n\n'''\nsort_by_value is a custom-sort helper-function that returns the value in the key-value pair\n'''\ndef sort_by_value(item):\n return item[1]\n\n\n'''\nprint_top is a function that takes in a text file and returns a string containing the 20 most common words in the text file along with each word's number of occurences\n'''\ndef print_top(filename):\n content = ''\n word_count_dict = returns_word_count_dict(filename)\n sorted_count_list = sorted(word_count_dict.items(), key=sort_by_value, reverse=True)\n for item in sorted_count_list[:20]:\n content += item[0] + ' ' + str(item[1]) + '\\n'\n return content\n\n\ndef main():\n if len(sys.argv) != 3 and len(sys.argv) != 4: # ADDED\n print('usage: ./wordcount.py {--count | --topcount} input_file [output_file]')\n sys.exit(1)\n\n option = sys.argv[1]\n 
filename = sys.argv[2]\n saveFile = ''\n if len(sys.argv) == 4: # if length is 4, an output file has been given\n saveFile = open(sys.argv[3], 'w')\n\n if option == '--count':\n content = print_words(filename) # ADDED CONTENT\n elif option == '--topcount':\n content = print_top(filename)\n else:\n print('unknown option: ' + option)\n sys.exit(1)\n\n # if an output file is given, output to that file.\n # if no output file given, output to stdout\n if saveFile:\n saveFile.write(content)\n saveFile.close()\n else:\n sys.stdout.write(content)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7701975107192993,
"alphanum_fraction": 0.7701975107192993,
"avg_line_length": 54.650001525878906,
"blob_id": "680a5a37c619c088c59494529b32a0b119637bfd",
"content_id": "5db90669c9c17a2913e449cf6c90184eeb65bc10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1114,
"license_type": "no_license",
"max_line_length": 312,
"num_lines": 20,
"path": "/README.md",
"repo_name": "abattat/simple_word_counter",
"src_encoding": "UTF-8",
"text": "# simple_word_counter\n\nThis program is an example of a concise, basic, completed project.\n\nThe purpose of this program is to exhibit that I am comfortable with Python syntax and convention.\n\nAdditionally, the program incorporates several features to show what I am familiar with: reading in files, argument parsing, data parsing, iterating through data types, regular expression matching, python library usage, custom sorting, and my knowledge of python data structures: lists, dictionaries and strings.\n\nIncluded in this repository is a text file of Alice's Adventures in Wonderland, to be used on the program to test its performace.\n\nTo call the program:\n **./wordcount.py {--count | --topcount} inputFile [outputFile]**\n\n ex: ./wordcount.py --count alice.txt\n\n The above command will output to the console a list of each unique word found in the text of alice.txt along with each word's count (the number of times it occurs)\n\n\n**This exercise was taken from an online Python class taught by Google; however, the code is entirely mine.**\n*http://code.google.com/edu/languages/google-python-class/*\n\n"
}
] | 2 |
yashkantharia/CSV-codes-Python- | https://github.com/yashkantharia/CSV-codes-Python- | 2e6abffe340204eb465909849f8c178dcf97bdab | 3769c89147cedf8d090de5529f1171dfb6d0afd2 | 7f25169e4b8c197a9d992a5e0110486b49d86780 | refs/heads/master | 2023-05-09T17:57:46.085801 | 2021-06-06T23:24:24 | 2021-06-06T23:24:24 | 253,786,372 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5976063013076782,
"alphanum_fraction": 0.6091622114181519,
"avg_line_length": 29.670886993408203,
"blob_id": "978e3fad1e7d67de6fe6a9d3eb11893e020086c5",
"content_id": "84c966bcfe2a6bb513f8ba49358ca0977cad008d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2423,
"license_type": "no_license",
"max_line_length": 287,
"num_lines": 79,
"path": "/profile_csv_parth.py",
"repo_name": "yashkantharia/CSV-codes-Python-",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport time\n\n\ndf = pd.DataFrame()\nids=[]\nall_ids=[]\n\nregion = input(\"Select the index number as per your account region: \\n [1] eu \\n [2] in \\n [3] sg \\n\")\n\nif region==\"1\" or region ==\"eu\": \n url = \"https://api.clevertap.com/1/profiles.json\"\nelif region == \"2\" or region ==\"in\":\n url = \"https://in.api.clevertap.com/1/profiles.json\"\nelif region == \"3\" or region ==\"sg\":\n url =\"https://sg.api.clevertap.com/1/profiles.json\"\n\n\nacc_id = input(\"Enter your CleverTap account ID: \\n\")\npass_code = input(\"Enter the pass-code: \\n\")\nretry_counter=0\n\n\npayload = \"{\\n \\\"event_name\\\": \\\"-1\\\",\\n \\\"common_profile_properties\\\": {\\n \\\"profile_fields\\\": [\\n {\\n \\\"name\\\": \\\"bad_identities\\\",\\n \\\"operator\\\": \\\"exists\\\",\\n \\\"value\\\": \\\"-1\\\"\\n }\\n ]\\n }\\n}\"\nheaders = {\n 'X-CleverTap-Account-Id': acc_id,\n 'X-CleverTap-Passcode': pass_code,\n 'Content-Type': \"application/json\"\n }\n\nresponse = requests.request(\"POST\", url, data=payload, headers=headers)\n\ncursor = response.json()['cursor']\nnext_cursor = cursor\nwhile next_cursor is not None:\n params =(('cursor', next_cursor),)\n response = requests.get(url = url+\"?cursor=\"+next_cursor,headers = headers)\n print(response, response.json()[\"status\"])\n while response.json()[\"status\"]==\"fail\":\n time.sleep(15)\n response = requests.get(url = url+\"?cursor=\"+next_cursor,headers = headers)\n print(\"Retry Made\")\n print(response, response.json()[\"status\"])\n retry_counter=retry_counter+1\n if retry_counter>2:\n continue_retry = input(\"Do you want to continue trying? y/n :\")\n if continue_retry==\"y\":\n retry_counter=0\n else:\n print(\"Terminating process\")\n exit()\n\n record = response.json()['records']\n if len(record)==0:\n break\n\n df = pd.read_json(response.content)\n for i in range (0,len(df)):\n for key,value in df.records[i].items():\n if key==\"identity\":\n ids.append(value)\n if key ==\"all_identities\":\n all_ids.append(str(value))\n\n next_cursor = response.json()['next_cursor']\n\ndf_1 = pd.DataFrame(ids)\ndf_1.columns=[\"Identity\"]\n\ndf_2 = pd.DataFrame(all_ids)\ndf_2.columns=[\"All_ids\"]\n\nresult = pd.concat([df_1,df_2],axis=1)\nresult.head(10)\n\nsave_csv = input(\"Do you want to save this list as a csv in the pwd? y/n: \")\nif save_csv==\"y\":\n result.to_csv(\"result.csv\")\n"
},
{
"alpha_fraction": 0.6468609571456909,
"alphanum_fraction": 0.6647982001304626,
"avg_line_length": 21.58974266052246,
"blob_id": "e40a93c0e14ee48a715a92c69438d9312513301a",
"content_id": "38222a7d53d19e8dadb8d0965d13f5594da681cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 39,
"path": "/csv_mismatch.py",
"repo_name": "yashkantharia/CSV-codes-Python-",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport regex as re\n\nfile_up = raw_input(\"Enter path of CSV uploaded\")\nfile_down = raw_input(\"Enter path of CSV downloaded from the CT dashboard\")\n\ndf1 = pd.read_csv(file_up)\ndf2 = pd.read_csv(file_down, skiprows=1)\n\nuploaded_id = df1.identity\nuploaded_id = uploaded_id.values.tolist()\n\ndf2_identity = df2.Identity\n\nidentities_ct =[]\n\nfor i in range (0,len(df2_identity)):\n x = re.split(\",\",df2_identity[i])\n for j in range(0,len(x)):\n identities_ct.append(x[j])\n \ndiff = []\nfound = 0 \n\nfor i in range(0,len(uploaded_id)):\n if uploaded_id in identities_ct:\n found = found + 1\n else:\n diff.append(uploaded_id[i])\n \nprint(\"Number of records not found \"+str(found))\n\noption = raw_input(\"Do you want to save the difference CSV in the pwd? y/n\")\n\nif option==\"y\":\n df3 = pd.DataFrame(diff)\n df3.columns=[\"Identity\"]\n\n df3.to_csv(\"diff_list.csv\")\n\n\n \n\n\n\n\n"
}
] | 2 |
michaelsilverstein/HackerRank-with-Kevin | https://github.com/michaelsilverstein/HackerRank-with-Kevin | 198a52ba7eb032dc2bf080fc20b813c495681533 | b076e2f14e9bd10349d2dc12364676754d65270f | f7c2d6a82ee580d93226fa30e8d3e32fc37333c3 | refs/heads/master | 2020-06-11T12:59:39.212271 | 2016-12-29T23:56:23 | 2016-12-29T23:56:23 | 75,659,031 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4779249429702759,
"alphanum_fraction": 0.5474613904953003,
"avg_line_length": 17.489795684814453,
"blob_id": "111ee1f3c96ad8c1f763434b2566f0e6b63184c7",
"content_id": "f47f5ecc8f7e13b37ed9d2fdf4903aba2b8f8cf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 906,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 49,
"path": "/PE30.py",
"repo_name": "michaelsilverstein/HackerRank-with-Kevin",
"src_encoding": "UTF-8",
"text": "#Project Euler #30\n#https://projecteuler.net/problem=30\n\nBrute Force:\n\n#Problem 30\n\nfrom time import *\n\nstart = clock()\n\ndef sum_fifth(num):\n #Sums each digit to the fifth power of a number\n return int(sum([int(x)**5 for x in str(num)]))\n\ndef check(num):\n if num == sum_fifth(num):\n return True\ndef find_constraint():\n ##Find constraint\n nines = ''\n n9 = range(1,101)\n sums = []\n for x in range(100):\n nines += '9'\n n = int(nines)\n s = sum_fifth(n)\n sums.append(s)\n print nines,s,len(nines),len(str(s))\ndef main():\n s = 0\n for x in range(2,int(10e5)):\n if check(x):\n s += x\n print x\n print 'Total:',s\nmain()\n\nprint 'Time required: %f seconds'%(clock()-start) \n--------------------------------------------------\nOutput:\n4150\n4151\n54748\n92727\n93084\n194979\nTotal: 443839\nTime required: 4.798988 seconds\n"
},
{
"alpha_fraction": 0.8035714030265808,
"alphanum_fraction": 0.8035714030265808,
"avg_line_length": 27,
"blob_id": "fb7e4d9c9a9e397b3db69ba02c45dd515dfe2343",
"content_id": "df0ef6de7c7f15e3fa3146fb6ddaded340531011",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/README.md",
"repo_name": "michaelsilverstein/HackerRank-with-Kevin",
"src_encoding": "UTF-8",
"text": "# HackerRank-with-Kevin\nCollab space to work with Kevin\n"
},
{
"alpha_fraction": 0.3826714754104614,
"alphanum_fraction": 0.4103489816188812,
"avg_line_length": 20.30769157409668,
"blob_id": "6cc4fdf619daec7c0c5b382f448fbe4176c087b5",
"content_id": "9e438270e2a9b1b5bd478e2f2dae46df97765d81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 831,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 39,
"path": "/Game of Stones.py",
"repo_name": "michaelsilverstein/HackerRank-with-Kevin",
"src_encoding": "UTF-8",
"text": "##https://www.hackerrank.com/challenges/game-of-stones-1\n\n#Michael so far:\ndef gos(stones):\n for game in stones:\n if game <= 1:\n return 'Second'\n turn = 0\n while game > 1:\n if game % 5 == 0:\n game -= 5\n turn += 1\n if game % 3 == 0:\n game -= 3\n turn += 1\n if game % 2 == 0:\n game -= 2\n turn += 1\n if turn % 2 == 0:\n return 'First'\n else:\n return 'Second'\n\n\n#Kevin (works):\na = input()\nn = int(a)\nc = [input() for _ in range(n)]\nb=list(map(int,c))\ndef gameofstones(b):\n i=0\n for x in b:\n if (x % 7 >=2) and (x % 7 <= 6):\n print('First')\n else: \n print('Second')\n print(answer)\n\ngameofstones(b)\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5072992444038391,
"avg_line_length": 26.399999618530273,
"blob_id": "dfc177dfbf71697bbfcc6033cd0f948fc2643ebb",
"content_id": "c288b0668d59a62065e609cb4655d9db08727d70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 10,
"path": "/Lonely_Integer.py",
"repo_name": "michaelsilverstein/HackerRank-with-Kevin",
"src_encoding": "UTF-8",
"text": "def lonelyinteger(a):\n counts = [a.count(x) for x in a]\n unique_counts = []\n for i in counts:\n c = 0\n for j in counts:\n if i!=j:\n c += 1\n unique_counts.append(c)\n answer = a[unique_counts.index(max(unique_counts))]\n"
}
] | 4 |
PEtab-dev/petab_web_validator | https://github.com/PEtab-dev/petab_web_validator | 692c23405dc706f4e5a11691a0b9025ac73a7bad | 3de8451c6c5de497432c91d9d11d1d221958cae5 | e0b46b6f68d0153c7474c247afde1d604d5aa6f2 | refs/heads/main | 2023-07-22T15:33:45.560719 | 2023-07-06T21:55:22 | 2023-07-07T17:24:36 | 240,063,950 | 3 | 0 | null | 2020-02-12T16:46:25 | 2022-03-23T12:43:26 | 2023-07-07T17:24:36 | Python | [
{
"alpha_fraction": 0.6051059365272522,
"alphanum_fraction": 0.6051059365272522,
"avg_line_length": 33.092594146728516,
"blob_id": "d6486834654e7095eea81593685ea2f0287c358a",
"content_id": "12e69cd2083df33eecdd5530f401fc1a8902f119",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1841,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 54,
"path": "/petab_web_validator.py",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nimport libsbml\nimport pandas as pd\nimport petab\n\nfrom app import app\n\n\ndef get_petab_problem(sbml_str: str = None,\n condition_df: Optional[pd.DataFrame] = None,\n measurement_df: Optional[pd.DataFrame] = None,\n parameter_df: Optional[pd.DataFrame] = None,\n observable_df: Optional[pd.DataFrame] = None\n ) -> 'petab.Problem':\n \"\"\"\n load petab problem.\n\n Arguments:\n sbml_str: PEtab SBML file\n condition_df: PEtab condition table\n measurement_df: PEtab measurement table\n parameter_df: PEtab parameter table\n observable_df: PEtab observables tables\n \"\"\"\n\n sbml_model = sbml_document = sbml_reader = None\n\n if condition_df:\n condition_df = petab.conditions.get_condition_df(condition_df)\n\n if measurement_df:\n # TODO: If there are multiple tables, we will merge them\n measurement_df = petab.measurements.get_measurement_df(measurement_df)\n\n if parameter_df:\n parameter_df = petab.parameters.get_parameter_df(parameter_df)\n\n if sbml_str:\n sbml_reader = libsbml.SBMLReader()\n sbml_document = sbml_reader.readSBMLFromString(sbml_str)\n sbml_model = sbml_document.getModel()\n\n if observable_df:\n # TODO: If there are multiple tables, we will merge them\n observable_df = petab.observables.get_observable_df(observable_df)\n\n return petab.Problem(condition_df=condition_df,\n measurement_df=measurement_df,\n parameter_df=parameter_df,\n observable_df=observable_df,\n sbml_model=sbml_model,\n sbml_document=sbml_document,\n sbml_reader=sbml_reader)\n"
},
{
"alpha_fraction": 0.558590292930603,
"alphanum_fraction": 0.558590292930603,
"avg_line_length": 34.46875,
"blob_id": "da8fca3bca30e81fcd3bcb81173b536f9e2a2e1f",
"content_id": "3cecd18fafbdbddc442491ed5dd9654cb583d8b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1135,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 32,
"path": "/app/forms.py",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed\nfrom wtforms.fields import SubmitField\n\n\nclass PEtabForm(FlaskForm):\n sbml_file = FileField(\n ' sbml file',\n validators=[\n FileAllowed(['xml'],\n 'Only files with the *.xml extension are allowed')])\n condition_file = FileField(\n ' condition file',\n validators=[\n FileAllowed(['tsv'],\n 'Only files with the *.tsv extension are allowed')])\n measurement_file = FileField(\n ' measurement file',\n validators=[\n FileAllowed(['tsv'],\n 'Only files with the *.tsv extension are allowed')])\n parameters_file = FileField(\n ' parameters file',\n validators=[\n FileAllowed(['tsv'],\n 'Only files with the *.tsv extension are allowed')])\n observables_file = FileField(\n 'observables file',\n validators=[\n FileAllowed(['tsv'],\n 'Only files with the *.tsv extension are allowed')])\n submit = SubmitField('Upload')\n"
},
{
"alpha_fraction": 0.735336184501648,
"alphanum_fraction": 0.7596566677093506,
"avg_line_length": 26.920000076293945,
"blob_id": "69e19c05c5ed08ef903cb66ff3e083a70e4eb237",
"content_id": "f53f7429f5b3250d9ee6c6bd6d7f296633d32737",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 699,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 25,
"path": "/Dockerfile",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "FROM python:3.8-slim-buster\n\nRUN adduser --disabled-password petab_web_validator\n\nWORKDIR /home/petab_web_validator\n\nCOPY requirements.txt requirements.txt\nRUN apt-get update && apt-get -y install gcc && apt-get -y install g++\nRUN python -m venv venv\nRUN venv/bin/pip install --upgrade pip\nRUN venv/bin/pip install --no-cache-dir matplotlib>=2.2.3 pandas>=0.23.4 python-libsbml>=5.17.0\nRUN venv/bin/pip install -r requirements.txt\nRUN venv/bin/pip install gunicorn\n\nCOPY app app\nCOPY petab_web_validator.py boot.sh ./\nRUN chmod a+x boot.sh\n\nENV FLASK_APP petab_web_validator.py\n\nRUN chown -R petab_web_validator:petab_web_validator ./\nUSER petab_web_validator\n\nEXPOSE 5000\nENTRYPOINT [\"./boot.sh\"]\n\n"
},
{
"alpha_fraction": 0.8235294222831726,
"alphanum_fraction": 0.8235294222831726,
"avg_line_length": 24.5,
"blob_id": "788e17c866b19287ce58b5bd6b9ddc1f942b5ff7",
"content_id": "fbbfacb4defab32db2375b3e7ec5ce9cfb31cf94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/README.md",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "# petab_web_validator\nPEtab validation web service\n"
},
{
"alpha_fraction": 0.47858473658561707,
"alphanum_fraction": 0.6871508359909058,
"avg_line_length": 15.272727012634277,
"blob_id": "43267f98188188ff803f5b3bde40bdd0b392aacb",
"content_id": "ddf45f5904be744ee4105b1e233ab62f852a4143",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 33,
"path": "/requirements.txt",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "attrs==19.3.0\nClick==7.0\ncolorama==0.4.3\ncycler==0.10.0\ndominate==2.4.0\nFlask==2.3.2\nFlask-Bootstrap==3.3.7.1\nFlask-WTF==0.14.2\nimportlib-metadata==1.5.0\nitsdangerous==1.1.0\nJinja2==2.11.3\njsonschema==3.2.0\nkiwisolver==1.1.0\nMarkupSafe==1.1.1\nmatplotlib>=2.2.3\nmpmath==1.3.0\nnumpy>=1.15.1\npandas>=0.23.4\npetab==0.1.1\npyparsing==2.4.6\npyrsistent==0.15.7\npython-dateutil==2.8.1\npython-libsbml>=5.17.0\npytz==2019.3\nPyYAML==5.4\nscipy==1.10.0\nseaborn==0.10.0\nsix==1.14.0\nsympy==1.5.1\nvisitor==0.1.3\nWerkzeug==2.2.3\nWTForms==2.2.1\nzipp==2.1.0\n"
},
{
"alpha_fraction": 0.7513513565063477,
"alphanum_fraction": 0.7513513565063477,
"avg_line_length": 19.44444465637207,
"blob_id": "bd7732ab9ca0076b087630832f56407c31bbb877",
"content_id": "c48395fa37383537591e70b963ac7c8235d60390",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/app/__init__.py",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_bootstrap import Bootstrap\n\napp = Flask(__name__)\nbootstrap = Bootstrap(app)\n\napp.secret_key = 'secret password'\n\nfrom app import routes, errors\n\n"
},
{
"alpha_fraction": 0.5396865010261536,
"alphanum_fraction": 0.539935290813446,
"avg_line_length": 34.56637191772461,
"blob_id": "a3dbd86212e0fbdb4c8518471311f48f38da1ddf",
"content_id": "ab78287e1d0169fc608f26916486ebe732130b60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4019,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 113,
"path": "/app/routes.py",
"repo_name": "PEtab-dev/petab_web_validator",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\nimport re\nimport tempfile\n\nfrom petab.C import *\nfrom petab.lint import lint_problem\nimport petab\nfrom flask import render_template, flash\nfrom markupsafe import Markup\nimport libsbml\nimport pandas as pd\n\nfrom app import app\nfrom app.forms import PEtabForm\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n form = PEtabForm()\n if form.validate_on_submit():\n with tempfile.TemporaryDirectory(dir=f\"{app.root_path}\") as tmpdirname:\n fn = tempfile.mktemp(\".log\", dir=f\"{tmpdirname}\")\n file_handler = logging.FileHandler(fn, mode='w')\n file_handler.setLevel('DEBUG')\n petab.lint.logger.addHandler(file_handler)\n\n try:\n petab_problem = get_problem(form.sbml_file.data,\n form.condition_file.data,\n form.measurement_file.data,\n form.parameters_file.data,\n form.observables_file.data)\n except Exception as e:\n flash(Markup(f'<p> Not valid: </p> {e} '), category='error')\n return render_template('index.html', form=form)\n\n try:\n res = lint_problem(petab_problem)\n if res:\n with open(fn) as f:\n error_log = f.read()\n p = re.compile('\\n')\n error_log = p.sub('<br>', error_log)\n flash(Markup(f'<p> Not valid: </p> <p> {error_log} </p>'), category='error')\n else:\n flash(Markup(f'<p> Great! Your model is valid. </p>'), category='success')\n except Exception as e:\n flash(Markup(f'<p> Error: </p> {e} '), category='error')\n\n return render_template('index.html', form=form)\n\n\ndef get_problem(sbml_file, condition_file, measurement_file, parameters_file,\n observables_file):\n \"\"\"\n will be removed\n :return:\n \"\"\"\n\n if sbml_file:\n sbml_reader = libsbml.SBMLReader()\n sbml_str = str(sbml_file.stream.read(), \"utf-8\")\n sbml_document = sbml_reader.readSBMLFromString(sbml_str)\n sbml_model = sbml_document.getModel()\n else:\n sbml_reader = None\n sbml_document = None\n sbml_model = None\n\n if condition_file:\n condition_df = pd.read_csv(condition_file, sep='\\t')\n try:\n condition_df.set_index([CONDITION_ID], inplace=True)\n except KeyError:\n raise KeyError(\n f'Condition table missing mandatory field {CONDITION_ID}.')\n else:\n condition_df = None\n\n if measurement_file:\n measurement_df = petab.measurements.get_measurement_df(pd.read_csv(measurement_file, sep='\\t'))\n else:\n measurement_df = None\n\n if parameters_file:\n parameters_df = pd.read_csv(parameters_file, sep='\\t')\n try:\n parameters_df.set_index([PARAMETER_ID], inplace=True)\n except KeyError:\n raise KeyError(\n f\"Parameter table missing mandatory field {PARAMETER_ID}.\")\n else:\n parameters_df = None\n\n if observables_file:\n observables_df = pd.read_csv(observables_file, sep='\\t')\n try:\n observables_df.set_index([OBSERVABLE_ID], inplace=True)\n except KeyError:\n raise KeyError(\n f\"Observable table missing mandatory field {OBSERVABLE_ID}.\")\n else:\n observables_df = None\n\n petab_problem = petab.Problem(sbml_reader=sbml_reader,\n sbml_document=sbml_document,\n sbml_model=sbml_model,\n condition_df=condition_df,\n measurement_df=measurement_df,\n parameter_df=parameters_df,\n observable_df=observables_df)\n return petab_problem\n"
}
] | 7 |
asdfMaciej/Ascii-Canvas | https://github.com/asdfMaciej/Ascii-Canvas | 24e8fffbc78e5d7b80b98cff15320165ad979a5a | 6d884a54a7cc80ce2a50fda92089133e66d7ad03 | 255006adec49fb4a6d70c04f1ec259e96c7ca6ad | refs/heads/master | 2021-01-02T08:52:42.117453 | 2014-04-18T17:53:17 | 2014-04-18T17:53:17 | 18,920,657 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.562109649181366,
"alphanum_fraction": 0.6016654968261719,
"avg_line_length": 22.81818199157715,
"blob_id": "e4d97c0aa41aa2027e9ed0b61d2276b50010b3e6",
"content_id": "a21d63ece3929a9536d7898ce8c2ac83bb7e4e8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2882,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 121,
"path": "/ascii-canvas.py",
"repo_name": "asdfMaciej/Ascii-Canvas",
"src_encoding": "UTF-8",
"text": "import math\n\nclass Canvas:\n\tdef __init__(self, width=10, height=10, filler=' '):\n\t\tself.painting = []\n\t\tfor i in range(height):\n\t\t\tx = []\n\t\t\tfor z in range(width):\n\t\t\t\tx.append(str(filler))\n\t\t\tself.painting.append(x)\n\n\tdef __str__(self):\n\t\tres = ''\n\t\tfor line in self.painting:\n\t\t\tfor char in line:\n\t\t\t\tres += char\n\t\t\tres += '\\n'\n\t\treturn res\n\n\tdef __eq__(self, other):\n\t\tif isinstance(other, Canvas):\n\t\t\treturn other.painting == self.painting\n\t\telse:\n\t\t\treturn False\n\n\tdef __ne__(self, other):\n\t\tif isinstance(other, Canvas):\n\t\t\treturn other.painting != self.painting\n\t\telse:\n\t\t\treturn False\n\n\tdef __len__(self):\n\t\treturn len(self.painting) * len(self.painting[0])\n\n\tdef __setitem__(self, key, value):\n\t\tself.painting[key] = value\n\n\tdef __getitem__(self, key):\n\t\treturn self.painting[key]\n\t\t\n\tdef set_point(self, coords, filler):\n\t\ttry:\n\t\t\tself.painting[coords[1]][coords[0]] = str(filler)\n\t\texcept IndexError:\n\t\t\traise IndexError(\"[!] Point out of bounds of the array.\")\n\n\tdef get_point(self, coords):\n\t\ttry:\n\t\t\treturn self.painting[cooords[1]][coords[0]]\n\t\texcept IndexError:\n\t\t\traise IndexError(\"[!] Point out of bounds of the array.\")\n\n\tdef draw_point_intensitity(self, coords, intense):\n\t\t\"\"\" Intense is on scale of 0 to 255\"\"\"\n\t\tcharacter = ' `.,:;irsXA253hGSBH9E@M#'[int(math.floor(intense/11))]\n\t\tself.set_point(coords, character)\n\n\tdef draw_text(self, coords, text):\n\t\tbc = len(self.painting) * len(self.painting[0])\n\t\tif bc - (coords[0] + coords[1]*len(self.painting[0])) < len(text):\n\t\t\traise IndexError(\"[!] Fail due to text too long.\")\n\n\t\tfor a in range(len(text)):\n\t\t\tb = a+1\n\t\t\tb = b-1\n\t\t\ta += coords[0] + coords[1]*len(self.painting[0])\n\t\t\ty = (a - (a % len(self.painting[0]))) / len(self.painting[0])\n\t\t\tx = a % len(self.painting[0])\n\t\t\tself.set_point((x, y), text[b])\n\n\tdef draw_fill(self, coords1, coords2, symbol):\n\t\tx1, y1 = coords1\n\t\tx2, y2 = coords2\n\t\tdifx, dify = abs(x1-x2)+1, abs(y1-y2)+1\n\t\tfor a in range(dify):\n\t\t\tfor b in range(difx):\n\t\t\t\tself.set_point((min(x1, x2)+b, min(y1, y2)+a), symbol)\n\tdef draw_line(self, coords1, coords2, symbol):\n\t\tx1, y1 = coords1\n\t\tx2, y2 = coords2\n\t\tpoints = []\n\t\tissteep = abs(y2-y1) > abs(x2-x1)\n\t\tif issteep:\n\t\t\tx1, y1 = y1, x1\n\t\t\tx2, y2 = y2, x2\n\t\trev = False\n\t\tif x1 > x2:\n\t\t\tx1, x2 = x2, x1\n\t\t\ty1, y2 = y2, y1\n\t\t\trev = True\n\t\tdeltax = x2 - x1\n\t\tdeltay = abs(y2-y1)\n\t\terror = int(deltax / 2)\n\t\ty = y1\n\t\tystep = None\n\t\tif y1 < y2:\n\t\t\tystep = 1\n\t\telse:\n\t\t\tystep = -1\n\t\tfor x in range(x1, x2 + 1):\n\t\t\tif issteep:\n\t\t\t\tpoints.append((y, x))\n\t\t\telse:\n\t\t\t\tpoints.append((x, y))\n\t\t\terror -= deltay\n\t\t\tif error < 0:\n\t\t\t\ty += ystep\n\t\t\t\terror += deltax\n\n\t\tif rev:\n\t\t\tpoints.reverse()\n\t\tfor p in points:\n\t\t\tself.set_point(p, symbol)\n\nif __name__ == '__main__':\n\tcanvas = Canvas(20, 20, ' ')\n\n\tcanvas.draw_text((5, 5), \"bozek\"*30)\n\tcanvas.draw_fill((10, 7), (16, 17), '$')\n\tcanvas.draw_line((0, 0), (19, 19), '#')\n\tprint canvas\n"
},
{
"alpha_fraction": 0.42307692766189575,
"alphanum_fraction": 0.42307692766189575,
"avg_line_length": 12,
"blob_id": "7cc1dbfe5123341e1fe471679860f13ed997b721",
"content_id": "0a873113c40c3020ca006ed0d81b05574b23b6ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 2,
"path": "/README.md",
"repo_name": "asdfMaciej/Ascii-Canvas",
"src_encoding": "UTF-8",
"text": "Ascii-Canvas\n============\n"
}
] | 2 |
DiRAC-HPC/Essentials-Level | https://github.com/DiRAC-HPC/Essentials-Level | 37c1364f1313f3215aed10f2dca47256fa5b44bd | 1d05f840fcb7c8a90eb978b118ad7a7dad790c19 | 706057a89da67ca6dff808c02f84ac70d38cd68a | refs/heads/master | 2018-10-09T01:39:15.173898 | 2018-10-08T08:23:51 | 2018-10-08T08:23:51 | 125,998,644 | 1 | 4 | null | null | null | null | null | [
{
"alpha_fraction": 0.4183168411254883,
"alphanum_fraction": 0.45049506425857544,
"avg_line_length": 12.931034088134766,
"blob_id": "289d5b0c17734256c8496b606c6c60d3a95e4d66",
"content_id": "35c52f072109b6970c78df0aace5edc96b6960b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 29,
"path": "/python/power2.c",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n\nvoid power2(int i)\n{\n int products[10];\n int x = 1;\n int c = 0;\n while(i>0)\n {\n if(i%2)\n {\n products[c++]=x;\n }\n x=x*2; // Multiply by 2\n i=i>>1; // Do a shift. Is this a bug?\n }\n for(x=0; x< c; x++)\n {\n printf(\"%i\\n\",products[x]);\n }\n\n}\n\n\nvoid main(int argc, char* argv[])\n{\n int value = atoi(argv[1]);\n power2(value);\n}\n"
},
{
"alpha_fraction": 0.3855932056903839,
"alphanum_fraction": 0.5402542352676392,
"avg_line_length": 15.857142448425293,
"blob_id": "4f995d5204642af842ec5ff9ba93da49c67f3b73",
"content_id": "744d7e3398282f02967b37fda0ace7468eccb807",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 28,
"path": "/python/test_power2.py",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "from power2 import power2\n\ndef test_0():\n assert power2(0) == []\n\ndef test_1():\n assert power2(1) == [1]\n\ndef test_2():\n assert power2(2) == [2]\n\ndef test_3():\n assert power2(3) == [2, 1]\n\ndef test_4():\n assert power2(4) == [4]\n\ndef test_27():\n assert power2(27) == [16, 8, 2, 1]\n\ndef test_255():\n assert power2(255) == [128, 64, 32, 16, 8, 4, 2, 1]\n\ndef test_256():\n assert power2(256) == [256]\n\ndef test_257():\n assert power2(257) == [256, 1]\n"
},
{
"alpha_fraction": 0.4564102590084076,
"alphanum_fraction": 0.482051283121109,
"avg_line_length": 18.5,
"blob_id": "ad239673f7a178b4b39e20ac869bd5a81fc6c52d",
"content_id": "b9528f9d129d78c9a2d925c5391f3e4be3ac13c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 20,
"path": "/python/power2.py",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\n\ndef power2(i):\n products = []\n x = 1\n while (i > 0):\n if (i % 2):\n products.insert(0, x)\n x = x * 2 # Multiply by 2\n i = i >> 1 # Do a shift. Is this a bug?\n # Print the powers.\n for x in products:\n print (x)\n return products\n\nif __name__ == '__main__':\n value = int(sys.argv[1])\n power2(value)\n"
},
{
"alpha_fraction": 0.735973596572876,
"alphanum_fraction": 0.7400990128517151,
"avg_line_length": 45.61538314819336,
"blob_id": "f9dbaa5ae5a86a6b38f73f9b7b4b5bec2faf18a7",
"content_id": "e965408960f87627a9963e0c4e95183295dcf9ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1212,
"license_type": "no_license",
"max_line_length": 308,
"num_lines": 26,
"path": "/README.md",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "# Welcome to the DiRAC Essentials Level Test\n\n\n### Computer requirements\n\n- Web browser\n- Bash shell (other shells are also fine but please ensure they support the commands `grep`, `find`, `cat`, `history`, `wget`, `zip`, `unzip`)\n- A text editor e.g. nano, vi or emacs\n- Git [tutorial](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)\n- Python 2.6 or 2.7\n- Python [pip](https://pypi.python.org/pypi/pip)\n- Python [nose](=https://nose.readthedocs.org/en/latest/)\n- Python [coverage](http://nedbatchelder.com/code/coverage/)\n- Python [pytest](http://pytest.org/)\n- Python [pytest-cov](=https://pypi.python.org/pypi/pytest-cov)\n\n\n### Getting started\n\nIn order to take the Essentials Level Test, you will need to clone a Git repository. This contains all the files you need to take the test. It serves as both a repository and your working copy. You will do all of your work in this local repository, and use version control to commit your solutions as you go.\n\nTo clone the repository on your local machine go to the folder where you want to clone it and execute\n```\ngit clone https://github.com/DiRAC-HPC/Essentials-Level.git\n```\nin your terminal. Now you can start the DiRAC Essentials Level test.\n"
},
{
"alpha_fraction": 0.5423728823661804,
"alphanum_fraction": 0.6271186470985413,
"avg_line_length": 28.5,
"blob_id": "46940992fb51b7b2003d51005737c9ae2a65bec6",
"content_id": "f697dacf92eb4520788a603b1019107f6fd4d01b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 4,
"path": "/python/test_analyze.py",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "from analyze import running_total\n\ndef test_example_one():\n assert running_total([1, 2, 1, 8, 9, 2]) == [3, 18, 2]\n"
},
{
"alpha_fraction": 0.5642299652099609,
"alphanum_fraction": 0.5848119258880615,
"avg_line_length": 25.092592239379883,
"blob_id": "ac3bc3f2fc02305061c4e69e3c196aee17970a66",
"content_id": "5ca0d4456a0951dd63bef44ada9e139646e3b7b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1409,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 54,
"path": "/python/analyze.py",
"repo_name": "DiRAC-HPC/Essentials-Level",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\n\n# Calculate the total of each increasing sequence of numbers in a\n# list: \n#\n# running_total([1, 2, 1, 8, 9, 2]) == [3, 18, 2]\n# running_total([1, 3, 4, 2, 5, 4, 6, 9]) == [8, 7, 19]\n#\n# No checks are made for invalid inputs (e.g. strings, lists of lists\n# or anything else that isn't a list of numbers.\n\ndef running_total(sequence):\n if (sequence == []):\n return []\n current = sequence[0]\n total = current\n totals = []\n for next in sequence[1:]:\n if (next <= current):\n totals.append(total)\n total = 0\n total = total + next\n current = next\n totals.append(total)\n return totals \n\n# Given an input file consisting of a list of numbers, one per line,\n# apply running_total to the sequence and output the totals on each\n# line of the output file.\n\nif __name__ == '__main__':\n if (len(sys.argv) < 2):\n sys.exit(\"Missing input file name\")\n if (len(sys.argv) < 3):\n sys.exit(\"Missing output file name\")\n\n input_file = sys.argv[1]\n output_file = sys.argv[2]\n\n source = open(input_file, 'r')\n sequence = []\n for line in source:\n sequence.append(int(line))\n source.close()\n\n totals = running_total(sequence)\n\n target = open(output_file, 'w')\n for total in totals:\n target.write(str(total))\n target.write('\\n')\n target.close()\n"
}
] | 6 |
DiegoAscanio/hostnames_provider | https://github.com/DiegoAscanio/hostnames_provider | 63c6ce9b322c91b2e344a32e38eb1f6d1d095cbb | d6973b3f65c0647827893e1198d4bca1da81be35 | 4f5e209fb02c77f7139692edf2eea98603e3bbec | refs/heads/master | 2021-01-10T17:10:54.296641 | 2016-11-22T12:29:50 | 2016-11-22T12:29:50 | 52,210,117 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7335211038589478,
"alphanum_fraction": 0.7616901397705078,
"avg_line_length": 39.34090805053711,
"blob_id": "4e1faef056dcf16d3155c78a1722f273c21bdf83",
"content_id": "e6628dd2d10ecae3b7e93a77a8aa3d40700638ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1775,
"license_type": "no_license",
"max_line_length": 238,
"num_lines": 44,
"path": "/README.md",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "#hostnames provider\n\nAn app to provide hostnames to hosts in a network, designed to work with inventory solutions like FusionInventory or OCSInventory that requires hosts to have distinct hostnames in a network.\n\nIt's simply a django app designed with a Host model containing three fields:\n\n```python\nclass Host(models.Model):\n hostname = models.CharField(max_length=15)\n mac_address = models.CharField(max_length=17)\n ip_address = models.TextField(unique=True)\n def __str__(self):\n return self.hostname+'|'+self.mac_address+'|'+self.ip_address\n```\n\nThe hosts are imported to the app through the script feeder.py, which reads csv files containing the hosts specifications.\n\nExample of a hosts.csv file:\n\n```csv\nap-1|00:1d:7e:39:cb:82|172.16.0.2\nap-2|00:1d:7e:39:cb:4f|172.16.0.3\nap-3|00:1d:7e:39:ac:e9|172.16.0.4\n```\n\n## Clients operation\n\nActually, there are two scripts: change_linux_hostname.sh and change_windows_hostname.bat.\n\nBoth of them cURL hostnames_provider URL and then, compares the client actual hostname to its final hostname (provided by hostnames_provider). If the comparison is false, the script changes the host hostname and then restarts the machine.\n\nIn a computer science lab, it's a great help to sysadmins, since a sysadmin won't need to go computer by computer changing it's hostname manually.\n\n## Future implementations\n\n-Adapt hostnames_provider to work with REST, allowing a host to get its hostname by his mac_address\n-Adapt hostnames_provider to work with turica's rows (https://github.com/turicas/rows)\n-Create a web interface to manage hosts, allowing file upload of hosts files (csv, xls, xlsx, etc)\n\n## Further infos\n\nThis software is being developed at CEFET-MG. Any doubts, please don't hesitate to contact me:\n\[email protected]\n"
},
{
"alpha_fraction": 0.640625,
"alphanum_fraction": 0.6631944179534912,
"avg_line_length": 24.909090042114258,
"blob_id": "655f0eefdabbc75089bf8fe1a24c771a82c16223",
"content_id": "cfc8832f1ee6cfd92e33079fd38a0d3571fc6224",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 22,
"path": "/hostnames_provider/hostnames/models.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file models.py\n @lastmod 9/11/2016\n'''\n\n# Importacoes\nfrom django.db import models\nimport django\nfrom django import forms\n\n# Area de criacao de classes\n\n# Classe do Host\nclass Host(models.Model):\n '''\n Classe de Host: Armazena um hostname, e seus Enderecos Mac e IP\n '''\n hostname = models.CharField(max_length=15)\n mac_address = models.CharField(max_length=17)\n ip_address = models.CharField(primary_key=True, unique=True, max_length=15)\n def __str__(self):\n return self.hostname+'|'+self.mac_address+'|'+self.ip_address\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6522842645645142,
"alphanum_fraction": 0.6725888252258301,
"avg_line_length": 29.30769157409668,
"blob_id": "2cc19d487dd8b3491ad10098e3e84e47bc1c8c39",
"content_id": "cd00e5bd36a39339a839ba56993fec77a5718a0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 788,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 26,
"path": "/hostnames_provider/hostnames/HostList.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file HostList.py\n @lastmod 9/11/2016\n'''\n\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\nclass HostList(APIView):\n \"\"\"\n List all hosts, or create a new host.\n \"\"\"\n def get(self, request, format=None):\n hosts = Host.objects.all()\n serializer = HostSerializer(hosts, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = HostSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n"
},
{
"alpha_fraction": 0.866995096206665,
"alphanum_fraction": 0.866995096206665,
"avg_line_length": 39.400001525878906,
"blob_id": "5ea697a2d81d16cc6acd11fbe59588877d81b386",
"content_id": "350d80d5597172cfca1ccd67540ad446f9592d4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/hostnames_provider/exec.sh",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "python manage.py shell\nfrom hostnames.models import Host\nfrom hostnames.serializers import HostSerializer\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\n\n"
},
{
"alpha_fraction": 0.7634408473968506,
"alphanum_fraction": 0.7634408473968506,
"avg_line_length": 17.600000381469727,
"blob_id": "92d6b992351c7f47f2eb5e93f5002d0379568167",
"content_id": "775722a19b26bd557fb70c012113d8508c1bf418",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/hostnames_provider/hostnames/apps.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass HostnamesConfig(AppConfig):\n name = 'hostnames'\n"
},
{
"alpha_fraction": 0.5779967308044434,
"alphanum_fraction": 0.6091954112052917,
"avg_line_length": 42.5,
"blob_id": "6ec4814d3d236ce8c02cb1d207e09d73870d104c",
"content_id": "c4c183a62f7bbf562e7deb24e45391c35ad9ea29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1827,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 42,
"path": "/hostnames_provider/hostnames/urls.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file urls.py\n @lastmod 8/11/2016\n'''\n\n# Importacoes\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nimport django.contrib.auth.views\n\nfrom views import HostDetail\nfrom . import views\nfrom . import models\n\n\n\n# Area de criacao de urls\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^ajuda$', views.help, name='help'),\n url(r'^sobre$', views.sobre, name='sobre'),\n url(r'^contato$', views.contato, name='contato'),\n url(r'^perfil$', views.profile, name='profile'),\n url(r'^opcoes$', views.options, name='options'),\n url(r'^rest/search__ip__address/(?P<ip>[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})$', views.host_detail_ipaddress),\n url(r'^rest/search__mac__address/(?P<mac>[0-9a-f]{2}\\:[0-9a-f]{2}\\:[0-9a-f]{2}\\:[0-9a-f]{2}\\:[0-9a-f]{2}\\:[0-9a-f]{2})$', views.host_detail_macaddress),\n url(r'^rest/list/$',views.HostList.as_view(), name='restlist'),\n url(r'^rest/detail/(?P<pk>[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})$',views.HostDetail.as_view(), name='restdetail'),\n url(r'^error/$', views.error, name='error'),\n url(r'^accounts/login/$', django.contrib.auth.views.login, name='login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout',{'next_page': '/'}, name='admin'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^hostname/$', views.hostname, name='hostname'),\n url(r'^create/$', views.create, name='create'),\n url(r'^upload/$', views.upload, name='upload'),\n url(r'^download/$', views.download, name='download'),\n url(r'^update/$', views.update, name='update'),\n url(r'^retrieve/$', views.retrieve, name='retrieve'),\n url(r'^list/$', views.list, name='list'),\n url(r'^pesquisar/$', views.pesquisar, name='pesquisar'),\n url(r'^delete/$', views.delete, name='delete'),\n]\n"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6216931343078613,
"avg_line_length": 26,
"blob_id": "94eefc6a9cd25dd98e964a53448b6dd144b80233",
"content_id": "75b630527d63d9959fc2ec44eb493a353087616f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 14,
"path": "/hostnames_provider/convert.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "csv_file = request.FILES['filename'].name\ntxt_file = 'dados.txt'\n\ntext_list = []\n\nwith open(csv_file, \"r\") as my_input_file:\n for line in my_input_file:\n line = line.split(\",\", 2)\n text_list.append(\" \".join(line))\n\nwith open(txt_file, \"w\") as my_output_file:\n for line in text_list:\n my_output_file.write(line)\n print('File Successfully written.')\n"
},
{
"alpha_fraction": 0.4065934121608734,
"alphanum_fraction": 0.4395604431629181,
"avg_line_length": 29.33333396911621,
"blob_id": "5981d260a63b45d40b9466a047d27178ee700ac1",
"content_id": "75e1fa55d8cf3548e72220e02236ff511aeec23e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 9,
"path": "/hostnames_provider/templates/hostnames/bootstrap.html",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "<!------------------------------------------\n\t\t@file bootstrap.html\n\t\t@lastmod 1/11/12\n------------------------------------------->\n{% extends 'bootstrap3/bootstrap3.html' %}\n\n{% load bootstrap3 %}\n\n{% block bootstrap3_title %}{% block title %}{% endblock %}{% endblock %}\n"
},
{
"alpha_fraction": 0.5968688726425171,
"alphanum_fraction": 0.6007827520370483,
"avg_line_length": 33.06666564941406,
"blob_id": "347681bb8ec7f8f11d8bde06d7735dcc616d640c",
"content_id": "04d3f96905b3e0ef55b0698fbe1fbd679d399e20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 511,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 15,
"path": "/hostnames_provider/feeder.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "import sys, getopt, csv\nimport django\n\ndef feed(file_name):\n django.setup()\n from hostnames.models import Host\n with open(file_name, 'r') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='|')\n for h,m,i in spamreader:\n #print(hostname,mac_address,ip_address)\n h = Host(hostname=h,mac_address=m,ip_address=i)\n h.save()\n \n\nfeed(sys.argv[1]) if __name__ == \"__main__\" and len(sys.argv) == 2 else print('Usage: python feeder.py <csv_file>')\n"
},
{
"alpha_fraction": 0.5128534436225891,
"alphanum_fraction": 0.5167095065116882,
"avg_line_length": 26.785715103149414,
"blob_id": "9df176a5fa31f9106c6dcd528195f134300e7e58",
"content_id": "24a0698539e11e918f86583bf0796fbd2085073e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1559,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 56,
"path": "/hostnames_provider/templates/hostnames/base.html",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "<!------------------------------------------\n\t\t@file base.html\n\t\t@lastmod 1/11/12\n------------------------------------------->\n\n<!-- \n\testende a base que contem o menu \n-->\n{% extends 'hostnames/bootstrap.html' %}\n\n<!--\n\tinicia bloco de conteudo bootstrap\n-->\n{% block bootstrap3_content %}\n\n<!--\n\tdefine navbar padrão para todos os templates\n-->\n<nav class=\"navbar navbar-inverse\">\n <div class=\"container-fluid\">\n <div class=\"navbar-header\">\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#navbar\" aria-expanded=\"false\" aria-controls=\"navbar\">\n <span class=\"sr-only\">Toggle navigation</span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n </button>\n\t\t\t<!--\n\t\t\t\tseta o icone de brand\n\t\t\t-->\n <a class=\"navbar-brand\" href=\"#\">Hostnames Provider</a>\n </div>\n\t\t<!--\n\t\t\tdefine navbar interna contendo menu da esquerda e da direita\n\t\t-->\n <div id=\"navbar\" class=\"navbar-collapse collapse\">\n <ul class=\"nav navbar-nav navbar-left\">\n {% block navbar_left_menu %}\n {% endblock %}\n </ul>\n <ul class=\"nav navbar-nav navbar-right\">\n {% block navbar_right_menu %}\n {% endblock %}\n </ul>\n </div>\n </div>\n</nav>\n<!--\n\tdefine o conteúdo da página a ser exibida\n-->\n<div class=\"container\">\n\t{% block main %}\n\t{% endblock %}\n</div>\n\n{% endblock %}\n"
},
{
"alpha_fraction": 0.6141361594200134,
"alphanum_fraction": 0.6175228953361511,
"avg_line_length": 31.295454025268555,
"blob_id": "f70ad3580e420ff4b4c325e9d11c964af1147da0",
"content_id": "7645897f53560162ba953c63a75928259441df41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22736,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 704,
"path": "/hostnames_provider/hostnames/views.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file views.py\n @lastmod 9/11/2016\n'''\n\n#Importacoes\nfrom django.db.models import Q\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader, RequestContext\nfrom django.template.context_processors import csrf\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.encoding import smart_str\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom hostnames.forms import ContactForm\nfrom django.core.mail import EmailMessage\nfrom django.shortcuts import redirect\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.core.mail import EmailMultiAlternatives\n\nimport csv, pdb, json\n\nfrom .models import Host\nfrom .forms import *\nfrom .serializers import HostSerializer\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom django.utils.six import BytesIO\n\n\n# Area de criacao de views\n\n#Metodo padrao para erro\ndef showError(request,mensagem):\n\t'''\n\tRetorna uma pagina de erro contendo os dados passados na mensagem\n\t'''\t\n\tc = RequestContext (request, {\n\t\t\t'mensagem' : mensagem\n\t\t})\n\t\t\n\ttemplateError = loader.get_template('hostnames/error.html')\n\n\treturn HttpResponse(templateError.render(c))\n\n#Renderiza o conteudo de um HttpResponse em JSONResponse\nclass JSONResponse(HttpResponse):\n '''\n HttpResponse que renderiza o conteudo em formato JSON\n '''\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n@csrf_exempt\ndef host_list(request, format=None):\n '''\n Lista todos os hosts, ou devolve um host especifico\n '''\n if request.method == 'GET':\n hosts = Host.objects.all()\n serializer = HostSerializer(hosts, many=True)\n return JSONResponse(serializer.data)\n\n@csrf_exempt\ndef host_detail_ipaddress(request, ip, format=None):\n '''\n Retorna um JSON com o IP requisitado\n '''\n if request.method == 'GET':\n try:\n host = Host.objects.filter(ip_address__icontains = ip)\n except Host.DoesNotExist:\n return error\n serializer = HostSerializer(data = host,many=True)\n\tserializer.is_valid()\n\tcontent = JSONRenderer().render(serializer.data)\n\n\treturn JSONResponse(serializer.data) if (len(serializer.data) != 0) else showError(request,'ERRO: IP INEXISTENTE!')\n\n@csrf_exempt\ndef host_detail_macaddress(request, mac, format=None):\n '''\n Retorna um JSON com o MAC requisitado\n '''\n \n if request.method == 'GET':\n try:\n host = Host.objects.filter(mac_address__icontains = mac)\n\t \n except Host.DoesNotExist:\n return error\n \tserializer = HostSerializer(data = host,many=True)\n\tserializer.is_valid()\n\tcontent = JSONRenderer().render(serializer.data)\n\n return JSONResponse(serializer.data) if (len(serializer.data) != 0) else showError(request,'ERRO: MAC INEXISTENTE!')\n\n@csrf_exempt\ndef host_detail_pesquisar(request, pesquisa, ordem):\n '''\n Retorna um JSON com o resultado da pesquisa (Geral ou Especifica)\n '''\n \n if request.method == 'GET':\n\tif('0' in ordem):\n \n\t if('h' in ordem):\n\t\tordem = 'hostname'\n\t 
elif('m' in ordem):\n\t\tordem = 'mac_address'\n\t elif('i' in ordem):\n\t\tordem = 'ip_address'\n\telse:\n\t if('h' in ordem):\n\t\tordem = '-hostname'\n\t elif('m' in ordem):\n\t\tordem = '-mac_address'\n\t elif('i' in ordem):\n\t\tordem = '-ip_address'\n try:\n\t if (pesquisa != None):\n if(len(pesquisa) == 1):\n host = Host.objects.filter(Q(hostname__icontains = pesquisa[0]) | Q(mac_address__icontains = pesquisa[0]) | Q(ip_address__icontains = pesquisa[0])).order_by(ordem)\n\t\t \n\t\t for h in host:\n\t\t\th.hostname = h.hostname.replace(str(pesquisa[0]), \"<b style='color:red'>\"+str(pesquisa[0])+\"</b>\")\n\t\t\th.mac_address = h.mac_address.replace(str(pesquisa[0]), \"<b style='color:red'>\"+str(pesquisa[0])+\"</b>\")\n\t\t\th.ip_address = h.ip_address.replace(str(pesquisa[0]), \"<b style='color:red'>\"+str(pesquisa[0])+\"</b>\")\t\n \telse:\n host = Host.objects.filter(hostname__icontains = pesquisa[0],mac_address__icontains = pesquisa[1],ip_address__icontains = pesquisa[2]).order_by(ordem)\n\t\t for h in host:\n h.hostname = h.hostname.replace(str(pesquisa[0]), \"<b style='color:red'>\"+str(pesquisa[0])+\"</b>\")\n\t\t\th.mac_address = h.mac_address.replace(str(pesquisa[1]), \"<b style='color:red'>\"+str(pesquisa[1])+\"</b>\")\n\t\t\th.ip_address = h.ip_address.replace(str(pesquisa[2]), \"<b style='color:red'>\"+str(pesquisa[2])+\"</b>\")\n\t else:\n\t\thost = Host.objects.all()\n except Host.DoesNotExist:\n return error\n serializer = HostSerializer(data = host,many=True)\n\tserializer.is_valid()\n\tcontent = JSONRenderer().render(serializer.data)\n\treturn content \n\n\n\ndef hostname(request):\n if request.method == 'GET':\n #instancia objeto do tipo HostForm contendo os dados do form recebido pelo GET\n form = HostForm(request.GET)\n \n #testa se o form e valido\n if form.is_valid():\n #obtem os dados recebidos no form\n mac = form.cleaned_data['mac']\n ip = form.cleaned_data['ip']\n \n #prepara um objeto do tipo Host para ser enviado por GET\n h = Host.objects.get(ip_address=ip, mac_address=mac)\n\n #envia\n return HttpResponse(h.hostname)\n else:\n #envia o proprio form\n return HttpResponse(form)\n\n#View do delete\ndef delete(request):\n '''\n View da Exclusao de Host: Deleta um ou mais Hosts pelo ID\n '''\n #formalidades de seguranca\n c = {}\n c.update(csrf(request))\n\n \n if request.method == 'POST':\n #instancia um objeto DeleteForm contendo o request passado por POST\n form = DeleteForm(request.POST)\n\n #testa se o form e valido\n if form.is_valid():\n # pega a(s) id(s) do(s) host(s) a ser(em) excluido(s)\n id = form.cleaned_data['ip']\n\t \n # separa os hosts pela virgula e armazena em um array (em caso de dois ou mais hosts)\n idSplit = id.split(',')\n \n # itera o array de hosts\n for i in idSplit:\n # deleta host por host\n h = Host.objects.filter(ip_address=i).delete()\n\n #retorna redirecionamento para a pagina de listagem\n return HttpResponseRedirect(\"/list\")\n else:\n return HttpResponseRedirect(\"/create\")\n\n#View da tela de erro\ndef error(request):\n '''\n View da Tela de Error: Redireciona para a Pagina de Erro, caso seja solicitado um redirecionamento para\n ela\n '''\n if (request.method == 'POST'): # redirecionamento padrao\n template = loader.get_template('hostnames/error.html')\n c = RequestContext (request, {})\n return HttpResponse(template.render(c))\n if (request.method == 'GET'):\n template = loader.get_template('hostnames/error.html')\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n\n#View do index\ndef 
index(request):\n '''\n View da Tela de Index: Redireciona para a Pagina de Insercao de Host, caso o Usuario esteja logado.\n Caso contrario, carrega o Template do Index\n '''\n if (isUserAutenticated(request)):\n return HttpResponseRedirect(\"/create\")\n else:\n template = loader.get_template('hostnames/index.html')\n\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n\n#View do contato\ndef contato(request): \n \n # new logic!\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n first_name = request.POST.get('first_name', '')\n last_name = request.POST.get('last_name', '')\n email = request.POST.get('email', '')\n\t phone = request.POST.get('phone', '')\n\t comment = request.POST.get('comment', '')\n\n # Email the profile with the \n # contact information\n \n\t template = get_template('contact_template.txt')\n context = Context({\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email,\n 'phone': phone,\n 'comment': comment,\n })\n content = template.render(context)\n\n email = EmailMessage(\n \"New contact form submission\",\n content,\n \"Your website\" +'',\n ['[email protected]'],\n headers = {'Reply-To': email }\n )\n #pdb.set_trace()\n email.send()\n return HttpResponseRedirect(\"/create\")\n else:\n return HttpResponseRedirect(\"/list\")\n\n if request.method == 'GET':\n return render(request, 'hostnames/contato.html', {\n #'form': form_class,\n })\n\n \n#View do opcoes\ndef options(request):\n template = loader.get_template('hostnames/opcoes.html')\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n\n#View do perfil\ndef profile(request):\n template = loader.get_template('hostnames/perfil.html')\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n \n#View da ajuda\ndef help(request):\n template = loader.get_template('hostnames/ajuda.html')\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n\n#View do sobre\ndef sobre(request):\n template = loader.get_template('hostnames/sobre.html')\n c = RequestContext (request, {}) \n return HttpResponse(template.render(c))\n\n#View da tela de login (nao mais utilizado)\ndef login(request): \n if (request.method == 'GET'): # redirecionamento padrao\n\n #carrega o template da tela de login\n template = loader.get_template('registration/login.html')\n\n #carrega o contexto \n c = RequestContext (request, {})\n\n #retorna o template a ser renderizado\n return HttpResponse(template.render(c))\n\n if (request.method == 'POST'): \n #formalidades de seguranca \n c = {}\n c.update(csrf(request))\n\n #recebe o form\n form = LoginForm(request.POST)\n\n #redireciona caso esteja autenticado\n if isUserAutenticated(form) is not None:\n #redireciona para o template da list\n return HttpResponseRedirect(\"/list\")\n else:\n #redireciona para o template do index\n return HttpResponseRedirect(\"/\")\n\n#funcao para verificar se usuario esta autenticado\ndef isUserAutenticated(request):\n #testa autenticacao do usuario (verifica se ele esta logado ou nao) \n answer = True if request.user.is_authenticated() else False\n return answer\n\n#processa a operacao de logout\ndef logout(request):\n \"\"\"\n Desconecta o usuario e envia mensagem\n \"\"\"\n logout(request)\n #obtem um redirecionamento por GET para o index\n redirect_to = request.REQUEST.get('/', '')\n\n if redirect_to:\n netloc = urlparse.urlparse(redirect_to)[1]\n # Checagem de seguranca -- nao permite o redirecionamento para um host 
diferente.\n if not (netloc and netloc != request.get_host()):\n return HttpResponseRedirect(redirect_to)\n\n#View da tela de create\n@login_required\ndef create(request):\n if (request.method == 'GET'): # redirecionamento padrao\n\n #carrega template\n template = loader.get_template('hostnames/create.html')\n\n #solicita contexto\n c = RequestContext (request, {})\n\n #responde\n return HttpResponse(template.render(c))\n if (request.method == 'POST'):\n #formalidades de seguranca\n c = RequestContext (request, {})\n c.update(csrf(request))\n\n #recebe os dados do form\n form = CreateForm(request.POST)\n\n #testa se o form foi e valido\n if (form.is_valid()):\n \n #captura os dados de cada campo do form\n hostForm = form.cleaned_data['hostname']\n \t macForm = form.cleaned_data['mac']\n ipForm = form.cleaned_data['ip']\n\n #testa duplicidade de ip\n\t hosts = Host.objects.all().filter(ip_address__icontains = ipForm)\n \n\t if(len(hosts) > 0):\n\t \tc = RequestContext (request, {\n\t\t\t'mensagem' : 'ERRO: IP JA EXISTENTE NO BANCO DE DADOS!'\n\t\t})\n\t\t\n\t\ttemplateError = loader.get_template('hostnames/error.html')\n\t\treturn showError(request,'ERRO: IP JA EXISTENTE NO BANCO DE DADOS!')\n\t else:\n\t\t#instancia um objeto do tipo Host contendo os dados do form\n \th = Host(hostname=hostForm,mac_address=macForm,ip_address=ipForm)\n \t#salva o objeto no banco de dados\n \th.save()\n \n \t#devolve um redirecionamento para a pagina de listagem\n \treturn HttpResponseRedirect(\"/list\")\n else:\n c = RequestContext (request, {\n\t\t\t'mensagem' : 'ERRO: IP FORA DO PADRAO!'\n\t })\n\t\n\t templateError = loader.get_template('hostnames/error.html')\n\t return showError(request,'ERRO: IP FORA DO PADRAO!')\n\n#View da tela de upload csv\ndef upload(request):\n #formalidades de seguranca\n c = {}\n c.update(csrf(request))\n\n #instancia um objeto do time UploadFileForm que recebe o request passado por POST e o arquivo\n form = UploadFileForm(request.POST, request.FILES)\n\n #testa se o form e valido\n if form.is_valid():\n #obtem o arquivo passado na requisicao\n filename = request.FILES['file']\n\n #itera no arquivo csv \n with open(filename.name, 'r') as csvfile:\n #separa os dados utilizando o delimitador | e cria um array os contendo em spamreader \n spamreader = csv.reader(csvfile, delimiter='|')\n\n #variavel de controle da quantidade de execucoes do loop\n n = 0\n\n #para cada elemento em spamreader armazene seu conteudo em row\n for row in spamreader:\n #incrementa n ate obter a quantidade total de linhas\n n = n + 1\n #retorna o ponteiro para o inicio do arquivo\n csvfile.seek(0)\n\n #percorre spamreader buscando os dados desejados\n for (h,m,i) in spamreader:\n #cria um objeto do tipo Host contendo os dados obtidos\n h = Host(hostname = h, mac_address = m, ip_address = i)\n\n #salva no banco de dados o objeto com os dados obtidos\n h.save()\n\n #decrementa o contador da quantidade de execucoes\n n = n - 1\n\n #caso ja tenha feito todos, entao pare\n if(n == 1):\n break\n\n #redireciona para a pagina de listagem\n return HttpResponseRedirect(\"/list\")\n else:\n #redireciona para a pagina de listagem\n return HttpResponseRedirect(\"/list\")\n\n#View da tela de upload\ndef download(request):\n # Cria o objeto Http Response com o cabecalho de CSV apropriado.\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"hostnames.csv\"'\n\n # Pega todos os hosts do banco de dadosz\n hosts = Host.objects.all()\n writer = 
csv.writer(response)\n\n # Percorre todos os hosts\n for h in hosts:\n # Escreve uma linha no arquivo .csv de acordo com o padrao\n # <<hostname|mac|ip>>\n writer.writerow([\n smart_str(h.hostname+\"|\"+h.mac_address+\"|\"+h.ip_address),\n ])\n # Retorna a resposta em formato de arquivo, possibilitando o download\n return response\n\n#View da tela de update\n@login_required\ndef update(request):\n if (request.method == 'GET'): # redirecionamento padrao\n #resgata o ip passado por GET\n ip = request.GET['ip']\n #carrega o template da tela de update\n template = loader.get_template('hostnames/update.html')\n\n #passa no contexto o ip do host desejado \n c = RequestContext(request, {\n 'host' : Host.objects.get(ip_address=ip),\n })\n\n #responde com o template e o renderiza\n return HttpResponse(template.render(c))\n\n if (request.method == 'POST'):\n #formalidades de seguranca \n c = {}\n c.update(csrf(request))\n\n #instacia um objeto EditForm contendo os dados de atualizacao\n form = EditForm(request.POST)\n\n #testa se o form e valido\n if form.is_valid():\n #instancia um objeto do tipo Host que contem o id passado no form\n h = Host.objects.get(id=form.cleaned_data['id'])\n #seta o valor de hostname com o valor passado no form\n h.hostname = form.cleaned_data['hostname']\n #seta o valor de mac_adress com o valor passado no form\n h.mac_address = form.cleaned_data['mac']\n #seta o valor do ip_adress com o valor passado no form\n h.ip_address = form.cleaned_data['ip']\n #salva no banco de dados o objeto atualizado\n h.save()\n\n #redireciona para a pagina de listagem\n return HttpResponseRedirect(\"/list\")\n else:\n return HttpResponseRedirect(\"/list\")\n\n#View da tela de retrieve\n@login_required\ndef retrieve(request):\n #captura o ip passado por GET\n ip = request.GET['ip']\n #carrega o template do retrieve\n template = loader.get_template('hostnames/retrieve.html')\n \n #requisita o contexto passando um objeto host contendo o ip \n c = RequestContext(request, {\n 'host' : Host.objects.get(ip_address=ip),\n })\n\n #responde com o template renderizado\n return HttpResponse(template.render(c))\n\n#View da pagina de listagem\n@login_required\ndef list(request):\n page = request.GET.get('page', 1)\n try:\n pagination = request.GET.get('pagination', 10)\n except:\n pagination = 10\n try:\n ordem = request.GET['ordem']\n if('0' in ordem):\n if('h' in ordem):\n host_list = Host.objects.filter().order_by('hostname')\n if('m' in ordem):\n host_list = Host.objects.filter().order_by('mac_address')\n if('i' in ordem):\n host_list = Host.objects.filter().order_by('ip_address')\n else:\n if('h' in ordem):\n host_list = Host.objects.filter().order_by('-hostname')\n if('m' in ordem):\n host_list = Host.objects.filter().order_by('-mac_address')\n if('i' in ordem):\n host_list = Host.objects.filter().order_by('-ip_address')\n\t \n except:\n ordem = '0h'\n host_list = Host.objects.filter().order_by('hostname')\n if(pagination == 'todos'):\n paginator = Paginator(host_list, len(host_list))\n else:\n paginator = Paginator(host_list, pagination)\n try:\n hosts = paginator.page(page)\n except PageNotAnInteger:\n hosts = paginator.page(1)\n except EmptyPage:\n hosts = paginator.page(paginator.num_pages)\n\n #carrega o template da list\n template = loader.get_template('hostnames/list.html')\n\n if('0' in ordem):\n proxOrdem = '1'\n else:\n proxOrdem = '0'\n print(pagination)\n #solicita um contexto contendo todos os objetos do tipo Host\n c = RequestContext(request, {\n 'hosts' : hosts,\n\t'ordem': ordem,\n 
'proxOrdem': proxOrdem,\n 'paginacao': pagination\n })\n\n #responde com o template renderizado\n return HttpResponse(template.render(c))\n\n# Ordena os campos da pesquisa de acordo com a ordem - Hostname, Mac, Ip\ndef ordenaPesquisa(pesquisa):\n\tpesquisaOrdenada = ['','','']\n\tif (pesquisa != None):\n\t\tfor i in pesquisa:\n\t\t\tif (\"hostname\" in i):\n\t\t\t\tpesquisaOrdenada[0] = i\n\t\t\tif (\"mac\" in i):\n\t\t\t\tpesquisaOrdenada[1] = i\n\t\t\tif (\"ip\" in i):\n\t\t\t\tpesquisaOrdenada[2] = i\n\t\tpesquisaOrdenada = separaDados(pesquisaOrdenada)\n\telse:\n\t\tpesquisaOrdenada = None\n\t\n\treturn pesquisaOrdenada\n\n# Cria um Array com os dados da pesquisa\ndef separaDados(pesquisa):\n\tlistaDados = ['','','']\n\tif(pesquisa[0] is not ''):\n\t\tlistaDados[0] = pesquisa[0][9:]\n\tif(pesquisa[1] is not ''):\n\t\tlistaDados[1] = pesquisa[1][4:]\n\tif(pesquisa[2] is not ''):\n\t\tlistaDados[2] = pesquisa[2][3:]\n\treturn listaDados\nimport operator\n# View do Resultado da Pesquisa\ndef pesquisar(request):\n if request.method == 'GET':\n\ttry:\n pagination = request.GET.get('pagination', 10)\n except:\n pagination = 10\n #carrega o template da list\n\tpesquisa = request.GET['pesquisa']\n\tpesquisa = pesquisa.replace(' ', '')\n\n \n try:\n ordem = request.GET['ordem']\n except:\n\t ordem = '0h'\n\tpesquisaSplit = pesquisa.split(\"|\")\n\n\tif('|' not in pesquisa and '=' not in pesquisa):\n\t\tif(len(pesquisa) == 1):\n\t\t\tpesquisa2 = [pesquisa]\n\t\telse:\n\t\t\tpesquisa2 = [pesquisa]\n\telse:\n\t\tpesquisa2 = ordenaPesquisa(pesquisaSplit)\n\n\n\tcontent = host_detail_pesquisar(request, pesquisa2, ordem)\n\tstream = BytesIO(content)\n\tdata = JSONParser().parse(stream)\n\n \n\n\t#data = data.order_by('+hostname')\n\n\tserializer = HostSerializer(data=data)\n\n\tserializer.is_valid()\n\n\ttemplate = loader.get_template('hostnames/pesquisar.html')\n \tpage = request.GET.get('page', 1)\n\tif(pagination == 'todos'):\n paginator = Paginator(data, len(data))\n else:\n paginator = Paginator(data, pagination)\n \ttry:\n \thosts = paginator.page(page)\t\n\texcept PageNotAnInteger:\n \thosts = paginator.page(1)\n \texcept EmptyPage:\n \thosts = paginator.page(paginator.num_pages)\n\n if('0' in ordem):\n\t\tproxOrdem= '1'\n\t\t\n \telse:\n\t\tproxOrdem = '0'\n\n\tc = RequestContext (request, {\n 'hosts' : hosts,\n 'pesquisa': pesquisa,\n\t 'ordem': ordem,\n 'proxOrdem': proxOrdem,\n\t 'paginacao': pagination\n })\n\n #responde com o template renderizado\n return HttpResponse(template.render(c))\n\n\nclass HostList(generics.ListCreateAPIView):\n queryset = Host.objects.all()\n serializer_class = HostSerializer\n\n\nclass HostDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Host.objects.all()\n serializer_class = HostSerializer\n"
},
{
"alpha_fraction": 0.6737473011016846,
"alphanum_fraction": 0.6840958595275879,
"avg_line_length": 29.098360061645508,
"blob_id": "91ae98d4d7e9046e2e63efdb0c8501a5c51d60b9",
"content_id": "31d4ae758a4fd94ae0e22c2297842543bfb9cafc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1836,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 61,
"path": "/hostnames_provider/hostnames/forms.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file forms.py\n @lastmod 8/11/2016\n'''\n\n# Importacoes\nfrom django import forms\n\n# Area de criacao de forms\n\n#Form de Login (nao utilizado no momento)\nclass LoginForm(forms.Form):\n user = forms.CharField(label='user')\n password = forms.CharField(label='password')\n\n#Form de Upload de Arquivo .csv\nclass UploadFileForm(forms.Form):\n '''\n Classe de Upload de Arquivo: Usada na validacao de um Formulario de Upload\n '''\n file = forms.FileField(label='file')\n\n#Form de Criacao de Host\nclass CreateForm(forms.Form):\n '''\n Classe de Criacao de Host: Usada na validacao de um Host a ser adicionado\n '''\n hostname = forms.CharField(label='hostname')\n mac = forms.CharField(label='mac', max_length=17)\n ip = forms.CharField(label='ip', max_length=15)\n\n#Form de Visualizacao de Host\nclass RetrieveForm(forms.Form):\n hostname = forms.CharField(label='hostname')\n mac = forms.CharField(label='mac', max_length=17)\n ip = forms.CharField(label='ip', max_length=15)\n\n#Form de Edicao de Host\nclass EditForm(forms.Form):\n '''\n Classe de Edicao de Host: Usada na validacao de um Host a ser atualizado\n '''\n id = forms.CharField(label='id')\n hostname = forms.CharField(label='hostname')\n mac = forms.CharField(label='mac', max_length=17)\n ip = forms.CharField(label='ip', max_length=15)\n\n#Form de Exclusao de Host\nclass DeleteForm(forms.Form):\n '''\n Classe de Exclusao de Host: Usada na validacao de um Host a ser excluido\n '''\n ip = forms.CharField(label='ip_address')\n\n#Form do Contato\nclass ContactForm(forms.Form):\n first_name = forms.CharField(label='first_name')\n last_name = forms.CharField(label='last_name')\n email = forms.EmailField(label='email')\n phone = forms.CharField(label='phone')\n comment = forms.CharField(label='comment')\n"
},
{
"alpha_fraction": 0.5465116500854492,
"alphanum_fraction": 0.5639534592628479,
"avg_line_length": 18.05555534362793,
"blob_id": "19ceef3d82d80b2f4ad877747eef6f09a6b5e8f4",
"content_id": "f49ff93c23ead0a7ba37ce026ebbb3506fa7a88a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 18,
"path": "/hostnames_provider/extract.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "#MODULO DE EXTRAÇÃO DE QQ CSV SEPARADO POR |\nlines = [line.rstrip('\\n') for line in open('dados.txt')]\nprint(lines)\n\nn = len(lines)\n\nhostname = \"\"\nmac = \"\"\nip = \"\"\ni = 0\n\nwhile(i < n-1) :\n\tarray = lines[i].split(\"|\");\n\thostname = array[0]\n\tmac = array[1]\n\tip = array[2]\n\tprint(\"HOSTNAME = \", hostname, \"MAC = \", mac,\"IP = \", ip) \t\n\ti = i + 1\n\n"
},
{
"alpha_fraction": 0.6753731369972229,
"alphanum_fraction": 0.6977611780166626,
"avg_line_length": 28.77777862548828,
"blob_id": "cd2ea2a62d0c0699c9ac5d6bbf6ceabb5430494a",
"content_id": "0668355297a9513ef3230b0a3e27d27227e509b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 9,
"path": "/scripts/change_linux_hostname.sh",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nurl='hostnameprovider'\nfinal_hostname=`curl $url`\ncurrent_hostname=`hostname`\nif [ $current_hostname != $final_hostname ]; then\n echo $final_hostname > /etc/hostname\n sed -i '/$current_hostname/c\\127.0.1.1\\t$final_hostname' /etc/hosts\n reboot\nfi;\n"
},
{
"alpha_fraction": 0.6674107313156128,
"alphanum_fraction": 0.6830357313156128,
"avg_line_length": 25.352941513061523,
"blob_id": "30e4b41b411f00970ceb8d62e900a8aae2ed818c",
"content_id": "0b3fed6f7653dd394c51f89d7df9bfc59f983954",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 17,
"path": "/hostnames_provider/hostnames/serializers.py",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "'''\n @file serializers.py\n @lastmod 8/11/2016\n'''\n\nfrom rest_framework import serializers\nfrom .models import Host\nfrom django.db import models\n\n# Serializer da Classe Host\nclass HostSerializer(serializers.ModelSerializer):\n '''\n Classe Serializadora de Host: Indica a Classe que servira de modelo e seus determinados campos \n '''\n class Meta:\n model = Host\n fields = ('hostname','mac_address','ip_address')\n"
},
{
"alpha_fraction": 0.493865042924881,
"alphanum_fraction": 0.504601240158081,
"avg_line_length": 25.079999923706055,
"blob_id": "b02040489caa4bb5cb92d1daed9f7f3301e023c6",
"content_id": "71f38b7d47452ce5fde23dff189f65cd9388030c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 25,
"path": "/hostnames_provider/templates/hostnames/base_with_menu.html",
"repo_name": "DiegoAscanio/hostnames_provider",
"src_encoding": "UTF-8",
"text": "<!------------------------------------------\n\t\t@file base_with_menu.html\n\t\t@lastmod 1/11/12\n------------------------------------------->\n\n<!--\n\testende a base padrao\n-->\n{% extends 'hostnames/base.html' %}\n\n<!--\n\tdefine o conteudo da navbar esquerda\n-->\n{% block navbar_left_menu %}\n <li><a href=\"/create\">Adicionar</a></li>\n <li><a href=\"/list\">Listar</a></li>\n{% endblock %}\n\n<!--\n\tdefine o conteudo da navbar direita\n-->\n{% block navbar_right_menu %}\n \n\t\t<li><a style='padding-top:7px;padding-bottom:7px' href=\"/logout\"><button type=\"button\" class=\"btn btn-success\">Sair</button></a></li>\n{% endblock %}\n"
}
] | 16 |
mhussainphys/J.A.R.V.I.S. | https://github.com/mhussainphys/J.A.R.V.I.S. | 128b402e2820d3bcf3167030dd440ec795259a2c | be34fdf661c1f27c65df0d491889f3d6c8edbb94 | 2f5a16defe62a42af9b6b8215417712f482efe1a | refs/heads/master | 2020-04-29T06:52:13.023980 | 2019-03-19T00:59:54 | 2019-03-19T00:59:54 | 175,932,790 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4329093098640442,
"alphanum_fraction": 0.43762823939323425,
"avg_line_length": 68.4000015258789,
"blob_id": "a00dc8d60dac093ed90aa5eb48fde79044832d93",
"content_id": "3d844483c1ce93d1386d0fa814d421b2f180aa67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9748,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 140,
"path": "/AutoPilot.py",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "import ParseFunctions as pf\nimport TCP_com as tp #in-built 5s delay in all of them\nimport AllModules as am\n\n################################### Important #########################################\n######## This parameter defines at what time it is safe to start a new run ############\n######## It should be about 30 seconds before the arrival time of each spill ##########\n######## Since spills come every minute, this is defined as a number of seconds #######\n######## after the start of each clock minute (only meaningful modulo 60 seconds) #####\n################ Periodically make sure this value makes sense. #######################\n#######################################################################################\n\nStartSeconds = 9\nStopSeconds = 40\nNumSpillsPerRun = 2\n\n\n#################################Parsing arguments###################################### \n\nparser = argparse.ArgumentParser(description='Information for running the AutoPilot program. \n/n /n General Instructions: If using OTSDAQ make sure the start and stop seconds in the beginning of the program are hard coded correctly. /n Make sure to add sensor and configuration after each controlled access and pass it as an argument to this script. /n\n/n /n TekScope Specific Instructions: /n Make sure you hard code the dpo_fastframe path. /n If using the OTSDAQ with TekScope make sure the Use_otsdaq boolean is True in dpo_fastframe script. /n Make Sure you pass all four Scope trigger and channel settings. \n/n /n Other Digitizer Specific Instructions: /n If not running the TekScope make sure that the run file name in TCP_com is correct.') \nparser.add_argument('-rtm', '--RunTableMode', type=int, default = 0, help='Give 1 if you are using the run table', required=False) \nparser.add_argument('-ac', '--AlreadyConfigured', type=int, default = 0, help='Give 1 if the OTSDAQ is already configured', required=False) \nparser.add_argument('-de', '--Debug', type=int, default = 0, required=False) \nparser.add_argument('-io', '--IsOTSDAQ', type=int, default=0, help = 'Give 1 if using OTSDAQ',required=False) \nparser.add_argument('-it', '--IsTelescope', type=int,default=0, help = 'Give 1 if using the telescope',required=False) \nparser.add_argument('-di', '--Digitizer', type=str,default= 'TekScope', help = 'Give VME or DT5742 or TekScope', required =False) \nparser.add_argument('-se', '--Sensor', type=int, help = 'Make sure to add the sensor record in the run table. Give sensor S/N from the run table',required=False) \nparser.add_argument('-conf', '--Configuration', type=int, help = 'Make sure to add the configuration in the run table. 
Give COnfiguration S/N from the run table',required=False) \nparser.add_argument('-sac', '--StopAndContinue', type=int, default = 0, help = 'This bool should be 1 if the OTSDAQ is already in the running state and you want to stop and it and continue running it.',required=False) \n\n######################### Only care about this if using TekScope #########################\nparser.add_argument('-tl', '--TriggerLevel', type=float,default= -0.01, help = 'Trigger level in volts', required =False) \nparser.add_argument('-tc', '--TriggerChannel', type=str, deafult= 'CH4', help = 'Channel to trigger on',required=False) \nparser.add_argument('-ne', '--NumEvents', type=int,default=50000, help = 'Number of events',required=False) \nparser.add_argument('-tne', '--TotalNumEvents', type=int,default=50000, help = 'Total number of events',required=False) \n \nargs = parser.parse_args() \nRunTableMode = args.RunTableMode \nAlreadyConfigured = args.AlreadyConfigured \nDebug = args.Debug \nIsOTS = args.IsOTSDAQ \nIsTelescope = args.IsTelescope \nDigitizer = args.Digitizer \nSensor = args.Sensor \nConfiguration = args.Configuration \nStopAndContinue = args.StopAndContinue\nTriggerLevel = args.TriggerLevel\nTriggerChannel = args.TriggerChannel\nNumEvents = args.NumEvents\nTotalNumEvents = args.TotalNumEvents\n\n\n\n########################### Only when Run table is used ############################\n\nif RunTableMode:\n\n if IsTelescope: \n Tracking = 'Not started'\n else:\n Tracking = 'N/A'\n if Digitizer == 'TekScope': \n IsScope = True\n Conversion = 'Not started'\n StartScopeCMD = \"python %s --trig=%f --trigCh=%s --numFrames=%i --totalNumber=%i\" % (am.DPOFastFramePath, TriggerLevel, TriggerChannel, NumEvents, TotalNumEvents) \n else: \n Conversion = 'N/A' \n\n TimingDAQ = 'Not started'\n TimingDAQNoTracks = 'Not started'\n\n # Get Sensor ID and Configuration ID list \n \n if pf.QueryGreenSignal(True): \n SensorID = pf.GetFieldIDOtherTable('Sensor', 'Configuration number', str(Sensor), False) \n ConfigID = pf.GetFieldIDOtherTable('Config', 'Configuration number', str(Configuration), False) \n\n if not SensorID or not ConfigID:\n raise Exception('\\n The sensor and configuration you passed as argument are not in the table!!!!!!!!!!!!!!!!!!!! 
\\n')\n ##### Exit the program ######\n\n\n#################### CONFIGURING AND INITIALIZING THE OTSDAQ ######################\n\nif not Debug and not AlreadyConfigured and UseOTS: \n\tprint 'INTITIALIZING THE OTS-DAQ'\n\tinit_ots()\nif not Debug and not AlreadyConfigured and UseOTS: \n\tprint 'CONFIGURING THE OTS-DAQ'\n\tconfig_ots()\n\ttime.sleep(25)\n\n\n\nwhile True:\n\n if not IsScope and UseOTS and StopAndContinue: \n\n ############### Wait until stop time ################## \n am.wait_until(StopSeconds) \n print \"Stopping run at %s\" % (am.datetime.now().time()) \n if not debug: stop_ots(False) \n StopAndContinue = False \n time.sleep(20) \n \n elif not StopAndContinue:\n\n ############ Wait for safe time to start run ##########\n am.wait_until(StartSeconds) \n \n if not Debug and IsScope: \n \n # In case of the scope, running the dpo_fastframe script which will take care of the otsdaq.\n os.system(StartScopeCMD)\n time.sleep(1) \n\n elif not Debug and not IsScope:\n\n ################### Starting the run ###################\n StartTime = str(am.datetime.now())\n print \"Starting run at %s\" % (StartTime)\n\n RunNumber = tp.start_ots(False)\n\n time.sleep(60*(NumSpillsPerRun-1)) \n\n am.wait_until(StopSeconds) \n StopTime = str(am.datetime.now())\n\n print \"Stopping run at %s\" % (StopTime) \n if not debug: tp.stop_ots(False) \n\n if RunTableMode:\n\n Duration = StopTime - StartTime\n\n if pf.QueryGreenSignal(True): pf.NewRunRecord(RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID, ConfigID, False)\n "
},
{
"alpha_fraction": 0.35548606514930725,
"alphanum_fraction": 0.36056816577911377,
"avg_line_length": 90.3452377319336,
"blob_id": "a7a287aefbc312ba59bcfdc713a3eba1eaec1584",
"content_id": "7d57f8f118c9921a9da2c2b4b05b129f06505f91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7674,
"license_type": "no_license",
"max_line_length": 359,
"num_lines": 84,
"path": "/ProcessExec.py",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "import AllModules as am\nimport ProcessCMDs as pc\nimport ParseFunctions as pf\n\ndef ProcessLog(ProcessName, RunNumber, ProcessOutput):\n\tProcessFile_handle = open(\"/home/daq/fnal_tb_18_11/ProcessLog/%s/run%d.txt\" % (ProcessName, RunNumber), \"a+\") \n\tProcessFile_handle.write(ProcessOutput) \n\tProcessFile_handle.close() \n\ndef exists_remote(host, path): \n\tstatus = subprocess.call(['ssh', host, 'test -f {0}'.format(pipes.quote(path))]) \n\tif status == 0: \n\t\treturn True \n\tif status == 1: \n\t\treturn False \n\traise Exception('SSH failed') \n\ndef TrackFileRemoteExists(RunNumber):\n\tTrackFilePathRulinux = am.BaseTrackDirRulinux +'CMSTimingConverted/Run%i_CMSTiming_converted.root' % RunNumber \n\treturn exists_remote(am.RulinuxSSH, am.TrackFilePathRulinux), am.TrackFilePathRulinux \n\ndef TrackFileLocalExists(RunNumber):\n\tTrackFilePathLocal = am.BaseTrackDirLocal + 'Run%i_CMSTiming_converted.root' % RunNumber \n\treturn am.os.path.exists(TrackFilePathLocal), TrackFilePathLocal \n\ndef FileSizeBool(FilePath, SizeCut):\n\tif am.os.path.exists(FilePath):\n\t\treturn am.os.stat(FilePath).st_size < SizeCut\n\telse: return True\n\ndef ProcessExec(OrderOfExecution, PID, SaveWaveformBool, Version): #PID is 1 for Tracking, 2 for Conversion, 3 for TimingDAQ\n\t\n\tSaveWaveformBool = SaveWaveformBool\n\tVersion = Version\n\n\tif PID == 1:\n\t\tProcessName = 'Tracking'\n\t\tCMDList, ResultFileLocationList, RunList, FieldIDList = pc.TrackingCMDs(False)\n\t\tSizeCut = 10000\n\telif PID == 2:\n\t\tProcessName = 'Conversion'\n\t\tCMDList, ResultFileLocationList, RunList, FieldIDList = pc.ConversionCMDs(False)\n\t\tSizeCut = 10000\n\telif PID == 3:\n\t\tProcessName = 'TimingDAQ'\t\n\t\tDoTracking = True\n\t\tCMDList, ResultFileLocationList, RunList, FieldIDList = pc.TimingDAQCMDs(SaveWaveformBool, Version, DoTracking, False)\n\t\tSizeCut = 20000\n\telif PID == 4:\n\t\tProcessName = 'TimingDAQNoTracks'\n\t\tDoTracking = False\t\n\t\tCMDList, ResultFileLocationList, RunList, FieldIDList = pc.TimingDAQCMDs(SaveWaveformBool, Version, DoTracking, False)\n\t\tSizeCut = 20000\n\n\tRunListInt = map(int,RunList)\n\tif OrderOfExecution == 1: \n\t\tRunListInt.sort() #Ascending Sorting\n\telse:\n\t\tRunListInt.sort(reverse = True)\n\n\tif CMDList:\t\n\n\t\tfor run in \tRunListInt: \n\t\t\tindex = RunList.index(run) \n\t\t\tCMD = CMDList[index] \n\t\t\tResultFileLocation = ResultFileLocationList[index]\n\t\t\tBadProcessExec = False\n\t\t\tRawStageTwoFilePath = am.RawStageTwoLocalPathScope + 'run_scope' + str(run) + '.root'\n\t\t\tif PID == 1:\n\t\t\t\tif pf.QueryGreenSignal(True): pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Processing', False)\n\t\t\t\tsession = am.subprocess.Popen([\"ssh\", am.RulinuxSSH, str(CMD)], stderr=am.subprocess.PIPE, stdout=am.subprocess.PIPE)\n\t\t\telif PID == 2:\n\t\t\t\tif pf.QueryGreenSignal(True): pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Processing', False)\n\t\t\t\tsession = am.subprocess.Popen('source %s; %s' % (am.EnvSetupPath,str(CMD)),stdout=am.subprocess.PIPE, stderr=am.PIPE, shell=True)\n\t\t\telif PID == 3 or PID == 4:\n\t\t\t\tif pf.QueryGreenSignal(True): pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Processing', False)\n\t\t\t\tsession = am.subprocess.Popen('cd %s; source %s; %s;cd -' % (am.TimingDAQDir,am.EnvSetupPath,str(CMD)),stdout=am.PIPE, stderr=am.subprocess.PIPE, shell=True) \t\t\t\n\t\t\tstdout, stderr = session.communicate() \n\t\t\tProcessLog(ProcessName, run, stdout) \n\t\t\tif 
FileSizeBool(ResultFileLocation,SizeCut) or not am.os.path.exists(ResultFileLocation): BadProcessExec = True \n\t\t\tif BadProcessExec: \n\t\t\t\tif pf.QueryGreenSignal(True): pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Failed', False) \n\t\t\telse:\n\t\t\t\tif pf.QueryGreenSignal(True): pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Complete', False)\n\n"
},
{
"alpha_fraction": 0.48128342628479004,
"alphanum_fraction": 0.48770052194595337,
"avg_line_length": 45.70000076293945,
"blob_id": "8e9199222198a9b141663cf8e7c57d2b4153a6a0",
"content_id": "525ae22ae81d93c91657dc297a05ac8bdbe80caf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1870,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 40,
"path": "/TCP_com.py",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "import socket\nimport sys\nimport time\nimport os\nimport AllModules as am\n\ndef init_ots():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n MESSAGE = \"OtherRuns0,Initialize\"\n sock.sendto(MESSAGE, (am.ip_address, am.use_socket)) \n time.sleep(5)\n\ndef config_ots():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \n MESSAGE = \"OtherRuns0,Configure,T992Config\"\n sock.sendto(MESSAGE, (am.ip_address, am.use_socket)) \n time.sleep(5)\n\ndef start_ots(Delay=False):\n copy_cmd = 'scp [email protected]:' + am.runFileName + ' ' + am.localRunFileName \n os.system(copy_cmd) \n runFile = open(am.localRunFileName)\n nextRun = int(runFile.read().strip())\n runFile.close()\n incrementRunFile = open(am.localRunFileName,\"w\")\n incrementRunFile.write(str(nextRun+1)+\"\\n\")\n incrementRunFile.close()\n copy_cmd = 'scp ' + am.localRunFileName +' [email protected]:' + am.runFileName\n os.system(copy_cmd) \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n MESSAGE = \"OtherRuns0,Start, %d\" % (nextRun)\n sock.sendto(MESSAGE, (am.ip_address, am.use_socket))\n return nextRun\n if Delay: time.sleep(5)\n\ndef stop_ots(Delay=True):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n MESSAGE = \"OtherRuns0,Stop\"\n sock.sendto(MESSAGE, (am.ip_address, am.use_socket))\n if Delay: time.sleep(5)\n\n\n"
},
{
"alpha_fraction": 0.4652513563632965,
"alphanum_fraction": 0.4674544334411621,
"avg_line_length": 52.64516067504883,
"blob_id": "0362430fcb5aa136ed34c711c44b47886530710d",
"content_id": "90cf10cf5ee93e60d74a1cb87b442cab8bc1c9dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4993,
"license_type": "no_license",
"max_line_length": 596,
"num_lines": 93,
"path": "/ProcessCMDs.py",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "import ParseFunctions as pf\nimport ProcessRuns as pr\nimport AllModules as am\n\n\n################################################################################################################################################################################################################ \n################################################################################################################################################################################################################ \n##################################These Functions get run lists for various processes from the run table and returns the list of the respective process running commands #######################################\n################################################################################################################################################################################################################ ################################################################################################################################################################################################################ \n\n\ndef TrackingCMDs(Debug):\n\n RunList, FieldIDList = pr.TrackingRuns(False)\n TrackingCMDList = []\n ResultFileLocationList = []\n\n if RunList:\n \n for run in RunList: \n\n TrackingCMDList.append('source %s %d' % (am.HyperscriptPath, run))\n ResultFileLocationList.append(am.BaseTrackDirLocal + 'Run%d_CMSTiming_converted.root' % run)\n\n return TrackingCMDList, ResultFileLocationList, RunList, FieldIDList\n\n\ndef ConversionCMDs(Debug):\n\n RunList, FieldIDList = pr.ConversionRuns(False)\n ConversionCMDList = []\n ResultFileLocationList = []\n\n if RunList:\n \n for run in RunList: \n\n ConversionCMDList.append(am.ConversionCMD + str(run))\n ResultFileLocationList.append(am.RawStageTwoLocalPathScope + 'run_scope' + str(run) + '.root')\n\n return ConversionCMDList, ResultFileLocationList, RunList, FieldIDList\n\n\ndef TimingDAQCMDs(SaveWaveformBool, Version, DoTracking, Debug):\n DoTracking = DoTracking \n RunList, FieldIDList, DigitizerList, RedoList, VersionList = pr.TimingDAQRuns(DoTracking, False)\n DatToRootCMDList = []\n ResultFileLocationList = []\n\n if RunList:\n \n for run in RunList: \n\n RecoLocalPath = None\n RawLocalPath = None\n Digitizer = []\n Index = RunList.index(run)\n Digitizer = (DigitizerList[Index])[0]\n\n if RedoList[Index] == 'Redo': \n Version = VersionList[Index]\n else: \n Version = Version\n\n if Digitizer == 'TekScope': Digitizer = 'NetScopeStandalone'\n\n RecoBaseLocalPath = am.RecoBaseLocalPath + Digitizer+ '/' + Version + '/'\n if not DoTracking: RecoBaseLocalPath = RecoBaseLocalPath + 'RecoWithoutTracks/'\n if not am.os.path.exists(RecoBaseLocalPath): am.os.system('mkdir -p %s' % RecoBaseLocalPath)\n\n if Digitizer == 'VME' or Digitizer == 'DT5742':\n RawBaseLocalPath = am.RawBaseLocalPath + Digitizer + '/' + Version + '/' \n ListRawRunNumber = [(x.split(\"_Run\")[1].split(\".dat\")[0].split(\"_\")[0]) for x in am.glob.glob(RawBaseLocalPath + '*_Run*')]\n ListRawFilePath = [x for x in am.glob.glob(RawBaseLocalPath + '*_Run*')] \n RawLocalPath = ListRawFilePath[ListRawRunNumber.index(run)]\n RecoLocalPath = RecoBaseLocalPath + RawLocalPath.split(\".dat\")[0].split(\"%s/\" % Version)[1] + '.root' \n \n elif Digitizer == 'NetScopeStandalone':\n RawLocalPath = am.RawStageTwoLocalPathScope + 'run_scope' + str(run) + '.root' \n RecoLocalPath = RecoBaseLocalPath + 'run_scope' + str(run) + '_converted.root' \n\n 
ResultFileLocationList.append(RecoLocalPath)\n ConfigFilePath = am.ConfigFileBasePath + Digitizer + '_%s.config' % Version\n DatToRootCMD = './' + Digitizer + 'Dat2Root' + ' --config_file=' + ConfigFilePath + ' --input_file=' + RawLocalPath + ' --output_file=' + RecoLocalPath\n if SaveWaveformBool: DatToRootCMD = DatToRootCMD + ' --save_meas'\n \n if DoTracking: \n TrackFilePathLocal = am.BaseTrackDirLocal + 'Run%i_CMSTiming_converted.root' % run\n DatToRootCMD = DatToRootCMD + ' --pixel_input_file=' + TrackFilePathLocal \n\n DatToRootCMDList.append(DatToRootCMD)\n\n return DatToRootCMDList, ResultFileLocationList, RunList, FieldIDList\n\n\n\n\n"
},
{
"alpha_fraction": 0.5533483028411865,
"alphanum_fraction": 0.5614692568778992,
"avg_line_length": 48.63975143432617,
"blob_id": "43effc066b6ed7f71c4a52a7b533c92ce6553e7b",
"content_id": "3dd1b4828dda191d3e142ff187678fe9639516d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8004,
"license_type": "no_license",
"max_line_length": 410,
"num_lines": 161,
"path": "/ParseFunctions.py",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "import AllModules as am\n\n################################################################################################################################################################################################################ \n################################################################################################################################################################################################################ \n#########################################These Functions parse the run table and performs function such as record query, record update, record Addition etc ###################################################\n################################################################################################################################################################################################################ \n################################################################################################################################################################################################################ \n \n\n################### Unicode Operations to form CURL commands ###################\n\ndef QueryAllow():\n\n QueryFile = open(am.QueryFilePath,\"a+\") \n ScanLines = [line.rstrip('\\n') for line in open(am.QueryFilePath)]\n \n QueryNumberList = []\n QueryTimeList = [] \n TimeToWait = -1\n\n if ScanLines:\n for entry in ScanLines:\n if ScanLines.index(entry) % 2 == 0:\n QueryNumberList.append(int(entry))\n else:\n QueryTimeList.append(entry) #Absolute time \n else:\n QueryNumberList.append(0)\n\n LastQueryNumber = QueryNumberList[len(QueryNumberList - 1)]\n if LastQueryNumber < 5:\n AllowQuery = True\n QueryFile.write(str(LastQueryNumber + 1) + \"\\n\") \n QueryFile.write(str(datetime.now()) + \"\\n\")\n QueryFile.close() \n\n elif LastQueryNumber == 5:\n TimeSinceFirstQuery = (datetime.now() - datetime.strptime(QueryTimeList[0],\"%Y-%m-%d %H:%M:%S.%f\")).total_seconds()\n if TimeSinceFirstQuery > 60:\n AllowQuery = True\n os.system(\"rm %s\" % am.QueryFilePath)\n QueryFile = open(am.QueryFilePath,\"a+\") \n QueryFile.write(str(1) + \"\\n\") \n QueryFile.write(str(datetime.now()) + \"\\n\")\n QueryFile.close()\n else:\n TimeToWait = 65 - TimeSinceFirstQuery\n AllowQuery = False\n \n return AllowQuery, TimeToWait \n\ndef QueryGreenSignal(Bypass):\n while True:\n if Bypass == True:\n return True\n break\n IsQueryAllowed, TimeToWait = QueryAllow()\n if IsQueryAllowed: \n return True\n break\n else:\n time.sleep(TimeToWait)\n\ndef DoubleQuotes(string):\n return '%%22%s%%22' % string \n\ndef Curly(string):\n return '%%7B%s%%7D' % string\n \ndef EqualToFunc(string1,string2):\n return '%s%%3D%s' % (string1,string2)\n\ndef ANDFunc(AttributeNameList, AttributeStatusList):\n Output = 'AND('\n index = 0\n for AttributeName in AttributeNameList:\n AttributeStatus = AttributeStatusList[index]\n Condition = EqualToFunc(Curly(AttributeName), DoubleQuotes(AttributeStatus))\n if index > 0: Output = Output + ','\n Output = Output + Condition\n index = index + 1\n Output = Output + ')'\n return Output\n \ndef ORFunc(AttributeNameList, AttributeStatusList):\n Output = 'OR('\n index = 0 \n for AttributeName in AttributeNameList:\n AttributeStatus = AttributeStatusList[index]\n Condition = EqualToFunc(Curly(AttributeName), DoubleQuotes(AttributeStatus))\n if index > 0: Output = Output + ','\n Output = Output + Condition\n index = index + 1\n Output = Output + ')'\n return Output\n\n\n##################### Main Run Table Operaton functions 
#########################\n\ndef ParsingQuery(NumberOfConditions, ConditionAttributeNames, ConditionAttributeStatus, QueryAttributeName, Debug):\n Output = [] \n FieldID = []\n FilterByFormula = None\n headers = {'Authorization': 'Bearer %s' % am.MyKey, }\n for i in range (0, NumberOfConditions): \n if i > 0: FilterByFormula = FilterByFormula + ','\n FilterByFormula = FilterByFormula + EqualToFunc(Curly(ConditionAttributeNames[i]), DoubleQuotes(ConditionAttributeStatus[i])) \n if NumberOfConditions > 1: FilterByFormula = 'AND(' + FilterByFormula + ')'\n response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)\n ResponseDict = am.ast.literal_eval(response.text)\n if Debug: return ResponseDict, FilterByFormula\n\n for i in ResponseDict[\"records\"]: Output.append(i['fields'][QueryAttributeName]) \n for i in ResponseDict[\"records\"]: FieldID.append(i['id']) \n return Output, FieldID\n\ndef GetFieldID(ConditionAttributeName, ConditionAttributeStatus, Debug):\n Output = [] \n FilterByFormula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))\n headers = {'Authorization': 'Bearer %s' % am.MyKey, }\n response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)\n ResponseDict = am.ast.literal_eval(response.text)\n if Debug: return ResponseDict, FilterByFormula\n\n for i in ResponseDict[\"records\"]: Output.append(i['id']) \n return Output\n\ndef UpdateAttributeStatus(FieldID, UpdateAttributeName, UpdateAttributeStatus, Debug):\n headers = {\n 'Authorization': 'Bearer %s' % am.MyKey, \n 'Content-Type': 'application/json',\n }\n data = '{\"fields\":{\"%s\": [\"%s\"]}}' % (UpdateAttributeName,UpdateAttributeStatus)\n response = am.requests.patch(am.CurlBaseCommand + '/' + FieldID, headers=headers, data=data)\n ResponseDict = am.ast.literal_eval(response.text)\n if Debug: return ResponseDict\n\ndef GetFieldIDOtherTable(TableName,ConditionAttributeName, ConditionAttributeStatus, Debug): \n if TableName == 'Sensor' :\n CurlBaseCommand = am.CurlBaseCommandSensor\n elif TableName == 'Config':\n CurlBaseCommand = am.CurlBaseCommandConfig\n Output = [] \n FilterByFormula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))\n headers = {'Authorization': 'Bearer %s' % am.MyKey, }\n response = am.requests.get(CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)\n ResponseDict = am.ast.literal_eval(response.text)\n if Debug: return ResponseDict, FilterByFormula\n for i in ResponseDict[\"records\"]: Output.append(i['id']) \n return Output\n\ndef NewRunRecord(RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID, ConfigID, Debug):\n headers = {\n 'Authorization': 'Bearer %s' % am.MyKey, \n 'Content-Type': 'application/json',\n }\n #Example template of a query response : {'records': [{'createdTime': '2015-02-12T03:40:42.000Z', 'fields': {'Conversion': ['Complete'], 'Time Resolution 1': 30, 'TimingDAQ': ['Failed'], 'Notes': 'Make test beam great again\\n', 'HV 1': ['recJRiQqSHzTNZqal'], 'Run number': 4, 'Tracking': ['Processing'], 'Configuration': ['rectY95k7m19likjW'], 'Sensor': ['recNwdccBdzS7iBa5']}, 'id': 'recNsKOMDvYKrJzXd'}]}\n data = '{\"fields\":{\"Run number\": %d,\"Start time\": \"%s\", \"Duration\": \"%s\", \"Digitizer\": [\"%s\"], \"Tracking\": [\"%s\"], \"Conversion\": [\"%s\"],\"TimingDAQ\": [\"%s\"],\"TimingDAQNoTracks\": [\"%s\"], \"Sensor\": [\"%s\"],\"Configuration\": 
[\"%s\"]}}' % (RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID[0], ConfigID[0])\n response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)\n ResponseDict = am.ast.literal_eval(response.text)\n if Debug: return ResponseDict\n \n\n\n\n\n\n \n"
},
{
"alpha_fraction": 0.6909090876579285,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 25.5,
"blob_id": "c61fe1983bcc2612e9db5d2054a0e3ab24f35c69",
"content_id": "cc28213538c48831bcc30388964bf4c5d4b17fa5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 2,
"path": "/README.md",
"repo_name": "mhussainphys/J.A.R.V.I.S.",
"src_encoding": "UTF-8",
"text": "# J.A.R.V.I.S.\nJust A Rather Very Intelligent System\n\n\n"
}
] | 6 |
shaikhmohammadirfan/oddeven | https://github.com/shaikhmohammadirfan/oddeven | b3425583f1b0fa3952940f202e1957ea23e3ea8f | 99c5133cd8f8a5933ddecb7aaf5363d4df87d763 | 98770ac84b04c2d7857625104cac23e0ee5d9866 | refs/heads/master | 2021-01-10T23:14:17.677540 | 2016-10-01T07:21:51 | 2016-10-01T07:21:51 | 69,725,453 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6418604850769043,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 25.875,
"blob_id": "a1ced29da49048a535fa9877a1c262df29c8a697",
"content_id": "4f57035329865f3404ffc39bb4862451bf0524fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 8,
"path": "/OddEven.py",
"repo_name": "shaikhmohammadirfan/oddeven",
"src_encoding": "UTF-8",
"text": "# program to identify the even/odd state of given number\n\nnumber = int (input(\"Enter a number\") )\n\nif number % 2== 0:\n print (\"The enet=tered number is even\")\nelse:\n print (\"The entered number is odd\")\n"
}
] | 1 |
KinWaiCheuk/ICPR2020 | https://github.com/KinWaiCheuk/ICPR2020 | b50b8cf15097d7fddf5d1de16040952842a41d3b | a13843b6bd8d91fcc1ffe5dc70b506bcfb439c44 | 6f46e0dfc2d1b85a6f410d709d3e8e2b02ba069c | refs/heads/master | 2023-01-04T22:06:47.801449 | 2020-11-10T06:27:10 | 2020-11-10T06:27:10 | 225,833,725 | 12 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6945024132728577,
"alphanum_fraction": 0.7230340838432312,
"avg_line_length": 40.08571243286133,
"blob_id": "922cab52c185872977fe7518bd4394874474441a",
"content_id": "fe337d345b144f23a9698f198e8d095eb2b7e50a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1437,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 35,
"path": "/README.md",
"repo_name": "KinWaiCheuk/ICPR2020",
"src_encoding": "UTF-8",
"text": "# ICPR2020: The Effect of Spectrogram Reconstructions on Automatic Music Transcription\nThis repository is for the [paper](https://arxiv.org/abs/2010.09969) accepted in ICPR2020. This code uses the PyTorch version of Onsets and Frames written by [jongwook](https://github.com/jongwook/onsets-and-frames) as a template.\n\n## Requirement\n* torch == 1.6.0\n* torchvision == 0.7.0\n* tensorboard == 2.2.0\n* numpy == 1.19.1\n* matplotlib == 3.0.2\n* sacred == 0.8.1\n* nnAudio == 0.2.0\n\n\n## Training the model\nThe python script can be run using using the sacred syntax `with`.\n```python\npython train.py with train_on=<arg> spec=<arg> reconstruction=<arg> device=<arg>\n```\n\n* `train_on`: the dataset to be trained on. Either `MAPS` or `MAESTRO` or `MusicNet`\n* `spec`: the input spectrogram type. Either `Mel` or `CQT`.\n* `reconstruction`: to include the reconstruction loss or not. Either `True` or `False`\n* `device`: the device to be trained on. Either `cpu` or `cuda:0`\n\n## Evaluating the model and exporting the midi files\n\n```python\npython evaluate.py with weight_file=<arg> reconstruction=<arg> device=<arg>\n```\n\n* `weight_file`: The weight files should be located inside the `trained_weight` folder\n* `dataset`: which dataset to evaluate on, can be either `MAPS` or `MAESTRO` or `MusicNet`.\n* `device`: the device to be trained on. Either `cpu` or `cuda:0`\n\nThe transcripted midi files, accuracy reports are saved inside the `results` folder."
},
{
"alpha_fraction": 0.4399999976158142,
"alphanum_fraction": 0.6159999966621399,
"avg_line_length": 17,
"blob_id": "84b1d60a03cdd9bf7842a4e4180bffcfe8c822ef",
"content_id": "ccff5af0c795ad6d09bec62086b26f2acf90cbc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 125,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "KinWaiCheuk/ICPR2020",
"src_encoding": "UTF-8",
"text": "torch == 1.6.0\ntorchvision == 0.7.0\ntensorboard == 2.2.0\nnumpy == 1.19.1\nmatplotlib == 3.0.2\nsacred == 0.8.1\nnnAudio == 0.2.0"
},
{
"alpha_fraction": 0.6471803784370422,
"alphanum_fraction": 0.6516091227531433,
"avg_line_length": 31.87378692626953,
"blob_id": "f584b7d47a656dfec5e0345c3329dcd3fbd52f90",
"content_id": "107225a88f70687c3324436173c04a5a47e15988",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3388,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 103,
"path": "/evaluate.py",
"repo_name": "KinWaiCheuk/ICPR2020",
"src_encoding": "UTF-8",
"text": "import os\n\n\nfrom datetime import datetime\nimport pickle\n\nimport numpy as np\nfrom sacred import Experiment\nfrom sacred.commands import print_config, save_config\nfrom sacred.observers import FileStorageObserver\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\n\nfrom model.evaluate_fn import evaluate_wo_velocity\nfrom model import *\n\nimport matplotlib.pyplot as plt\nex = Experiment('Evaluation')\n\n# parameters for the network (These parameters works the best)\nds_ksize, ds_stride = (2,2),(2,2)\nmode = 'imagewise'\nsparsity = 1\nlog = True # Turn on log magnitude scale spectrograms.\n\ndef removing_nnAudio_parameters(state_dict):\n pop_list = []\n for i in state_dict.keys():\n if i.startswith('spectrogram'):\n pop_list.append(i)\n\n print(f'The following weights will be remove:\\n{pop_list}')\n decision = input(\"Do you want to proceed? [y/n] \")\n\n while True:\n if decision.lower()=='y': \n for i in pop_list:\n state_dict.pop(i)\n return state_dict \n elif decision.lower()=='n':\n return state_dict \n\n print(f'Please choose only [y] or [n]')\n decision = input(\"Do you want to proceed? [y/n] \") \n\n\[email protected]\ndef config():\n weight_file = 'MAESTRO-CQT-transcriber_only'\n logdir = os.path.join('results', weight_file)\n unpacked_weight_name = weight_file.split('-')\n spec =unpacked_weight_name[1]\n dataset = 'MAPS'\n device = 'cuda:0'\n \n if weight_file.split('-')[-1] == \"transcriber_only\":\n reconstruction = False\n elif weight_file.split('-')[-1] == \"transcriber_reconstructor\":\n reconstruction = True\n print(f'reconstruction = {reconstruction}')\n# reconstruction = True\n \n leave_one_out = None\n \[email protected]\ndef train(spec, dataset, device, reconstruction, logdir, leave_one_out, weight_file): \n print_config(ex.current_run)\n\n # Choosing the dataset to use\n if dataset == 'MAESTRO':\n validation_dataset = MAESTRO(path='../../MAESTRO/', groups=['test'], sequence_length=None, device=device)\n\n elif dataset == 'MusicNet':\n validation_dataset = MusicNet(groups=['small test'], sequence_length=None, device=device)\n\n else:\n validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, overlap=True, device=device)\n \n\n model = Net(ds_ksize,ds_stride, log=log, reconstruction=reconstruction, mode=mode, spec=spec, norm=sparsity)\n model.to(device)\n model_path = os.path.join('trained_weights', weight_file)\n state_dict = torch.load(model_path)\n model.load_my_state_dict(state_dict)\n\n summary(model)\n \n with torch.no_grad():\n model = model.eval()\n metrics = evaluate_wo_velocity(tqdm(validation_dataset), model, reconstruction=reconstruction,\n save_path=os.path.join(logdir,f'./{dataset}_MIDI_results'))\n\n for key, values in metrics.items():\n if key.startswith('metric/'):\n _, category, name = key.split('/')\n print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')\n\n export_path = os.path.join(logdir, f'{dataset}_result_dict') \n pickle.dump(metrics, open(export_path, 'wb'))\n\n"
}
] | 3 |
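The removing_nnAudio_parameters helper in the evaluate.py record above filters spectrogram weights out of a saved state dict before loading. Below is a minimal sketch of that key-filtering idea, using a plain dict so it runs without PyTorch; the 'spectrogram' prefix comes from the record, while the function name and sample weights here are illustrative assumptions:

def strip_prefixed_keys(state_dict, prefix='spectrogram'):
    # Keep every entry whose key does not start with the given prefix.
    return {k: v for k, v in state_dict.items() if not k.startswith(prefix)}

weights = {'spectrogram.window': 1, 'encoder.w': 2, 'decoder.b': 3}
print(strip_prefixed_keys(weights))  # {'encoder.w': 2, 'decoder.b': 3}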
fagan2888/Python-Weighted-Means | https://github.com/fagan2888/Python-Weighted-Means | 26d229b29a835ed2db6bd9c29378f1c780d3c47c | 765ce98ebb55be7f43328881a96834c1e2ea939e | d59291521d0fd4d28c63c36e63395fd640a822e2 | refs/heads/master | 2021-01-06T10:35:17.995790 | 2019-11-19T22:41:16 | 2019-11-19T22:41:16 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.43847620487213135,
"alphanum_fraction": 0.5192381143569946,
"avg_line_length": 28.858823776245117,
"blob_id": "32467f77290c791d1ac7412e7bc58ea58b4b7861",
"content_id": "823ddd056fb3ae80d601575678e496c39b51b2b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2625,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 85,
"path": "/test_solution.py",
"repo_name": "fagan2888/Python-Weighted-Means",
"src_encoding": "UTF-8",
"text": "#To test in terminal call: pytest test_solution.py -v --durations=1\r\n\r\nimport solution\r\nimport pytest\r\nimport numpy as np\r\n\r\ndef test_three_groups():\r\n vals = [1, 2, 3, 8, 5]\r\n grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']\r\n grps_2 = ['MA', 'MA', 'MA', 'RI', 'RI']\r\n grps_3 = ['WEYMOUTH', 'BOSTON', 'BOSTON', 'PROVIDENCE', 'PROVIDENCE']\r\n weights = [.15, .35, .5]\r\n\r\n adj_vals = solution.group_adjust(vals, [grps_1, grps_2, grps_3], weights)\r\n\r\n answer = [-0.770, -0.520, 0.480, 1.905, -1.095]\r\n for ans, res in zip(answer, adj_vals):\r\n assert abs(ans - res) < 1e-5\r\n\r\n\r\ndef test_two_groups():\r\n vals = [1, 2, 3, 8, 5]\r\n grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']\r\n grps_2 = ['MA', 'RI', 'CT', 'CT', 'CT']\r\n weights = [.65, .35]\r\n\r\n adj_vals = solution.group_adjust(vals, [grps_1, grps_2], weights)\r\n\r\n answer = [-1.82, -1.17, -1.33666, 3.66333, 0.66333]\r\n for ans, res in zip(answer, adj_vals):\r\n assert abs(ans - res) < 1e-5\r\n\r\n\r\ndef test_missing_vals():\r\n vals = [1, np.NaN, 3, 5, 8, 7]\r\n grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']\r\n grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\r\n weights = [.65, .35]\r\n\r\n adj_vals = solution.group_adjust(vals, [grps_1, grps_2], weights)\r\n answer = [-2.47, np.NaN, -1.170, -0.4533333, 2.54666666, 1.54666666]\r\n\r\n for ans, res in zip(answer, adj_vals):\r\n if ans is None:\r\n assert res is None\r\n elif np.isnan(ans):\r\n assert np.isnan(res)\r\n else:\r\n assert abs(ans - res) < 1e-5\r\n\r\n\r\ndef test_weights_len_equals_group_len():\r\n # Need to have 1 weight for each group\r\n\r\n vals = [1, np.NaN, 3, 5, 8, 7]\r\n grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']\r\n grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\r\n weights = [.65]\r\n\r\n with pytest.raises(ValueError):\r\n solution.group_adjust(vals, [grps_1, grps_2], weights)\r\n pass\r\n\r\n\r\ndef test_group_len_equals_vals_len():\r\n # The groups need to be same shape as vals\r\n\r\n vals = [1, None, 3, 5, 8, 7]\r\n grps_1 = ['USA']\r\n grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']\r\n weights = [.65]\r\n\r\n with pytest.raises(ValueError):\r\n solution.group_adjust(vals, [grps_1, grps_2], weights)\r\n pass\r\n\r\ndef test_performance():\r\n vals = 1000000 * [1, np.NaN, 3, 5, 8, 7]\r\n grps_1 = 1000000 * [1, 1, 1, 1, 1, 1]\r\n grps_2 = 1000000 * [1, 1, 1, 1, 2, 2]\r\n grps_3 = 1000000 * [1, 2, 2, 3, 4, 5]\r\n weights = [.20, .30, .50]\r\n\r\n #Timed using --durations when calling pytest\r\n solution.group_adjust(vals, [grps_1, grps_2, grps_3], weights)\r\n\r\n"
},
{
"alpha_fraction": 0.6336569786071777,
"alphanum_fraction": 0.6362459659576416,
"avg_line_length": 34.78571319580078,
"blob_id": "875912d864d4da1a1b0fdc041d2d3a4e517f0b8c",
"content_id": "bfba5b3a809008033d927fe6c5c5368ab036fb05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1545,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 42,
"path": "/solution.py",
"repo_name": "fagan2888/Python-Weighted-Means",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\ndef group_adjust(vals,groups,weights):\r\n\r\n #Check if inputs are appropriate\r\n for j in groups:\r\n if len(j) != len(vals):\r\n raise ValueError('Size of each group must be same as size of val list')\r\n if len(groups) != len(weights):\r\n raise ValueError('Number of weights must equal number of groups')\r\n\r\n #Make lists into arrays for computational efficiency\r\n vals = np.array(vals)\r\n groups = np.array(groups)\r\n\r\n #Find indices in list where value is np.NaN\r\n value_list = np.where(np.isfinite(vals))[0]\r\n\r\n group_means = []\r\n for i in range(len(groups)):\r\n #Find indices of unique elements\r\n _, indices = np.unique(groups[i], return_inverse=True)\r\n\r\n #Make array of np.NaN of size of vals\r\n group_avg = np.empty(len(vals))\r\n group_avg.fill(np.NaN)\r\n\r\n indices = np.array(indices)\r\n #For each unique group element, get mean of vals for indices where that element is present and vals is finite\r\n for j in np.unique(indices):\r\n val_indices = np.where(indices == j)[0]\r\n group_avg[np.intersect1d(val_indices,value_list)] = np.mean(vals[np.intersect1d(val_indices,value_list)])\r\n group_means.append(group_avg)\r\n\r\n #Array of means for each element at all its locations in its group (where vals is finite)\r\n group_means = np.array(group_means)\r\n\r\n #Demeaned values\r\n weights = np.array(weights)\r\n final_vals = np.array(vals) - weights.dot(group_means)\r\n\r\n return final_vals\r\n"
}
] | 2 |
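The group_adjust routine in solution.py demeans each value by a weighted sum of its group means. Here is a pure-Python sketch of that computation, without the NaN handling and numpy vectorization of the record above; it reproduces the expected answers of test_two_groups to the tolerance used there:

def weighted_group_demean(vals, groups, weights):
    # Build one mean per (group level, label); each value is then reduced
    # by the weighted sum of the means of the labels it belongs to.
    means = []
    for labels in groups:
        per_label = {}
        for lab, v in zip(labels, vals):
            per_label.setdefault(lab, []).append(v)
        avg = {lab: sum(vs) / len(vs) for lab, vs in per_label.items()}
        means.append([avg[lab] for lab in labels])
    return [v - sum(w * m[i] for w, m in zip(weights, means))
            for i, v in enumerate(vals)]

vals = [1, 2, 3, 8, 5]
grps = [['USA'] * 5, ['MA', 'RI', 'CT', 'CT', 'CT']]
print(weighted_group_demean(vals, grps, [.65, .35]))
# [-1.82, -1.17, -1.3366..., 3.6633..., 0.6633...]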
Manoj-M-97/Hangman | https://github.com/Manoj-M-97/Hangman | 6024cbc68d93f2099e496f94c88cc30d3322b188 | 8f038f371163ccb2fe14eef54082143c77b7fa19 | eebbf1c435d7f391992a8a64bbaa1f27c4343b1a | refs/heads/master | 2020-03-22T02:47:26.440887 | 2019-08-23T15:11:28 | 2019-08-23T15:11:28 | 139,393,298 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7704917788505554,
"alphanum_fraction": 0.7737705111503601,
"avg_line_length": 45.846153259277344,
"blob_id": "1a2643ff3ad77d9c2710b398d5d263bc8f60b05a",
"content_id": "3c754e363947117eefeeec366319da0a96767360",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 614,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 13,
"path": "/README.md",
"repo_name": "Manoj-M-97/Hangman",
"src_encoding": "UTF-8",
"text": "# Hangman\nThis is a game developed in python. The player will be given a word and a hint in the form of its definition. The player will have to guess the word by guessing one letter at a time.\nIt makes use of dictionaries, lists and randomizing the words given to the player.\nThe game ends when the player makes ‘n’ wrong moves where n is defined as the length of the word minus 3.\nThe game has an option to continue to another game or end it.\nIt takes care of the wrong entries by the user. \n\nLibraries used:\n- Random\nThis is a library which allows to generate numbers in random.\n\nTo Run:\npython3 hangman.py\n\n"
},
{
"alpha_fraction": 0.6501719951629639,
"alphanum_fraction": 0.6567875146865845,
"avg_line_length": 41.18390655517578,
"blob_id": "a86db71affdbd5d1255a24e3ac3bb6e1811523c4",
"content_id": "55838dd66d67e1b77471118b26c314779aec1e81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3779,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 87,
"path": "/hangman.py",
"repo_name": "Manoj-M-97/Hangman",
"src_encoding": "UTF-8",
"text": "# Libraries to be imported:\r\nimport random\r\n\r\n# User Interface on start of the game\r\nprint(format('-','->40'))\r\nprint(format(' ','>10'),'Welcome to Hangman!',\"\\n\")\r\nprint(\"The game goes like this.\",\"\\n\",\"\\n\",\"You will be given a word and a hint in the form of its definition.\",\"\\n\",\r\n \"You will have to guess the word by guessing one letter at a time.\",\"\\n\"\r\n \"The number of wrong guesses you can make will be three fewer than the number of letters in the word.\")\r\n\r\n# The while loop is for playing the game again or not according to the user's choice.\r\nagain=True\r\nwhile(again==True):\r\n print(\"\\n\",\"Here you go....\",\"\\n\")\r\n# The following lists have all the words and their meanings respectively. TO extend the game, words and their definitions can be added accordingly\r\n word_list = [\"quorum\", \"cabal\", \"prolix\", \"rhythm\", \"bayou\", \"foxglove\", \"bevel\", \"logorrhea\", \"ornery\", \"bloviate\"]\r\n definitions =[\"the minimum number of members of an assembly or society that must be present at any of its meetings to make the proceedings of that meeting valid\",\r\n \"a secret political clique or faction\",\r\n\t\t\t \"(of speech or writing) using or containing too many words; tediously lengthy.\",\r\n\t\t\t \"a strong, regular repeated pattern of movement or sound\",\r\n\t\t\t \"a marshy outlet of a lake or river\",\r\n\t\t\t \"a tall Eurasian plant with erect spikes of pinkish-purple (or white) flowers shaped like the fingers of gloves\",\r\n\t\t\t \"a sloping surface or edge\",\r\n\t\t\t \"a tendency to extreme loquacity\",\r\n\t\t\t \"bad-tempered or difficult to deal with\",\r\n\t\t\t \"talk at length, especially in an inflated or empty way\"]\r\n diction = zip(word_list, definitions)\r\n guesses = 0\r\n win = 0\r\n separated_letters = []\r\n already_guessed = []\r\n every_key_value = {}\r\n # Random function is to generate a random number which is used as the index of the list to get the corresponding word.\r\n # This is to randomize the words that are given to the player.\r\n rand_index = random.randint(0, 9)\r\n word = word_list[rand_index]\r\n meaning = definitions[rand_index]\r\n print(\"The meaning of your word is: \", meaning)\r\n for a in word:\r\n separated_letters.append(a)\r\n l = len(word)\r\n print('_ '*l)\r\n\r\n # The following (l-3) is the number of wrong letter guessess the player is allowed before the player loses.\r\n while guesses <= l - 3:\r\n flag = 0\r\n guess = input('Guess a letter: ')\r\n if guess in already_guessed:\r\n print('You have already guessed this. Try again.')\r\n continue\r\n if guess.isalpha() and (guess not in already_guessed):\r\n for index, item in enumerate(separated_letters):\r\n if item == guess:\r\n flag = 1\r\n every_key_value[index] = item\r\n if flag == 0:\r\n print(\"The word doesn't have that letter. 
Try again.\")\r\n guesses += 1\r\n already_guessed.append(guess)\r\n #even if the letter is not in the word, this adds it to the 'already_guessed' list so that the player isn't penalized for accidentally guessing it again\r\n continue\r\n for i in range(l):\r\n if i in every_key_value.keys():\r\n print (every_key_value[i], ' ', end = '')\r\n else:\r\n print ('_ ', end = '') \r\n already_guessed.append(guess)\r\n if len(every_key_value.keys()) == l and guesses <= l - 3: \r\n print(\"You win!\")\r\n win = 1\r\n break\r\n if win == 0:\r\n print(\"You lose!\")\r\n print (\"Your word was '\", word, \"'.\", sep = '')\r\n print(\"Better luck next time!\")\r\n print(\"That was fun.\")\r\n try1=input(\"Would you like to play again? (YES or NO): \")\r\n#For wrong input for 'yes' or 'no'\r\n while try1.lower()!=\"yes\" and try1.lower()!=\"no\":\r\n print(\"Invalid entry\")\r\n try1=input(\"Enter either 'yes' or 'no': \")\r\n if try1.lower()==\"yes\":\r\n again=True\r\n elif try1.lower()==\"no\":\r\n again=False\r\n print(format('-','->30'))\r\n print('Waiting for you to come back next time')\r\n \r\n\r\n \r\n \r\n \r\n \r\n"
}
] | 2 |
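hangman.py tracks correct guesses in every_key_value and redraws the word one position at a time. The same masked-word display can be written as a small pure function; this compact restatement is illustrative only, not part of the repo:

def render_mask(word, guessed):
    # Show guessed letters in place and underscores elsewhere.
    return ' '.join(c if c in guessed else '_' for c in word)

print(render_mask('bayou', {'a', 'u'}))  # _ a _ _ u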
Vigneshram1206/DASD_ASSIGMENT1 | https://github.com/Vigneshram1206/DASD_ASSIGMENT1 | 6136e4d96cbba870eb06c40731dda223d1878d64 | 42ffccd31987c866e0176d2f01cf396a77a87264 | 54a9607e22b20bc9dfd773e002e3efe56201b91d | refs/heads/main | 2023-02-02T02:56:19.848099 | 2020-12-16T05:26:36 | 2020-12-16T05:26:36 | 322,058,442 | 0 | 0 | null | 2020-12-16T17:44:59 | 2020-12-16T05:27:25 | 2020-12-16T05:27:22 | null | [
{
"alpha_fraction": 0.7843971848487854,
"alphanum_fraction": 0.7843971848487854,
"avg_line_length": 40.5,
"blob_id": "502111f72e58e0fc911bf5902b6d34e6a251b7a3",
"content_id": "ccefd34b2b7db3212e6bb5b56eca2c8f916f7ab1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1416,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 34,
"path": "/src.py",
"repo_name": "Vigneshram1206/DASD_ASSIGMENT1",
"src_encoding": "UTF-8",
"text": "## this must be implemented in OOP, I am thinking to create one class for student record and one class for hashtable\n\n\n#Design a hash function HashId() which accepts the applicant’s name as a parameter and returns the hash value\ndef HashId(stname):\n #to mimic the functionality intitally planning to with \n #python dictionaries slowly remove them later once the project is executing\n pass\n\n#This function creates an empty hash table and points to null\ndef initializeHash(self):\n # I suppose the project must be planned in OOPS\n pass\n\n#This function inserts the student’s name and corresponding details into the hash table\ndef insertAppDetails(ApplicationRecords, name, phone, country, program, status): \n pass\n\n#This function finds the applicant’s details based on the name and updates the corresponding details into the hash table.\ndef updateAppDetails(ApplicationRecords, name, phone, country, program, status):\n pass\n\n#This function prints the list of all applicants who have applied to a particular program\ndef memRef(ApplicationRecords, Program):\n pass\n\n#This function prints the list of number of applications in their current stage \n# - of the application process including Applied, Rejected and Approved\ndef appStatus(ApplicationRecords):\n pass\n\n#This function destroys all the entries inside hash table. This is a clean-up code.\ndef destroyHash(ApplicationRecords):\n pass"
}
] | 1 |
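src.py above leaves HashId() as a stub that should map an applicant's name to a hash value. One plausible way to fill it in is a polynomial rolling hash; this is a sketch under that assumption, and the constants 31 and 101 below are arbitrary illustrative choices, not part of the assignment:

def hash_id(stname, table_size=101):
    # Polynomial rolling hash over the applicant's name, reduced
    # modulo the table size to give a bucket index.
    h = 0
    for ch in stname:
        h = (h * 31 + ord(ch)) % table_size
    return h

print(hash_id('Alice'))  # deterministic bucket index in range(101)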
TMichaelan/test | https://github.com/TMichaelan/test | ed7c7aa46977cfdd290b25d16c3565c5dcaf750c | 4382f46e8bee560dece8d9ff88d73bb796ec9ced | 37fccbdb2da5d39b3f4d54c9b4a863fdc50bc502 | refs/heads/master | 2021-02-18T17:53:33.791120 | 2020-03-07T15:30:41 | 2020-03-07T15:30:41 | 245,219,709 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.612500011920929,
"alphanum_fraction": 0.625,
"avg_line_length": 13.636363983154297,
"blob_id": "ee58436fad2c22d1c15d4084e86a274d2462ccbe",
"content_id": "7204eb7912c2b00a9eae4bdba2493568f205543b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 11,
"path": "/Remove vowels.py",
"repo_name": "TMichaelan/test",
"src_encoding": "UTF-8",
"text": "#TEST\n#Version 1.1\nword = input('')\nnew_word = ''\nvow ='aeyuioAEYUIO'\n\nfor letter in word:\n if letter not in vow:\n new_word += letter\n\nprint(new_word)"
},
{
"alpha_fraction": 0.4559686779975891,
"alphanum_fraction": 0.5303326845169067,
"avg_line_length": 23.380952835083008,
"blob_id": "a80df038eb63909302e36273217dec0ab24efcd1",
"content_id": "9441016067d593e0ff067cac5841e8a7784a705d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 511,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 21,
"path": "/1.py",
"repo_name": "TMichaelan/test",
"src_encoding": "UTF-8",
"text": "#Selection Sort\nA = [9,4,5,7,1,2,4,6,8,9,10,3,2,7,8,9,0]\n\nfor i in range(len(A)):\n min_idx = i\n for j in range(i + 1, len(A)):\n if A[min_idx] > A[j]:\n min_idx = j\n A[i], A[min_idx] = A[min_idx], A[i]\nprint(A)\n\n#Reversed\narray = [9,4,5,7,1,2,4,6,8,9,10,3,2,7,8,9,0]\n\nfor i in range(len(array)):\n min_index = i\n for j in range(i+1,len(array)):\n if array[min_index] < array[j]:\n min_index = j\n array[i],array[min_index] = array[min_index],array[i]\nprint(array)"
}
] | 2 |
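The character loop in Remove vowels.py can also be expressed in one pass with str.translate, reusing the same vowel string as the record; the sample word is an illustrative assumption:

vow = 'aeyuioAEYUIO'
strip_vowels = str.maketrans('', '', vow)  # translation table that deletes every vowel
print('Programming'.translate(strip_vowels))  # Prgrmmng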
devitos/HW_RE | https://github.com/devitos/HW_RE | 868ac02829ea05c1f3171663bd3bd307468459f4 | f558a4fbecd18d4d4be9c77066ff4dfa19a67bc6 | ef8ee901b9c3a892919a886732ec7648655c3e06 | refs/heads/main | 2023-02-24T12:11:56.254743 | 2021-01-30T21:16:27 | 2021-01-30T21:16:27 | 334,513,128 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.45492488145828247,
"alphanum_fraction": 0.4866444170475006,
"avg_line_length": 30.37837791442871,
"blob_id": "96e6e4b875de863e1961c7e47e5a5d73a2f2a499",
"content_id": "6be5052e93bf7d08f0ff86914ed36af17999a7da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1216,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 37,
"path": "/HW_RE.py",
"repo_name": "devitos/HW_RE",
"src_encoding": "UTF-8",
"text": "import re\r\nimport csv\r\nwith open(\"phonebook_raw.csv\") as f:\r\n rows = csv.reader(f, delimiter=\",\")\r\n contacts_list = list(rows)\r\n#pprint(contacts_list)\r\nnew_list = list()\r\n\r\n\r\nfor contact in contacts_list:\r\n\r\n pattern = re.compile(\"(\\+7|8)\\s*[\\(\\s*]?(\\d{3})[\\)\\s*\\-*]?\\s*(\\d{3})[-\\s]?(\\d{2})[-\\s]?(\\d+)\")\r\n contact[5] = pattern.sub(r\"+7(\\2)\\3-\\4-\\5\", contact[5])\r\n fio = contact[0] + ' ' + contact[1] + ' ' + contact[2]\r\n pattern2 = re.compile(\"^([\\wа-яА-ЯёЁ]+)[\\s*\\,?]([\\wа-яА-ЯёЁ]+)[\\s*\\,?]([\\wа-яА-ЯёЁ]+)?\")\r\n new_fio = pattern2.sub(r\"\\1 \\2 \\3 \", fio)\r\n contact[0] = new_fio.split(' ')[0]\r\n contact[1] = new_fio.split(' ')[1]\r\n contact[2] = new_fio.split(' ')[2]\r\n new_list.append(contact)\r\n\r\n\r\nfor i in range(len(new_list)):\r\n n = 0\r\n for cont in new_list:\r\n # print(cont)\r\n if cont[0] == new_list[i-1][0] and cont[1] == new_list[i-1][1]:\r\n n += 1\r\n if n >= 2:\r\n for m in range(2, len(cont)-1):\r\n if cont[m] == '':\r\n cont[m] = new_list[i-1][m]\r\n\r\n\r\nwith open(\"phonebook.csv\", \"w\") as f:\r\n datawriter = csv.writer(f, delimiter=',')\r\n datawriter.writerows(new_list)\r\n"
}
] | 1 |
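HW_RE.py normalizes phone numbers with a single substitution over capture groups. Here is a standalone demonstration of that exact pattern; the sample input number is an assumption for illustration, not taken from phonebook_raw.csv:

import re

pattern = re.compile(r"(\+7|8)\s*[\(\s*]?(\d{3})[\)\s*\-*]?\s*(\d{3})[-\s]?(\d{2})[-\s]?(\d+)")
print(pattern.sub(r"+7(\2)\3-\4-\5", "8 (495) 123-45-67"))  # +7(495)123-45-67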
Hardeepsingh980/DeeNotes-Notes-Share | https://github.com/Hardeepsingh980/DeeNotes-Notes-Share | a03840500a24444caad93c569aebda86c60dbc32 | 2272265652bd8623935f4f244b3c4ce33b8b23dd | 145d4c2c60499a276f808146001ede87ccd3346f | refs/heads/master | 2020-06-14T00:17:22.822240 | 2019-07-02T09:43:40 | 2019-07-02T09:43:40 | 194,833,531 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7218683362007141,
"alphanum_fraction": 0.747346043586731,
"avg_line_length": 15.821428298950195,
"blob_id": "027ede1eb74aea11b0701a9c2a2168573b3e9f45",
"content_id": "c9816388b1c5fd09ca23c9b7befa4fa9a66c67c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 471,
"license_type": "permissive",
"max_line_length": 187,
"num_lines": 28,
"path": "/README.md",
"repo_name": "Hardeepsingh980/DeeNotes-Notes-Share",
"src_encoding": "UTF-8",
"text": "# DeeNotes\n\n## Summary\nThis application can be used to share and view Notes with each other online. The problem of getting notes in school or collages is a major issue. This application can resolve the problem.\n\n## Features\n\n1. Upload Notes\n\n2. View Notes uploaded by anyone.\n\n3. Download notes\n\n4. Settings\n\n5. Delete Notes\n\n## Requirements\n1. Python 3.7 or above\n\n2. Socket module\n\n3. Tkinter module\n\n## Screenshots\n\n\n\n"
},
{
"alpha_fraction": 0.5229681730270386,
"alphanum_fraction": 0.5512367486953735,
"avg_line_length": 17.517240524291992,
"blob_id": "6e054ee5b389c492751ab45d53042f205b47a34c",
"content_id": "403ddc5886ceeac40b42c63887cdb053f70b05ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 566,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 29,
"path": "/server.py",
"repo_name": "Hardeepsingh980/DeeNotes-Notes-Share",
"src_encoding": "UTF-8",
"text": "import socket\r\nimport _thread\r\n\r\nclients = []\r\n\r\ns = socket.socket()\r\ns.bind(('',5000))\r\ns.listen(5)\r\n\r\n\r\n\r\ndef recv_msg(conn):\r\n while True:\r\n msg = conn.recv(10000000).decode('utf-8')\r\n if 'post!@!' in msg:\r\n msg = msg.split('!@!')[1]\r\n send_to_all(msg)\r\n\r\n\r\ndef send_to_all(msg):\r\n for client in clients:\r\n client.send(msg.encode('utf-8'))\r\n\r\n\r\nwhile True:\r\n conn, addr = s.accept()\r\n clients.append(conn)\r\n print('Connection Establised With ', str(addr))\r\n _thread.start_new_thread(recv_msg,(conn,))\r\n"
},
{
"alpha_fraction": 0.405061811208725,
"alphanum_fraction": 0.44448330998420715,
"avg_line_length": 38.06074905395508,
"blob_id": "907aee7d61f29f61e24ea9e6d90ff8bd29d287a3",
"content_id": "fb073f7c842f854e70ec8efbafb43b1897706a58",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17148,
"license_type": "permissive",
"max_line_length": 319,
"num_lines": 428,
"path": "/main.py",
"repo_name": "Hardeepsingh980/DeeNotes-Notes-Share",
"src_encoding": "UTF-8",
"text": "## ----------------------- Import Modules ----------------------------------------\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox as mb\r\nfrom tkinter import colorchooser\r\nimport os\r\nimport socket\r\nimport _thread\r\n##--------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n##---------------------- Variables ---------------------------------------\r\nmenu_counter = 2\r\ncur_note = -1\r\nbg_color = 'light green'\r\n##--------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n##-------------------------------- Class Main_ --------------------------------\r\nclass Main_():\r\n\r\n ##-------------------------------- About Function -------------------------------------------\r\n def about(self):\r\n mb.showinfo('About Us','This is the exclusive distribution of DeeNotes created by Hardeep Singh. This Application was developed with Python Language. This for Created for the students who face problems in taking notes. So, this application was created to help the students share the notes with other fellow students.')\r\n ##------------------------------------------------------------------------------------------------\r\n\r\n\r\n \r\n\r\n ##-------------------------------- Change Background --------------------------------------\r\n def tcolor(self):\r\n global bg_color\r\n clr = colorchooser.askcolor(title='Select Color')\r\n co = clr[1] \r\n bg_color = co\r\n bg_label1['bg'] = bg_color\r\n menu_bg['bg'] = bg_color\r\n logo_bg['bg'] = bg_color\r\n menu_frame['bg'] = bg_color\r\n log_img['bg'] = bg_color\r\n log_in_as['bg'] = bg_color\r\n name['bg'] = bg_color\r\n log_bg['bg'] = bg_color\r\n upload_l['bg'] = bg_color\r\n down_l['bg'] = bg_color\r\n setting_l['bg'] = bg_color\r\n about_l['bg'] = bg_color\r\n home_l['bg'] = bg_color\r\n set_bg['bg'] = bg_color\r\n ##-------------------------------------------------------------------------------------------------\r\n\r\n\r\n \r\n\r\n ##------------------------------------ Setting Window --------------------------------------------\r\n def setting_func(self):\r\n global set_bg\r\n set_ = Toplevel()\r\n set_.geometry('300x300')\r\n set_.resizable(0,0)\r\n set_.title('Settings')\r\n set_bg = Label(set_, text='Settings', bg=bg_color, font=('',20),width=300,relief='groove')\r\n set_bg.pack()\r\n Label(set_, text='Change Background', font=('',13)).place(x=10,y=50)\r\n Button(set_, text='Select',font=('',13),command=self.tcolor).place(x=200,y=45)\r\n ##------------------------------------------------------------------------------------------------------\r\n\r\n\r\n \r\n\r\n ##------------------------------ Delete Notes Function ---------------------------------------------\r\n def delete_func(self):\r\n global cur_note\r\n dir_l = os.listdir('Notes')\r\n use = dir_l[cur_note]\r\n os.remove(f'Notes/{use}')\r\n dir_l = os.listdir('Notes')\r\n c = 0\r\n for i in dir_l:\r\n os.rename(f'Notes/{i}',f'Notes/{c}.txt')\r\n c+=1\r\n\r\n self.next_func()\r\n mb.showinfo('Success','Note Deleted Successfully.')\r\n ##--------------------------------------------------------------------------------------------------------\r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n ##------------------------ Upload Notes Function ---------------------------------\r\n def upload_n_func(self):\r\n user = name_entry.get()\r\n title = title_entry.get()\r\n data = text_u.get(1.0,END)\r\n if user != '' and title != '' and data != '':\r\n to_write = 
f'post!@!{user}@!{title}@!{data}'\r\n s.send(to_write.encode('utf-8'))\r\n mb.showinfo('Success','Notes Successfully Uploaded.')\r\n self.home_func()\r\n else:\r\n mb.showerror('Fill','Fill All The Columns.')\r\n ##---------------------------------------------------------------------------------------\r\n\r\n\r\n \r\n\r\n \r\n\r\n ##------------------------------------ Home Function ---------------------------------------\r\n def home_func(self):\r\n global menu_counter\r\n user_label.place(x=60,y=51)\r\n title_label.place(x=60,y=75)\r\n text_.place(x=10,y=100)\r\n frame.place(x=10,y=50)\r\n log_a_label.place(x=13,y=52)\r\n dots_b.place(x=320,y=60)\r\n next_b.place(x=200,y=395)\r\n pre_b.place(x=5,y=395)\r\n name_label.place(x=1000)\r\n name_entry.place(x=1000)\r\n title_u_label.place(x=1000)\r\n title_entry.place(x=1000)\r\n text_u_label.place(x=1000)\r\n text_u.place(x=1000)\r\n attach_u.place(x=1000)\r\n upload_u_b.place(x=1000)\r\n cancel_u_b.place(x=1000)\r\n upload_l.place(x=1000,y=1000)\r\n down_l.place(x=1000,y=1000)\r\n setting_l.place(x=1000,y=1000)\r\n about_l.place(x=1000,y=1000)\r\n menu_frame.place(x=1000,y=1000)\r\n log_img.place(x=1000,y=1000)\r\n home_l.place(x=1000)\r\n name.place(x=1000,y=1000)\r\n log_bg.place(x=1000,y=1000)\r\n log_in_as.place(x=1000,y=1000)\r\n comp_label.place(x=1000)\r\n menu_counter+=1\r\n ##------------------------------------------------------------------------------------------------\r\n\r\n \r\n\r\n ##------------------------------- Attach Notes Functions ---------------------------------------\r\n def attach_func(self):\r\n returned = filedialog.askopenfile(initialdir='E:\\\\', title='Select file to open')\r\n if returned != None:\r\n for line in returned:\r\n text_u.insert(END, line)\r\n returned.close()\r\n #------------------------------------------------------------------------------------------------------\r\n \r\n \r\n ##---------------- Upload Note Function ---------------------------------\r\n def upload_note_func(self):\r\n global menu_counter\r\n user_label.place(x=1000)\r\n title_label.place(x=1000)\r\n text_.place(x=1000)\r\n frame.place(x=1000)\r\n log_a_label.place(x=1000)\r\n dots_b.place(x=1000)\r\n menu_frame.place(x=1000,y=1000)\r\n log_img.place(x=1000,y=1000)\r\n log_in_as.place(x=1000,y=1000)\r\n name.place(x=1000,y=1000)\r\n log_bg.place(x=1000,y=1000)\r\n upload_l.place(x=1000,y=1000)\r\n down_l.place(x=1000,y=1000)\r\n setting_l.place(x=1000,y=1000)\r\n about_l.place(x=1000,y=1000)\r\n next_b.place(x=1000)\r\n pre_b.place(x=1000)\r\n menu_counter+=1\r\n name_label.place(x=20,y=50)\r\n name_entry.place(x=90,y=50)\r\n title_u_label.place(x=20,y=90)\r\n title_entry.place(x=90,y=90)\r\n text_u_label.place(x=20,y=150)\r\n text_u.place(x=20,y=180)\r\n attach_u.place(x=220,y=150)\r\n upload_u_b.place(x=170,y=420)\r\n cancel_u_b.place(x=240,y=420)\r\n home_l.place(x=1000)\r\n comp_label.place(x=1000)\r\n ##---------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n \r\n\r\n ##----------------------- Download Notes Function -------------------------------- \r\n def download_notes_func(self):\r\n f = filedialog.asksaveasfile(mode='w', defaultextension='.txt')\r\n if f is None:\r\n return\r\n global cur_note\r\n dir_l = os.listdir('Notes')\r\n use = dir_l[cur_note]\r\n file = open(f'Notes/{use}','r')\r\n data_l = file.read(10000).split('@!')\r\n user = data_l[0]\r\n title = data_l[1]\r\n note_data = data_l[2]\r\n to_write =f'{user} \\n\\nTitle : {title}\\n\\n{note_data}' \r\n 
f.write(to_write)\r\n f.close\r\n ##---------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n \r\n ##----------------------- Next Function ---------------------------------------------------\r\n def next_func(self):\r\n try:\r\n global cur_note\r\n dir_l = os.listdir('Notes')\r\n use = dir_l[cur_note+1]\r\n f = open(f'Notes/{use}','r')\r\n data_l = f.read(10000).split('@!')\r\n user = data_l[0]\r\n title = data_l[1]\r\n note_data = data_l[2]\r\n user_label['text'] = user\r\n title_label['text'] = 'Title : '+title\r\n text_.delete(1.0,END)\r\n text_.insert(INSERT, note_data)\r\n cur_note += 1\r\n except:\r\n global all_caught\r\n user_label.place(x=1000)\r\n title_label.place(x=1000)\r\n text_.place(x=1000)\r\n frame.place(x=1000)\r\n comp_label.place(x=10,y=100)\r\n log_a_label.place(x=1000)\r\n dots_b.place(x=1000)\r\n ##-----------------------------------------------------------------------------------------------\r\n\r\n \r\n\r\n\r\n\r\n ##-------------------------------------- Previous Function -----------------------------------------\r\n def prev_func(self):\r\n try:\r\n global cur_note\r\n if cur_note <= 0:\r\n return\r\n \r\n else:\r\n dir_l = os.listdir('Notes')\r\n use = dir_l[cur_note-1]\r\n f = open(f'Notes/{use}','r')\r\n data_l = f.read(10000).split('@!')\r\n user = data_l[0]\r\n title = data_l[1]\r\n note_data = data_l[2]\r\n user_label['text'] = user\r\n title_label['text'] = 'Title : '+title\r\n text_.delete(1.0,END)\r\n text_.insert(INSERT, note_data)\r\n cur_note -= 1\r\n except:\r\n pass\r\n ##--------------------------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n \r\n #----------------------------------------------- Menu Function --------------------------------------------------------------\r\n def menu_func(self):\r\n global menu_counter\r\n if menu_counter%2 != 0:\r\n menu_frame.place(x=1000,y=1000)\r\n log_img.place(x=1000,y=1000)\r\n log_in_as.place(x=1000,y=1000)\r\n name.place(x=1000,y=1000)\r\n log_bg.place(x=1000,y=1000)\r\n upload_l.place(x=1000,y=1000)\r\n down_l.place(x=1000,y=1000)\r\n setting_l.place(x=1000,y=1000)\r\n about_l.place(x=1000,y=1000)\r\n home_l.place(x=1000) \r\n else:\r\n menu_frame.place(x=0,y=38)\r\n log_bg.place(x=0,y=38)\r\n log_img.place(x=10,y=50)\r\n log_in_as.place(x=10,y=120)\r\n name.place(x=5,y=145)\r\n home_l.place(x=5,y=180)\r\n upload_l.place(x=5,y=220)\r\n down_l.place(x=5,y=260)\r\n setting_l.place(x=5,y=310)\r\n about_l.place(x=5,y=350)\r\n menu_counter+=1\r\n ##-----------------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n ## ----------------------------- Contructor Function -------------------------------------------------------------------------------- \r\n def __init__(self):\r\n global bg_label1,menu_bg,logo_bg\r\n win = Tk()\r\n win.geometry('350x450')\r\n win.resizable(0,0)\r\n win.title('DeeNotes')\r\n win.configure(bg='white')\r\n bg_label1 = Label(win, text='',width=35,bg=bg_color,font=('',20),relief='groove')\r\n bg_label1.pack()\r\n menu_img = PhotoImage(file='resources/menu.png') \r\n menu_bg = Button(win, image=menu_img,bg=bg_color,bd=0,command=self.menu_func)\r\n menu_bg.place(x=10,y=9)\r\n logo_img = PhotoImage(file='resources/logo.png')\r\n logo_bg = Label(win, image=logo_img,bg=bg_color)\r\n logo_bg.place(x=240,y=9)\r\n\r\n\r\n ## Canvas\r\n global user_label, title_label, text_,frame,comp_label,log_a_label,dots_b, 
next_b, pre_b\r\n frame = Label(win, text='',bg='white',width=14,relief='groove',font=('',30))\r\n frame.place(x=10,y=50)\r\n log_a_img = PhotoImage(file='resources/log_a.png')\r\n log_a_label = Label(win, image=log_a_img,bg='white')\r\n log_a_label.place(x=13,y=52) \r\n user_label = Label(win,text='',bg='white',font=('',13))\r\n user_label.place(x=60,y=51)\r\n title_label = Label(win, text='',bg='white',font=('',11,'italic'))\r\n title_label.place(x=60,y=75)\r\n text_ = Text(win,font=('',13),wrap='word',width=36,bd=2,height=15)\r\n text_.place(x=10,y=100)\r\n dots_img = PhotoImage(file='resources/dots.png')\r\n dots_b = Button(win, image=dots_img,bd=0,bg='white')\r\n dots_b.place(x=320,y=60)\r\n pop = Menu(win, tearoff=0) \r\n pop.add_command(label='Download Notes',command=self.download_notes_func)\r\n pop.add_separator()\r\n pop.add_command(label='Delete',command=self.delete_func)\r\n pop.add_separator()\r\n pop.add_command(label='Report Notes')\r\n self.next_func()\r\n def do(event):\r\n try:\r\n pop.tk_popup(event.x_root,event.y_root,0)\r\n finally:\r\n pop.grab_release\r\n dots_b.bind('<Button-1>',do)\r\n next_img = PhotoImage(file='resources/next.png')\r\n next_b = Button(win, image=next_img,text='Next Note ',bg='white',bd=0,compound='right',font=('',13,'bold'),command=self.next_func)\r\n next_b.place(x=200,y=395)\r\n pre_img = PhotoImage(file='resources/prev.png')\r\n pre_b = Button(win, image=pre_img,text=' Previous Note',bg='white',bd=0,compound='left',font=('',13,'bold'),command=self.prev_func)\r\n pre_b.place(x=5,y=395)\r\n comp_img = PhotoImage(file='resources/comp.png')\r\n comp_label = Label(win, image=comp_img)\r\n\r\n\r\n ## Upload Canvas\r\n global name_label, name_entry,title_u_label, title_entry,text_u_label,text_u,attach_u,upload_u_b,cancel_u_b\r\n name_label = Label(win, text='Name : ',bg='white',font=('',13,'bold'))\r\n name_entry = Entry(win,font=('',13),bd=2 )\r\n title_u_label = Label(win, text='Title : ',bg='white',font=('',13,'bold'))\r\n title_entry = Entry(win,font=('',13),bd=2 )\r\n text_u_label = Label(win, text='Write Notes : ',font=('',13,'bold'),bg='white')\r\n text_u = Text(win, font=('',13,'bold'),width=34,bd=2,height=12,wrap = 'word')\r\n attach_img = PhotoImage(file='resources/attach.png')\r\n attach_u = Button(win, image=attach_img,text='Attach Notes',bd=0,compound='left',font=('',10),bg='white',command=self.attach_func)\r\n upload_u_b = Button(win, text='Upload',bg='Green',fg='white',bd=0,font=('',11),command=self.upload_n_func)\r\n cancel_u_b = Button(win, text='Cancel',bg='Red',fg='white',bd=0,font=('',11),command=self.home_func)\r\n\r\n\r\n ## Menu Bar\r\n global menu_frame,log_img, log_in_as,name,log_bg,upload_l,down_l,setting_l,about_l,home_l\r\n menu_frame = Label(win,bg=bg_color,width=20,height=27,relief='groove')\r\n log_bg = Label(win,bg=bg_color,width=20,height=9,relief='groove')\r\n login_img =PhotoImage(file='resources/login.png')\r\n log_img = Label(win,image=login_img,bg=bg_color)\r\n log_in_as = Label(win, text='Logged In As : ',bg=bg_color,font=('',13))\r\n name = Label(win, text='Hardeep Singh',bg=bg_color,font=('',13,'bold'))\r\n home_img = PhotoImage(file='resources/home.png')\r\n home_l = Button(win, image=home_img,text='Home',font=('',13),bg=bg_color,bd=0,compound='left',command=self.home_func)\r\n upload_img = PhotoImage(file='resources/upload.png')\r\n upload_l = Button(win,image=upload_img,text='Upload Notes',bg=bg_color,bd=0,compound='left',font=('',13),command=self.upload_note_func)\r\n down_img = 
PhotoImage(file='resources/download.png')\r\n down_l = Button(win, image=down_img,text='Downloaded \\nNotes',font=('',13),bg=bg_color,bd=0,compound='left')\r\n setting_img = PhotoImage(file='resources/setting.png')\r\n setting_l = Button(win, image=setting_img,text='Settings',font=('',13),bg=bg_color,bd=0,compound='left',command=self.setting_func)\r\n about_img = PhotoImage(file='resources/about.png')\r\n about_l = Button(win, image=about_img,text='About',font=('',13),bg=bg_color,bd=0,compound='left',command=self.about)\r\n\r\n\r\n ## Mainloop\r\n win.mainloop()\r\n ##-------------------------------------------------------------------------------------------------------\r\n##----------------------------------------------------------------------------------------------------------\r\n\r\n\r\n\r\ndef recv_msg():\r\n while True:\r\n msg = s.recv(10000000).decode('utf-8')\r\n len_l = len(os.listdir('Notes'))\r\n f = open(f'Notes/{len_l}.txt','w')\r\n f.write(msg)\r\n f.close()\r\n\r\n\r\n\r\ndef create_socket():\r\n global s\r\n s = socket.socket()\r\n s.connect(('localhost',5000))\r\n _thread.start_new_thread(recv_msg,())\r\n\r\n## Object For Main_ Class\r\n_thread.start_new_thread(create_socket,())\r\nDeeNotes = Main_()\r\n\r\n"
}
] | 3 |
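DeeNotes uses a plain delimited string as its wire format: the GUI sends 'post!@!<name>@!<title>@!<body>' and server.py relays the part after 'post!@!' to every connected client. Below is a minimal client sketch of that protocol; it assumes server.py from the records above is already listening on localhost:5000, and post_note is an illustrative helper name, not part of the repo:

import socket

def post_note(sock, user, title, body):
    # Same delimiter layout that upload_n_func builds in main.py.
    sock.send(f'post!@!{user}@!{title}@!{body}'.encode('utf-8'))

s = socket.socket()
s.connect(('localhost', 5000))
post_note(s, 'Hardeep Singh', 'Demo', 'Shared note text')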
romulovieira777/Hacker_Rank | https://github.com/romulovieira777/Hacker_Rank | fe5e7a122cadd473e01564bd3bed17277d961c94 | 75799d7adff8b5d3d45aaf83d46ef782bc7bbdb0 | 4eeba1537bab3a2ddab7c69a335bf6db5c78c3e0 | refs/heads/master | 2023-07-24T01:56:02.860344 | 2023-07-05T15:08:12 | 2023-07-05T15:08:12 | 263,773,780 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.584645688533783,
"alphanum_fraction": 0.624015748500824,
"avg_line_length": 18.921567916870117,
"blob_id": "d447eaa31af65e56c3e8c0d2ca0d1b690791d2fa",
"content_id": "023e9f89667a883450dcf1a9ca55c3b31337ad7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 51,
"path": "/Python/Exercises/Python_If_Else.py",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTask\n\nGiven an integer, n, perform the following conditional actions:\n -If n is odd, print Weird\n -If n is even and in the inclusive range of 2 to 5, print Not Weird\n -If n is even and in the inclusive range of 6 to 20, print Weird\n -If n is even and greater than 20, print Not Weird\n\nInput Format\n\nA single line containing a positive integer, n.\n\nConstraints\n - 1 <= n <= 100\n\nOutput Format\n\nPrint Weird if the number is weird. otherwise, print Not Weird.\n\nSample Input 0\n3\n\nSample Output 0\nWeird\n\nExlpaination 0\nn = 3\nn is odd and odd numbers are weird, so we print Weird.\n\nSample Input 1\n24\n\nSample Output 1\nNot Weird\n\nExplanation 1\nn = 24\nn > 20 and n is even, so it isn't weird. Thus, we print Not Weird.\n\"\"\"\n\nif __name__ == '__main__':\n n = int(input().strip())\n if n % 2 == 1:\n print(\"Weird\")\n elif n % 2 == 0 and 2 <= n <= 5:\n print(\"Not Weird\")\n elif n % 2 == 0 and 6 <= n <= 20:\n print(\"Weird\")\n elif n % 2 == 0 and n > 20:\n print(\"Not Weird\")\n"
},
{
"alpha_fraction": 0.6589147448539734,
"alphanum_fraction": 0.682170569896698,
"avg_line_length": 63.5,
"blob_id": "307d026baf403f0db2b8f5f20c1c928dd329e36b",
"content_id": "6f68287bbe9970eea099bbe6b5789d8ee3827721",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 4,
"path": "/README.md",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "# [Hacker Rank](https://www.hackerrank.com/) 👩🏻💻🤖👽🤯🐍🎲\n### 🚀 Códigos Feitos no Site Hacker Rank 💥\n* #### [Python](https://github.com/romulovieira777/Hacker_Rank/tree/master/Python)\n* #### [SQL](https://github.com/romulovieira777/Hacker_Rank/tree/master/SQL)\n"
},
{
"alpha_fraction": 0.7155025601387024,
"alphanum_fraction": 0.72742760181427,
"avg_line_length": 24.521739959716797,
"blob_id": "63478853debcbfd6b6515f9cb58c07ba21787fd5",
"content_id": "14c8a97a035acf1112d02c70d40dbcd6e44426f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 587,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 23,
"path": "/SQL/Certification_Test/Intermediate/01_Customer_Spending.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "/*\nList all customers who spent 25% os less than the average amount spent on all invoices. For each customer, display\ntheir name and the amount spent to 6 decimal places. Order the result by the amount spent from high to low.\n*/\nSELECT\n customer.customer_name AS customer_name\n, ROUND(SUM(invoice.total_price), 6) AS total_amount_spent\nFROM\n customer\nINNER JOIN\n invoice\nON\n customer.id = invoice.customer_id\nWHERE\n invoice.total_price <= 0.25 * (\n SELECT\n AVG(total_price)\n FROM\n invoice)\nGROUP BY\n customer.customer_name\nORDER BY\n total_amount_spent DESC;\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6841397881507874,
"avg_line_length": 17.14634132385254,
"blob_id": "a037e678ce3d7a13244698d4318cb62e29338e77",
"content_id": "7c5403b75df99787ecce01acec7fa02deb5b6a1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 744,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 41,
"path": "/Python/Exercises/Python_Division.py",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTask\nThe provided code stub reads two integers, a and b, from STDIN.\n\nAdd logic to print two lines. The first line should contain the result of integer division, a // b. The second line\nshould contain the result of float division, a / b.\n\nNo rounding or formatting is necessary.\n\nExample\na = 3\nb = 5\n\n- The result of the integer division 3 // 5 = 0.\n- The result of the float division is 3 / 5 = 0.6.\n\nPrint:\n0\n0.6\n\nInput Format\nThe first line contains the first integer, a.\nThe second line contains the second integer, b.\n\nOutput Format\nPrint the two lines as described above.\n\nSample Input 0\n4\n3\n\nSample Output 0\n1\n1.33333333333\n\"\"\"\n\nif __name__ == '__main__':\n a = int(input())\n b = int(input())\n print(a // b)\n print(a / b)\n"
},
{
"alpha_fraction": 0.6888889074325562,
"alphanum_fraction": 0.6888889074325562,
"avg_line_length": 14,
"blob_id": "20bb20221068d05db428e9f651b92e56f34db622",
"content_id": "e2d8561951ec75e38c3db418b860a353cafb3782",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 6,
"path": "/SQL/Exercises/Revising_Aggregations_Averages.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "SELECT\n AVG(POPULATION) AS POPULATION\nFROM\n CITY\nWHERE\n DISTRICT = 'CALIFORNIA';\n"
},
{
"alpha_fraction": 0.6235294342041016,
"alphanum_fraction": 0.6640523076057434,
"avg_line_length": 13.166666984558105,
"blob_id": "7263216c381b400b9e1e5775a427341edd777f58",
"content_id": "1b5a2e793d252a3b89a343c0762b2a54b9d45fcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 54,
"path": "/Python/Exercises/Arithmetic_Operators.py",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTask\n\nThe provided code stub reads two integers from STDIN, and . Add code to print three lines where:\n\nThe first line contains the sum of the two numbers.\nThe second line contains the difference of the two numbers (first - second).\nThe third line contains the product of the two numbers.\nExample\n\n\nPrint the following:\n\n8\n-2\n15\n\nInput Format\n\nThe first line contains the first integer, a.\nThe second line contains the second integer, b.\n\nConstraints\n\n1 <_ a <_ 10^10\n1 <_ b <_ 10^10\n\nOutput Format\n\nPrint the three lines as explained above.\n\nSample Input 0\n\n3\n2\n\nSample Output 0\n\n5\n1\n6\n\nExplanation 0\n3 + 2 -> 5\n3 - 2 -> 1\n3 * 2 -> 6\n\"\"\"\n\nif __name__ == '__main__':\n a = int(input())\n b = int(input())\n print(a + b)\n print(a - b)\n print(a * b)\n"
},
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 17.5,
"blob_id": "b3a5f29f0c9f4224186524b8b4e25a94f6a67de0",
"content_id": "517b75653f776d89fec0b9de4fe04a97c752c6c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 4,
"path": "/SQL/Exercises/Population_Density_Difference.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "SELECT\n MAX(POPULATION) - MIN(POPULATION) AS POPULATION\nFROM\n CITY;\n"
},
{
"alpha_fraction": 0.42121896147727966,
"alphanum_fraction": 0.44785553216934204,
"avg_line_length": 35.31147384643555,
"blob_id": "00dd931689f6f85212275fe8ac8f58700ac1d5af",
"content_id": "f28c73647977a34dd9a696305a93dffbd8517311",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2215,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 61,
"path": "/SQL/Certification_Test/Advanced/01_Crypto_Market_Algorithms_Report.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "/*\n1 - Crypto Market Algorithms Report\n\nA number of algorithms are used to mine cryptocurrencies. As part of a comparison, create a query to return a list of\nalgorithms and their volumes for each quarter of the year 2020.\n\nThe result should be in the following format: Algorithm name, Q1, Q2, Q3, Q4 transactions.\n\n - Q1 through Q4 contain the sums of transactions volumes for the algorithm for each calendar quarter of 2020 precise\n to 6 places after the decimal.\n - Results should be sorted ascending by algorithm name.\n\n\nSCHEMA\n\nThere are two tables:\n\n-----------------------------------------------------------\n coins\n-----------------------------------------------------------\nname | type | description\n-----------------------------------------------------------\ncode | VARCHAR(4) | Coin code\n-----------------------------------------------------------\nname | VARCHAR(64) | Coin name\n-----------------------------------------------------------\nalgorithm | varchar(128) | Cryptocurrency algorithm name\n-----------------------------------------------------------\n\n\n-----------------------------------------------------------\n transactions\n-----------------------------------------------------------\nname | type | description\n-----------------------------------------------------------\ncoin_code | VARCHAR(4) | Coin code\n-----------------------------------------------------------\ndt | VARCHAR(19) | Transaction timestamp\n-----------------------------------------------------------\nvolume | DECIMAL(11,6)| Transaction volume\n-----------------------------------------------------------\n\n*/\nSELECT\n c.algorithm,\n ROUND(SUM(CASE WHEN MONTH(t.dt) IN (1,2,3) THEN t.volume ELSE 0 END), 6) AS Q1,\n ROUND(SUM(CASE WHEN MONTH(t.dt) IN (4,5,6) THEN t.volume ELSE 0 END), 6) AS Q2,\n ROUND(SUM(CASE WHEN MONTH(t.dt) IN (7,8,9) THEN t.volume ELSE 0 END), 6) AS Q3,\n ROUND(SUM(CASE WHEN MONTH(t.dt) IN (10,11,12) THEN t.volume ELSE 0 END), 6) AS Q4\nFROM\n coins c\nINNER JOIN\n transactions t\nON\n c.code = t.coin_code\nWHERE\n YEAR(t.dt) = 2020\nGROUP BY\n c.algorithm\nORDER BY\n c.algorithm;\n"
},
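The CASE WHEN sums in the report above are the standard SQL idiom for conditional aggregation: each transaction lands in exactly one quarterly bucket. Here is the same bucketing logic sketched in Python, with made-up rows rather than real dataset values:

from collections import defaultdict

def quarterly_volumes(rows):
    # rows: (algorithm, month, volume); months 1-3 go to Q1, 4-6 to Q2, ...
    out = defaultdict(lambda: [0.0] * 4)
    for algo, month, volume in rows:
        out[algo][(month - 1) // 3] += volume
    return dict(out)

print(quarterly_volumes([('SHA-256', 2, 1.5), ('SHA-256', 8, 2.0), ('Scrypt', 11, 0.25)]))
# {'SHA-256': [1.5, 0.0, 2.0, 0.0], 'Scrypt': [0.0, 0.0, 0.0, 0.25]}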
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 10,
"blob_id": "e5cb91143e2c4489687191e94f5a5b1bba56c24a",
"content_id": "edc25cf254de55bab99e50a7eb4a7d946c5a381b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 6,
"path": "/SQL/Exercises/Revising_Aggregations_The_Count_Function.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "SELECT\n COUNT(ID)\nFROM\n CITY\nWHERE\n POPULATION > 100000;\n"
},
{
"alpha_fraction": 0.5802047848701477,
"alphanum_fraction": 0.6143344640731812,
"avg_line_length": 12.627906799316406,
"blob_id": "141cb891d29187fc915a712600b2ede2a92b2cb8",
"content_id": "86731306c4df9c9743b9c75f89c1a45a992594da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 586,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 43,
"path": "/Python/Exercises/Loops.py",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTask\n\nThe provided code stub reads and integer, n, from STDIN. For all non-negative integers i < n, print.\n\nExample\n\nThe list of non-negative integers that are less than == 3 is [0, 1, 2]. Print the square of each number on a separate\nline.\n\n 0\n 1\n 4\n\nInput Format\n\nThe first and only line contains the integer, n.\n\nConstraints\n\n1 <= n <= 20\n\nOutput Format\n\nPrint n lines, one corresponding to each i.\n\nSample Input 0\n\n 5\n\nSample Output 0\n\n 0\n 1\n 4\n 9\n 16\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n\n for i in range(n):\n print(i ** 2)\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.6974359154701233,
"avg_line_length": 23.375,
"blob_id": "f85cf5d96842036c152a0e58bd00133605caae21",
"content_id": "08c2bb02eda0afebb79e6a563628c91367350fdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 8,
"path": "/SQL/Exercises/Weather_Observation_Station_05.sql",
"repo_name": "romulovieira777/Hacker_Rank",
"src_encoding": "UTF-8",
"text": "SELECT\n\tcity\n , length_city \nFROM\n\t(SELECT a.*, rownum r FROM (SELECT LENGTH(city) length_city, city FROM station\n\tORDER BY length_city, city)a)\nWHERE\n\tr IN (1, (SELECT COUNT(*) FROM station));\n"
}
] | 11 |
locvx1234/git-practice | https://github.com/locvx1234/git-practice | 92d82936b9417daeb1a7eb50974fafedaf430ee0 | be08118988c833bc45155a98c6be2695b324e2da | b33c840cbde080d11ea00e04eeb0f8774f8f6154 | refs/heads/master | 2021-08-23T13:46:35.979828 | 2017-12-05T03:53:57 | 2017-12-05T03:53:57 | 107,029,338 | 0 | 3 | null | 2017-10-15T16:33:23 | 2017-10-19T07:45:00 | 2017-10-21T15:34:05 | Python | [
{
"alpha_fraction": 0.6948130130767822,
"alphanum_fraction": 0.7080820202827454,
"avg_line_length": 24.8125,
"blob_id": "4eebc5778d7c9e876cdf294f79f1696efb2b6723",
"content_id": "968f2ac3c05f1a6c4a292f24efafbdc5c55be886",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 893,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 32,
"path": "/doc/Agents.md",
"repo_name": "locvx1234/git-practice",
"src_encoding": "UTF-8",
"text": "## Windows\n\n### [Git-scm](https://git-scm.com/)\n\n\n\n###### Git GUI\n\nGit GUI cung cấp một giao diện đồ họa với đầy đủ các chức năng như Merge, Commit, Push, Branch, ... \n\n\n\n###### Git Bash\n\nGit bash cung cấp giao diện console, sử dụng các command tương tự môi trường Linux/UNIX\n\n\n\n### [Github Desktop](https://desktop.github.com/)\n\nGitHub Desktop là công cụ Git Client do [GitHub](https://github.com) phát triển. Bộ công cụ cũng bao gồm cả bản GUI và Bash\n\nXem thêm về GitHub Desktop GUI: https://github.com/locvx1234/Git-GUI\n\n## Linux \n\nInstall git :\n\n```\n$ sudo apt-get update\n$ sudo apt-get install git -y\n```\n\n\n\n"
},
{
"alpha_fraction": 0.6689189076423645,
"alphanum_fraction": 0.6689189076423645,
"avg_line_length": 23.5,
"blob_id": "f01c547e0ce16d3e53ef308d32c384a847bc6669",
"content_id": "c0f40bea8d1ad576e846d42419c747fad560fe26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 6,
"path": "/README.md",
"repo_name": "locvx1234/git-practice",
"src_encoding": "UTF-8",
"text": "# git-practice\n\nDanh sách thư mục:\n \n- `code` : Lưu trữ các đoạn mã để luyện tập làm việc với git \n- `doc` : Ghi chép các vấn đề liên quan đến git \n"
},
{
"alpha_fraction": 0.6416465044021606,
"alphanum_fraction": 0.6440678238868713,
"avg_line_length": 28.5,
"blob_id": "654166f6622c72c1c57c59d3400f72ed814f79a5",
"content_id": "f56948a8cc7f2ce6823492aadc68990737d87e27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 14,
"path": "/code/selectionSort.py",
"repo_name": "locvx1234/git-practice",
"src_encoding": "UTF-8",
"text": "def selection_sort(array):\n\tsource = array\n\tfor i in range(len(source)):\n\t mini = min(source[i:]) #find minimum element\n\t min_index = source[i:].index(mini)-1 #find index of minimum element\n\t source[i + min_index]= source[i] #replace element at min_index with first element\n\t source[i] = mini #replace first element with min element\n\treturn source\n\t\nif __name__ == \"__name__\":\n##\n##\n\n###LONG\n"
},
{
"alpha_fraction": 0.5734708905220032,
"alphanum_fraction": 0.5754614472389221,
"avg_line_length": 18.443662643432617,
"blob_id": "e3d42d0e437036cff4cf4fdeac86bf45f8a23d76",
"content_id": "9f80f0044bc21d91621895aa0d1db3871762319d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6376,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 284,
"path": "/doc/Explain_Command.md",
"repo_name": "locvx1234/git-practice",
"src_encoding": "UTF-8",
"text": "## Config\n\n### Danh tính \n\nCấu hình username và email, các thông tin này sẽ gắn vào mỗi lần commit \n\n```\n$ git config --global user.name \"Loc VU\"\n$ git config --global user.email [email protected]\n```\n\nBiến `--global` giúp áp dụng thông tin này trên các project của hệ thống, chỉ phải cấu hình 1 lần đầu tiên\n\n### Editor \n\nMặc định là Vi hoặc Vim, có thể thay đổi editor bằng lệnh :\n\n```\n$ git config --global core.editor emacs\n```\n\n### Công cụ so sánh thay đổi \n\n```\n$ git config --global merge.tool vimdiff\n```\n\nCác công cụ khác : kdiff3, tkdiff, meld, xxdiff, emerge, vimdiff, gvimdiff, ecmerge, và opendiff\n\n### Kiểm tra cấu hình \n\n```\n$ git config --list\n```\n\n## Help\n\nCó 3 cú pháp dùng để tìm kiếm tài liệu của lệnh git\n\n```\n$ git help <verb>\n$ git <verb> --help\n$ man git-<verb>\n```\n\nVí dụ : \n\n```\n$ git help config\n```\n\n## Git basic\n\nKhởi tạo một thư mục git \n\n```\n$ git init\n```\n\nTổ chức các tập tin, tạo mới ảnh của các tập tin đó vào khu vực tổ chức\n\n```\n$ git add filename\n```\n\nCommit, ảnh của các tập tin trong khu vực tổ chức sẽ được lưu trữ vĩnh viễn vào thư mục Git\n\n```\n$ git commit -m \"commit message\"\n```\n\nTips: Thêm tùy chọn `-a` để bỏ qua giai đoạn staged mỗi lần commit \n\nSao chép một repository \n\n```\n$ git clone [url]\n```\n\nKiểm tra trạng thái tập tin \n\n```\n$ git status\n```\n\nSự thay đổi của tập tin chưa được staged \n\n```\n$ git diff\n```\n\nSự thay đổi của tập tin đã staged, chuẩn bị commit \n\n```\n$ git diff --staged\n``` \n\nTrước version 1.6.1 là `git diff --cached`\n\nXóa tập tin khỏi thư mục staged \n\n```\n$ git rm filename\n```\n\nTips: Sử dụng `-f` chức năng an toàn để đưa ra thông báo xác nhận xóa.\n\nGiữ tập tin trong thư mục làm việc nhưng không thêm vào khu vực tổ chức, ngoài việc thêm vào file `.gitognore`\n\n```\n$ git rm --cached readme.txt\n```\n\nDi chuyển file \n\n```\n$ git mv file_from file_to\n```\n\n## Git log \n\nLiệt kê các commit đã thực hiện trên repo đó, theo thứ tự commit mới nhất được hiển thị đầu tiên\n\n```\n$ git log\n```\n\nMột số tùy chọn sử dụng với `git log`\n\n\n| Tùy chọn | Mô tả |\n|-------------------|-------------------------------------------------------------------------------------|\n| `-p`\t\t | Hiển thị bản vá ở mỗi commit |\n| `--word-dif` | Hiển thị bản vá ở định dạng tổng quan (word) |\n| `--stat`\t | Hiển thị thống kê của các tập tin được chỉnh sửa trong mỗi commit |\n| `--shortstat` | \tChỉ hiển thị thay đổi/thêm mới/xoá bằng lệnh --stat |\n| `--name-only` | Hiển thị danh sách các tập tin đã thay đổi sau thông tin của commit |\n| `--name-status` | Hiển thị các tập tin bị ảnh hưởng với các thông tin như thêm mới/sửa/xoá |\n| `--abbrev-commit` | Chỉ hiện thị một số ký tự đầu của mã băm SHA-1 thay vì tất cả 40 |\n| `--relative-date` | Hiển thị ngày ở định dạng tương đối (ví dụ, \"2 weeks ago\") thay vì định dạng đầy đủ |\n| `--graph`\t\t | Hiển thị biểu đồ ASCII của nhánh và lịch sử tích hợp cùng với thông tin đầu ra khác |\n| `--pretty` \t | Hiện thị các commit sử dụng một định dạng khác. 
Các lựa chọn bao gồm oneline, short, full, fuller và format (cho phép bạn sử dụng định dạng riêng) |\n| `--oneline` \t | Một lựa chọn ngắn, thuận tiện cho `--pretty=oneline` `--abbrev-commit` |\n\n \nGiới hạn thông tin đầu ra \n\n`git log` nhận một số lựa chọn khác cho việc giới hạn thông tin xuất ra \n\n| Lựa chọn\t\t\t | Mô tả\t\t\t\t\t\t\t\t\t\t\t\t \t\t \t\t |\n|---------------------|--------------------------------------------------------------------------|\n| `-(n)`\t\t\t | Chỉ hiển thị n commit mới nhất \t\t\t\t\t\t\t |\n| `--since, --after` | Giới hạn các commit được thực hiện sau ngày nhất định\t\t\t\t\t |\n| `--until, --before` | Giới hạn các commit được thực hiện trước ngày nhất định \t\t\t\t |\n| `--author`\t\t | Chỉ hiện thị các commit mà tên tác giả thoả mãn điều kiện nhất định \t |\n| `--committer`\t\t |\tChỉ hiện thị các commit mà tên người commit thoả mãn điều kiện nhất định |\n\n\n## Undo \n\nThay đổi commit cuối cùng, lệnh này sẽ thực hiện commit một sự sửa đổi và chỉ tính như 1 commit \n\n```\n$ git commit --amend\n```\n\nUnstage một tập tin \n\n```\n$ git reset HEAD <file>\n```\n\nPhục hồi tập tin đã thay đổi \n\n```\ngit checkout -- <file>\n```\n\n## Remote server\n\nLiệt kê ngắn gọn remote server \n\n```\n$ git remote -v\n```\n\nThêm các Remote Repositorie\n\n```\n$ git remote add [shortname] [url]\n```\n\nLấy dữ liệu từ các Remote Repositorie\n\n```\n$ git fetch <remote>\n```\n\nĐẩy lên Remote của bạn\n\n```\ngit push <remote> <branch>\n```\n\nKiểm tra Remote\n\n```\n$ git remote show <remote>\n```\n\nĐổi tên Remote\n\n```\n$ git remote rename <old-remote> <new-remote>\n```\n\nXóa Remote\n\n```\n$ git remote remove <remote>\n```\n\n## Tagging\n\nLiệt kê tag\n\n```\n$ git tag\n```\n\nTạo Annotated Tags\n\n```\n$ git tag -a <tag> -m <message>\n```\n\nXem thông tin tag \n\n```\n$ git show <tag>\n```\n\nTạo Signed Tags\n\n```\n$ git tag -s <tag> -m <message>\n```\n\nTạo Lightweight Tags\n\n```\n$ git tag <tag>\n```\n\nXác thực tag \n\n```\ngit tag -v <tag>\n```\n\np/s: Lệnh sử dụng GPG để xác minh chữ ký, do vậy cần có public key của người ký \n\n\nTag commit trước đó\n\nSử dụng `$ git log --pretty=oneline` để xác định commit checksum cần tagging\n\n```\n$ git tag -a <tag> <checksum or a part of checksum>\n```\n\nSharing tag \n\n```\n$ git push origin <tag>\n```\n\nTips: `--tags` thay cho <tag> để push tất cả các tag\n\nChecking out Tags\n\n```\n$ git checkout <tag>\n```\n\n\n\n\n"
}
] | 4 |
sorao57/netalgos | https://github.com/sorao57/netalgos | 79f977e1c5de9e9bfdc4ab42d9b88c24c87a6ae4 | 37d524571e28a00eb62005387286b921f6aef167 | cb83e4d6c8dca65f7f344fdc2bf746c34f186668 | refs/heads/master | 2016-09-07T07:40:53.399385 | 2015-06-28T01:36:58 | 2015-06-28T01:36:58 | 38,116,241 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4316484332084656,
"alphanum_fraction": 0.44311273097991943,
"avg_line_length": 32.08620834350586,
"blob_id": "c65d6ae81422b5bae7f2aff92ff88a8c2e9ac31b",
"content_id": "96d55ef7cfa6ceabc837168b6d42d10a3dd3dd52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5757,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 174,
"path": "/clustering.py",
"repo_name": "sorao57/netalgos",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\nimport math\nfrom random import random, shuffle, choice\n\n\ndef facetnet(graph, comm, x=None, lmd=None, alpha=1.0, y=None, threshold=None):\n \"\"\"\n References\n ----------\n [1] Lin, Y. et al. \"Facetnet: a framework for analyzing communities\n and their evolutions in dynamic networks.\"\n In Proceedings of the 17th international conference on World Wide Web,\n pp. 685-694. ACM, 2008.\n \"\"\"\n def cost(__x, __lmd):\n cs = 0\n ct = 0\n for node in graph:\n for _, adj, d in graph.edges_iter(node, data=True):\n denom = 0\n for k in range(comm):\n denom += __x[node][k] * __x[adj][k] * __lmd[k]\n if d['weight'] > 0:\n cs += d['weight'] * math.log(d['weight'] / denom)\n if y is not None:\n for k in range(comm):\n if y[node][k] > 0:\n ct += (y[node][k] *\n math.log(y[node][k] / __x[node][k] / __lmd[k]))\n return alpha * cs + (1 - alpha) * ct\n\n def denom(__x, __lmd, __i, __j):\n d = 0\n for l in range(comm):\n d += __x[__i][l] * __x[__j][l] * __lmd[l]\n return d\n # initialization\n try:\n w_sum = 0.0\n for _, _, d in graph.edges_iter(data=True):\n w_sum += d['weight']\n for u, v in graph.edges_iter():\n graph[u][v]['weight'] /= w_sum\n except KeyError:\n for u, v in graph.edges_iter():\n graph[u][v]['weight'] = 0.5 / graph.number_of_edges()\n x_next = {node: {k: 0 for k in range(comm)} for node in graph}\n lmd_next = [0] * comm\n if x is None:\n x = deepcopy(x_next)\n for node in x:\n for k in range(comm):\n x[node][k] = random()\n for k in range(comm):\n col_sum = 0\n for node in x:\n col_sum += x[node][k]\n for node in x:\n x[node][k] /= col_sum\n if lmd is None:\n lmd = [1.0 / comm] * comm\n # search the local optimum\n c = cost(x, lmd)\n iter_num = 0\n while True:\n iter_num += 1\n # update x\n for k in range(comm):\n for node in graph:\n x_factor = 0\n for _, adj, d in graph.edges_iter(node, data=True):\n x_factor += (d['weight'] * lmd[k] * x[adj][k]) /\\\n denom(x, lmd, node, adj)\n x_next[node][k] = x[node][k] * 2 * alpha * x_factor\n if y is not None:\n x_next[node][k] += (1 - alpha) * y[node][k]\n x = deepcopy(x_next)\n for k in range(comm):\n x_sum = 0\n for node in x:\n x_sum += x[node][k]\n for node in x:\n x[node][k] /= x_sum\n # update lmd\n for k in range(comm):\n lmd_factor = 0\n for node in graph:\n for _, adj, d in graph.edges_iter(node, data=True):\n lmd_factor += (d['weight'] * x[node][k] * x[adj][k]) /\\\n denom(x, lmd, node, adj)\n lmd_next[k] = lmd[k] * alpha * lmd_factor\n if y is not None:\n y_sum = 0\n for node in y:\n y_sum += y[node][k]\n lmd_next[k] += (1 - alpha) * y_sum\n lmd = deepcopy(lmd_next)\n lmd_sum = 0\n for k in range(comm):\n lmd_sum += lmd[k]\n for k in range(comm):\n lmd[k] /= lmd_sum\n # terminal condition\n c_next = cost(x, lmd)\n if (threshold is None or c - c_next <= threshold):\n break\n c = c_next\n label = {}\n for node in graph:\n max_val = 0\n lab = 0\n for k, val in x[node].items():\n val *= lmd[k]\n if val > max_val:\n max_val = val\n lab = k\n label[node] = lab\n return {'x': x, 'lmd': lmd, 'label': label, 'iter_num': iter_num}\n\n\ndef slpa(graph, time, threshold):\n \"\"\"\n References\n ----------\n Xie, J. et al.\n \"Slpa: Uncovering overlapping communities in social networks\n via a speaker-listener interaction dynamic process.\"\n In Data Mining Workshops (ICDMW),\n 2011 IEEE 11th International Conference on, pp. 344-349. 
IEEE, 2011.\n \"\"\"\n mem = \"memory\"\n for n in graph:\n graph.node[n][mem] = [n]\n for i in range(time):\n ns = list(graph.nodes())\n shuffle(ns)\n for listener in ns:\n labels = {}\n # speaker rule\n for speaker in graph[listener]:\n signal = choice(graph.node[speaker][mem])\n if signal in labels:\n labels[signal] += 1\n else:\n labels[signal] = 1\n # listener rule\n num = 0\n lab = -1\n keys = list(labels.keys())\n shuffle(keys)\n for key in keys:\n val = labels[key]\n if val > num:\n num = val\n lab = key\n graph.node[listener][mem].append(lab)\n # set labels using memories\n cover = {}\n for n in graph:\n label_num = {}\n for lab in graph.node[n][mem]:\n if lab in label_num:\n label_num[lab] += 1\n else:\n label_num[lab] = 1\n for lab in label_num.keys():\n label_num[lab] /= float(time + 1)\n if label_num[lab] >= threshold:\n if lab not in cover:\n cover[lab] = set()\n cover[lab].add(n)\n del(graph.node[n][mem])\n graph.node[n][\"label\"] = label_num\n return cover\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 49,
"blob_id": "a0f91afcc67fc83020f0e8c46d263dd333a82131",
"content_id": "9d03157a0ab782c88c740c86701bf6f1917f879e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/__init__.py",
"repo_name": "sorao57/netalgos",
"src_encoding": "UTF-8",
"text": "__all__ = ['clustering', 'generative', 'measure']\n"
},
{
"alpha_fraction": 0.5411310791969299,
"alphanum_fraction": 0.544987142086029,
"avg_line_length": 26.785715103149414,
"blob_id": "a1f60ef81dde97681f6fef030097b8750596b1b3",
"content_id": "b05be4253bd784041acee406184dc060a295e4c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 778,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 28,
"path": "/measure.py",
"repo_name": "sorao57/netalgos",
"src_encoding": "UTF-8",
"text": "import math\n\n\ndef mutual_information(answer, prediction):\n # auxiliary function\n def increment_val(dictionary, key):\n try:\n dictionary[key] += 1\n except KeyError:\n dictionary[key] = 1\n # start\n ans_num = {}\n pred_num = {}\n ans_pred_num = {}\n for n in prediction:\n ans = answer[n]\n pred = prediction[n]\n increment_val(ans_num, ans)\n increment_val(pred_num, pred)\n increment_val(ans_pred_num, (ans, pred))\n for d in [ans_num, pred_num, ans_pred_num]:\n for key in d:\n d[key] /= len(prediction)\n info = 0\n for ans, pred in ans_pred_num:\n p_j = ans_pred_num[(ans, pred)]\n info += p_j * math.log(p_j / (ans_num[ans] * pred_num[pred]))\n return info\n"
},
{
"alpha_fraction": 0.46321070194244385,
"alphanum_fraction": 0.5016722679138184,
"avg_line_length": 25,
"blob_id": "d6af05f6c0a1aa6ba264d9bceaf7636479b15194",
"content_id": "ed9705367e83aa2de33bcd5850e632183d09e2bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 23,
"path": "/generative.py",
"repo_name": "sorao57/netalgos",
"src_encoding": "UTF-8",
"text": "import networkx as nx\nfrom random import random\n\n\ndef newman_girvan_community_graph(z):\n \"\"\"\n References\n ----------\n Newman, M.E.J. and Girvan, M.\n \"Finding and evaluating community structure in networks.\"\n Physical Review E 69, no. 2 (2004): 026113.\n \"\"\"\n G = nx.Graph()\n num = 32\n p_out = z / (3.0 * num)\n p_in = (16 - z) / (num - 1)\n for i in range(4 * num):\n G.add_node(i)\n for j in range(i + 1, 4 * num):\n rand = random()\n if (i // num == j // num and rand <= p_in) or rand <= p_out:\n G.add_edge(i, j)\n return G\n"
}
] | 4 |
Arjung27/Face_swap | https://github.com/Arjung27/Face_swap | 66366ce0eb2d81feec10f20f189f89af8302c026 | e2fabd648d73453f0dec8fd8dd64d8040751dbc1 | bf39a182a15521be26756feb2d6497b3effead20 | refs/heads/master | 2021-03-17T11:01:13.315495 | 2020-04-01T02:48:39 | 2020-04-01T02:48:39 | 246,984,489 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5567834973335266,
"alphanum_fraction": 0.5875170826911926,
"avg_line_length": 37.61788558959961,
"blob_id": "f166ad15367390ae7e00b5dcd8c952b56e039ba1",
"content_id": "3f830686788aabc7f13d7bf40555ef0da921589c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9501,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 246,
"path": "/tps.py",
"repo_name": "Arjung27/Face_swap",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport cv2\nimport dlib\nimport copy\nimport sys\nimport argparse\nimport math\nfrom scipy import interpolate\nfrom imutils import face_utils\nfrom Code.FeatureTrack import videoDetector\nfrom Code.DoubleTrack import videoDoubleDetector\n\ndef videoToImage(fname,tarname):\n cap = cv2.VideoCapture(fname)\n i=0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n cv2.imwrite(tarname+'/Img'+str(i)+'.jpg',frame)\n i+=1\n\n cap.release()\n cv2.destroyAllWindows()\n\ndef drawFaceLandmarks(img, rects):\n\n for (i,rects) in enumerate(rects):\n shape = predictor(img,rects)\n shape = face_utils.shape_to_np(shape)\n \n for (x,y) in shape:\n cv2.circle(img_drawn,(x,y),2,(0,255,0),-1)\n\ndef potentialEnergy(r):\n return (r**2)*(math.log(r**2))\n\ndef funcxy(index, points_tar, wt_x, wt_y):\n\n K = np.zeros((points_tar.shape[0], 1))\n value = np.zeros((index.shape[0],2))\n epsilon = 1e-11\n for j, pt1 in enumerate(index):\n for i, pt2 in enumerate(points_tar):\n K[i] = potentialEnergy(np.linalg.norm(pt2 - pt1, ord=2) + epsilon)\n \n # Implementing a1 + (a_x)x + (a_y)y + + np.matmul(K.T, wt[:-3])\n value[j,0] = wt_x[-1] + pt1[0]*wt_x[-3] + pt1[1]*wt_x[-2] + np.matmul(K.T, wt_x[:-3])\n value[j,1] = wt_y[-1] + pt1[0]*wt_y[-3] + pt1[1]*wt_y[-2] + np.matmul(K.T, wt_y[:-3])\n\n return value\n\ndef warp_images(img_tar, img_src, pt_tar, pt_src, wt_x, wt_y, K):\n\n # cv2.imshow(\"image\", img_tar)\n # cv2.waitKey(0)\n mask = np.zeros_like(img_tar)\n # img_gray = cv2.cvtColor(img_tar, cv2.COLOR_BGR2GRAY)\n convex_hull = cv2.convexHull(pt_tar, returnPoints = True)\n mask = cv2.fillConvexPoly(mask, convex_hull, (255,255,255))\n mask = mask[:,:,0]\n # mask = cv2.bitwise_and(img_gray, img_gray, mask=mask)\n # cv2.imshow(\"mask\", mask)\n # cv2.waitKey(0)\n\n pt1_min = np.asarray(([min(pt_tar[:,0]),min(pt_tar[:,1])])).astype(np.float32)\n pt2_min = np.asarray(([min(pt_src[:,0]),min(pt_src[:,1])])).astype(np.float32)\n pt1_max = np.asarray(([max(pt_tar[:,0]),max(pt_tar[:,1])])).astype(np.float32)\n pt2_max = np.asarray(([max(pt_src[:,0]),max(pt_src[:,1])])).astype(np.float32)\n\n x = np.arange(pt1_min[0],pt1_max[0]).astype(int)\n y = np.arange(pt1_min[1],pt1_max[1]).astype(int)\n # print(pt1_min[0],pt1_max[0], pt1_min[1],pt1_max[1], mask.shape)\n X,Y = np.mgrid[x[0]:x[-1],y[0]:y[-1]]\n X = np.reshape(X.flatten(), [X.shape[0]*X.shape[1],1])\n Y = np.reshape(Y.flatten(), [Y.shape[0]*Y.shape[1],1])\n index = np.hstack([X,Y])\n x_coord = np.zeros(((X.shape[0]),1))\n y_coord = np.zeros(((Y.shape[0]),1))\n\n value = funcxy(index, pt_tar, wt_x, wt_y)\n x_coord = value[:,0]\n x_coord[x_coord < pt2_min[0]] = pt2_min[0]\n x_coord[x_coord > pt2_max[0]] = pt2_max[0]\n y_coord = value[:,1]\n y_coord[y_coord < pt2_min[1]] = pt2_min[1]\n y_coord[y_coord > pt2_max[1]] = pt2_max[1]\n\n blue = interpolate.interp2d(range(img_src.shape[1]), range(img_src.shape[0]), img_src[:,:,0], kind='cubic')\n green = interpolate.interp2d(range(img_src.shape[1]), range(img_src.shape[0]), img_src[:,:,1], kind='cubic')\n red = interpolate.interp2d(range(img_src.shape[1]), range(img_src.shape[0]), img_src[:,:,2], kind='cubic')\n m = interpolate.interp2d(range(mask.shape[1]), range(mask.shape[0]), mask, kind='cubic')\n\n warped_img = img_tar.copy()\n mask_warped_img = np.zeros_like(warped_img[:,:,0])\n \n for a in range(x_coord.shape[0]):\n\n intesity = mask[index[a,1],index[a,0]]\n if intesity>0:\n warped_img[index[a,1],index[a,0],0] = blue(x_coord[a], y_coord[a])\n warped_img[index[a,1],index[a,0],1] 
= green(x_coord[a], y_coord[a])\n warped_img[index[a,1],index[a,0],2] = red(x_coord[a], y_coord[a])\n mask_warped_img[index[a,1],index[a,0]] = 255\n\n r = cv2.boundingRect(mask)\n center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))\n output = cv2.seamlessClone(warped_img, img_tar, mask, center, cv2.NORMAL_CLONE)\n\n return output\n\ndef initializeDlib(p):\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(p)\n\n return detector, predictor\n\ndef findFeatures(img, detector, predictor):\n\n rects = detector(img, 1)\n if len(rects) == 0:\n return False, 0\n else:\n for (i, rect) in enumerate(rects):\n\n shape = predictor(img, rect)\n shape = face_utils.shape_to_np(shape)\n\n return True, shape\n\ndef thinSplateSplineMat(points_tar, points_src):\n\n # Genrating the matrix [[K, P], [P.T, 0]] where P = (x,y,1)\n ones_mat = np.ones([points_tar.shape[0], 1])\n P = np.hstack([points_tar, ones_mat])\n P_trans = np.transpose(P)\n zero_mat = np.zeros((3,3))\n K = np.zeros([points_tar.shape[0], points_tar.shape[0]])\n epsilon = 1e-11\n for i in range(K.shape[0]):\n for j in range(K.shape[1]):\n K[i, j] = potentialEnergy(np.linalg.norm(points_tar[i] - points_tar[j], ord=2) + epsilon)\n\n row_one = np.hstack([K, P])\n row_two = np.hstack([P_trans, zero_mat])\n splate_mat = np.vstack([row_one, row_two])\n # Tune the labda for better results\n tune_lam = 400\n identity = tune_lam*np.identity(splate_mat.shape[0])\n splate_mat_inv = np.linalg.inv(splate_mat + identity)\n V = np.concatenate([points_src, np.zeros([3,])])\n V = np.reshape(V, [V.shape[0],1])\n wt_coord = np.matmul(splate_mat_inv, V)\n\n return wt_coord, K\n\ndef main_tps(Flags):\n\n target_video = Flags.video\n source_image = Flags.sourceImg\n method = Flags.method\n detector, predictor = initializeDlib(Flags.shape_predictor)\n # print(target_video)\n cap = cv2.VideoCapture(target_video)\n image_source = cv2.imread(source_image)\n ret, trial = cap.read()\n h, w, _ = trial.shape\n #print(h, w)\n vidWriter = cv2.VideoWriter(Flags.output_name,cv2.VideoWriter_fourcc(*'mp4v'), 24, (w, h))\n i = 0\n\n while (cap.isOpened()):\n\n print('Frame Number {}'.format(i))\n i += 1\n ret, img_target = cap.read()\n if ret == False:\n break\n # Creating copy of the target image\n img_tar = copy.deepcopy(img_target)\n img_src = image_source.copy()\n # Second parameter is the number of image pyramid layers to \n # apply when upscaling the image prior to applying the detector \n rects = detector(img_target, 1)\n index = np.max((0, len(rects)-2))\n\n if len(rects) == 1:\n\n img_tar = img_tar[int(rects[0].top()-50):int(rects[0].bottom()+50), \\\n int(rects[0].left()-50):int(rects[0].right()+50)]\n if len(rects) > 1:\n img_src = img_tar[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50), \\\n int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)]\n\n img_tar = img_tar[int(rects[len(rects)-2].top()-50):int(rects[len(rects)-2].bottom()+50), \\\n int(rects[len(rects)-2].left()-50):int(rects[len(rects)-2].right()+50)]\n\n if len(rects) > 0:\n\n flag_tar, points_tar = findFeatures(img_tar, detector, predictor)\n flag_src, points_src = findFeatures(img_src, detector, predictor)\n if (not flag_tar or not flag_src):\n continue\n\n wt_x, K = thinSplateSplineMat(points_tar, points_src[:,0])\n wt_y, K = thinSplateSplineMat(points_tar, points_src[:,1])\n warped = warp_images(img_tar, img_src, points_tar, points_src, wt_x, wt_y, K)\n img_target[int(rects[index].top()-50):int(rects[index].bottom()+50), \\\n 
int(rects[index].left()-50):int(rects[index].right()+50)] = warped\n\n #cv2.imshow(\"target\", img_target)\n #cv2.waitKey(0)\n vidWriter.write(img_target)\n\n if len(rects) > 1:\n\n wt_x, K = thinSplateSplineMat(points_src, points_src[:,0])\n wt_y, K = thinSplateSplineMat(points_src, points_src[:,1])\n warped = warp_images(img_src, img_tar, points_src, points_tar, wt_x, wt_y, K)\n img_target[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50), \\\n int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)] = warped\n\n vidWriter.write(img_target)\n\n else:\n vidWriter.write(img_target)\n continue\n\nif __name__ == '__main__':\n \n Parser = argparse.ArgumentParser()\n Parser.add_argument('--video', default='./TestSet_P2/Test1.mp4', help='Enter the path of target video')\n Parser.add_argument('--sourceImg', default='./TestSet_P2/Rambo.jpg', help='Enter the path of source image')\n Parser.add_argument('--method', default='tps', help='Type the name of the method')\n Parser.add_argument('--shape_predictor', default=\"shape_predictor_68_face_landmarks.dat\", help=\"Prdictor file\")\n Parser.add_argument('--output_name', default='./Data1OutputTPS.mp4', help='Name of the output file')\n Flags = Parser.parse_args()\n\n if Flags.method == 'tps':\n main_tps(Flags)\n elif Flags.method == 'Tri':\n print(Flags.output_name)\n videoDetector(Flags.video,Flags.sourceImg,Flags.output_name,Flags.shape_predictor)\n elif Flags.method == 'TriD':\n videoDoubleDetector(Flags.video,Flags.output_name,Flags.shape_predictor)\n\n"
},
{
"alpha_fraction": 0.5569605827331543,
"alphanum_fraction": 0.5835266709327698,
"avg_line_length": 40.647342681884766,
"blob_id": "45a9a24d46641e7c851f14ea078c4f385d6cf8a8",
"content_id": "22cbc4eeb2b1c02860dd22db5bddeee8aefd5938",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8620,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 207,
"path": "/PRNet/demo_texture.py",
"repo_name": "Arjung27/Face_swap",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nfrom glob import glob\nimport scipy.io as sio\nfrom skimage.io import imread, imsave\nfrom skimage.transform import rescale, resize\nfrom time import time\nimport argparse\nimport ast\nimport matplotlib.pyplot as plt\nimport argparse\nimport copy\nimport dlib\n\nfrom api import PRN\nfrom utils.render import render_texture\nimport cv2\n\ndef utility(prn, image, ref_image):\n\n [h, w, _] = image.shape\n\n #-- 1. 3d reconstruction -> get texture. \n pos = prn.process(image)\n if np.any(pos) == None:\n return None\n vertices = prn.get_vertices(pos)\n image = image/255.\n texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))\n \n # ref_image = cv2.imread(args.ref_path)\n # imsave('TestImages/ref.jpg', ref_image)\n ref_pos = prn.process(ref_image)\n ref_image = ref_image/255.\n ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))\n ref_vertices = prn.get_vertices(ref_pos)\n new_texture = ref_texture#(texture + ref_texture)/2\n\n vis_colors = np.ones((vertices.shape[0], 1))\n face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)\n face_mask = np.squeeze(face_mask > 0).astype(np.float32)\n \n new_colors = prn.get_colors_from_texture(new_texture)\n new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)\n new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]\n\n # Possion Editing for blending image\n vis_ind = np.argwhere(face_mask>0)\n vis_min = np.min(vis_ind, 0)\n vis_max = np.max(vis_ind, 0)\n center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))\n output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)\n\n return output\n\ndef initializeDlib(p):\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(p)\n\n return detector, predictor\n\ndef texture_editing(prn, args):\n # read image\n if args.video == 0:\n image = imread(args.image_path)\n [h, w, _] = image.shape\n\n #-- 1. 3d reconstruction -> get texture. \n pos = prn.process(image) \n vertices = prn.get_vertices(pos)\n image = image/255.\n texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))\n \n #-- 2. Texture Editing\n Mode = args.mode\n # change part of texture(for data augumentation/selfie editing. Here modify eyes for example)\n if Mode == 0: \n # load eye mask\n uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True)/255. 
\n uv_face = imread('Data/uv-data/uv_face.png', as_grey=True)/255.\n eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)\n\n # texture from another image or a processed texture\n ref_image = imread(args.ref_path)\n ref_pos = prn.process(ref_image)\n ref_image = ref_image/255.\n ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))\n\n # modify texture\n new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]\n \n # change whole face(face swap)\n elif Mode == 1 and (not args.video):\n\n # texture from another image or a processed texture\n ref_image = imread(args.ref_path)\n ref_pos = prn.process(ref_image)\n ref_image = ref_image/255.\n ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))\n ref_vertices = prn.get_vertices(ref_pos)\n new_texture = ref_texture#(texture + ref_texture)/2.\n\n elif Mode == 1 and (args.video):\n\n cap = cv2.VideoCapture(args.image_path)\n ret, trial = cap.read()\n h, w, _ = trial.shape\n print(h, w)\n vidWriter = cv2.VideoWriter(args.output_name,cv2.VideoWriter_fourcc(*'mp4v'), 24, (w, h))\n detector, predictor = initializeDlib(args.shape_predictor)\n i = 0\n\n while (cap.isOpened()):\n\n print('Frame Number {}'.format(i))\n i += 1\n ret, image = cap.read()\n if ret == False:\n break\n\n if args.face == 1:\n ref_image = cv2.imread(args.ref_path)\n output = utility(prn, image, ref_image)\n if np.any(output) == None:\n continue\n vidWriter.write(output)\n\n elif args.face == 2:\n \n img_tar = copy.deepcopy(image)\n rects = detector(image, 1)\n index = np.max((0, len(rects)-2))\n if len(rects) == 2:\n img_src = img_tar[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50), \\\n int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)]\n\n img_tar = img_tar[int(rects[len(rects)-2].top()-50):int(rects[len(rects)-2].bottom()+50), \\\n int(rects[len(rects)-2].left()-50):int(rects[len(rects)-2].right()+50)]\n\n warped = utility(prn, img_tar, img_src)\n if np.any(warped) == None:\n continue\n image[int(rects[index].top()-50):int(rects[index].bottom()+50), \\\n int(rects[index].left()-50):int(rects[index].right()+50)] = warped\n\n warped2 = utility(prn, img_src, img_tar)\n if np.any(warped2) == None:\n continue\n image[int(rects[len(rects)-1].top()-50):int(rects[len(rects)-1].bottom()+50), \\\n int(rects[len(rects)-1].left()-50):int(rects[len(rects)-1].right()+50)] = warped2\n vidWriter.write(image)\n # cv2.imshow(\"Double\", image)\n # cv2.waitKey(0)\n # cv2.imwrite(\"Test.jpg\",image)\n\n vidWriter.release()\n\n\n\n else:\n print('Wrong Mode or Input type! Mode and input type should be 0 or 1.')\n exit()\n\n\n #-- 3. 
remap to input image.(render)\n if args.video == 0:\n vis_colors = np.ones((vertices.shape[0], 1))\n face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)\n face_mask = np.squeeze(face_mask > 0).astype(np.float32)\n \n new_colors = prn.get_colors_from_texture(new_texture)\n new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)\n new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]\n\n # Possion Editing for blending image\n vis_ind = np.argwhere(face_mask>0)\n vis_min = np.min(vis_ind, 0)\n vis_max = np.max(vis_ind, 0)\n center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))\n output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)\n \n # save output\n imsave(args.output_path, output)\n # cv2.imwrite(args.output_path, output)\n print('Done.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Texture Editing by PRN')\n\n parser.add_argument('-i', '--image_path', default='TestImages/AFLW2000/image00081.jpg', type=str,\n help='path to input image')\n parser.add_argument('-r', '--ref_path', default='TestImages/trump.jpg', type=str, \n help='path to reference image(texture ref)')\n parser.add_argument('-o', '--output_path', default='TestImages/output.jpg', type=str, \n help='path to save output')\n parser.add_argument('--mode', default=1, type=int, \n help='ways to edit texture. 0 for modifying parts, 1 for changing whole')\n parser.add_argument('--gpu', default='0', type=str, \n help='set gpu id, -1 for CPU')\n\n # ---- init PRN\n os.environ['CUDA_VISIBLE_DEVICES'] = parser.parse_args().gpu # GPU number, -1 for CPU\n prn = PRN(is_dlib = True) \n\n texture_editing(prn, parser.parse_args())"
},
{
"alpha_fraction": 0.5510340332984924,
"alphanum_fraction": 0.6065710186958313,
"avg_line_length": 26.21917724609375,
"blob_id": "f7b94016ae260ea2ba508511563b27ad06618bdd",
"content_id": "0437bedc6fd8f3a9acf321e1deb760bbe21291b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5996,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 219,
"path": "/FaceSwap.py",
"repo_name": "Arjung27/Face_swap",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport cv2\nimport dlib\nimport imutils\nfrom imutils import face_utils\nfrom scipy.interpolate import interp2d\n\ndef videoToImage(fname,tarname):\n\tcap = cv2.VideoCapture(fname)\n\ti=0\n\twhile(cap.isOpened()):\n\t\tret,frame = cap.read()\n\t\tif ret == False:\n\t\t\tbreak\n\t\tcv2.imwrite(tarname+'/Img'+str(i)+'.jpg',frame)\n\t\ti+=1\n\n\tcap.release()\n\tcv2.destroyAllWindows()\n\ndef getFaceLandmarks(fname,p):\n\tdetector = dlib.get_frontal_face_detector()\n\tpredictor = dlib.shape_predictor(p)\n\timg = cv2.imread(fname)\n\t#img = imutils.resize(img,width = 320)\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tbbox = detector(gray,0)\n\tfor (i,bbox) in enumerate(bbox):\n\t\tshape = predictor(gray,bbox)\n\t\tshape = face_utils.shape_to_np(shape)\n\t\t\n\t\tfor (x,y) in shape:\n\t\t\tcv2.circle(img,(x,y),2,(0,255,0),-1)\n\n\treturn img,shape\n\ndef triangulation(land_points,img):\n\tpoints = np.array(land_points, np.int32)\n\tchull = cv2.convexHull(points)\n\trect = cv2.boundingRect(chull)\n\trectangle = np.asarray(rect)\n\tsubdiv = cv2.Subdiv2D(rect)\n\tp_list = []\n\tfor p in land_points:\n\t\tp_list.append((p[0],p[1]))\n\tfor p in p_list:\n\t\tsubdiv.insert(p)\n\ttriangles = subdiv.getTriangleList()\n\ttriangles = np.array(triangles, dtype=np.int32)\n\tvert = []\n\tVPoint = []\n\tpt = []\n\tfor t in triangles:\n\t\tpt.append((t[0], t[1]))\n\t\tpt.append((t[2], t[3]))\n\t\tpt.append((t[4], t[5]))\n\t\ttemp = []\n\t\tpt1 = (t[0], t[1])\n\t\tpt2 = (t[2], t[3])\n\t\tpt3 = (t[4], t[5])\n\t\t\n\t\tfor i in range(3):\n\t\t\tfor j in range(len(points)):\n\t\t\t\tif(abs(pt[i][0] - points[j][0]) < 1.0 and abs(pt[i][1] - points[j][1]) < 1.0):\n\t\t\t\t\ttemp.append(j)\n\t\tif len(temp)==3:\n\t\t\tvert.append((points[temp[0]],points[temp[1]],points[temp[2]]))\n\t\t\tVPoint.append((temp[0],temp[1],temp[2]))\n\t\tpt=[]\n\t\t\n\t\t'''\n\t\tfor i in range(len(points)):\n\t\t\tif (abs(pt1[0]-points[i][0])<1.0 and abs(pt1[1]-points[i][1])<1.0):\n\t\t\t\ttemp.append(i)\n\t\t\tif (abs(pt2[0]-points[i][0])<1.0 and abs(pt2[1]-points[i][1])<1.0):\n\t\t\t\ttemp.append(i)\n\t\t\tif (abs(pt3[0]-points[i][0])<1.0 and abs(pt3[1]-points[i][1])<1.0):\n\t\t\t\ttemp.append(i)\n\t\tif len(temp)==3:\n\t\t\tvert.append((points[temp[0]],points[temp[1]],points[temp[2]]))\n\t\t\tVPoint.append((temp[0],temp[1],temp[2]))\n\t\t'''\n\t\tcv2.line(img, tuple(points[temp[0]]), tuple(points[temp[1]]), (0, 0, 255), 2)\n\t\tcv2.line(img, tuple(points[temp[1]]), tuple(points[temp[2]]), (0, 0, 255), 2)\n\t\tcv2.line(img, tuple(points[temp[0]]), tuple(points[temp[2]]), (0, 0, 255), 2)\n\tvert = np.asarray(vert)\n\tVPoint = np.asarray(VPoint)\n\tchull = np.reshape(chull,(chull.shape[0],chull.shape[2]))\n\treturn img,vert,VPoint,chull\n\ndef doTriangulate(PIndex,TIndex,img):\n\tT = []\n\tfor ti in TIndex:\n\t\tT.append((PIndex[ti[0]],PIndex[ti[1]],PIndex[ti[2]]))\n\tT = np.asarray(T)\n\tfor t in T:\n\t\tcv2.line(img, tuple(t[0]), tuple(t[1]), (0, 0, 255), 2)\n\t\tcv2.line(img, tuple(t[1]), tuple(t[2]), (0, 0, 255), 2)\n\t\tcv2.line(img, tuple(t[0]), tuple(t[2]), (0, 0, 255), 2)\n\treturn T, img\n\ndef affineBary(img,ini_tri,fin_tri,size):\n\tsrc = ini_tri\n\tdst = fin_tri\n\tx_min = min(src[0,0],src[1,0],src[2,0])\n\tx_max = max(src[0,0],src[1,0],src[2,0])\n\ty_min = min(src[0,1],src[1,1],src[2,1])\n\ty_max = max(src[0,1],src[1,1],src[2,1])\n\tx = np.linspace(x_min, x_max, x_max-x_min+1)\n\ty = np.linspace(y_min, y_max, y_max-y_min+1)\n\tmesh = np.meshgrid(x,y)\n\tmesh = np.asarray(mesh)\n\tmesh = 
mesh.reshape(*mesh.shape[:1], -1)\n\tgrid = np.vstack((mesh, np.ones((1, mesh.shape[1]))))\n\tB = [[src[0][0],src[1][0],src[2][0]],[src[0][1],src[1][1],src[2][1]],[1,1,1]]\n\tB_inv = np.linalg.inv(B)\n\n\tbc = np.dot(B_inv,grid)\n\n\tZ = []\n\tD = []\n\tfor i in range(bc.shape[1]):\n\t\tif bc[0,i]+bc[1,i]+bc[2,i]-1<0.0001 and 0<=bc[0,i] and 0<=bc[1,i] and 0<=bc[2,i] and bc[0,i]<=1 and bc[1,i]<=1 and bc[2,i]<=1:\n\t\t\tZ.append(bc[:,i])\n\t\t\tD.append((grid[0,i],grid[1,i]))\n\n\tZ = np.asarray(Z)\n\tZ = Z.T\n\tD = np.asarray(D,dtype='int32')\n\tD = D.T\n\tA = [[dst[0][0],dst[1][0],dst[2][0]],[dst[0][1],dst[1][1],dst[2][1]],[1,1,1]]\n\tcoord = np.dot(A,Z)\n\txA = coord[0,:]/coord[2,:]\n\tyA = coord[1,:]/coord[2,:]\n\n\tC = [xA,yA]\n\tC = np.asarray(C)\n\n\treturn C,D\n\ndef interpolate(img,size,pts,det):\n\txi = np.linspace(0, img.shape[1], img.shape[1],endpoint=False)\n\tyi = np.linspace(0, img.shape[0], img.shape[0],endpoint=False)\n\t#dest = np.zeros((size[0],size[1],3), np.uint8)\n\tblue = img[:,:,0]\n\tb = interp2d(xi, yi, blue, kind='cubic')\n\tgreen = img[:,:,1]\n\tg = interp2d(xi, yi, green, kind='cubic')\n\tred = img[:,:,2]\n\tr = interp2d(xi, yi, red, kind='cubic')\n\tfor i,(x,y) in enumerate(pts):\n\t\tbl = b(x,y)\n\t\tgr = g(x,y)\n\t\tre = r(x,y)\n\t\tsize[det[i,1],det[i,0]] = (bl,gr,re)\n\n\treturn size\n\n\n\ndef swapFace(d_img,s_img,d_tri,s_tri):\n\tL = np.zeros((2,1))\n\tD = np.zeros((2,1))\n\n\tfor i in range(d_tri.shape[0]):\n\t\tz,m = affineBary(s_img,d_tri[i],s_tri[i],d_img.shape)\n\t\tL = np.concatenate((L,z),axis=1)\n\t\tD = np.concatenate((D,m),axis=1)\n\tL = np.asarray(L)\n\tL = L.T\n\tL = L[1:,:]\n\tD = np.asarray(D,dtype='int32')\n\tD = D.T\n\tD = D[1:,:]\n\n\treturn L,D\n\ndef blendFace(hull,dimg,face):\n\tmask = np.zeros_like(dimg)\n\tcv2.fillPoly(mask, [hull], (255, 255, 255))\n\t#cv2.imshow('Mask',mask)\n\t#cv2.waitKey(0)\n\tr = cv2.boundingRect(np.float32([hull]))\n\tcenter = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))\n\t#cv2.circle(dimg,center,2,(0,255,0),-1)\n\toutput = cv2.seamlessClone(np.uint8(face), dimg, mask, center, cv2.MIXED_CLONE)\n\treturn output\n\n\ndef main():\n\t'''\n\t#Code to convert video stream to a set of images\n\tfname = './Data/abhi_vid.mp4'\n\ttarname = './Self'\n\tvideoToImage(fname,tarname)\n\t'''\n\t\n\ttname = './Self/Img0.jpg'\n\tsname = './Data/arjun.jpg'\n\tp = \"shape_predictor_68_face_landmarks.dat\"\n\timg1 = cv2.imread(sname)\n\timg2 = cv2.imread(tname)\n\timg3 = cv2.imread(tname)\n\n\tIMAGE,shp = getFaceLandmarks(tname,p)\n\tIMG,V,VP,rectangle = triangulation(shp,IMAGE)\n\tIMAGEs,shps = getFaceLandmarks(sname,p)\n\tVs,IMGs = doTriangulate(shps,VP,IMAGEs)\n\ta,b = swapFace(img2,img1,V,Vs)\n\tA = interpolate(img1,img2,a,b)\n\tF = blendFace(rectangle,img3,A)\t\n\t#cv2.imshow('Output',A)\n\t#cv2.waitKey(0)\n\tcv2.imshow('Blend',F)\n\tcv2.waitKey(0)\n\t\n\nif __name__ == '__main__':\n\tmain()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
},
{
"alpha_fraction": 0.5974441170692444,
"alphanum_fraction": 0.6108626127243042,
"avg_line_length": 51.20000076293945,
"blob_id": "80c9d8a98d6263cffc537dc3491288abefb232b3",
"content_id": "803065d4704300e928b9112d11d937b624c15fa1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 30,
"path": "/PRNet/custom_input.py",
"repo_name": "Arjung27/Face_swap",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nfrom api import PRN\nfrom demo_texture import texture_editing\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Texture Editing by PRN')\n\n parser.add_argument('-i', '--image_path', default='../TestSet_P2/Test1.mp4', type=str,\n help='path to input image')\n parser.add_argument('-r', '--ref_path', default='../TestSet_P2/Rambo.jpg', type=str, \n help='path to reference image(texture ref)')\n parser.add_argument('-o', '--output_path', default='TestImages/output.jpg', type=str, \n help='path to save output')\n parser.add_argument('--mode', default=1, type=int, \n help='ways to edit texture. 0 for modifying parts, 1 for changing whole')\n parser.add_argument('--video', default=0, help='1 for video input and 0 for image input')\n parser.add_argument('--output_name', default='../TestSet_P2/Data1OutputPRNet.mp4', \n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\thelp='Name of the output file')\n parser.add_argument('--shape_predictor', default=\"../shape_predictor_68_face_landmarks.dat\", \n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\thelp=\"Prdictor file\")\n parser.add_argument('--face', type=int, default=1, help='Number of faces to be swapped 1 or 2')\n parser.add_argument('--gpu', default='0', type=str, \n help='set gpu id, -1 for CPU')\n\n # ---- init PRN\n os.environ['CUDA_VISIBLE_DEVICES'] = parser.parse_args().gpu # GPU number, -1 for CPU\n prn = PRN(is_dlib = True) \n\n texture_editing(prn, parser.parse_args())"
},
{
"alpha_fraction": 0.7050473093986511,
"alphanum_fraction": 0.7276551127433777,
"avg_line_length": 53.31428527832031,
"blob_id": "072ca94dce0c4012c136d082d55089650174d0d0",
"content_id": "1da310fda282bb0ef88f0e5bcf3b477aea189a46",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1902,
"license_type": "permissive",
"max_line_length": 299,
"num_lines": 35,
"path": "/README.md",
"repo_name": "Arjung27/Face_swap",
"src_encoding": "UTF-8",
"text": "Overview\n========\n\nDependencies:\n=============\n\n 1. Ubuntu 16.04\n 2. python3.5\n 3. Python dependencies such as numpy\n 4. Tensorflow >= 1.4\n 5. OpenCV 3.4.9\n\nInstructions to run program:\n============================\n\n1) Phase 1 (Conventional Pipeline):\n```\n\n1. Create a folder in which you can put the requisite videos and images.\n2. Run command 'python3 dataUtils/dataGeneration.py --video <Target-Video-File-Path> --sourceImg <source-Image-File-Path>\n--method <tps-Tri-TriD> --shape_predictor <landmark-dat-file> --output_name <Path-and-Name-of-output-file-with-.mp4-in-the-name>' to start generating video with faces swapped. \n\nDefault Values have been given as inputs. The test results for the project are stored in a separate 'Test' folder inside the 'Data' folder.\n```\nDifferent methods specified under --method flag determines the algorithm that needs to be used for face swapping. tps is for This Plate Splines, Tri fror triangulation, and TriD for using triangulation on videos with two faces.\n\n2) Phase 2 (Deep Learning):\n\nThis includes the modified version of [code](https://github.com/YadiraF/PRNet). Download the PRN trained model at [Google Drive](https://drive.google.com/file/d/1UoE-XuW1SDLUjZmJPkIZ1MLxvQFgmTFH/view), and put it into Data/net-data. In order to run it for our use case follow the instructions below:\n```\n1. Navigate to the PRNet folder.\n2. CUDA_VISIBLE_DEVICES=0 python custom_input.py -i <path of the target video> -r <path of the source image> --video <1 if the target is video and 0 if it is image> --output_name <path of the utput video/image> --face <number of faces>\nExample: CUDA_VISIBLE_DEVICES=0 python custom_input.py -i ../TestSet_P2/Test2.mp4 -r ../TestSet_P2/Scarlett.jpg --video 1 --output_name ../TestSet_P2/Data2OutputPRNet.mp4 --face 2\n```\nIf no GOU is present on the system then remove CUDA_VISIBLE_DEVICES=0 from the above command.\n\n"
}
] | 5 |
StrideEric/SistOp2019 | https://github.com/StrideEric/SistOp2019 | ed26fbd6698214c23bb8c6802e9822d5a4ec69ae | a3a8e662b89571154050d0c7c438fc08d7552745 | ca02a52f9f33262742f4dc1b421456067e8476b3 | refs/heads/master | 2020-07-19T04:48:31.088656 | 2019-09-04T13:03:21 | 2019-09-04T13:03:21 | 206,376,767 | 0 | 0 | null | 2019-09-04T17:39:23 | 2019-09-04T13:05:23 | 2019-09-04T13:05:22 | null | [
{
"alpha_fraction": 0.6123954653739929,
"alphanum_fraction": 0.624200701713562,
"avg_line_length": 34.05172348022461,
"blob_id": "d345c97f3e790146c1894e4540bdda6b440db165",
"content_id": "9c767dc3565877909f22f479b63b5e4326dbec90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2033,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 58,
"path": "/pruebasqlite.py",
"repo_name": "StrideEric/SistOp2019",
"src_encoding": "UTF-8",
"text": "import sqlite3\nfrom sqlite3 import Error\n \nclass Procesador:#contendra gran parte de las tareas generales\n def __init__(self):\n \tself.procesos_listos=[]\n def add_proceso(self,proceso):\n self.procesos_listos.append(proceso)\n def show_procesos(self):\n print(\"ID MemSize Input CPUTime Output\")\n for x in self.procesos_listos:\n x.muestra_proceso()\n\nclass Proceso: #Contiene los datos esenciales de un proceso\n def __init__(self,datos):\n \tself.id=datos[0]\n \tself.memSize=datos[1]\n \tself.InSize=datos[2]\n \tself.CPUSize=datos[3]\n \tself.OutSize=datos[4]\n def muestra_proceso(self):\n print(str(self.id)+\" \"+str(self.memSize)+\" \"+str(self.InSize)+\" \"+str(self.CPUSize)+\" \"+str(self.OutSize))\n\nclass Interface:\n def create_connection(self,db_file):\n \"\"\" create a database connection to a SQLite database \"\"\"\n self.conn = None\n try:\n self.conn = sqlite3.connect(db_file)\n print(\"Successfully connected to Database\")\n except Error as e:\n print(e)\n return self.conn\n def show_menu(self):\n print(\"1.Seleccionar parametros iniciales\")\n print(\"2.Mostrar lista de parametros iniciales preexistentes\")\n\t#def retrieve_data(self):\n def retrieve_data(self,procesador):\n self.cur=self.conn.cursor()\n self.cur.execute(\"SELECT * FROM Procesos\")\n rows=self.cur.fetchall()\n for row in rows:\n a=Proceso(row)\n procesador.add_proceso(a)\n \n \n#Este main solo representa una prueba de las clases existentes, no se planea implementar de esta forma.\nif __name__ == '__main__':\n conexion=Interface()\n conn=conexion.create_connection(\"/root/Documents/UTN/SistOp2019/SistOp.db\")\n Core=Procesador()\n conexion.retrieve_data(Core)\n #a=Proceso(1,30,10,10,10)\n #Core.add_proceso(a)\n Core.show_procesos()\n \n #curr=conn.cursor()\n #print(curr.execute(\"SELECT * FROM Procesos2\"))\n"
}
] | 1 |
JustBeHerLucky/SimpleWebServer | https://github.com/JustBeHerLucky/SimpleWebServer | c1b0c66606d81cf166f3c1f22465479d2b0f45bd | 11db026e5bf1b28d9b244e0b9d84647e81e597b6 | 4447417ccc9cbae493846cfb183d62c2f831340c | refs/heads/master | 2022-11-14T18:51:24.109407 | 2020-07-11T01:36:26 | 2020-07-11T01:36:26 | 278,766,331 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5306122303009033,
"alphanum_fraction": 0.5469387769699097,
"avg_line_length": 33.619564056396484,
"blob_id": "297e4204c226d91b6e717b119cd3b2551f0921a7",
"content_id": "d617b9485ddc3dca2e06ae00429dc3e3ab21af30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3185,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 92,
"path": "/Server.py",
"repo_name": "JustBeHerLucky/SimpleWebServer",
"src_encoding": "UTF-8",
"text": "import socket\nimport sys\nimport os\nimport datetime\n\nHOST = 'localhost' # Address\nPORT = 8080 # Port\nMAXLISTEN = 100000\n\n\ndef splitData(data):\n start = data.find('username') + 9\n end = start\n _userName = ''\n for i in data[start:]:\n if i == '&':\n break\n _userName += i\n end += 1\n _passWord = data[end + 10:]\n return _userName, _passWord\n \ndef readFi(filename):\n try:\n file_open = open(filename, 'rb')\n data = file_open.read()\n file_open.close()\n response_code = 200\n except FileNotFoundError:\n print('File Not Found')\n response_code = 404\n data = \"\"\n return response_code, data\n\ndef getFiPath(req_pack):\n filepath = req_pack.split(' ')[1] \n filepath = filepath[1:] \n if filepath == '':\n filepath = 'index.html'\n return os.path.join(os.path.dirname(__file__), filepath)\n\ndef CrtResp(status, data):\n if status == 200:\n status_code = 'HTTP/1.1 200 OK\\r\\n'\n elif status == 404:\n status_code = 'HTTP/1.1 404 NOT FOUND\\r\\n'\n header = 'Connection: close\\r\\n'\n header += 'Accept: text/html\\r\\n'\n header += 'Accept-Language: en_US\\r\\n'\n header += 'Content-Type: text/html\\r\\n\\r\\n'\n res_header = status_code + header\n return res_header, data\n\ndef main():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Connection configuration\n s.bind((HOST, PORT)) #Listening\n s.listen(MAXLISTEN) # Setup 10 connection at the same time\n print('Server is listening on PORT: ', PORT)\n cur_path = os.path.dirname(__file__)\n while True:\n (client, adr) = s.accept()\t \n currentDT = datetime.datetime.now()\n data = client.recv(2048)\n print('Client connected:', client, ' Time:' ,str(currentDT))\n requests = data.decode()\n req_method = requests.split(' ')[0] \n res_path = os.path.join(cur_path, 'index.html') \n (status_code, data) = readFi(res_path) \n if req_method == 'GET' or req_method == 'POST':\n if req_method == 'POST':\n (userName, passWord) = splitData(requests)\n if userName == 'admin' and passWord == 'admin':\n res_path = os.path.join(cur_path, 'info.html')\n currentDT = datetime.datetime.now()\n print('\\nValid Username and password, redirecting.... Time:' ,str(currentDT),'\\n')\n else:\n status_code = 404\n res_path = os.path.join(cur_path, '404.html')\n currentDT = datetime.datetime.now()\n print('\\nInvalid Username and password, error.... Time:' ,str(currentDT), '\\n')\n elif req_method == 'GET':\n file_path = getFiPath(requests)\n if file_path.find('.html') == -1:\n res_path = os.path.join(cur_path, file_path)\n file_open = open(res_path, 'rb')\n data = file_open.read()\n (res_header, res_body) = CrtResp(status_code, data)\n client.send(res_header.encode())\n client.send(res_body)\n client.close()\n \nmain()\n"
}
] | 1 |
lcdi/Inventory | https://github.com/lcdi/Inventory | 074fb05b0248b78705e151b06c655e3d95eb1e68 | bed112f25a87abfbb80ea91c78c5a95d514890bf | 94cd05bf03161a47a8d97d3f9d5969f0d36dc7eb | refs/heads/master | 2022-08-19T13:42:19.604061 | 2016-09-12T14:19:51 | 2016-09-12T14:19:51 | 32,943,858 | 2 | 2 | MIT | 2015-03-26T18:06:16 | 2016-03-08T17:45:51 | 2022-07-06T20:00:07 | HTML | [
{
"alpha_fraction": 0.7006514072418213,
"alphanum_fraction": 0.7077715396881104,
"avg_line_length": 23.9132080078125,
"blob_id": "b5df2eae876d63b4090a4aca823f3ba39f04ee22",
"content_id": "d3c4bfefe14261d18bd9ee2fb00c5d85f05b8177",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6601,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 265,
"path": "/models.py",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "from peewee import *\nfrom playhouse.shortcuts import RetryOperationalError\nimport flask.ext.whooshalchemy\nimport datetime\nfrom lcdi import *\n\nclass MysqlRetryDatabase(RetryOperationalError, MySQLDatabase):\n\tpass\n\n#db = MySQLDatabase(getConfig('SQL_DATABASE'), user=getConfig('SQL_USERNAME'), password=getConfig('SQL_PASSWORD'))\ndb = MysqlRetryDatabase(getConfig('SQL_DATABASE'), user=getConfig('SQL_USERNAME'), password=getConfig('SQL_PASSWORD'))\n\nclass BaseModel(Model):\n\tclass Meta:\n\t\tdatabase = db\n\nclass Device(Model):\n\n\t__searchable__ = [\n\t\t'SerialNumber',\n\t\t'SerialDevice'\n\t\t'Type',\n\t\t'Description',\n\t\t'Issues',\n\t\t'PhotoName',\n\t\t'Quality'\n\t]\n\n\tSerialNumber = CharField(primary_key=True)\n\tSerialDevice = CharField()\n\tType = CharField()\n\tDescription = TextField()\n\tIssues = TextField()\n\tPhotoName = CharField()\n\tQuality = CharField()\n\t\n\tclass Meta:\n\t\tdatabase = db\n\t\nclass Log(Model):\n\t\n\tIdentifier = PrimaryKeyField()\n\tSerialNumber = ForeignKeyField(Device, db_column='SerialNumber')\n\tPurpose = TextField()\n\tUserOut = CharField()\n\tDateOut = DateTimeField()\n\tAuthorizerOut = CharField()\n\tUserIn = CharField()\n\tDateIn = DateTimeField()\n\tAuthorizerIn = CharField()\n\n\tclass Meta:\n\t\tdatabase = db\n\ndef getDeviceTypes():\n\ttypes = [];\n\n\tquery = Device.select(Device.Type).order_by(Device.Type)\n\tfor q in query:\n\t\tif (doesEntryExist(q.Type, types) == True):\n\t\t\tpass\n\t\telse:\n\t\t\ttypes.append(q.Type)\n\n\treturn types\n\t\ndef getStates():\n\tstates = [\"Operational\"];\n\n\tquery = Device.select(Device.Quality).order_by(Device.Quality)\n\tfor q in query:\n\t\tif (doesEntryExist(q.Quality, states) == True):\n\t\t\tpass\n\t\telse:\n\t\t\tstates.append(q.Quality)\n\n\treturn states\n\ndef doesEntryExist(x, arr):\n\tfor a in arr:\n\t\tif x == a:\n\t\t\treturn True\n\treturn False\n\ndef getDevices():\n\treturn Device.select(\n\t\tDevice.SerialNumber,\n\t\tDevice.Type,\n\t\tDevice.Description,\n\t\tDevice.Issues\n\t).order_by(Device.SerialNumber)\n\t\ndef getDevicesAndLogs():\n\treturn Device.select(Device, Log).join(Log).order_by(Device.SerialNumber)\n\ndef getDevicesWithLog(itemType, status, quality):\n\tquery = getDevices().where(\n\t\t(Device.Type == itemType if itemType != 'ALL' else Device.Type != ''),\n\t\t(Device.Quality == quality if quality != 'ALL' else Device.Quality != '')\n\t)\n\treturn getDeviceAndLogListForQuery(query, status)\n\ndef getDeviceAndLogListForQuery(query, status = 'ALL'):\n\tdeviceList = []\n\t\n\tfor device in query:\n\t\tdevice.log = getDeviceLog(device.SerialNumber)\n\t\t\n\t\tdevice.statusIsOut, device.log = getStatus(device.log)\n\t\t\n\t\tif status == 'ALL':\n\t\t\tdeviceList.append(device)\n\t\telif status == 'in' and not device.statusIsOut:\n\t\t\tdeviceList.append(device)\n\t\telif status == 'out' and device.statusIsOut:\n\t\t\tdeviceList.append(device)\n\t\n\treturn deviceList\n\ndef getStatus(log):\n\thasLog = len(log) > 0\n\tif hasLog:\n\t\tlog = log.get()\n\t\treturn (not log.DateIn, log)\n\telse:\n\t\treturn (False, log)\n\ndef getNextSerialNumber(device_type):\n\tprefixStr = \"LCDI\"\n\t\n\tquerySerial = Device.select(Device.SerialNumber).where(Device.Type == device_type).order_by(-Device.SerialNumber)\n\tnumberOfEntries = len(querySerial)\n\t\n\tlcdiPrefix = prefixStr + \"-\"\n\tlcdiPrefixLength = len(lcdiPrefix)\n\ttypeNumber = 0\n\ttypeNumberLength = 2\n\ttypeNumberQuantityMAX = pow(10, typeNumberLength)\n\ttypeNumberMAX = typeNumberQuantityMAX - 
1\n\titemNumber = 0\n\titemNumberLength = 3\n\titemNumberQuantityMAX = pow(10, itemNumberLength)\n\titemNumberMAX = itemNumberQuantityMAX - 1\n\t\n\t# No items of type\n\tif numberOfEntries <= 0:\n\t\ttypeNumber = len(getDeviceTypes())\n\t\tif typeNumber > typeNumberQuantityMAX - 1:\n\t\t\treturn (None, \"OVERFLOW ERROR: Too many types\")\n\t# Items of type found\n\telse:\n\t\ttypeNumber = querySerial.get().SerialNumber[lcdiPrefixLength : lcdiPrefixLength + typeNumberLength]\n\t\t\n\t\tif numberOfEntries >= itemNumberQuantityMAX:\n\t\t\treturn (None, \"OVERFLOW ERROR: Too many items for type \" + device_type)\n\t\t# Less than maximum quantity of items for type\n\t\telse:\n\t\t\tlastSerial = querySerial.get().SerialNumber\n\t\t\tlastSerial_itemNumber = int(lastSerial[len(lastSerial)-2:])\n\t\t\t\n\t\t\tif lastSerial_itemNumber == numberOfEntries:\n\t\t\t\ti = 0\n\t\t\t\tfor device in querySerial.order_by(Device.SerialNumber):\n\t\t\t\t\ti_itemNumber = int(device.SerialNumber[lcdiPrefixLength + typeNumberLength:])\n\t\t\t\t\tif i_itemNumber != i:\n\t\t\t\t\t\titemNumber = i\n\t\t\t\t\t\tbreak\n\t\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\titemNumber = int(lastSerial[len(lastSerial)-2:]) + 1\n\t\t\t\n\t\t\tif itemNumber > itemNumberMAX:\n\t\t\t\treturn (None, \"OVERFLOW ERROR: All serials used up\")\n\n\treturn (lcdiPrefix + str(typeNumber).zfill(typeNumberLength) + str(itemNumber).zfill(itemNumberLength), None)\n\t\ndef generateSerial(device_type):\n\t\n\tquerySerial = Device.select(Device.SerialNumber).where(Device.Type == device_type).order_by(-Device.SerialNumber)\n\tnumberOfEntries = len(querySerial)\n\t\n\tif numberOfEntries > 99:\n\t\treturn (None, \"OVERFLOW ERROR: Too many items for type \" + device_type)\n\t\n\tif numberOfEntries <= 0:\n\t\tserialList = []\n\t\tdeviceQuery = getDevices();\n\t\t\n\t\tcount = 0;\n\t\tfor device in deviceQuery:\n\t\t\tserial = device.SerialNumber\n\t\t\tserial = serial[5:]\n\t\t\ttypeNumber = serial[:2]\n\t\t\t\n\t\t\tif typeNumber not in serialList:\n\t\t\t\tserialList.append(typeNumber)\n\t\t\t\t\n\t\t\n\t\tprint(serialList)\n\t\t\n\t\tmissingTypes = []\n\t\t\n\t\tfor x in range(0, 100):\n\t\t\ttestType = str(x).zfill(2)\n\t\t\tif testType not in serialList:\n\t\t\t\tmissingTypes.append(testType)\n\t\t\t\t\n\t\t\n\t\tprint(missingTypes)\n\t\t\n\t\ttypeNumber = missingTypes.pop(0)\n\t\tif int(typeNumber) > 99:\n\t\t\treturn (None, \"OVERFLOW ERROR: Too many types\")\n\t\t\n\t\titemNumber = 0\t\n\t\tprint(typeNumber)\n\telse:\n\t\t\n\t\tnumberList = []\n\t\t\n\t\tfor device in querySerial:\n\t\t\tserial = device.SerialNumber\n\t\t\tserial = serial[5:]\n\t\t\titemNumber = serial[2:]\n\t\t\t\n\t\t\tif itemNumber not in numberList:\n\t\t\t\tnumberList.append(itemNumber)\n\t\t\t\t\n\t\tprint numberList\n\t\t\n\t\tmissingNumbers = []\n\t\t\n\t\tfor x in range(0, 1000):\n\t\t\ttestNumber = str(x).zfill(3)\n\t\t\tif testNumber not in numberList:\n\t\t\t\tmissingNumbers.append(testNumber)\n\t\t\t\t\n\t\tprint missingNumbers\n\t\t\n\t\tserial = querySerial[0].SerialNumber\n\t\tserial = serial[5:]\n\t\ttypeNumber, itemnNumber = serial[:2] , serial[3:]\n\t\titemNumber = missingNumbers.pop(0)\n\t\n\treturn ('LCDI-' + typeNumber + str(itemNumber).zfill(3), None)\n\t\n\t\ndef getDeviceLog(serial):\n\treturn Log.select().where(Log.SerialNumber == serial).order_by(-Log.Identifier)\n\t\ndef isSearchUser(user):\n\t\n\tquery = Log.select().where(Log.AuthorizerIn == user | Log.AuthorizerOut == user | Log.UserIn == user | Log.UserOut == user)\n\t\n\tfor log in query:\n\t\tif 
log.AuthorizerIn == user:\n\t\t\treturn True\n\t\telif log.AuthorizerOut == user:\n\t\t\treturn True\n\t\telif log.UserIn == user:\n\t\t\treturn True\n\t\telif log.UserOut == user:\n\t\t\treturn True\n\t\n\treturn False"
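\n\n# Serial layout assumed throughout this module (the example serial is hypothetical):\n#   'LCDI-' + 2-digit type number + 3-digit item number, e.g. 'LCDI-02013'\n"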
},
{
"alpha_fraction": 0.5115384459495544,
"alphanum_fraction": 0.5115384459495544,
"avg_line_length": 21.65217399597168,
"blob_id": "1d1231e2cb26aa5709dfecefa55458c2fb25e379",
"content_id": "2809e7e0b59f45652924304d99098fb91cd5d56d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 520,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 23,
"path": "/templates/page/PageUserLogs_Body.html",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "{% for log in query %}\n\t<tr>\n\t\t<td>\n\t\t\t<a href=\"{{ url_for('view', serial=log.SerialNumber.SerialNumber) }}\">{{ log.SerialNumber.SerialNumber }}</a>\n\t\t</td>\n\t\t<td>{{ log.SerialNumber.Type }}</td>\n\t\t<th>\n\t\t\t{% if log.Purpose %}\n\t\t\t\t{{ log.Purpose }}\n\t\t\t{% endif %}\n\t\t</th>\n\t\t<th>{{ log.UserOut }}</th>\n\t\t<td>{{ log.DateOut }}</td>\n\t\t<th>{{ log.AuthorizerOut }}</th>\n\t\t<th>{{ log.UserIn }}</th>\n\t\t<td>\n\t\t\t{% if log.DateIn %}\n\t\t\t\t{{ log.DateIn }}\n\t\t\t{% endif %}\n\t\t</td>\n\t\t<th>{{ log.AuthorizerIn }}</th>\n\t</tr>\n{% endfor %}"
},
{
"alpha_fraction": 0.5345436334609985,
"alphanum_fraction": 0.5537065267562866,
"avg_line_length": 33.78947448730469,
"blob_id": "e6e08e75098d1a80410475ee95df881251a6ae68",
"content_id": "2345ae9b89f6570e9da98881c44b33edd630fe30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1983,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 57,
"path": "/inventory_schema.sql",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "-- MySQL Workbench Forward Engineering\n\nSET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;\nSET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;\nSET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';\n\n-- -----------------------------------------------------\n-- Schema inventory\n-- -----------------------------------------------------\n\n-- -----------------------------------------------------\n-- Schema inventory\n-- -----------------------------------------------------\nCREATE SCHEMA IF NOT EXISTS `inventory` DEFAULT CHARACTER SET utf8 ;\nUSE `inventory` ;\n\n-- -----------------------------------------------------\n-- Table `inventory`.`device`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `inventory`.`device` (\n `SerialNumber` VARCHAR(255) NOT NULL DEFAULT 'LCDI-00000',\n `SerialDevice` VARCHAR(255) NULL,\n `Type` VARCHAR(255) NULL DEFAULT 'NONE',\n `Description` TEXT NULL,\n `Issues` TEXT NULL,\n `PhotoName` VARCHAR(255) NULL,\n `Quality` VARCHAR(255) NULL,\n PRIMARY KEY (`SerialNumber`))\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `inventory`.`log`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `inventory`.`log` (\n `Identifier` INT NOT NULL AUTO_INCREMENT,\n `SerialNumber` VARCHAR(255) NOT NULL,\n `Purpose` TEXT NOT NULL,\n `UserOut` VARCHAR(255) NOT NULL,\n `DateOut` DATE NOT NULL,\n `AuthorizerOut` VARCHAR(255) NOT NULL,\n `UserIn` VARCHAR(255) NOT NULL,\n `DateIn` DATE NOT NULL,\n `AuthorizerIn` VARCHAR(255) NOT NULL,\n PRIMARY KEY (`Identifier`),\n INDEX `SerialNumber_idx` (`SerialNumber` ASC),\n CONSTRAINT `SerialNumber_constraint`\n FOREIGN KEY (`SerialNumber`)\n REFERENCES `inventory`.`device` (`SerialNumber`)\n ON DELETE CASCADE\n ON UPDATE CASCADE)\nENGINE = InnoDB;\n\n\nSET SQL_MODE=@OLD_SQL_MODE;\nSET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;\nSET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;\n"
},
{
"alpha_fraction": 0.7678571343421936,
"alphanum_fraction": 0.7678571343421936,
"avg_line_length": 27.5,
"blob_id": "8a8a5c332bfa0aa55c205b9aa571ca095ada4356",
"content_id": "ceb313c708c1d7fc6b0911c773f145593e66371e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 56,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 2,
"path": "/update.sh",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "sudo git fetch --all\nsudo git reset --hard origin/master"
},
{
"alpha_fraction": 0.6617886424064636,
"alphanum_fraction": 0.6677506566047668,
"avg_line_length": 26.53731346130371,
"blob_id": "6364318ebcc74f53e8600db3749b6383f9baabcb",
"content_id": "0cf254d629fdffab0d77a11bf0c020272bf2aaa5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1845,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 67,
"path": "/adLDAP.py",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "import ldap\n\nvalidEditAccessGroups = ['Office Assistants', 'Domain Admins', 'Inventory', 'Research Assistants']\n\ndef checkCredentials(controller, domainA, domainB, username, password):\n\tif password == \"\":\n\t\treturn ('Empty Password', False)\n\t\n\tdomain = domainA + '.' + domainB\n\t\n\tldapServer = 'ldap://' + controller + '.' + domain\n\tldapUsername = username + '@' + domain\n\tldapPassword = password\n\t\n\tbase_dn = 'DC=' + domainA + ',DC=' + domainB\n\tldap_filter = 'userPrincipalName=' + ldapUsername\n\t\n\t# Note: empty passwords WILL validate with ldap\n\ttry:\n\t\tldap_client = ldap.initialize(ldapServer)\n\t\tldap_client.set_option(ldap.OPT_REFERRALS, 0)\n\t\tldap_client.simple_bind_s(ldapUsername, ldapPassword)\n\texcept ldap.INVALID_CREDENTIALS:\n\t\tldap_client.unbind()\n\t\treturn ('Wrong Credentials', False)\n\texcept ldap.SERVER_DOWN:\n\t\treturn ('Server Down', False)\n\t\n\t#print(ldap_client.whoami_s())\n\t\n\thasEditAccess = False\n\t#dn = 'ou=Users,' + base_dn\n\tdn = base_dn\n\t#dn = 'cn=' + username + ',' + base_dn\n\t#print(dn)\n\t\n\t#filter_ = 'cn=' + username\n\t#filter_ = '(&(objectclass=person)(cn=%s)' % username\n\t#filter_ = '(uid=*)'\n\tfilter_ = 'samaccountname=' + username\n\t#filter_ = ldap_filter\n\t#filter_ = '(&(objectCategory=person)(%s))' % filter_\n\t#filter_ = 'memberOf=' + validEditAccessGroups[0]\n\t#filter_ = 'cn=' + username\n\t#print(filter_)\n\t\n\tattrs = ['memberOf']\n\t\n\tresult = ldap_client.search_s(dn, ldap.SCOPE_SUBTREE, filter_, attrs)\n\t#print(result)\n\t#for d1 in result:\n\t#\tprint(d1)\n\tgroups = result[0][1]['memberOf']\n\t#print(groups)\n\t\n\t#return (\"\", \"\")\n\t#groups = ldap_client.result(id)[1][0][1]['memberOf']\n\tfor group in groups:\n\t\taddress = group.split(',')\n\t\tgroupName = address[0].split('=')[1]\n\t\tif groupName in validEditAccessGroups:\n\t\t\thasEditAccess = True\n\t\t\tbreak\n\t#print(hasEditAccess)\n\t\n\tldap_client.unbind()\n\treturn (True, hasEditAccess)\n"
},
{
"alpha_fraction": 0.4102637469768524,
"alphanum_fraction": 0.6284337043762207,
"avg_line_length": 73.9817123413086,
"blob_id": "4a0819e20c9702c37d1bcab9d2b484aa1d48744b",
"content_id": "3b2cae07fb7ad4912814e63e47668d7cf14e50b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 430545,
"license_type": "permissive",
"max_line_length": 504,
"num_lines": 5742,
"path": "/localhost.sql",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "-- phpMyAdmin SQL Dump\n-- version 3.4.10.1deb1\n-- http://www.phpmyadmin.net\n--\n-- Host: localhost\n-- Generation Time: Feb 15, 2016 at 09:00 AM\n-- Server version: 5.5.40\n-- PHP Version: 5.3.10-1ubuntu3.15\n\nSET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\";\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n\n--\n-- Database: `information_schema`\n--\nCREATE DATABASE `information_schema` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;\nUSE `information_schema`;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `CHARACTER_SETS`\n--\n\nCREATE TEMPORARY TABLE `CHARACTER_SETS` (\n `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '',\n `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '',\n `DESCRIPTION` varchar(60) NOT NULL DEFAULT '',\n `MAXLEN` bigint(3) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `CHARACTER_SETS`\n--\n\nINSERT INTO `CHARACTER_SETS` (`CHARACTER_SET_NAME`, `DEFAULT_COLLATE_NAME`, `DESCRIPTION`, `MAXLEN`) VALUES\n('big5', 'big5_chinese_ci', 'Big5 Traditional Chinese', 2),\n('dec8', 'dec8_swedish_ci', 'DEC West European', 1),\n('cp850', 'cp850_general_ci', 'DOS West European', 1),\n('hp8', 'hp8_english_ci', 'HP West European', 1),\n('koi8r', 'koi8r_general_ci', 'KOI8-R Relcom Russian', 1),\n('latin1', 'latin1_swedish_ci', 'cp1252 West European', 1),\n('latin2', 'latin2_general_ci', 'ISO 8859-2 Central European', 1),\n('swe7', 'swe7_swedish_ci', '7bit Swedish', 1),\n('ascii', 'ascii_general_ci', 'US ASCII', 1),\n('ujis', 'ujis_japanese_ci', 'EUC-JP Japanese', 3),\n('sjis', 'sjis_japanese_ci', 'Shift-JIS Japanese', 2),\n('hebrew', 'hebrew_general_ci', 'ISO 8859-8 Hebrew', 1),\n('tis620', 'tis620_thai_ci', 'TIS620 Thai', 1),\n('euckr', 'euckr_korean_ci', 'EUC-KR Korean', 2),\n('koi8u', 'koi8u_general_ci', 'KOI8-U Ukrainian', 1),\n('gb2312', 'gb2312_chinese_ci', 'GB2312 Simplified Chinese', 2),\n('greek', 'greek_general_ci', 'ISO 8859-7 Greek', 1),\n('cp1250', 'cp1250_general_ci', 'Windows Central European', 1),\n('gbk', 'gbk_chinese_ci', 'GBK Simplified Chinese', 2),\n('latin5', 'latin5_turkish_ci', 'ISO 8859-9 Turkish', 1),\n('armscii8', 'armscii8_general_ci', 'ARMSCII-8 Armenian', 1),\n('utf8', 'utf8_general_ci', 'UTF-8 Unicode', 3),\n('ucs2', 'ucs2_general_ci', 'UCS-2 Unicode', 2),\n('cp866', 'cp866_general_ci', 'DOS Russian', 1),\n('keybcs2', 'keybcs2_general_ci', 'DOS Kamenicky Czech-Slovak', 1),\n('macce', 'macce_general_ci', 'Mac Central European', 1),\n('macroman', 'macroman_general_ci', 'Mac West European', 1),\n('cp852', 'cp852_general_ci', 'DOS Central European', 1),\n('latin7', 'latin7_general_ci', 'ISO 8859-13 Baltic', 1),\n('utf8mb4', 'utf8mb4_general_ci', 'UTF-8 Unicode', 4),\n('cp1251', 'cp1251_general_ci', 'Windows Cyrillic', 1),\n('utf16', 'utf16_general_ci', 'UTF-16 Unicode', 4),\n('cp1256', 'cp1256_general_ci', 'Windows Arabic', 1),\n('cp1257', 'cp1257_general_ci', 'Windows Baltic', 1),\n('utf32', 'utf32_general_ci', 'UTF-32 Unicode', 4),\n('binary', 'binary', 'Binary pseudo charset', 1),\n('geostd8', 'geostd8_general_ci', 'GEOSTD8 Georgian', 1),\n('cp932', 'cp932_japanese_ci', 'SJIS for Windows Japanese', 2),\n('eucjpms', 'eucjpms_japanese_ci', 'UJIS for Windows Japanese', 3);\n\n-- 
--------------------------------------------------------\n\n--\n-- Table structure for table `COLLATIONS`\n--\n\nCREATE TEMPORARY TABLE `COLLATIONS` (\n `COLLATION_NAME` varchar(32) NOT NULL DEFAULT '',\n `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '',\n `ID` bigint(11) NOT NULL DEFAULT '0',\n `IS_DEFAULT` varchar(3) NOT NULL DEFAULT '',\n `IS_COMPILED` varchar(3) NOT NULL DEFAULT '',\n `SORTLEN` bigint(3) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `COLLATIONS`\n--\n\nINSERT INTO `COLLATIONS` (`COLLATION_NAME`, `CHARACTER_SET_NAME`, `ID`, `IS_DEFAULT`, `IS_COMPILED`, `SORTLEN`) VALUES\n('big5_chinese_ci', 'big5', 1, 'Yes', 'Yes', 1),\n('big5_bin', 'big5', 84, '', 'Yes', 1),\n('dec8_swedish_ci', 'dec8', 3, 'Yes', 'Yes', 1),\n('dec8_bin', 'dec8', 69, '', 'Yes', 1),\n('cp850_general_ci', 'cp850', 4, 'Yes', 'Yes', 1),\n('cp850_bin', 'cp850', 80, '', 'Yes', 1),\n('hp8_english_ci', 'hp8', 6, 'Yes', 'Yes', 1),\n('hp8_bin', 'hp8', 72, '', 'Yes', 1),\n('koi8r_general_ci', 'koi8r', 7, 'Yes', 'Yes', 1),\n('koi8r_bin', 'koi8r', 74, '', 'Yes', 1),\n('latin1_german1_ci', 'latin1', 5, '', 'Yes', 1),\n('latin1_swedish_ci', 'latin1', 8, 'Yes', 'Yes', 1),\n('latin1_danish_ci', 'latin1', 15, '', 'Yes', 1),\n('latin1_german2_ci', 'latin1', 31, '', 'Yes', 2),\n('latin1_bin', 'latin1', 47, '', 'Yes', 1),\n('latin1_general_ci', 'latin1', 48, '', 'Yes', 1),\n('latin1_general_cs', 'latin1', 49, '', 'Yes', 1),\n('latin1_spanish_ci', 'latin1', 94, '', 'Yes', 1),\n('latin2_czech_cs', 'latin2', 2, '', 'Yes', 4),\n('latin2_general_ci', 'latin2', 9, 'Yes', 'Yes', 1),\n('latin2_hungarian_ci', 'latin2', 21, '', 'Yes', 1),\n('latin2_croatian_ci', 'latin2', 27, '', 'Yes', 1),\n('latin2_bin', 'latin2', 77, '', 'Yes', 1),\n('swe7_swedish_ci', 'swe7', 10, 'Yes', 'Yes', 1),\n('swe7_bin', 'swe7', 82, '', 'Yes', 1),\n('ascii_general_ci', 'ascii', 11, 'Yes', 'Yes', 1),\n('ascii_bin', 'ascii', 65, '', 'Yes', 1),\n('ujis_japanese_ci', 'ujis', 12, 'Yes', 'Yes', 1),\n('ujis_bin', 'ujis', 91, '', 'Yes', 1),\n('sjis_japanese_ci', 'sjis', 13, 'Yes', 'Yes', 1),\n('sjis_bin', 'sjis', 88, '', 'Yes', 1),\n('hebrew_general_ci', 'hebrew', 16, 'Yes', 'Yes', 1),\n('hebrew_bin', 'hebrew', 71, '', 'Yes', 1),\n('tis620_thai_ci', 'tis620', 18, 'Yes', 'Yes', 4),\n('tis620_bin', 'tis620', 89, '', 'Yes', 1),\n('euckr_korean_ci', 'euckr', 19, 'Yes', 'Yes', 1),\n('euckr_bin', 'euckr', 85, '', 'Yes', 1),\n('koi8u_general_ci', 'koi8u', 22, 'Yes', 'Yes', 1),\n('koi8u_bin', 'koi8u', 75, '', 'Yes', 1),\n('gb2312_chinese_ci', 'gb2312', 24, 'Yes', 'Yes', 1),\n('gb2312_bin', 'gb2312', 86, '', 'Yes', 1),\n('greek_general_ci', 'greek', 25, 'Yes', 'Yes', 1),\n('greek_bin', 'greek', 70, '', 'Yes', 1),\n('cp1250_general_ci', 'cp1250', 26, 'Yes', 'Yes', 1),\n('cp1250_czech_cs', 'cp1250', 34, '', 'Yes', 2),\n('cp1250_croatian_ci', 'cp1250', 44, '', 'Yes', 1),\n('cp1250_bin', 'cp1250', 66, '', 'Yes', 1),\n('cp1250_polish_ci', 'cp1250', 99, '', 'Yes', 1),\n('gbk_chinese_ci', 'gbk', 28, 'Yes', 'Yes', 1),\n('gbk_bin', 'gbk', 87, '', 'Yes', 1),\n('latin5_turkish_ci', 'latin5', 30, 'Yes', 'Yes', 1),\n('latin5_bin', 'latin5', 78, '', 'Yes', 1),\n('armscii8_general_ci', 'armscii8', 32, 'Yes', 'Yes', 1),\n('armscii8_bin', 'armscii8', 64, '', 'Yes', 1),\n('utf8_general_ci', 'utf8', 33, 'Yes', 'Yes', 1),\n('utf8_bin', 'utf8', 83, '', 'Yes', 1),\n('utf8_unicode_ci', 'utf8', 192, '', 'Yes', 8),\n('utf8_icelandic_ci', 'utf8', 193, '', 'Yes', 8),\n('utf8_latvian_ci', 'utf8', 194, '', 'Yes', 8),\n('utf8_romanian_ci', 'utf8', 195, 
'', 'Yes', 8),\n('utf8_slovenian_ci', 'utf8', 196, '', 'Yes', 8),\n('utf8_polish_ci', 'utf8', 197, '', 'Yes', 8),\n('utf8_estonian_ci', 'utf8', 198, '', 'Yes', 8),\n('utf8_spanish_ci', 'utf8', 199, '', 'Yes', 8),\n('utf8_swedish_ci', 'utf8', 200, '', 'Yes', 8),\n('utf8_turkish_ci', 'utf8', 201, '', 'Yes', 8),\n('utf8_czech_ci', 'utf8', 202, '', 'Yes', 8),\n('utf8_danish_ci', 'utf8', 203, '', 'Yes', 8),\n('utf8_lithuanian_ci', 'utf8', 204, '', 'Yes', 8),\n('utf8_slovak_ci', 'utf8', 205, '', 'Yes', 8),\n('utf8_spanish2_ci', 'utf8', 206, '', 'Yes', 8),\n('utf8_roman_ci', 'utf8', 207, '', 'Yes', 8),\n('utf8_persian_ci', 'utf8', 208, '', 'Yes', 8),\n('utf8_esperanto_ci', 'utf8', 209, '', 'Yes', 8),\n('utf8_hungarian_ci', 'utf8', 210, '', 'Yes', 8),\n('utf8_sinhala_ci', 'utf8', 211, '', 'Yes', 8),\n('utf8_general_mysql500_ci', 'utf8', 223, '', 'Yes', 1),\n('ucs2_general_ci', 'ucs2', 35, 'Yes', 'Yes', 1),\n('ucs2_bin', 'ucs2', 90, '', 'Yes', 1),\n('ucs2_unicode_ci', 'ucs2', 128, '', 'Yes', 8),\n('ucs2_icelandic_ci', 'ucs2', 129, '', 'Yes', 8),\n('ucs2_latvian_ci', 'ucs2', 130, '', 'Yes', 8),\n('ucs2_romanian_ci', 'ucs2', 131, '', 'Yes', 8),\n('ucs2_slovenian_ci', 'ucs2', 132, '', 'Yes', 8),\n('ucs2_polish_ci', 'ucs2', 133, '', 'Yes', 8),\n('ucs2_estonian_ci', 'ucs2', 134, '', 'Yes', 8),\n('ucs2_spanish_ci', 'ucs2', 135, '', 'Yes', 8),\n('ucs2_swedish_ci', 'ucs2', 136, '', 'Yes', 8),\n('ucs2_turkish_ci', 'ucs2', 137, '', 'Yes', 8),\n('ucs2_czech_ci', 'ucs2', 138, '', 'Yes', 8),\n('ucs2_danish_ci', 'ucs2', 139, '', 'Yes', 8),\n('ucs2_lithuanian_ci', 'ucs2', 140, '', 'Yes', 8),\n('ucs2_slovak_ci', 'ucs2', 141, '', 'Yes', 8),\n('ucs2_spanish2_ci', 'ucs2', 142, '', 'Yes', 8),\n('ucs2_roman_ci', 'ucs2', 143, '', 'Yes', 8),\n('ucs2_persian_ci', 'ucs2', 144, '', 'Yes', 8),\n('ucs2_esperanto_ci', 'ucs2', 145, '', 'Yes', 8),\n('ucs2_hungarian_ci', 'ucs2', 146, '', 'Yes', 8),\n('ucs2_sinhala_ci', 'ucs2', 147, '', 'Yes', 8),\n('ucs2_general_mysql500_ci', 'ucs2', 159, '', 'Yes', 1),\n('cp866_general_ci', 'cp866', 36, 'Yes', 'Yes', 1),\n('cp866_bin', 'cp866', 68, '', 'Yes', 1),\n('keybcs2_general_ci', 'keybcs2', 37, 'Yes', 'Yes', 1),\n('keybcs2_bin', 'keybcs2', 73, '', 'Yes', 1),\n('macce_general_ci', 'macce', 38, 'Yes', 'Yes', 1),\n('macce_bin', 'macce', 43, '', 'Yes', 1),\n('macroman_general_ci', 'macroman', 39, 'Yes', 'Yes', 1),\n('macroman_bin', 'macroman', 53, '', 'Yes', 1),\n('cp852_general_ci', 'cp852', 40, 'Yes', 'Yes', 1),\n('cp852_bin', 'cp852', 81, '', 'Yes', 1),\n('latin7_estonian_cs', 'latin7', 20, '', 'Yes', 1),\n('latin7_general_ci', 'latin7', 41, 'Yes', 'Yes', 1),\n('latin7_general_cs', 'latin7', 42, '', 'Yes', 1),\n('latin7_bin', 'latin7', 79, '', 'Yes', 1),\n('utf8mb4_general_ci', 'utf8mb4', 45, 'Yes', 'Yes', 1),\n('utf8mb4_bin', 'utf8mb4', 46, '', 'Yes', 1),\n('utf8mb4_unicode_ci', 'utf8mb4', 224, '', 'Yes', 8),\n('utf8mb4_icelandic_ci', 'utf8mb4', 225, '', 'Yes', 8),\n('utf8mb4_latvian_ci', 'utf8mb4', 226, '', 'Yes', 8),\n('utf8mb4_romanian_ci', 'utf8mb4', 227, '', 'Yes', 8),\n('utf8mb4_slovenian_ci', 'utf8mb4', 228, '', 'Yes', 8),\n('utf8mb4_polish_ci', 'utf8mb4', 229, '', 'Yes', 8),\n('utf8mb4_estonian_ci', 'utf8mb4', 230, '', 'Yes', 8),\n('utf8mb4_spanish_ci', 'utf8mb4', 231, '', 'Yes', 8),\n('utf8mb4_swedish_ci', 'utf8mb4', 232, '', 'Yes', 8),\n('utf8mb4_turkish_ci', 'utf8mb4', 233, '', 'Yes', 8),\n('utf8mb4_czech_ci', 'utf8mb4', 234, '', 'Yes', 8),\n('utf8mb4_danish_ci', 'utf8mb4', 235, '', 'Yes', 8),\n('utf8mb4_lithuanian_ci', 'utf8mb4', 236, '', 'Yes', 8),\n('utf8mb4_slovak_ci', 
'utf8mb4', 237, '', 'Yes', 8),\n('utf8mb4_spanish2_ci', 'utf8mb4', 238, '', 'Yes', 8),\n('utf8mb4_roman_ci', 'utf8mb4', 239, '', 'Yes', 8),\n('utf8mb4_persian_ci', 'utf8mb4', 240, '', 'Yes', 8),\n('utf8mb4_esperanto_ci', 'utf8mb4', 241, '', 'Yes', 8),\n('utf8mb4_hungarian_ci', 'utf8mb4', 242, '', 'Yes', 8),\n('utf8mb4_sinhala_ci', 'utf8mb4', 243, '', 'Yes', 8),\n('cp1251_bulgarian_ci', 'cp1251', 14, '', 'Yes', 1),\n('cp1251_ukrainian_ci', 'cp1251', 23, '', 'Yes', 1),\n('cp1251_bin', 'cp1251', 50, '', 'Yes', 1),\n('cp1251_general_ci', 'cp1251', 51, 'Yes', 'Yes', 1),\n('cp1251_general_cs', 'cp1251', 52, '', 'Yes', 1),\n('utf16_general_ci', 'utf16', 54, 'Yes', 'Yes', 1),\n('utf16_bin', 'utf16', 55, '', 'Yes', 1),\n('utf16_unicode_ci', 'utf16', 101, '', 'Yes', 8),\n('utf16_icelandic_ci', 'utf16', 102, '', 'Yes', 8),\n('utf16_latvian_ci', 'utf16', 103, '', 'Yes', 8),\n('utf16_romanian_ci', 'utf16', 104, '', 'Yes', 8),\n('utf16_slovenian_ci', 'utf16', 105, '', 'Yes', 8),\n('utf16_polish_ci', 'utf16', 106, '', 'Yes', 8),\n('utf16_estonian_ci', 'utf16', 107, '', 'Yes', 8),\n('utf16_spanish_ci', 'utf16', 108, '', 'Yes', 8),\n('utf16_swedish_ci', 'utf16', 109, '', 'Yes', 8),\n('utf16_turkish_ci', 'utf16', 110, '', 'Yes', 8),\n('utf16_czech_ci', 'utf16', 111, '', 'Yes', 8),\n('utf16_danish_ci', 'utf16', 112, '', 'Yes', 8),\n('utf16_lithuanian_ci', 'utf16', 113, '', 'Yes', 8),\n('utf16_slovak_ci', 'utf16', 114, '', 'Yes', 8),\n('utf16_spanish2_ci', 'utf16', 115, '', 'Yes', 8),\n('utf16_roman_ci', 'utf16', 116, '', 'Yes', 8),\n('utf16_persian_ci', 'utf16', 117, '', 'Yes', 8),\n('utf16_esperanto_ci', 'utf16', 118, '', 'Yes', 8),\n('utf16_hungarian_ci', 'utf16', 119, '', 'Yes', 8),\n('utf16_sinhala_ci', 'utf16', 120, '', 'Yes', 8),\n('cp1256_general_ci', 'cp1256', 57, 'Yes', 'Yes', 1),\n('cp1256_bin', 'cp1256', 67, '', 'Yes', 1),\n('cp1257_lithuanian_ci', 'cp1257', 29, '', 'Yes', 1),\n('cp1257_bin', 'cp1257', 58, '', 'Yes', 1),\n('cp1257_general_ci', 'cp1257', 59, 'Yes', 'Yes', 1),\n('utf32_general_ci', 'utf32', 60, 'Yes', 'Yes', 1),\n('utf32_bin', 'utf32', 61, '', 'Yes', 1),\n('utf32_unicode_ci', 'utf32', 160, '', 'Yes', 8),\n('utf32_icelandic_ci', 'utf32', 161, '', 'Yes', 8),\n('utf32_latvian_ci', 'utf32', 162, '', 'Yes', 8),\n('utf32_romanian_ci', 'utf32', 163, '', 'Yes', 8),\n('utf32_slovenian_ci', 'utf32', 164, '', 'Yes', 8),\n('utf32_polish_ci', 'utf32', 165, '', 'Yes', 8),\n('utf32_estonian_ci', 'utf32', 166, '', 'Yes', 8),\n('utf32_spanish_ci', 'utf32', 167, '', 'Yes', 8),\n('utf32_swedish_ci', 'utf32', 168, '', 'Yes', 8),\n('utf32_turkish_ci', 'utf32', 169, '', 'Yes', 8),\n('utf32_czech_ci', 'utf32', 170, '', 'Yes', 8),\n('utf32_danish_ci', 'utf32', 171, '', 'Yes', 8),\n('utf32_lithuanian_ci', 'utf32', 172, '', 'Yes', 8),\n('utf32_slovak_ci', 'utf32', 173, '', 'Yes', 8),\n('utf32_spanish2_ci', 'utf32', 174, '', 'Yes', 8),\n('utf32_roman_ci', 'utf32', 175, '', 'Yes', 8),\n('utf32_persian_ci', 'utf32', 176, '', 'Yes', 8),\n('utf32_esperanto_ci', 'utf32', 177, '', 'Yes', 8),\n('utf32_hungarian_ci', 'utf32', 178, '', 'Yes', 8),\n('utf32_sinhala_ci', 'utf32', 179, '', 'Yes', 8),\n('binary', 'binary', 63, 'Yes', 'Yes', 1),\n('geostd8_general_ci', 'geostd8', 92, 'Yes', 'Yes', 1),\n('geostd8_bin', 'geostd8', 93, '', 'Yes', 1),\n('cp932_japanese_ci', 'cp932', 95, 'Yes', 'Yes', 1),\n('cp932_bin', 'cp932', 96, '', 'Yes', 1),\n('eucjpms_japanese_ci', 'eucjpms', 97, 'Yes', 'Yes', 1),\n('eucjpms_bin', 'eucjpms', 98, '', 'Yes', 1);\n\n-- --------------------------------------------------------\n\n--\n-- Table 
structure for table `COLLATION_CHARACTER_SET_APPLICABILITY`\n--\n\nCREATE TEMPORARY TABLE `COLLATION_CHARACTER_SET_APPLICABILITY` (\n `COLLATION_NAME` varchar(32) NOT NULL DEFAULT '',\n `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `COLLATION_CHARACTER_SET_APPLICABILITY`\n--\n\nINSERT INTO `COLLATION_CHARACTER_SET_APPLICABILITY` (`COLLATION_NAME`, `CHARACTER_SET_NAME`) VALUES\n('big5_chinese_ci', 'big5'),\n('big5_bin', 'big5'),\n('dec8_swedish_ci', 'dec8'),\n('dec8_bin', 'dec8'),\n('cp850_general_ci', 'cp850'),\n('cp850_bin', 'cp850'),\n('hp8_english_ci', 'hp8'),\n('hp8_bin', 'hp8'),\n('koi8r_general_ci', 'koi8r'),\n('koi8r_bin', 'koi8r'),\n('latin1_german1_ci', 'latin1'),\n('latin1_swedish_ci', 'latin1'),\n('latin1_danish_ci', 'latin1'),\n('latin1_german2_ci', 'latin1'),\n('latin1_bin', 'latin1'),\n('latin1_general_ci', 'latin1'),\n('latin1_general_cs', 'latin1'),\n('latin1_spanish_ci', 'latin1'),\n('latin2_czech_cs', 'latin2'),\n('latin2_general_ci', 'latin2'),\n('latin2_hungarian_ci', 'latin2'),\n('latin2_croatian_ci', 'latin2'),\n('latin2_bin', 'latin2'),\n('swe7_swedish_ci', 'swe7'),\n('swe7_bin', 'swe7'),\n('ascii_general_ci', 'ascii'),\n('ascii_bin', 'ascii'),\n('ujis_japanese_ci', 'ujis'),\n('ujis_bin', 'ujis'),\n('sjis_japanese_ci', 'sjis'),\n('sjis_bin', 'sjis'),\n('hebrew_general_ci', 'hebrew'),\n('hebrew_bin', 'hebrew'),\n('tis620_thai_ci', 'tis620'),\n('tis620_bin', 'tis620'),\n('euckr_korean_ci', 'euckr'),\n('euckr_bin', 'euckr'),\n('koi8u_general_ci', 'koi8u'),\n('koi8u_bin', 'koi8u'),\n('gb2312_chinese_ci', 'gb2312'),\n('gb2312_bin', 'gb2312'),\n('greek_general_ci', 'greek'),\n('greek_bin', 'greek'),\n('cp1250_general_ci', 'cp1250'),\n('cp1250_czech_cs', 'cp1250'),\n('cp1250_croatian_ci', 'cp1250'),\n('cp1250_bin', 'cp1250'),\n('cp1250_polish_ci', 'cp1250'),\n('gbk_chinese_ci', 'gbk'),\n('gbk_bin', 'gbk'),\n('latin5_turkish_ci', 'latin5'),\n('latin5_bin', 'latin5'),\n('armscii8_general_ci', 'armscii8'),\n('armscii8_bin', 'armscii8'),\n('utf8_general_ci', 'utf8'),\n('utf8_bin', 'utf8'),\n('utf8_unicode_ci', 'utf8'),\n('utf8_icelandic_ci', 'utf8'),\n('utf8_latvian_ci', 'utf8'),\n('utf8_romanian_ci', 'utf8'),\n('utf8_slovenian_ci', 'utf8'),\n('utf8_polish_ci', 'utf8'),\n('utf8_estonian_ci', 'utf8'),\n('utf8_spanish_ci', 'utf8'),\n('utf8_swedish_ci', 'utf8'),\n('utf8_turkish_ci', 'utf8'),\n('utf8_czech_ci', 'utf8'),\n('utf8_danish_ci', 'utf8'),\n('utf8_lithuanian_ci', 'utf8'),\n('utf8_slovak_ci', 'utf8'),\n('utf8_spanish2_ci', 'utf8'),\n('utf8_roman_ci', 'utf8'),\n('utf8_persian_ci', 'utf8'),\n('utf8_esperanto_ci', 'utf8'),\n('utf8_hungarian_ci', 'utf8'),\n('utf8_sinhala_ci', 'utf8'),\n('utf8_general_mysql500_ci', 'utf8'),\n('ucs2_general_ci', 'ucs2'),\n('ucs2_bin', 'ucs2'),\n('ucs2_unicode_ci', 'ucs2'),\n('ucs2_icelandic_ci', 'ucs2'),\n('ucs2_latvian_ci', 'ucs2'),\n('ucs2_romanian_ci', 'ucs2'),\n('ucs2_slovenian_ci', 'ucs2'),\n('ucs2_polish_ci', 'ucs2'),\n('ucs2_estonian_ci', 'ucs2'),\n('ucs2_spanish_ci', 'ucs2'),\n('ucs2_swedish_ci', 'ucs2'),\n('ucs2_turkish_ci', 'ucs2'),\n('ucs2_czech_ci', 'ucs2'),\n('ucs2_danish_ci', 'ucs2'),\n('ucs2_lithuanian_ci', 'ucs2'),\n('ucs2_slovak_ci', 'ucs2'),\n('ucs2_spanish2_ci', 'ucs2'),\n('ucs2_roman_ci', 'ucs2'),\n('ucs2_persian_ci', 'ucs2'),\n('ucs2_esperanto_ci', 'ucs2'),\n('ucs2_hungarian_ci', 'ucs2'),\n('ucs2_sinhala_ci', 'ucs2'),\n('ucs2_general_mysql500_ci', 'ucs2'),\n('cp866_general_ci', 'cp866'),\n('cp866_bin', 'cp866'),\n('keybcs2_general_ci', 
'keybcs2'),\n('keybcs2_bin', 'keybcs2'),\n('macce_general_ci', 'macce'),\n('macce_bin', 'macce'),\n('macroman_general_ci', 'macroman'),\n('macroman_bin', 'macroman'),\n('cp852_general_ci', 'cp852'),\n('cp852_bin', 'cp852'),\n('latin7_estonian_cs', 'latin7'),\n('latin7_general_ci', 'latin7'),\n('latin7_general_cs', 'latin7'),\n('latin7_bin', 'latin7'),\n('utf8mb4_general_ci', 'utf8mb4'),\n('utf8mb4_bin', 'utf8mb4'),\n('utf8mb4_unicode_ci', 'utf8mb4'),\n('utf8mb4_icelandic_ci', 'utf8mb4'),\n('utf8mb4_latvian_ci', 'utf8mb4'),\n('utf8mb4_romanian_ci', 'utf8mb4'),\n('utf8mb4_slovenian_ci', 'utf8mb4'),\n('utf8mb4_polish_ci', 'utf8mb4'),\n('utf8mb4_estonian_ci', 'utf8mb4'),\n('utf8mb4_spanish_ci', 'utf8mb4'),\n('utf8mb4_swedish_ci', 'utf8mb4'),\n('utf8mb4_turkish_ci', 'utf8mb4'),\n('utf8mb4_czech_ci', 'utf8mb4'),\n('utf8mb4_danish_ci', 'utf8mb4'),\n('utf8mb4_lithuanian_ci', 'utf8mb4'),\n('utf8mb4_slovak_ci', 'utf8mb4'),\n('utf8mb4_spanish2_ci', 'utf8mb4'),\n('utf8mb4_roman_ci', 'utf8mb4'),\n('utf8mb4_persian_ci', 'utf8mb4'),\n('utf8mb4_esperanto_ci', 'utf8mb4'),\n('utf8mb4_hungarian_ci', 'utf8mb4'),\n('utf8mb4_sinhala_ci', 'utf8mb4'),\n('cp1251_bulgarian_ci', 'cp1251'),\n('cp1251_ukrainian_ci', 'cp1251'),\n('cp1251_bin', 'cp1251'),\n('cp1251_general_ci', 'cp1251'),\n('cp1251_general_cs', 'cp1251'),\n('utf16_general_ci', 'utf16'),\n('utf16_bin', 'utf16'),\n('utf16_unicode_ci', 'utf16'),\n('utf16_icelandic_ci', 'utf16'),\n('utf16_latvian_ci', 'utf16'),\n('utf16_romanian_ci', 'utf16'),\n('utf16_slovenian_ci', 'utf16'),\n('utf16_polish_ci', 'utf16'),\n('utf16_estonian_ci', 'utf16'),\n('utf16_spanish_ci', 'utf16'),\n('utf16_swedish_ci', 'utf16'),\n('utf16_turkish_ci', 'utf16'),\n('utf16_czech_ci', 'utf16'),\n('utf16_danish_ci', 'utf16'),\n('utf16_lithuanian_ci', 'utf16'),\n('utf16_slovak_ci', 'utf16'),\n('utf16_spanish2_ci', 'utf16'),\n('utf16_roman_ci', 'utf16'),\n('utf16_persian_ci', 'utf16'),\n('utf16_esperanto_ci', 'utf16'),\n('utf16_hungarian_ci', 'utf16'),\n('utf16_sinhala_ci', 'utf16'),\n('cp1256_general_ci', 'cp1256'),\n('cp1256_bin', 'cp1256'),\n('cp1257_lithuanian_ci', 'cp1257'),\n('cp1257_bin', 'cp1257'),\n('cp1257_general_ci', 'cp1257'),\n('utf32_general_ci', 'utf32'),\n('utf32_bin', 'utf32'),\n('utf32_unicode_ci', 'utf32'),\n('utf32_icelandic_ci', 'utf32'),\n('utf32_latvian_ci', 'utf32'),\n('utf32_romanian_ci', 'utf32'),\n('utf32_slovenian_ci', 'utf32'),\n('utf32_polish_ci', 'utf32'),\n('utf32_estonian_ci', 'utf32'),\n('utf32_spanish_ci', 'utf32'),\n('utf32_swedish_ci', 'utf32'),\n('utf32_turkish_ci', 'utf32'),\n('utf32_czech_ci', 'utf32'),\n('utf32_danish_ci', 'utf32'),\n('utf32_lithuanian_ci', 'utf32'),\n('utf32_slovak_ci', 'utf32'),\n('utf32_spanish2_ci', 'utf32'),\n('utf32_roman_ci', 'utf32'),\n('utf32_persian_ci', 'utf32'),\n('utf32_esperanto_ci', 'utf32'),\n('utf32_hungarian_ci', 'utf32'),\n('utf32_sinhala_ci', 'utf32'),\n('binary', 'binary'),\n('geostd8_general_ci', 'geostd8'),\n('geostd8_bin', 'geostd8'),\n('cp932_japanese_ci', 'cp932'),\n('cp932_bin', 'cp932'),\n('eucjpms_japanese_ci', 'eucjpms'),\n('eucjpms_bin', 'eucjpms');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `COLUMNS`\n--\n\nCREATE TEMPORARY TABLE `COLUMNS` (\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '',\n `ORDINAL_POSITION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `COLUMN_DEFAULT` longtext,\n `IS_NULLABLE` 
varchar(3) NOT NULL DEFAULT '',\n `DATA_TYPE` varchar(64) NOT NULL DEFAULT '',\n `CHARACTER_MAXIMUM_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `CHARACTER_OCTET_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `NUMERIC_PRECISION` bigint(21) unsigned DEFAULT NULL,\n `NUMERIC_SCALE` bigint(21) unsigned DEFAULT NULL,\n `CHARACTER_SET_NAME` varchar(32) DEFAULT NULL,\n `COLLATION_NAME` varchar(32) DEFAULT NULL,\n `COLUMN_TYPE` longtext NOT NULL,\n `COLUMN_KEY` varchar(3) NOT NULL DEFAULT '',\n `EXTRA` varchar(27) NOT NULL DEFAULT '',\n `PRIVILEGES` varchar(80) NOT NULL DEFAULT '',\n `COLUMN_COMMENT` varchar(1024) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `COLUMNS`\n--\n\nINSERT INTO `COLUMNS` (`TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `COLUMN_NAME`, `ORDINAL_POSITION`, `COLUMN_DEFAULT`, `IS_NULLABLE`, `DATA_TYPE`, `CHARACTER_MAXIMUM_LENGTH`, `CHARACTER_OCTET_LENGTH`, `NUMERIC_PRECISION`, `NUMERIC_SCALE`, `CHARACTER_SET_NAME`, `COLLATION_NAME`, `COLUMN_TYPE`, `COLUMN_KEY`, `EXTRA`, `PRIVILEGES`, `COLUMN_COMMENT`) VALUES\n('def', 'information_schema', 'CHARACTER_SETS', 'CHARACTER_SET_NAME', 1, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'CHARACTER_SETS', 'DEFAULT_COLLATE_NAME', 2, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'CHARACTER_SETS', 'DESCRIPTION', 3, '', 'NO', 'varchar', 60, 180, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(60)', '', '', 'select', ''),\n('def', 'information_schema', 'CHARACTER_SETS', 'MAXLEN', 4, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'COLLATION_NAME', 1, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'CHARACTER_SET_NAME', 2, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'ID', 3, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(11)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'IS_DEFAULT', 4, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'IS_COMPILED', 5, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATIONS', 'SORTLEN', 6, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATION_CHARACTER_SET_APPLICABILITY', 'COLLATION_NAME', 1, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLLATION_CHARACTER_SET_APPLICABILITY', 'CHARACTER_SET_NAME', 2, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'TABLE_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'TABLE_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'TABLE_NAME', 3, '', 'NO', 
'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLUMN_NAME', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'ORDINAL_POSITION', 5, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLUMN_DEFAULT', 6, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'IS_NULLABLE', 7, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'DATA_TYPE', 8, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'CHARACTER_MAXIMUM_LENGTH', 9, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'CHARACTER_OCTET_LENGTH', 10, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'NUMERIC_PRECISION', 11, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'NUMERIC_SCALE', 12, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'CHARACTER_SET_NAME', 13, NULL, 'YES', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLLATION_NAME', 14, NULL, 'YES', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLUMN_TYPE', 15, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLUMN_KEY', 16, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'EXTRA', 17, '', 'NO', 'varchar', 27, 81, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(27)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'PRIVILEGES', 18, '', 'NO', 'varchar', 80, 240, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(80)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMNS', 'COLUMN_COMMENT', 19, '', 'NO', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'GRANTEE', 1, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'TABLE_CATALOG', 2, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'TABLE_SCHEMA', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'TABLE_NAME', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 
'information_schema', 'COLUMN_PRIVILEGES', 'COLUMN_NAME', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'PRIVILEGE_TYPE', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'IS_GRANTABLE', 7, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'ENGINE', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'SUPPORT', 2, '', 'NO', 'varchar', 8, 24, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'COMMENT', 3, '', 'NO', 'varchar', 80, 240, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(80)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'TRANSACTIONS', 4, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'XA', 5, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'ENGINES', 'SAVEPOINTS', 6, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_CATALOG', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'DEFINER', 4, '', 'NO', 'varchar', 77, 231, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(77)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'TIME_ZONE', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_BODY', 6, '', 'NO', 'varchar', 8, 24, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_DEFINITION', 7, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_TYPE', 8, '', 'NO', 'varchar', 9, 27, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(9)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EXECUTE_AT', 9, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'INTERVAL_VALUE', 10, NULL, 'YES', 'varchar', 256, 768, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(256)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'INTERVAL_FIELD', 11, NULL, 'YES', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'SQL_MODE', 12, '', 'NO', 'varchar', 8192, 24576, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8192)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'STARTS', 
13, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'ENDS', 14, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'STATUS', 15, '', 'NO', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'ON_COMPLETION', 16, '', 'NO', 'varchar', 12, 36, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(12)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'CREATED', 17, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'LAST_ALTERED', 18, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'LAST_EXECUTED', 19, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'EVENT_COMMENT', 20, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'ORIGINATOR', 21, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(10)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'CHARACTER_SET_CLIENT', 22, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'COLLATION_CONNECTION', 23, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'EVENTS', 'DATABASE_COLLATION', 24, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'FILE_ID', 1, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'FILE_NAME', 2, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'FILE_TYPE', 3, '', 'NO', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TABLESPACE_NAME', 4, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TABLE_CATALOG', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TABLE_SCHEMA', 6, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TABLE_NAME', 7, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'LOGFILE_GROUP_NAME', 8, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'LOGFILE_GROUP_NUMBER', 9, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'ENGINE', 10, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', 
''),\n('def', 'information_schema', 'FILES', 'FULLTEXT_KEYS', 11, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'DELETED_ROWS', 12, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'UPDATE_COUNT', 13, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'FREE_EXTENTS', 14, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TOTAL_EXTENTS', 15, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'EXTENT_SIZE', 16, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'INITIAL_SIZE', 17, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'MAXIMUM_SIZE', 18, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'AUTOEXTEND_SIZE', 19, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'CREATION_TIME', 20, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'LAST_UPDATE_TIME', 21, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'LAST_ACCESS_TIME', 22, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'RECOVER_TIME', 23, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TRANSACTION_COUNTER', 24, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'VERSION', 25, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'ROW_FORMAT', 26, NULL, 'YES', 'varchar', 10, 30, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(10)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'TABLE_ROWS', 27, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'AVG_ROW_LENGTH', 28, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'DATA_LENGTH', 29, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'MAX_DATA_LENGTH', 30, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'INDEX_LENGTH', 31, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'DATA_FREE', 32, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'CREATE_TIME', 33, NULL, 'YES', 
'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'UPDATE_TIME', 34, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'CHECK_TIME', 35, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'CHECKSUM', 36, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'STATUS', 37, '', 'NO', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'FILES', 'EXTRA', 38, NULL, 'YES', 'varchar', 255, 765, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'information_schema', 'GLOBAL_STATUS', 'VARIABLE_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'GLOBAL_STATUS', 'VARIABLE_VALUE', 2, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'GLOBAL_VARIABLES', 'VARIABLE_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'GLOBAL_VARIABLES', 'VARIABLE_VALUE', 2, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'CONSTRAINT_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'CONSTRAINT_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'CONSTRAINT_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'TABLE_CATALOG', 4, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'TABLE_SCHEMA', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'TABLE_NAME', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'COLUMN_NAME', 7, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'ORDINAL_POSITION', 8, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(10)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'POSITION_IN_UNIQUE_CONSTRAINT', 9, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(10)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'REFERENCED_TABLE_SCHEMA', 10, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'REFERENCED_TABLE_NAME', 11, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', 
'', '', 'select', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'REFERENCED_COLUMN_NAME', 12, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'SPECIFIC_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'SPECIFIC_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'SPECIFIC_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'ORDINAL_POSITION', 4, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'PARAMETER_MODE', 5, NULL, 'YES', 'varchar', 5, 15, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(5)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'PARAMETER_NAME', 6, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'DATA_TYPE', 7, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'CHARACTER_MAXIMUM_LENGTH', 8, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'CHARACTER_OCTET_LENGTH', 9, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'NUMERIC_PRECISION', 10, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'NUMERIC_SCALE', 11, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'CHARACTER_SET_NAME', 12, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'COLLATION_NAME', 13, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'DTD_IDENTIFIER', 14, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PARAMETERS', 'ROUTINE_TYPE', 15, '', 'NO', 'varchar', 9, 27, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(9)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'TABLE_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'TABLE_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'TABLE_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_NAME', 4, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'SUBPARTITION_NAME', 5, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 
'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_ORDINAL_POSITION', 6, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'SUBPARTITION_ORDINAL_POSITION', 7, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_METHOD', 8, NULL, 'YES', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'SUBPARTITION_METHOD', 9, NULL, 'YES', 'varchar', 12, 36, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(12)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_EXPRESSION', 10, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'SUBPARTITION_EXPRESSION', 11, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_DESCRIPTION', 12, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'TABLE_ROWS', 13, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'AVG_ROW_LENGTH', 14, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'DATA_LENGTH', 15, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'MAX_DATA_LENGTH', 16, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'INDEX_LENGTH', 17, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'DATA_FREE', 18, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'CREATE_TIME', 19, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'UPDATE_TIME', 20, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'CHECK_TIME', 21, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'CHECKSUM', 22, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'PARTITION_COMMENT', 23, '', 'NO', 'varchar', 80, 240, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(80)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'NODEGROUP', 24, '', 'NO', 'varchar', 12, 36, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(12)', '', '', 'select', ''),\n('def', 'information_schema', 'PARTITIONS', 'TABLESPACE_NAME', 25, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 
'PLUGINS', 'PLUGIN_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_VERSION', 2, '', 'NO', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_STATUS', 3, '', 'NO', 'varchar', 10, 30, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(10)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_TYPE', 4, '', 'NO', 'varchar', 80, 240, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(80)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_TYPE_VERSION', 5, '', 'NO', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_LIBRARY', 6, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_LIBRARY_VERSION', 7, NULL, 'YES', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_AUTHOR', 8, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_DESCRIPTION', 9, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'PLUGIN_LICENSE', 10, NULL, 'YES', 'varchar', 80, 240, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(80)', '', '', 'select', ''),\n('def', 'information_schema', 'PLUGINS', 'LOAD_OPTION', 11, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'ID', 1, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'USER', 2, '', 'NO', 'varchar', 16, 48, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(16)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'HOST', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'DB', 4, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'COMMAND', 5, '', 'NO', 'varchar', 16, 48, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(16)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'TIME', 6, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(7)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'STATE', 7, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'PROCESSLIST', 'INFO', 8, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'QUERY_ID', 1, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'SEQ', 2, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'STATE', 3, '', 'NO', 'varchar', 30, 90, NULL, NULL, 'utf8', 
'utf8_general_ci', 'varchar(30)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'DURATION', 4, '0.000000', 'NO', 'decimal', NULL, NULL, 9, 6, NULL, NULL, 'decimal(9,6)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'CPU_USER', 5, NULL, 'YES', 'decimal', NULL, NULL, 9, 6, NULL, NULL, 'decimal(9,6)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'CPU_SYSTEM', 6, NULL, 'YES', 'decimal', NULL, NULL, 9, 6, NULL, NULL, 'decimal(9,6)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'CONTEXT_VOLUNTARY', 7, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'CONTEXT_INVOLUNTARY', 8, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'BLOCK_OPS_IN', 9, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'BLOCK_OPS_OUT', 10, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'MESSAGES_SENT', 11, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'MESSAGES_RECEIVED', 12, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'PAGE_FAULTS_MAJOR', 13, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'PAGE_FAULTS_MINOR', 14, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'SWAPS', 15, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'SOURCE_FUNCTION', 16, NULL, 'YES', 'varchar', 30, 90, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(30)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'SOURCE_FILE', 17, NULL, 'YES', 'varchar', 20, 60, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(20)', '', '', 'select', ''),\n('def', 'information_schema', 'PROFILING', 'SOURCE_LINE', 18, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(20)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'CONSTRAINT_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'CONSTRAINT_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'CONSTRAINT_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'UNIQUE_CONSTRAINT_CATALOG', 4, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'UNIQUE_CONSTRAINT_SCHEMA', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'UNIQUE_CONSTRAINT_NAME', 6, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 
'information_schema', 'REFERENTIAL_CONSTRAINTS', 'MATCH_OPTION', 7, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'UPDATE_RULE', 8, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'DELETE_RULE', 9, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'TABLE_NAME', 10, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'REFERENCED_TABLE_NAME', 11, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'SPECIFIC_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_CATALOG', 2, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_SCHEMA', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_NAME', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_TYPE', 5, '', 'NO', 'varchar', 9, 27, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(9)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'DATA_TYPE', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'CHARACTER_MAXIMUM_LENGTH', 7, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'CHARACTER_OCTET_LENGTH', 8, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'NUMERIC_PRECISION', 9, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'NUMERIC_SCALE', 10, NULL, 'YES', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(21)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'CHARACTER_SET_NAME', 11, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'COLLATION_NAME', 12, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'DTD_IDENTIFIER', 13, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_BODY', 14, '', 'NO', 'varchar', 8, 24, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_DEFINITION', 15, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'EXTERNAL_NAME', 16, NULL, 'YES', 'varchar', 64, 192, 
NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'EXTERNAL_LANGUAGE', 17, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'PARAMETER_STYLE', 18, '', 'NO', 'varchar', 8, 24, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'IS_DETERMINISTIC', 19, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'SQL_DATA_ACCESS', 20, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'SQL_PATH', 21, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'SECURITY_TYPE', 22, '', 'NO', 'varchar', 7, 21, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(7)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'CREATED', 23, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'LAST_ALTERED', 24, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'SQL_MODE', 25, '', 'NO', 'varchar', 8192, 24576, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8192)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'ROUTINE_COMMENT', 26, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'DEFINER', 27, '', 'NO', 'varchar', 77, 231, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(77)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'CHARACTER_SET_CLIENT', 28, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'COLLATION_CONNECTION', 29, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'ROUTINES', 'DATABASE_COLLATION', 30, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMATA', 'CATALOG_NAME', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMATA', 'SCHEMA_NAME', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMATA', 'DEFAULT_CHARACTER_SET_NAME', 3, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMATA', 'DEFAULT_COLLATION_NAME', 4, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMATA', 'SQL_PATH', 5, NULL, 'YES', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'GRANTEE', 1, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', 
''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'TABLE_CATALOG', 2, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'TABLE_SCHEMA', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'PRIVILEGE_TYPE', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'IS_GRANTABLE', 5, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'SESSION_STATUS', 'VARIABLE_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'SESSION_STATUS', 'VARIABLE_VALUE', 2, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'SESSION_VARIABLES', 'VARIABLE_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'SESSION_VARIABLES', 'VARIABLE_VALUE', 2, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'TABLE_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'TABLE_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'TABLE_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'NON_UNIQUE', 4, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(1)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'INDEX_SCHEMA', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'INDEX_NAME', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'SEQ_IN_INDEX', 7, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(2)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'COLUMN_NAME', 8, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'COLLATION', 9, NULL, 'YES', 'varchar', 1, 3, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'CARDINALITY', 10, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(21)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'SUB_PART', 11, NULL, 'YES', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(3)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'PACKED', 12, NULL, 'YES', 'varchar', 10, 30, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(10)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'NULLABLE', 13, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 
'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'INDEX_TYPE', 14, '', 'NO', 'varchar', 16, 48, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(16)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'COMMENT', 15, NULL, 'YES', 'varchar', 16, 48, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(16)', '', '', 'select', ''),\n('def', 'information_schema', 'STATISTICS', 'INDEX_COMMENT', 16, '', 'NO', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_TYPE', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'ENGINE', 5, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'VERSION', 6, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'ROW_FORMAT', 7, NULL, 'YES', 'varchar', 10, 30, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(10)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_ROWS', 8, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'AVG_ROW_LENGTH', 9, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'DATA_LENGTH', 10, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'MAX_DATA_LENGTH', 11, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'INDEX_LENGTH', 12, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'DATA_FREE', 13, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'AUTO_INCREMENT', 14, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'CREATE_TIME', 15, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'UPDATE_TIME', 16, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'CHECK_TIME', 17, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_COLLATION', 18, NULL, 'YES', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'CHECKSUM', 19, NULL, 'YES', 'bigint', 
NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'CREATE_OPTIONS', 20, NULL, 'YES', 'varchar', 255, 765, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLES', 'TABLE_COMMENT', 21, '', 'NO', 'varchar', 2048, 6144, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(2048)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'TABLESPACE_NAME', 1, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'ENGINE', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'TABLESPACE_TYPE', 3, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'LOGFILE_GROUP_NAME', 4, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'EXTENT_SIZE', 5, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'AUTOEXTEND_SIZE', 6, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'MAXIMUM_SIZE', 7, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'TABLESPACES', 'NODEGROUP_ID', 8, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', '');\nINSERT INTO `COLUMNS` (`TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `COLUMN_NAME`, `ORDINAL_POSITION`, `COLUMN_DEFAULT`, `IS_NULLABLE`, `DATA_TYPE`, `CHARACTER_MAXIMUM_LENGTH`, `CHARACTER_OCTET_LENGTH`, `NUMERIC_PRECISION`, `NUMERIC_SCALE`, `CHARACTER_SET_NAME`, `COLLATION_NAME`, `COLUMN_TYPE`, `COLUMN_KEY`, `EXTRA`, `PRIVILEGES`, `COLUMN_COMMENT`) VALUES\n('def', 'information_schema', 'TABLESPACES', 'TABLESPACE_COMMENT', 9, NULL, 'YES', 'varchar', 2048, 6144, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(2048)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'CONSTRAINT_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'CONSTRAINT_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'CONSTRAINT_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'TABLE_SCHEMA', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'TABLE_NAME', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'CONSTRAINT_TYPE', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'GRANTEE', 1, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 
'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'TABLE_CATALOG', 2, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'TABLE_SCHEMA', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'TABLE_NAME', 4, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'PRIVILEGE_TYPE', 5, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'IS_GRANTABLE', 6, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'TRIGGER_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'TRIGGER_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'TRIGGER_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'EVENT_MANIPULATION', 4, '', 'NO', 'varchar', 6, 18, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(6)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'EVENT_OBJECT_CATALOG', 5, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'EVENT_OBJECT_SCHEMA', 6, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'EVENT_OBJECT_TABLE', 7, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_ORDER', 8, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(4)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_CONDITION', 9, NULL, 'YES', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_STATEMENT', 10, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_ORIENTATION', 11, '', 'NO', 'varchar', 9, 27, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(9)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_TIMING', 12, '', 'NO', 'varchar', 6, 18, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(6)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_REFERENCE_OLD_TABLE', 13, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_REFERENCE_NEW_TABLE', 14, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_REFERENCE_OLD_ROW', 15, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 
'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'ACTION_REFERENCE_NEW_ROW', 16, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'CREATED', 17, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'SQL_MODE', 18, '', 'NO', 'varchar', 8192, 24576, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8192)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'DEFINER', 19, '', 'NO', 'varchar', 77, 231, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(77)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'CHARACTER_SET_CLIENT', 20, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'COLLATION_CONNECTION', 21, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'TRIGGERS', 'DATABASE_COLLATION', 22, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'USER_PRIVILEGES', 'GRANTEE', 1, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'USER_PRIVILEGES', 'TABLE_CATALOG', 2, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'USER_PRIVILEGES', 'PRIVILEGE_TYPE', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'USER_PRIVILEGES', 'IS_GRANTABLE', 4, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'TABLE_CATALOG', 1, '', 'NO', 'varchar', 512, 1536, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(512)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'TABLE_SCHEMA', 2, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'TABLE_NAME', 3, '', 'NO', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'VIEW_DEFINITION', 4, NULL, 'NO', 'longtext', 4294967295, 4294967295, NULL, NULL, 'utf8', 'utf8_general_ci', 'longtext', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'CHECK_OPTION', 5, '', 'NO', 'varchar', 8, 24, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'IS_UPDATABLE', 6, '', 'NO', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'DEFINER', 7, '', 'NO', 'varchar', 77, 231, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(77)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'SECURITY_TYPE', 8, '', 'NO', 'varchar', 7, 21, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(7)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'CHARACTER_SET_CLIENT', 9, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'VIEWS', 'COLLATION_CONNECTION', 10, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 
'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'POOL_ID', 1, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'BLOCK_ID', 2, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'SPACE', 3, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'PAGE_NUMBER', 4, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'PAGE_TYPE', 5, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'FLUSH_TYPE', 6, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'FIX_COUNT', 7, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'IS_HASHED', 8, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'NEWEST_MODIFICATION', 9, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'OLDEST_MODIFICATION', 10, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'ACCESS_TIME', 11, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'TABLE_NAME', 12, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'INDEX_NAME', 13, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'NUMBER_RECORDS', 14, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'DATA_SIZE', 15, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'COMPRESSED_SIZE', 16, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'PAGE_STATE', 17, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'IO_FIX', 18, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'IS_OLD', 19, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'FREE_PAGE_CLOCK', 20, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 
'information_schema', 'INNODB_TRX', 'trx_id', 1, '', 'NO', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_state', 2, '', 'NO', 'varchar', 13, 39, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(13)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_started', 3, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_requested_lock_id', 4, NULL, 'YES', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_wait_started', 5, NULL, 'YES', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_weight', 6, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_mysql_thread_id', 7, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_query', 8, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_operation_state', 9, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_tables_in_use', 10, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_tables_locked', 11, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_lock_structs', 12, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_lock_memory_bytes', 13, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_rows_locked', 14, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_rows_modified', 15, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_concurrency_tickets', 16, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_isolation_level', 17, '', 'NO', 'varchar', 16, 48, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(16)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_unique_checks', 18, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(1)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_foreign_key_checks', 19, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(1)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_last_foreign_key_error', 20, NULL, 'YES', 'varchar', 256, 768, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(256)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_adaptive_hash_latched', 21, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(1)', '', '', 'select', 
''),\n('def', 'information_schema', 'INNODB_TRX', 'trx_adaptive_hash_timeout', 22, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'POOL_ID', 1, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'POOL_SIZE', 2, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'FREE_BUFFERS', 3, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'DATABASE_PAGES', 4, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'OLD_DATABASE_PAGES', 5, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'MODIFIED_DATABASE_PAGES', 6, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PENDING_DECOMPRESS', 7, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PENDING_READS', 8, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PENDING_FLUSH_LRU', 9, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PENDING_FLUSH_LIST', 10, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_MADE_YOUNG', 11, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_NOT_MADE_YOUNG', 12, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_MADE_YOUNG_RATE', 13, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_MADE_NOT_YOUNG_RATE', 14, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_PAGES_READ', 15, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_PAGES_CREATED', 16, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_PAGES_WRITTEN', 17, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_READ_RATE', 18, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_CREATE_RATE', 19, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 
'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'PAGES_WRITTEN_RATE', 20, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_PAGES_GET', 21, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'HIT_RATE', 22, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'YOUNG_MAKE_PER_THOUSAND_GETS', 23, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NOT_YOUNG_MAKE_PER_THOUSAND_GETS', 24, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_PAGES_READ_AHEAD', 25, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'NUMBER_READ_AHEAD_EVICTED', 26, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'READ_AHEAD_RATE', 27, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'READ_AHEAD_EVICTED_RATE', 28, '0', 'NO', 'double', NULL, NULL, 12, NULL, NULL, NULL, 'double', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'LRU_IO_TOTAL', 29, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'LRU_IO_CURRENT', 30, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'UNCOMPRESS_TOTAL', 31, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'UNCOMPRESS_CURRENT', 32, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', 'requesting_trx_id', 1, '', 'NO', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', 'requested_lock_id', 2, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', 'blocking_trx_id', 3, '', 'NO', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', 'blocking_lock_id', 4, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'page_size', 1, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(5)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'buffer_pool_instance', 2, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'pages_used', 3, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', 
'', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'pages_free', 4, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'relocation_ops', 5, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(21)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'relocation_time', 6, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'page_size', 1, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(5)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'compress_ops', 2, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'compress_ops_ok', 3, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'compress_time', 4, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'uncompress_ops', 5, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP', 'uncompress_time', 6, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_id', 1, '', 'NO', 'varchar', 81, 243, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(81)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_trx_id', 2, '', 'NO', 'varchar', 18, 54, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(18)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_mode', 3, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_type', 4, '', 'NO', 'varchar', 32, 96, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(32)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_table', 5, '', 'NO', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_index', 6, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_space', 7, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_page', 8, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_rec', 9, NULL, 'YES', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'lock_data', 10, NULL, 'YES', 'varchar', 8192, 24576, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(8192)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'page_size', 1, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(5)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'buffer_pool_instance', 2, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'pages_used', 3, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 
'information_schema', 'INNODB_CMPMEM_RESET', 'pages_free', 4, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'relocation_ops', 5, '0', 'NO', 'bigint', NULL, NULL, 19, 0, NULL, NULL, 'bigint(21)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'relocation_time', 6, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'page_size', 1, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(5)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'compress_ops', 2, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'compress_ops_ok', 3, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'compress_time', 4, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'uncompress_ops', 5, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'uncompress_time', 6, '0', 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'POOL_ID', 1, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'LRU_POSITION', 2, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'SPACE', 3, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'PAGE_NUMBER', 4, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'PAGE_TYPE', 5, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'FLUSH_TYPE', 6, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'FIX_COUNT', 7, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'IS_HASHED', 8, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'NEWEST_MODIFICATION', 9, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'OLDEST_MODIFICATION', 10, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'ACCESS_TIME', 11, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'TABLE_NAME', 12, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 
'INNODB_BUFFER_PAGE_LRU', 'INDEX_NAME', 13, NULL, 'YES', 'varchar', 1024, 3072, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(1024)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'NUMBER_RECORDS', 14, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'DATA_SIZE', 15, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'COMPRESSED_SIZE', 16, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'COMPRESSED', 17, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'IO_FIX', 18, NULL, 'YES', 'varchar', 64, 192, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(64)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'IS_OLD', 19, NULL, 'YES', 'varchar', 3, 9, NULL, NULL, 'utf8', 'utf8_general_ci', 'varchar(3)', '', '', 'select', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'FREE_PAGE_CLOCK', 20, '0', 'NO', 'bigint', NULL, NULL, 20, 0, NULL, NULL, 'bigint(21) unsigned', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'ID', 1, NULL, 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(255)', 'MUL', '', 'select', ''),\n('def', 'inventory', 'Inout', 'StudentID', 2, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'Use', 3, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'DateIn', 4, '0000-00-00 00:00:00', 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'DateOut', 5, NULL, 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'UserOut', 6, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'UserIn', 7, 'N/A', 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inout', 'Issues', 8, NULL, 'NO', 'text', 65535, 65535, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'text', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'ID', 1, NULL, 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(255)', 'PRI', 'auto_increment', 'select', ''),\n('def', 'inventory', 'Inventory', 'SerialNumber', 2, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'DeviceSerial', 3, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'Type', 4, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'Description', 5, NULL, 'NO', 'text', 65535, 65535, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'text', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'Issues', 6, NULL, 'NO', 'text', 65535, 65535, NULL, NULL, 'latin1', 'latin1_swedish_ci', 
'text', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'PhotoName', 7, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Inventory', 'State', 8, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Sessions', 'ID', 1, NULL, 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(255)', 'PRI', 'auto_increment', 'select', ''),\n('def', 'inventory', 'Sessions', 'SessionID', 2, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Sessions', 'UserName', 3, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Sessions', 'IP', 4, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Sessions', 'Token', 5, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Sessions', 'Date', 6, NULL, 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'inventory', 'Wiped', 'ID', 1, NULL, 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(255)', 'MUL', '', 'select', ''),\n('def', 'inventory', 'Wiped', 'DeviceID', 2, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Wiped', 'UserName', 3, NULL, 'NO', 'varchar', 255, 255, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(255)', '', '', 'select', ''),\n('def', 'inventory', 'Wiped', 'Date', 4, NULL, 'NO', 'datetime', NULL, NULL, NULL, NULL, NULL, NULL, 'datetime', '', '', 'select', ''),\n('def', 'inventory', 'barcodes', 'id', 1, NULL, 'NO', 'int', NULL, NULL, 10, 0, NULL, NULL, 'int(11)', 'PRI', 'auto_increment', 'select', ''),\n('def', 'inventory', 'barcodes', 'serials', 2, NULL, 'NO', 'varchar', 10, 10, NULL, NULL, 'latin1', 'latin1_swedish_ci', 'varchar(10)', '', '', 'select', '');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `COLUMN_PRIVILEGES`\n--\n\nCREATE TEMPORARY TABLE `COLUMN_PRIVILEGES` (\n `GRANTEE` varchar(81) NOT NULL DEFAULT '',\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '',\n `PRIVILEGE_TYPE` varchar(64) NOT NULL DEFAULT '',\n `IS_GRANTABLE` varchar(3) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `ENGINES`\n--\n\nCREATE TEMPORARY TABLE `ENGINES` (\n `ENGINE` varchar(64) NOT NULL DEFAULT '',\n `SUPPORT` varchar(8) NOT NULL DEFAULT '',\n `COMMENT` varchar(80) NOT NULL DEFAULT '',\n `TRANSACTIONS` varchar(3) DEFAULT NULL,\n `XA` varchar(3) DEFAULT NULL,\n `SAVEPOINTS` varchar(3) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `ENGINES`\n--\n\nINSERT INTO `ENGINES` (`ENGINE`, `SUPPORT`, `COMMENT`, `TRANSACTIONS`, `XA`, `SAVEPOINTS`) VALUES\n('MEMORY', 'YES', 'Hash based, stored in memory, useful for temporary tables', 'NO', 'NO', 'NO'),\n('CSV', 'YES', 'CSV storage engine', 'NO', 'NO', 'NO'),\n('MRG_MYISAM', 'YES', 'Collection of identical MyISAM tables', 'NO', 'NO', 
'NO'),\n('BLACKHOLE', 'YES', '/dev/null storage engine (anything you write to it disappears)', 'NO', 'NO', 'NO'),\n('MyISAM', 'YES', 'MyISAM storage engine', 'NO', 'NO', 'NO'),\n('PERFORMANCE_SCHEMA', 'YES', 'Performance Schema', 'NO', 'NO', 'NO'),\n('ARCHIVE', 'YES', 'Archive storage engine', 'NO', 'NO', 'NO'),\n('InnoDB', 'DEFAULT', 'Supports transactions, row-level locking, and foreign keys', 'YES', 'YES', 'YES'),\n('FEDERATED', 'NO', 'Federated MySQL storage engine', NULL, NULL, NULL);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `EVENTS`\n--\n\nCREATE TEMPORARY TABLE `EVENTS` (\n `EVENT_CATALOG` varchar(64) NOT NULL DEFAULT '',\n `EVENT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `EVENT_NAME` varchar(64) NOT NULL DEFAULT '',\n `DEFINER` varchar(77) NOT NULL DEFAULT '',\n `TIME_ZONE` varchar(64) NOT NULL DEFAULT '',\n `EVENT_BODY` varchar(8) NOT NULL DEFAULT '',\n `EVENT_DEFINITION` longtext NOT NULL,\n `EVENT_TYPE` varchar(9) NOT NULL DEFAULT '',\n `EXECUTE_AT` datetime DEFAULT NULL,\n `INTERVAL_VALUE` varchar(256) DEFAULT NULL,\n `INTERVAL_FIELD` varchar(18) DEFAULT NULL,\n `SQL_MODE` varchar(8192) NOT NULL DEFAULT '',\n `STARTS` datetime DEFAULT NULL,\n `ENDS` datetime DEFAULT NULL,\n `STATUS` varchar(18) NOT NULL DEFAULT '',\n `ON_COMPLETION` varchar(12) NOT NULL DEFAULT '',\n `CREATED` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `LAST_ALTERED` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `LAST_EXECUTED` datetime DEFAULT NULL,\n `EVENT_COMMENT` varchar(64) NOT NULL DEFAULT '',\n `ORIGINATOR` bigint(10) NOT NULL DEFAULT '0',\n `CHARACTER_SET_CLIENT` varchar(32) NOT NULL DEFAULT '',\n `COLLATION_CONNECTION` varchar(32) NOT NULL DEFAULT '',\n `DATABASE_COLLATION` varchar(32) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `FILES`\n--\n\nCREATE TEMPORARY TABLE `FILES` (\n `FILE_ID` bigint(4) NOT NULL DEFAULT '0',\n `FILE_NAME` varchar(64) DEFAULT NULL,\n `FILE_TYPE` varchar(20) NOT NULL DEFAULT '',\n `TABLESPACE_NAME` varchar(64) DEFAULT NULL,\n `TABLE_CATALOG` varchar(64) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) DEFAULT NULL,\n `TABLE_NAME` varchar(64) DEFAULT NULL,\n `LOGFILE_GROUP_NAME` varchar(64) DEFAULT NULL,\n `LOGFILE_GROUP_NUMBER` bigint(4) DEFAULT NULL,\n `ENGINE` varchar(64) NOT NULL DEFAULT '',\n `FULLTEXT_KEYS` varchar(64) DEFAULT NULL,\n `DELETED_ROWS` bigint(4) DEFAULT NULL,\n `UPDATE_COUNT` bigint(4) DEFAULT NULL,\n `FREE_EXTENTS` bigint(4) DEFAULT NULL,\n `TOTAL_EXTENTS` bigint(4) DEFAULT NULL,\n `EXTENT_SIZE` bigint(4) NOT NULL DEFAULT '0',\n `INITIAL_SIZE` bigint(21) unsigned DEFAULT NULL,\n `MAXIMUM_SIZE` bigint(21) unsigned DEFAULT NULL,\n `AUTOEXTEND_SIZE` bigint(21) unsigned DEFAULT NULL,\n `CREATION_TIME` datetime DEFAULT NULL,\n `LAST_UPDATE_TIME` datetime DEFAULT NULL,\n `LAST_ACCESS_TIME` datetime DEFAULT NULL,\n `RECOVER_TIME` bigint(4) DEFAULT NULL,\n `TRANSACTION_COUNTER` bigint(4) DEFAULT NULL,\n `VERSION` bigint(21) unsigned DEFAULT NULL,\n `ROW_FORMAT` varchar(10) DEFAULT NULL,\n `TABLE_ROWS` bigint(21) unsigned DEFAULT NULL,\n `AVG_ROW_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `DATA_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `MAX_DATA_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `INDEX_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `DATA_FREE` bigint(21) unsigned DEFAULT NULL,\n `CREATE_TIME` datetime DEFAULT NULL,\n `UPDATE_TIME` datetime DEFAULT NULL,\n `CHECK_TIME` 
datetime DEFAULT NULL,\n `CHECKSUM` bigint(21) unsigned DEFAULT NULL,\n `STATUS` varchar(20) NOT NULL DEFAULT '',\n `EXTRA` varchar(255) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `GLOBAL_STATUS`\n--\n\nCREATE TEMPORARY TABLE `GLOBAL_STATUS` (\n `VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `VARIABLE_VALUE` varchar(1024) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `GLOBAL_STATUS`\n--\n\nINSERT INTO `GLOBAL_STATUS` (`VARIABLE_NAME`, `VARIABLE_VALUE`) VALUES\n('ABORTED_CLIENTS', '1'),\n('ABORTED_CONNECTS', '114'),\n('BINLOG_CACHE_DISK_USE', '0'),\n('BINLOG_CACHE_USE', '0'),\n('BINLOG_STMT_CACHE_DISK_USE', '0'),\n('BINLOG_STMT_CACHE_USE', '0'),\n('BYTES_RECEIVED', '26482861'),\n('BYTES_SENT', '148728302'),\n('COM_ADMIN_COMMANDS', '127'),\n('COM_ASSIGN_TO_KEYCACHE', '0'),\n('COM_ALTER_DB', '0'),\n('COM_ALTER_DB_UPGRADE', '0'),\n('COM_ALTER_EVENT', '0'),\n('COM_ALTER_FUNCTION', '0'),\n('COM_ALTER_PROCEDURE', '0'),\n('COM_ALTER_SERVER', '0'),\n('COM_ALTER_TABLE', '0'),\n('COM_ALTER_TABLESPACE', '0'),\n('COM_ANALYZE', '0'),\n('COM_BEGIN', '257'),\n('COM_BINLOG', '0'),\n('COM_CALL_PROCEDURE', '0'),\n('COM_CHANGE_DB', '6899'),\n('COM_CHANGE_MASTER', '0'),\n('COM_CHECK', '0'),\n('COM_CHECKSUM', '0'),\n('COM_COMMIT', '50'),\n('COM_CREATE_DB', '0'),\n('COM_CREATE_EVENT', '0'),\n('COM_CREATE_FUNCTION', '0'),\n('COM_CREATE_INDEX', '0'),\n('COM_CREATE_PROCEDURE', '0'),\n('COM_CREATE_SERVER', '0'),\n('COM_CREATE_TABLE', '0'),\n('COM_CREATE_TRIGGER', '0'),\n('COM_CREATE_UDF', '0'),\n('COM_CREATE_USER', '1'),\n('COM_CREATE_VIEW', '0'),\n('COM_DEALLOC_SQL', '0'),\n('COM_DELETE', '5186'),\n('COM_DELETE_MULTI', '5228'),\n('COM_DO', '0'),\n('COM_DROP_DB', '0'),\n('COM_DROP_EVENT', '0'),\n('COM_DROP_FUNCTION', '0'),\n('COM_DROP_INDEX', '0'),\n('COM_DROP_PROCEDURE', '0'),\n('COM_DROP_SERVER', '0'),\n('COM_DROP_TABLE', '0'),\n('COM_DROP_TRIGGER', '0'),\n('COM_DROP_USER', '0'),\n('COM_DROP_VIEW', '0'),\n('COM_EMPTY_QUERY', '0'),\n('COM_EXECUTE_SQL', '0'),\n('COM_FLUSH', '19'),\n('COM_GRANT', '3'),\n('COM_HA_CLOSE', '0'),\n('COM_HA_OPEN', '0'),\n('COM_HA_READ', '0'),\n('COM_HELP', '0'),\n('COM_INSERT', '362'),\n('COM_INSERT_SELECT', '0'),\n('COM_INSTALL_PLUGIN', '0'),\n('COM_KILL', '0'),\n('COM_LOAD', '0'),\n('COM_LOCK_TABLES', '0'),\n('COM_OPTIMIZE', '7'),\n('COM_PRELOAD_KEYS', '0'),\n('COM_PREPARE_SQL', '0'),\n('COM_PURGE', '0'),\n('COM_PURGE_BEFORE_DATE', '0'),\n('COM_RELEASE_SAVEPOINT', '0'),\n('COM_RENAME_TABLE', '0'),\n('COM_RENAME_USER', '0'),\n('COM_REPAIR', '0'),\n('COM_REPLACE', '1057'),\n('COM_REPLACE_SELECT', '0'),\n('COM_RESET', '0'),\n('COM_RESIGNAL', '0'),\n('COM_REVOKE', '4'),\n('COM_REVOKE_ALL', '0'),\n('COM_ROLLBACK', '0'),\n('COM_ROLLBACK_TO_SAVEPOINT', '0'),\n('COM_SAVEPOINT', '0'),\n('COM_SELECT', '43061'),\n('COM_SET_OPTION', '37923'),\n('COM_SIGNAL', '0'),\n('COM_SHOW_AUTHORS', '0'),\n('COM_SHOW_BINLOG_EVENTS', '0'),\n('COM_SHOW_BINLOGS', '58'),\n('COM_SHOW_CHARSETS', '0'),\n('COM_SHOW_COLLATIONS', '0'),\n('COM_SHOW_CONTRIBUTORS', '0'),\n('COM_SHOW_CREATE_DB', '0'),\n('COM_SHOW_CREATE_EVENT', '0'),\n('COM_SHOW_CREATE_FUNC', '0'),\n('COM_SHOW_CREATE_PROC', '0'),\n('COM_SHOW_CREATE_TABLE', '149'),\n('COM_SHOW_CREATE_TRIGGER', '0'),\n('COM_SHOW_DATABASES', '56'),\n('COM_SHOW_ENGINE_LOGS', '0'),\n('COM_SHOW_ENGINE_MUTEX', '0'),\n('COM_SHOW_ENGINE_STATUS', '0'),\n('COM_SHOW_EVENTS', '0'),\n('COM_SHOW_ERRORS', '0'),\n('COM_SHOW_FIELDS', 
'251'),\n('COM_SHOW_FUNCTION_STATUS', '4'),\n('COM_SHOW_GRANTS', '16'),\n('COM_SHOW_KEYS', '97'),\n('COM_SHOW_MASTER_STATUS', '22'),\n('COM_SHOW_OPEN_TABLES', '0'),\n('COM_SHOW_PLUGINS', '266'),\n('COM_SHOW_PRIVILEGES', '0'),\n('COM_SHOW_PROCEDURE_STATUS', '4'),\n('COM_SHOW_PROCESSLIST', '1'),\n('COM_SHOW_PROFILE', '0'),\n('COM_SHOW_PROFILES', '0'),\n('COM_SHOW_RELAYLOG_EVENTS', '0'),\n('COM_SHOW_SLAVE_HOSTS', '0'),\n('COM_SHOW_SLAVE_STATUS', '22'),\n('COM_SHOW_STATUS', '2'),\n('COM_SHOW_STORAGE_ENGINES', '1'),\n('COM_SHOW_TABLE_STATUS', '415'),\n('COM_SHOW_TABLES', '771'),\n('COM_SHOW_TRIGGERS', '87'),\n('COM_SHOW_VARIABLES', '98'),\n('COM_SHOW_WARNINGS', '0'),\n('COM_SLAVE_START', '0'),\n('COM_SLAVE_STOP', '0'),\n('COM_STMT_CLOSE', '24752'),\n('COM_STMT_EXECUTE', '25010'),\n('COM_STMT_FETCH', '0'),\n('COM_STMT_PREPARE', '25010'),\n('COM_STMT_REPREPARE', '0'),\n('COM_STMT_RESET', '0'),\n('COM_STMT_SEND_LONG_DATA', '0'),\n('COM_TRUNCATE', '0'),\n('COM_UNINSTALL_PLUGIN', '0'),\n('COM_UNLOCK_TABLES', '0'),\n('COM_UPDATE', '2977'),\n('COM_UPDATE_MULTI', '0'),\n('COM_XA_COMMIT', '0'),\n('COM_XA_END', '0'),\n('COM_XA_PREPARE', '0'),\n('COM_XA_RECOVER', '0'),\n('COM_XA_ROLLBACK', '0'),\n('COM_XA_START', '0'),\n('COMPRESSION', 'OFF'),\n('CONNECTIONS', '8341'),\n('CREATED_TMP_DISK_TABLES', '10826'),\n('CREATED_TMP_FILES', '5'),\n('CREATED_TMP_TABLES', '21790'),\n('DELAYED_ERRORS', '0'),\n('DELAYED_INSERT_THREADS', '0'),\n('DELAYED_WRITES', '0'),\n('FLUSH_COMMANDS', '1'),\n('HANDLER_COMMIT', '52302'),\n('HANDLER_DELETE', '128'),\n('HANDLER_DISCOVER', '0'),\n('HANDLER_PREPARE', '0'),\n('HANDLER_READ_FIRST', '17613'),\n('HANDLER_READ_KEY', '84924'),\n('HANDLER_READ_LAST', '4'),\n('HANDLER_READ_NEXT', '112673'),\n('HANDLER_READ_PREV', '64'),\n('HANDLER_READ_RND', '19380'),\n('HANDLER_READ_RND_NEXT', '482434'),\n('HANDLER_ROLLBACK', '6258'),\n('HANDLER_SAVEPOINT', '0'),\n('HANDLER_SAVEPOINT_ROLLBACK', '0'),\n('HANDLER_UPDATE', '4785'),\n('HANDLER_WRITE', '9284960'),\n('INNODB_BUFFER_POOL_PAGES_DATA', '2274'),\n('INNODB_BUFFER_POOL_BYTES_DATA', '37257216'),\n('INNODB_BUFFER_POOL_PAGES_DIRTY', '0'),\n('INNODB_BUFFER_POOL_BYTES_DIRTY', '0'),\n('INNODB_BUFFER_POOL_PAGES_FLUSHED', '16045'),\n('INNODB_BUFFER_POOL_PAGES_FREE', '5908'),\n('INNODB_BUFFER_POOL_PAGES_MISC', '10'),\n('INNODB_BUFFER_POOL_PAGES_TOTAL', '8192'),\n('INNODB_BUFFER_POOL_READ_AHEAD_RND', '0'),\n('INNODB_BUFFER_POOL_READ_AHEAD', '0'),\n('INNODB_BUFFER_POOL_READ_AHEAD_EVICTED', '0'),\n('INNODB_BUFFER_POOL_READ_REQUESTS', '1115220'),\n('INNODB_BUFFER_POOL_READS', '2156'),\n('INNODB_BUFFER_POOL_WAIT_FREE', '0'),\n('INNODB_BUFFER_POOL_WRITE_REQUESTS', '33637'),\n('INNODB_DATA_FSYNCS', '16044'),\n('INNODB_DATA_PENDING_FSYNCS', '0'),\n('INNODB_DATA_PENDING_READS', '0'),\n('INNODB_DATA_PENDING_WRITES', '0'),\n('INNODB_DATA_READ', '37507072'),\n('INNODB_DATA_READS', '2166'),\n('INNODB_DATA_WRITES', '27353'),\n('INNODB_DATA_WRITTEN', '538588672'),\n('INNODB_DBLWR_PAGES_WRITTEN', '16045'),\n('INNODB_DBLWR_WRITES', '3847'),\n('INNODB_HAVE_ATOMIC_BUILTINS', 'ON'),\n('INNODB_LOG_WAITS', '0'),\n('INNODB_LOG_WRITE_REQUESTS', '18477'),\n('INNODB_LOG_WRITES', '5010'),\n('INNODB_OS_LOG_FSYNCS', '8350'),\n('INNODB_OS_LOG_PENDING_FSYNCS', '0'),\n('INNODB_OS_LOG_PENDING_WRITES', '0'),\n('INNODB_OS_LOG_WRITTEN', '11115520'),\n('INNODB_PAGE_SIZE', '16384'),\n('INNODB_PAGES_CREATED', '119'),\n('INNODB_PAGES_READ', '2155'),\n('INNODB_PAGES_WRITTEN', '16045'),\n('INNODB_ROW_LOCK_CURRENT_WAITS', '0'),\n('INNODB_ROW_LOCK_TIME', '0'),\n('INNODB_ROW_LOCK_TIME_AVG', 
'0'),\n('INNODB_ROW_LOCK_TIME_MAX', '0'),\n('INNODB_ROW_LOCK_WAITS', '0'),\n('INNODB_ROWS_DELETED', '127'),\n('INNODB_ROWS_INSERTED', '901'),\n('INNODB_ROWS_READ', '459398'),\n('INNODB_ROWS_UPDATED', '3746'),\n('INNODB_TRUNCATED_STATUS_WRITES', '0'),\n('KEY_BLOCKS_NOT_FLUSHED', '0'),\n('KEY_BLOCKS_UNUSED', '13316'),\n('KEY_BLOCKS_USED', '83'),\n('KEY_READ_REQUESTS', '14926002'),\n('KEY_READS', '71'),\n('KEY_WRITE_REQUESTS', '12646'),\n('KEY_WRITES', '405'),\n('LAST_QUERY_COST', '0.000000'),\n('MAX_USED_CONNECTIONS', '7'),\n('NOT_FLUSHED_DELAYED_ROWS', '0'),\n('OPEN_FILES', '92'),\n('OPEN_STREAMS', '0'),\n('OPEN_TABLE_DEFINITIONS', '278'),\n('OPEN_TABLES', '297'),\n('OPENED_FILES', '43807'),\n('OPENED_TABLE_DEFINITIONS', '306'),\n('OPENED_TABLES', '7448'),\n('PERFORMANCE_SCHEMA_COND_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_COND_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_HANDLES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_LOCKER_LOST', '0'),\n('PERFORMANCE_SCHEMA_MUTEX_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_MUTEX_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_RWLOCK_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_RWLOCK_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_TABLE_HANDLES_LOST', '0'),\n('PERFORMANCE_SCHEMA_TABLE_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_THREAD_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_THREAD_INSTANCES_LOST', '0'),\n('PREPARED_STMT_COUNT', '0'),\n('QCACHE_FREE_BLOCKS', '206'),\n('QCACHE_FREE_MEMORY', '12584640'),\n('QCACHE_HITS', '42433'),\n('QCACHE_INSERTS', '15500'),\n('QCACHE_LOWMEM_PRUNES', '0'),\n('QCACHE_NOT_CACHED', '27544'),\n('QCACHE_QUERIES_IN_CACHE', '2084'),\n('QCACHE_TOTAL_BLOCKS', '4487'),\n('QUERIES', '205918'),\n('QUESTIONS', '156028'),\n('RPL_STATUS', 'AUTH_MASTER'),\n('SELECT_FULL_JOIN', '764'),\n('SELECT_FULL_RANGE_JOIN', '0'),\n('SELECT_RANGE', '322'),\n('SELECT_RANGE_CHECK', '0'),\n('SELECT_SCAN', '18875'),\n('SLAVE_HEARTBEAT_PERIOD', '0.000'),\n('SLAVE_OPEN_TEMP_TABLES', '0'),\n('SLAVE_RECEIVED_HEARTBEATS', '0'),\n('SLAVE_RETRIED_TRANSACTIONS', '0'),\n('SLAVE_RUNNING', 'OFF'),\n('SLOW_LAUNCH_THREADS', '0'),\n('SLOW_QUERIES', '0'),\n('SORT_MERGE_PASSES', '0'),\n('SORT_RANGE', '543'),\n('SORT_ROWS', '76563'),\n('SORT_SCAN', '15653'),\n('SSL_ACCEPT_RENEGOTIATES', '0'),\n('SSL_ACCEPTS', '0'),\n('SSL_CALLBACK_CACHE_HITS', '0'),\n('SSL_CIPHER', ''),\n('SSL_CIPHER_LIST', ''),\n('SSL_CLIENT_CONNECTS', '0'),\n('SSL_CONNECT_RENEGOTIATES', '0'),\n('SSL_CTX_VERIFY_DEPTH', '0'),\n('SSL_CTX_VERIFY_MODE', '0'),\n('SSL_DEFAULT_TIMEOUT', '0'),\n('SSL_FINISHED_ACCEPTS', '0'),\n('SSL_FINISHED_CONNECTS', '0'),\n('SSL_SESSION_CACHE_HITS', '0'),\n('SSL_SESSION_CACHE_MISSES', '0'),\n('SSL_SESSION_CACHE_MODE', 'NONE'),\n('SSL_SESSION_CACHE_OVERFLOWS', '0'),\n('SSL_SESSION_CACHE_SIZE', '0'),\n('SSL_SESSION_CACHE_TIMEOUTS', '0'),\n('SSL_SESSIONS_REUSED', '0'),\n('SSL_USED_SESSION_CACHE_ENTRIES', '0'),\n('SSL_VERIFY_DEPTH', '0'),\n('SSL_VERIFY_MODE', '0'),\n('SSL_VERSION', ''),\n('TABLE_LOCKS_IMMEDIATE', '69135'),\n('TABLE_LOCKS_WAITED', '0'),\n('TC_LOG_MAX_PAGES_USED', '0'),\n('TC_LOG_PAGE_SIZE', '0'),\n('TC_LOG_PAGE_WAITS', '0'),\n('THREADS_CACHED', '5'),\n('THREADS_CONNECTED', '2'),\n('THREADS_CREATED', '7'),\n('THREADS_RUNNING', '1'),\n('UPTIME', '1532568'),\n('UPTIME_SINCE_FLUSH_STATUS', '1532568');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `GLOBAL_VARIABLES`\n--\n\nCREATE TEMPORARY TABLE `GLOBAL_VARIABLES` (\n 
`VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `VARIABLE_VALUE` varchar(1024) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `GLOBAL_VARIABLES`\n--\n\nINSERT INTO `GLOBAL_VARIABLES` (`VARIABLE_NAME`, `VARIABLE_VALUE`) VALUES\n('MAX_PREPARED_STMT_COUNT', '16382'),\n('INNODB_BUFFER_POOL_SIZE', '134217728'),\n('HAVE_CRYPT', 'YES'),\n('PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_LONG_SIZE', '10000'),\n('INNODB_VERSION', '5.5.40'),\n('QUERY_PREALLOC_SIZE', '8192'),\n('DELAYED_QUEUE_SIZE', '1000'),\n('PERFORMANCE_SCHEMA_MAX_COND_INSTANCES', '1000'),\n('SSL_CIPHER', ''),\n('COLLATION_SERVER', 'latin1_swedish_ci'),\n('SECURE_FILE_PRIV', ''),\n('TIMED_MUTEXES', 'OFF'),\n('DELAYED_INSERT_TIMEOUT', '300'),\n('PERFORMANCE_SCHEMA_MAX_MUTEX_INSTANCES', '1000000'),\n('LC_TIME_NAMES', 'en_US'),\n('PERFORMANCE_SCHEMA_MAX_RWLOCK_INSTANCES', '1000000'),\n('TIME_FORMAT', '%H:%i:%s'),\n('PERFORMANCE_SCHEMA_MAX_RWLOCK_CLASSES', '30'),\n('BASEDIR', '/usr'),\n('PERFORMANCE_SCHEMA_MAX_MUTEX_CLASSES', '200'),\n('UPDATABLE_VIEWS_WITH_LIMIT', 'YES'),\n('BACK_LOG', '50'),\n('SLOW_LAUNCH_TIME', '2'),\n('EVENT_SCHEDULER', 'OFF'),\n('MAX_SEEKS_FOR_KEY', '18446744073709551615'),\n('PERFORMANCE_SCHEMA_MAX_THREAD_CLASSES', '50'),\n('RELAY_LOG_INDEX', ''),\n('FT_STOPWORD_FILE', '(built-in)'),\n('SQL_QUOTE_SHOW_CREATE', 'ON'),\n('PERFORMANCE_SCHEMA', 'OFF'),\n('QUERY_CACHE_SIZE', '16777216'),\n('BINLOG_FORMAT', 'STATEMENT'),\n('WAIT_TIMEOUT', '28800'),\n('LONG_QUERY_TIME', '10.000000'),\n('PERFORMANCE_SCHEMA_MAX_TABLE_HANDLES', '100000'),\n('CHARACTER_SETS_DIR', '/usr/share/mysql/charsets/'),\n('LOWER_CASE_TABLE_NAMES', '0'),\n('BINLOG_CACHE_SIZE', '32768'),\n('REPORT_HOST', ''),\n('CHARACTER_SET_RESULTS', 'latin1'),\n('MYISAM_SORT_BUFFER_SIZE', '8388608'),\n('CHARACTER_SET_CONNECTION', 'latin1'),\n('INNODB_ROLLBACK_SEGMENTS', '128'),\n('PRELOAD_BUFFER_SIZE', '32768'),\n('LARGE_FILES_SUPPORT', 'ON'),\n('MAX_WRITE_LOCK_COUNT', '18446744073709551615'),\n('SQL_SAFE_UPDATES', 'OFF'),\n('MAX_JOIN_SIZE', '18446744073709551615'),\n('NET_BUFFER_LENGTH', '16384'),\n('FT_QUERY_EXPANSION_LIMIT', '20'),\n('SKIP_SHOW_DATABASE', 'OFF'),\n('FT_MAX_WORD_LEN', '84'),\n('GROUP_CONCAT_MAX_LEN', '1024'),\n('MAX_SP_RECURSION_DEPTH', '0'),\n('RANGE_ALLOC_BLOCK_SIZE', '4096'),\n('SYNC_RELAY_LOG', '0'),\n('OPTIMIZER_PRUNE_LEVEL', '1'),\n('HAVE_QUERY_CACHE', 'YES'),\n('INNODB_LOG_FILE_SIZE', '5242880'),\n('DELAY_KEY_WRITE', 'ON'),\n('TRANSACTION_PREALLOC_SIZE', '4096'),\n('INTERACTIVE_TIMEOUT', '28800'),\n('MYISAM_RECOVER_OPTIONS', 'BACKUP'),\n('AUTOMATIC_SP_PRIVILEGES', 'ON'),\n('PROTOCOL_VERSION', '10'),\n('DELAYED_INSERT_LIMIT', '100'),\n('LOW_PRIORITY_UPDATES', 'OFF'),\n('COMPLETION_TYPE', 'NO_CHAIN'),\n('REPORT_PASSWORD', ''),\n('BINLOG_DIRECT_NON_TRANSACTIONAL_UPDATES', 'OFF'),\n('MAX_INSERT_DELAYED_THREADS', '20'),\n('VERSION_COMMENT', '(Ubuntu)'),\n('SQL_BIG_SELECTS', 'ON'),\n('AUTO_INCREMENT_OFFSET', '1'),\n('TRANSACTION_ALLOC_BLOCK_SIZE', '8192'),\n('JOIN_BUFFER_SIZE', '131072'),\n('THREAD_CACHE_SIZE', '8'),\n('CONNECT_TIMEOUT', '10'),\n('INNODB_DOUBLEWRITE', 'ON'),\n('SQL_LOW_PRIORITY_UPDATES', 'OFF'),\n('IGNORE_BUILTIN_INNODB', 'OFF'),\n('INIT_FILE', ''),\n('DEFAULT_WEEK_FORMAT', '0'),\n('LARGE_PAGES', 'OFF'),\n('LOG_OUTPUT', 'FILE'),\n('LARGE_PAGE_SIZE', '0'),\n('INNODB_IO_CAPACITY', '200'),\n('INIT_SLAVE', ''),\n('INNODB_USE_NATIVE_AIO', 'OFF'),\n('MAX_BINLOG_SIZE', '104857600'),\n('HAVE_SYMLINK', 'YES'),\n('MAX_ERROR_COUNT', '64'),\n('TIME_ZONE', 'SYSTEM'),\n('MAX_CONNECTIONS', 
'151'),\n('INNODB_TABLE_LOCKS', 'ON'),\n('INNODB_AUTOEXTEND_INCREMENT', '8'),\n('READ_BUFFER_SIZE', '131072'),\n('MYISAM_DATA_POINTER_SIZE', '6'),\n('INNODB_THREAD_SLEEP_DELAY', '10000'),\n('LOG_QUERIES_NOT_USING_INDEXES', 'OFF'),\n('SQL_AUTO_IS_NULL', 'OFF'),\n('LOWER_CASE_FILE_SYSTEM', 'OFF'),\n('SLAVE_TRANSACTION_RETRIES', '10'),\n('SORT_BUFFER_SIZE', '2097152'),\n('KEEP_FILES_ON_CREATE', 'OFF'),\n('MAX_HEAP_TABLE_SIZE', '16777216'),\n('SYNC_RELAY_LOG_INFO', '0'),\n('LOCK_WAIT_TIMEOUT', '31536000'),\n('INNODB_REPLICATION_DELAY', '0'),\n('KEY_CACHE_AGE_THRESHOLD', '300'),\n('QUERY_CACHE_MIN_RES_UNIT', '4096'),\n('NET_RETRY_COUNT', '10'),\n('INNODB_STATS_ON_METADATA', 'ON'),\n('LOG_WARNINGS', '1'),\n('INNODB_ROLLBACK_ON_TIMEOUT', 'OFF'),\n('FLUSH', 'OFF'),\n('PROFILING_HISTORY_SIZE', '15'),\n('MAX_LONG_DATA_SIZE', '16777216'),\n('INNODB_CHANGE_BUFFERING', 'all'),\n('CHARACTER_SET_SERVER', 'latin1'),\n('READ_RND_BUFFER_SIZE', '262144'),\n('SLAVE_MAX_ALLOWED_PACKET', '1073741824'),\n('INNODB_FILE_FORMAT', 'Antelope'),\n('FLUSH_TIME', '0'),\n('BIG_TABLES', 'OFF'),\n('CHARACTER_SET_DATABASE', 'latin1'),\n('SQL_SELECT_LIMIT', '18446744073709551615'),\n('BULK_INSERT_BUFFER_SIZE', '8388608'),\n('DATE_FORMAT', '%Y-%m-%d'),\n('CHARACTER_SET_FILESYSTEM', 'binary'),\n('READ_ONLY', 'OFF'),\n('BINLOG_STMT_CACHE_SIZE', '32768'),\n('MAX_BINLOG_CACHE_SIZE', '18446744073709547520'),\n('INNODB_DATA_FILE_PATH', 'ibdata1:10M:autoextend'),\n('PERFORMANCE_SCHEMA_MAX_FILE_CLASSES', '50'),\n('INNODB_PURGE_THREADS', '0'),\n('MAX_SORT_LENGTH', '1024'),\n('PROFILING', 'OFF'),\n('PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_SIZE', '10'),\n('INNODB_STRICT_MODE', 'OFF'),\n('SLAVE_COMPRESSED_PROTOCOL', 'OFF'),\n('KEY_CACHE_DIVISION_LIMIT', '100'),\n('OLD_PASSWORDS', 'OFF'),\n('GENERAL_LOG_FILE', '/var/lib/mysql/Intranet.log'),\n('NET_WRITE_TIMEOUT', '60'),\n('PERFORMANCE_SCHEMA_MAX_COND_CLASSES', '80'),\n('QUERY_CACHE_TYPE', 'ON'),\n('AUTO_INCREMENT_INCREMENT', '1'),\n('METADATA_LOCKS_CACHE_SIZE', '1024'),\n('TMPDIR', '/tmp'),\n('QUERY_CACHE_LIMIT', '1048576'),\n('EXPIRE_LOGS_DAYS', '10'),\n('TX_ISOLATION', 'REPEATABLE-READ'),\n('HAVE_PARTITIONING', 'YES'),\n('LOG_ERROR', '/var/log/mysql/error.log'),\n('FOREIGN_KEY_CHECKS', 'ON'),\n('MAX_LENGTH_FOR_SORT_DATA', '1024'),\n('RELAY_LOG_INFO_FILE', 'relay-log.info'),\n('THREAD_STACK', '196608'),\n('INNODB_AUTOINC_LOCK_MODE', '1'),\n('NEW', 'OFF'),\n('INNODB_COMMIT_CONCURRENCY', '0'),\n('SKIP_NAME_RESOLVE', 'OFF'),\n('INNODB_MIRRORED_LOG_GROUPS', '1'),\n('PID_FILE', '/var/run/mysqld/mysqld.pid'),\n('INNODB_PURGE_BATCH_SIZE', '20'),\n('MAX_ALLOWED_PACKET', '16777216'),\n('VERSION', '5.5.40-0ubuntu0.12.04.1'),\n('CONCURRENT_INSERT', 'AUTO'),\n('INNODB_SUPPORT_XA', 'ON'),\n('TABLE_DEFINITION_CACHE', '400'),\n('INNODB_SYNC_SPIN_LOOPS', '30'),\n('QUERY_ALLOC_BLOCK_SIZE', '8192'),\n('COLLATION_CONNECTION', 'latin1_swedish_ci'),\n('MYISAM_REPAIR_THREADS', '1'),\n('INNODB_ADAPTIVE_FLUSHING', 'ON'),\n('FT_BOOLEAN_SYNTAX', '+ -><()~*:\"\"&|'),\n('INNODB_ADAPTIVE_HASH_INDEX', 'ON'),\n('VERSION_COMPILE_MACHINE', 'x86_64'),\n('SYSTEM_TIME_ZONE', 'EST'),\n('QUERY_CACHE_WLOCK_INVALIDATE', 'OFF'),\n('DIV_PRECISION_INCREMENT', '4'),\n('SYNC_FRM', 'ON'),\n('STORED_PROGRAM_CACHE', '256'),\n('TMP_TABLE_SIZE', '16777216'),\n('INNODB_DATA_HOME_DIR', ''),\n('PERFORMANCE_SCHEMA_MAX_THREAD_INSTANCES', '1000'),\n('INNODB_READ_IO_THREADS', '4'),\n('MULTI_RANGE_COUNT', '256'),\n('INNODB_WRITE_IO_THREADS', '4'),\n('SERVER_ID', '0'),\n('INNODB_BUFFER_POOL_INSTANCES', '1'),\n('SKIP_NETWORKING', 
'OFF'),\n('INNODB_FORCE_RECOVERY', '0'),\n('CHARACTER_SET_SYSTEM', 'utf8'),\n('INNODB_LOG_FILES_IN_GROUP', '2'),\n('INIT_CONNECT', ''),\n('OPTIMIZER_SEARCH_DEPTH', '62'),\n('HAVE_DYNAMIC_LOADING', 'YES'),\n('AUTOCOMMIT', 'ON'),\n('SYNC_BINLOG', '0'),\n('SSL_CAPATH', ''),\n('INNODB_PRINT_ALL_DEADLOCKS', 'OFF'),\n('SLAVE_EXEC_MODE', 'STRICT'),\n('INNODB_OPEN_FILES', '300'),\n('GENERAL_LOG', 'OFF'),\n('INNODB_FILE_FORMAT_CHECK', 'ON'),\n('INNODB_READ_AHEAD_THRESHOLD', '56'),\n('HOSTNAME', 'Intranet'),\n('KEY_CACHE_BLOCK_SIZE', '1024'),\n('OLD', 'OFF'),\n('KEY_BUFFER_SIZE', '16777216'),\n('REPORT_PORT', '3306'),\n('HAVE_NDBCLUSTER', 'NO'),\n('SQL_LOG_BIN', 'ON'),\n('THREAD_HANDLING', 'one-thread-per-connection'),\n('INNODB_STATS_METHOD', 'nulls_equal'),\n('LOG_BIN', 'OFF'),\n('INNODB_FAST_SHUTDOWN', '1'),\n('RELAY_LOG_SPACE_LIMIT', '0'),\n('SSL_CA', ''),\n('MAX_USER_CONNECTIONS', '0'),\n('INNODB_THREAD_CONCURRENCY', '0'),\n('SQL_MAX_JOIN_SIZE', '18446744073709551615'),\n('SLAVE_NET_TIMEOUT', '3600'),\n('TABLE_OPEN_CACHE', '400'),\n('INNODB_STATS_SAMPLE_PAGES', '8'),\n('SQL_BIG_TABLES', 'OFF'),\n('LOCAL_INFILE', 'ON'),\n('SQL_BUFFER_RESULT', 'OFF'),\n('HAVE_RTREE_KEYS', 'YES'),\n('ENGINE_CONDITION_PUSHDOWN', 'ON'),\n('HAVE_PROFILING', 'YES'),\n('LC_MESSAGES_DIR', '/usr/share/mysql/'),\n('OLD_ALTER_TABLE', 'OFF'),\n('HAVE_INNODB', 'YES'),\n('MYISAM_MMAP_SIZE', '18446744073709551615'),\n('SQL_MODE', ''),\n('PERFORMANCE_SCHEMA_MAX_FILE_HANDLES', '32768'),\n('RELAY_LOG_RECOVERY', 'OFF'),\n('REPORT_USER', ''),\n('MAX_DELAYED_THREADS', '20'),\n('HAVE_GEOMETRY', 'YES'),\n('DATETIME_FORMAT', '%Y-%m-%d %H:%i:%s'),\n('SLOW_QUERY_LOG', 'OFF'),\n('INNODB_FLUSH_LOG_AT_TRX_COMMIT', '1'),\n('LC_MESSAGES', 'en_US'),\n('MAX_RELAY_LOG_SIZE', '0'),\n('LOG', 'OFF'),\n('INNODB_RANDOM_READ_AHEAD', 'OFF'),\n('OPEN_FILES_LIMIT', '1024'),\n('HAVE_CSV', 'YES'),\n('DATADIR', '/var/lib/mysql/'),\n('PORT', '3306'),\n('FT_MIN_WORD_LEN', '4'),\n('INNODB_CONCURRENCY_TICKETS', '500'),\n('VERSION_COMPILE_OS', 'debian-linux-gnu'),\n('LOG_BIN_TRUST_FUNCTION_CREATORS', 'OFF'),\n('INNODB_LOCKS_UNSAFE_FOR_BINLOG', 'OFF'),\n('INNODB_FORCE_LOAD_CORRUPTED', 'OFF'),\n('SQL_WARNINGS', 'OFF'),\n('HAVE_OPENSSL', 'DISABLED'),\n('RELAY_LOG', ''),\n('MAX_BINLOG_STMT_CACHE_SIZE', '18446744073709547520'),\n('PLUGIN_DIR', '/usr/lib/mysql/plugin/'),\n('PERFORMANCE_SCHEMA_MAX_FILE_INSTANCES', '10000'),\n('LOG_SLOW_QUERIES', 'OFF'),\n('INNODB_SPIN_WAIT_DELAY', '6'),\n('MAX_TMP_TABLES', '32'),\n('INNODB_FILE_FORMAT_MAX', 'Antelope'),\n('SQL_LOG_OFF', 'OFF'),\n('DEFAULT_STORAGE_ENGINE', 'InnoDB'),\n('SLOW_QUERY_LOG_FILE', '/var/lib/mysql/Intranet-slow.log'),\n('INNODB_LOCK_WAIT_TIMEOUT', '50'),\n('SQL_SLAVE_SKIP_COUNTER', '0'),\n('INNODB_OLD_BLOCKS_TIME', '0'),\n('SECURE_AUTH', 'OFF'),\n('RPL_RECOVERY_RANK', '0'),\n('NET_READ_TIMEOUT', '30'),\n('MYISAM_STATS_METHOD', 'nulls_unequal'),\n('OPTIMIZER_SWITCH', 'index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on'),\n('MAX_CONNECT_ERRORS', '10'),\n('LOCKED_IN_MEMORY', 'OFF'),\n('INNODB_FLUSH_METHOD', ''),\n('INNODB_LARGE_PREFIX', 'OFF'),\n('INNODB_CHECKSUMS', 'ON'),\n('STORAGE_ENGINE', 'InnoDB'),\n('SSL_KEY', ''),\n('HAVE_SSL', 'DISABLED'),\n('MYISAM_USE_MMAP', 'OFF'),\n('SLAVE_SKIP_ERRORS', 'OFF'),\n('MIN_EXAMINED_ROW_LIMIT', '0'),\n('LOG_SLAVE_UPDATES', 'OFF'),\n('RELAY_LOG_PURGE', 'ON'),\n('SYNC_MASTER_INFO', '0'),\n('COLLATION_DATABASE', 'latin1_swedish_ci'),\n('INNODB_FILE_PER_TABLE', 'OFF'),\n('INNODB_LOG_GROUP_HOME_DIR', 
'./'),\n('SSL_CERT', ''),\n('INNODB_LOG_BUFFER_SIZE', '8388608'),\n('SOCKET', '/var/run/mysqld/mysqld.sock'),\n('CHARACTER_SET_CLIENT', 'latin1'),\n('INNODB_MAX_PURGE_LAG', '0'),\n('SKIP_EXTERNAL_LOCKING', 'ON'),\n('MYISAM_MAX_SORT_FILE_SIZE', '9223372036853727232'),\n('SLAVE_LOAD_TMPDIR', '/tmp'),\n('SLAVE_TYPE_CONVERSIONS', ''),\n('INNODB_ADDITIONAL_MEM_POOL_SIZE', '8388608'),\n('SQL_NOTES', 'ON'),\n('INNODB_USE_SYS_MALLOC', 'ON'),\n('LICENSE', 'GPL'),\n('INNODB_MAX_DIRTY_PAGES_PCT', '75'),\n('PERFORMANCE_SCHEMA_MAX_TABLE_INSTANCES', '50000'),\n('THREAD_CONCURRENCY', '10'),\n('UNIQUE_CHECKS', 'ON'),\n('INNODB_OLD_BLOCKS_PCT', '37'),\n('HAVE_COMPRESS', 'YES');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `KEY_COLUMN_USAGE`\n--\n\nCREATE TEMPORARY TABLE `KEY_COLUMN_USAGE` (\n `CONSTRAINT_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `CONSTRAINT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `CONSTRAINT_NAME` varchar(64) NOT NULL DEFAULT '',\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '',\n `ORDINAL_POSITION` bigint(10) NOT NULL DEFAULT '0',\n `POSITION_IN_UNIQUE_CONSTRAINT` bigint(10) DEFAULT NULL,\n `REFERENCED_TABLE_SCHEMA` varchar(64) DEFAULT NULL,\n `REFERENCED_TABLE_NAME` varchar(64) DEFAULT NULL,\n `REFERENCED_COLUMN_NAME` varchar(64) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `KEY_COLUMN_USAGE`\n--\n\nINSERT INTO `KEY_COLUMN_USAGE` (`CONSTRAINT_CATALOG`, `CONSTRAINT_SCHEMA`, `CONSTRAINT_NAME`, `TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `COLUMN_NAME`, `ORDINAL_POSITION`, `POSITION_IN_UNIQUE_CONSTRAINT`, `REFERENCED_TABLE_SCHEMA`, `REFERENCED_TABLE_NAME`, `REFERENCED_COLUMN_NAME`) VALUES\n('def', 'inventory', 'PRIMARY', 'def', 'inventory', 'Inventory', 'ID', 1, NULL, NULL, NULL, NULL),\n('def', 'inventory', 'PRIMARY', 'def', 'inventory', 'Sessions', 'ID', 1, NULL, NULL, NULL, NULL),\n('def', 'inventory', 'PRIMARY', 'def', 'inventory', 'barcodes', 'id', 1, NULL, NULL, NULL, NULL);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `PARAMETERS`\n--\n\nCREATE TEMPORARY TABLE `PARAMETERS` (\n `SPECIFIC_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `SPECIFIC_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `SPECIFIC_NAME` varchar(64) NOT NULL DEFAULT '',\n `ORDINAL_POSITION` int(21) NOT NULL DEFAULT '0',\n `PARAMETER_MODE` varchar(5) DEFAULT NULL,\n `PARAMETER_NAME` varchar(64) DEFAULT NULL,\n `DATA_TYPE` varchar(64) NOT NULL DEFAULT '',\n `CHARACTER_MAXIMUM_LENGTH` int(21) DEFAULT NULL,\n `CHARACTER_OCTET_LENGTH` int(21) DEFAULT NULL,\n `NUMERIC_PRECISION` int(21) DEFAULT NULL,\n `NUMERIC_SCALE` int(21) DEFAULT NULL,\n `CHARACTER_SET_NAME` varchar(64) DEFAULT NULL,\n `COLLATION_NAME` varchar(64) DEFAULT NULL,\n `DTD_IDENTIFIER` longtext NOT NULL,\n `ROUTINE_TYPE` varchar(9) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `PARTITIONS`\n--\n\nCREATE TEMPORARY TABLE `PARTITIONS` (\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `PARTITION_NAME` varchar(64) DEFAULT NULL,\n `SUBPARTITION_NAME` varchar(64) DEFAULT NULL,\n `PARTITION_ORDINAL_POSITION` bigint(21) unsigned DEFAULT NULL,\n 
`SUBPARTITION_ORDINAL_POSITION` bigint(21) unsigned DEFAULT NULL,\n `PARTITION_METHOD` varchar(18) DEFAULT NULL,\n `SUBPARTITION_METHOD` varchar(12) DEFAULT NULL,\n `PARTITION_EXPRESSION` longtext,\n `SUBPARTITION_EXPRESSION` longtext,\n `PARTITION_DESCRIPTION` longtext,\n `TABLE_ROWS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `AVG_ROW_LENGTH` bigint(21) unsigned NOT NULL DEFAULT '0',\n `DATA_LENGTH` bigint(21) unsigned NOT NULL DEFAULT '0',\n `MAX_DATA_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `INDEX_LENGTH` bigint(21) unsigned NOT NULL DEFAULT '0',\n `DATA_FREE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `CREATE_TIME` datetime DEFAULT NULL,\n `UPDATE_TIME` datetime DEFAULT NULL,\n `CHECK_TIME` datetime DEFAULT NULL,\n `CHECKSUM` bigint(21) unsigned DEFAULT NULL,\n `PARTITION_COMMENT` varchar(80) NOT NULL DEFAULT '',\n `NODEGROUP` varchar(12) NOT NULL DEFAULT '',\n `TABLESPACE_NAME` varchar(64) DEFAULT NULL\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `PARTITIONS`\n--\n\nINSERT INTO `PARTITIONS` (`TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `PARTITION_NAME`, `SUBPARTITION_NAME`, `PARTITION_ORDINAL_POSITION`, `SUBPARTITION_ORDINAL_POSITION`, `PARTITION_METHOD`, `SUBPARTITION_METHOD`, `PARTITION_EXPRESSION`, `SUBPARTITION_EXPRESSION`, `PARTITION_DESCRIPTION`, `TABLE_ROWS`, `AVG_ROW_LENGTH`, `DATA_LENGTH`, `MAX_DATA_LENGTH`, `INDEX_LENGTH`, `DATA_FREE`, `CREATE_TIME`, `UPDATE_TIME`, `CHECK_TIME`, `CHECKSUM`, `PARTITION_COMMENT`, `NODEGROUP`, `TABLESPACE_NAME`) VALUES\n('def', 'information_schema', 'CHARACTER_SETS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 384, 0, 16434816, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'COLLATIONS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 231, 0, 16704765, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'COLLATION_CHARACTER_SET_APPLICABILITY', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 195, 0, 16357770, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'COLUMNS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 2565, 0, 16757145, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'ENGINES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 490, 0, 16574250, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'EVENTS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'FILES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 2677, 0, 16758020, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'GLOBAL_STATUS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 3268, 0, 16755036, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'GLOBAL_VARIABLES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 3268, 0, 16755036, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 4637, 0, 
16762755, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'PARAMETERS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'PARTITIONS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'PLUGINS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'PROCESSLIST', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'PROFILING', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 308, 0, 16562084, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 4814, 0, 16767162, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'ROUTINES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'SCHEMATA', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 3464, 0, 16738048, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 2179, 0, 16736899, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'SESSION_STATUS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 3268, 0, 16755036, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'SESSION_VARIABLES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 3268, 0, 16755036, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'STATISTICS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 5753, 0, 16752736, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'TABLES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 9450, 0, 16764300, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'TABLESPACES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 6951, 0, 16772763, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 2504, 0, 16721712, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'TABLE_PRIVILEGES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 2372, 0, 16748692, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'TRIGGERS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'USER_PRIVILEGES', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 1986, 0, 16726092, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 
'information_schema', 'VIEWS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, 281474976710655, 1024, 0, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 6852, 0, 16766844, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_TRX', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 4534, 0, 16766732, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 257, 0, 16332350, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 599, 0, 16749238, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_CMPMEM', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 29, 0, 15204352, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_CMP', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 25, 0, 13107200, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_LOCKS', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 31244, 0, 16746784, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 29, 0, 15204352, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_CMP_RESET', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 25, 0, 13107200, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 6669, 0, 16765866, 0, 0, '2016-02-15 14:00:13', NULL, NULL, NULL, '', '', NULL),\n('def', 'inventory', 'Inout', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 871, 112, 98304, NULL, 16384, 7340032, '2014-10-31 19:54:48', NULL, NULL, NULL, '', '', NULL),\n('def', 'inventory', 'Inventory', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 373, 219, 81920, NULL, 0, 7340032, '2014-10-31 19:54:48', NULL, NULL, NULL, '', '', NULL),\n('def', 'inventory', 'Sessions', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 777, 147, 114688, NULL, 0, 7340032, '2014-10-31 19:54:48', NULL, NULL, NULL, '', '', NULL),\n('def', 'inventory', 'Wiped', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1, 16384, 16384, NULL, 16384, 7340032, '2014-10-31 19:54:48', NULL, NULL, NULL, '', '', NULL),\n('def', 'inventory', 'barcodes', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 361, 45, 16384, NULL, 0, 7340032, '2014-10-31 19:54:48', NULL, NULL, NULL, '', '', NULL);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `PLUGINS`\n--\n\nCREATE TEMPORARY TABLE `PLUGINS` (\n `PLUGIN_NAME` varchar(64) NOT NULL DEFAULT '',\n `PLUGIN_VERSION` varchar(20) NOT NULL DEFAULT '',\n `PLUGIN_STATUS` varchar(10) NOT NULL DEFAULT '',\n `PLUGIN_TYPE` varchar(80) NOT NULL DEFAULT '',\n `PLUGIN_TYPE_VERSION` varchar(20) NOT NULL DEFAULT '',\n `PLUGIN_LIBRARY` varchar(64) DEFAULT NULL,\n `PLUGIN_LIBRARY_VERSION` varchar(20) DEFAULT NULL,\n `PLUGIN_AUTHOR` varchar(64) DEFAULT NULL,\n `PLUGIN_DESCRIPTION` 
longtext,\n `PLUGIN_LICENSE` varchar(80) DEFAULT NULL,\n `LOAD_OPTION` varchar(64) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `PLUGINS`\n--\n\nINSERT INTO `PLUGINS` (`PLUGIN_NAME`, `PLUGIN_VERSION`, `PLUGIN_STATUS`, `PLUGIN_TYPE`, `PLUGIN_TYPE_VERSION`, `PLUGIN_LIBRARY`, `PLUGIN_LIBRARY_VERSION`, `PLUGIN_AUTHOR`, `PLUGIN_DESCRIPTION`, `PLUGIN_LICENSE`, `LOAD_OPTION`) VALUES\n('binlog', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'MySQL AB', 'This is a pseudo storage engine to represent the binlog in a transaction', 'GPL', 'FORCE'),\n('mysql_native_password', '1.0', 'ACTIVE', 'AUTHENTICATION', '1.0', NULL, NULL, 'R.J.Silk, Sergei Golubchik', 'Native MySQL authentication', 'GPL', 'FORCE'),\n('mysql_old_password', '1.0', 'ACTIVE', 'AUTHENTICATION', '1.0', NULL, NULL, 'R.J.Silk, Sergei Golubchik', 'Old MySQL-4.0 authentication', 'GPL', 'FORCE'),\n('MEMORY', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'MySQL AB', 'Hash based, stored in memory, useful for temporary tables', 'GPL', 'FORCE'),\n('MRG_MYISAM', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'MySQL AB', 'Collection of identical MyISAM tables', 'GPL', 'FORCE'),\n('CSV', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Brian Aker, MySQL AB', 'CSV storage engine', 'GPL', 'FORCE'),\n('MyISAM', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'MySQL AB', 'MyISAM storage engine', 'GPL', 'FORCE'),\n('PERFORMANCE_SCHEMA', '0.1', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Marc Alff, Oracle', 'Performance Schema', 'GPL', 'FORCE'),\n('ARCHIVE', '3.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Brian Aker, MySQL AB', 'Archive storage engine', 'GPL', 'ON'),\n('InnoDB', '5.5', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Oracle Corporation', 'Supports transactions, row-level locking, and foreign keys', 'GPL', 'ON'),\n('INNODB_TRX', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB transactions', 'GPL', 'ON'),\n('INNODB_LOCKS', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB conflicting locks', 'GPL', 'ON'),\n('INNODB_LOCK_WAITS', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB which lock is blocking which', 'GPL', 'ON'),\n('INNODB_CMP', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'Statistics for the InnoDB compression', 'GPL', 'ON'),\n('INNODB_CMP_RESET', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'Statistics for the InnoDB compression; reset cumulated counts', 'GPL', 'ON'),\n('INNODB_CMPMEM', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'Statistics for the InnoDB compressed buffer pool', 'GPL', 'ON'),\n('INNODB_CMPMEM_RESET', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'Statistics for the InnoDB compressed buffer pool; reset cumulated counts', 'GPL', 'ON'),\n('INNODB_BUFFER_PAGE', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB Buffer Page Information', 'GPL', 'ON'),\n('INNODB_BUFFER_PAGE_LRU', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB Buffer Page in LRU', 'GPL', 'ON'),\n('INNODB_BUFFER_POOL_STATS', '5.5', 'ACTIVE', 'INFORMATION SCHEMA', '50540.0', NULL, NULL, 'Oracle Corporation', 'InnoDB Buffer Pool Statistics Information ', 'GPL', 
'ON'),\n('BLACKHOLE', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'MySQL AB', '/dev/null storage engine (anything you write to it disappears)', 'GPL', 'ON'),\n('FEDERATED', '1.0', 'DISABLED', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Patrick Galbraith and Brian Aker, MySQL AB', 'Federated MySQL storage engine', 'GPL', 'OFF'),\n('partition', '1.0', 'ACTIVE', 'STORAGE ENGINE', '50540.0', NULL, NULL, 'Mikael Ronstrom, MySQL AB', 'Partition Storage Engine Helper', 'GPL', 'ON');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `PROCESSLIST`\n--\n\nCREATE TEMPORARY TABLE `PROCESSLIST` (\n `ID` bigint(4) NOT NULL DEFAULT '0',\n `USER` varchar(16) NOT NULL DEFAULT '',\n `HOST` varchar(64) NOT NULL DEFAULT '',\n `DB` varchar(64) DEFAULT NULL,\n `COMMAND` varchar(16) NOT NULL DEFAULT '',\n `TIME` int(7) NOT NULL DEFAULT '0',\n `STATE` varchar(64) DEFAULT NULL,\n `INFO` longtext\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `PROCESSLIST`\n--\n\nINSERT INTO `PROCESSLIST` (`ID`, `USER`, `HOST`, `DB`, `COMMAND`, `TIME`, `STATE`, `INFO`) VALUES\n(8340, 'matt', 'localhost', NULL, 'Query', 0, 'executing', 'SELECT * FROM `information_schema`.`PROCESSLIST`');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `PROFILING`\n--\n\nCREATE TEMPORARY TABLE `PROFILING` (\n `QUERY_ID` int(20) NOT NULL DEFAULT '0',\n `SEQ` int(20) NOT NULL DEFAULT '0',\n `STATE` varchar(30) NOT NULL DEFAULT '',\n `DURATION` decimal(9,6) NOT NULL DEFAULT '0.000000',\n `CPU_USER` decimal(9,6) DEFAULT NULL,\n `CPU_SYSTEM` decimal(9,6) DEFAULT NULL,\n `CONTEXT_VOLUNTARY` int(20) DEFAULT NULL,\n `CONTEXT_INVOLUNTARY` int(20) DEFAULT NULL,\n `BLOCK_OPS_IN` int(20) DEFAULT NULL,\n `BLOCK_OPS_OUT` int(20) DEFAULT NULL,\n `MESSAGES_SENT` int(20) DEFAULT NULL,\n `MESSAGES_RECEIVED` int(20) DEFAULT NULL,\n `PAGE_FAULTS_MAJOR` int(20) DEFAULT NULL,\n `PAGE_FAULTS_MINOR` int(20) DEFAULT NULL,\n `SWAPS` int(20) DEFAULT NULL,\n `SOURCE_FUNCTION` varchar(30) DEFAULT NULL,\n `SOURCE_FILE` varchar(20) DEFAULT NULL,\n `SOURCE_LINE` int(20) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `REFERENTIAL_CONSTRAINTS`\n--\n\nCREATE TEMPORARY TABLE `REFERENTIAL_CONSTRAINTS` (\n `CONSTRAINT_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `CONSTRAINT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `CONSTRAINT_NAME` varchar(64) NOT NULL DEFAULT '',\n `UNIQUE_CONSTRAINT_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `UNIQUE_CONSTRAINT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `UNIQUE_CONSTRAINT_NAME` varchar(64) DEFAULT NULL,\n `MATCH_OPTION` varchar(64) NOT NULL DEFAULT '',\n `UPDATE_RULE` varchar(64) NOT NULL DEFAULT '',\n `DELETE_RULE` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `REFERENCED_TABLE_NAME` varchar(64) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `ROUTINES`\n--\n\nCREATE TEMPORARY TABLE `ROUTINES` (\n `SPECIFIC_NAME` varchar(64) NOT NULL DEFAULT '',\n `ROUTINE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `ROUTINE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `ROUTINE_NAME` varchar(64) NOT NULL DEFAULT '',\n `ROUTINE_TYPE` varchar(9) NOT NULL DEFAULT '',\n `DATA_TYPE` varchar(64) NOT NULL DEFAULT '',\n `CHARACTER_MAXIMUM_LENGTH` int(21) DEFAULT NULL,\n 
`CHARACTER_OCTET_LENGTH` int(21) DEFAULT NULL,\n `NUMERIC_PRECISION` int(21) DEFAULT NULL,\n `NUMERIC_SCALE` int(21) DEFAULT NULL,\n `CHARACTER_SET_NAME` varchar(64) DEFAULT NULL,\n `COLLATION_NAME` varchar(64) DEFAULT NULL,\n `DTD_IDENTIFIER` longtext,\n `ROUTINE_BODY` varchar(8) NOT NULL DEFAULT '',\n `ROUTINE_DEFINITION` longtext,\n `EXTERNAL_NAME` varchar(64) DEFAULT NULL,\n `EXTERNAL_LANGUAGE` varchar(64) DEFAULT NULL,\n `PARAMETER_STYLE` varchar(8) NOT NULL DEFAULT '',\n `IS_DETERMINISTIC` varchar(3) NOT NULL DEFAULT '',\n `SQL_DATA_ACCESS` varchar(64) NOT NULL DEFAULT '',\n `SQL_PATH` varchar(64) DEFAULT NULL,\n `SECURITY_TYPE` varchar(7) NOT NULL DEFAULT '',\n `CREATED` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `LAST_ALTERED` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `SQL_MODE` varchar(8192) NOT NULL DEFAULT '',\n `ROUTINE_COMMENT` longtext NOT NULL,\n `DEFINER` varchar(77) NOT NULL DEFAULT '',\n `CHARACTER_SET_CLIENT` varchar(32) NOT NULL DEFAULT '',\n `COLLATION_CONNECTION` varchar(32) NOT NULL DEFAULT '',\n `DATABASE_COLLATION` varchar(32) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `SCHEMATA`\n--\n\nCREATE TEMPORARY TABLE `SCHEMATA` (\n `CATALOG_NAME` varchar(512) NOT NULL DEFAULT '',\n `SCHEMA_NAME` varchar(64) NOT NULL DEFAULT '',\n `DEFAULT_CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '',\n `DEFAULT_COLLATION_NAME` varchar(32) NOT NULL DEFAULT '',\n `SQL_PATH` varchar(512) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `SCHEMATA`\n--\n\nINSERT INTO `SCHEMATA` (`CATALOG_NAME`, `SCHEMA_NAME`, `DEFAULT_CHARACTER_SET_NAME`, `DEFAULT_COLLATION_NAME`, `SQL_PATH`) VALUES\n('def', 'information_schema', 'utf8', 'utf8_general_ci', NULL),\n('def', 'inventory', 'latin1', 'latin1_swedish_ci', NULL),\n('def', 'test', 'latin1', 'latin1_swedish_ci', NULL);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `SCHEMA_PRIVILEGES`\n--\n\nCREATE TEMPORARY TABLE `SCHEMA_PRIVILEGES` (\n `GRANTEE` varchar(81) NOT NULL DEFAULT '',\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `PRIVILEGE_TYPE` varchar(64) NOT NULL DEFAULT '',\n `IS_GRANTABLE` varchar(3) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `SCHEMA_PRIVILEGES`\n--\n\nINSERT INTO `SCHEMA_PRIVILEGES` (`GRANTEE`, `TABLE_CATALOG`, `TABLE_SCHEMA`, `PRIVILEGE_TYPE`, `IS_GRANTABLE`) VALUES\n('''matt''@''localhost''', 'def', 'inventory', 'SELECT', 'NO');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `SESSION_STATUS`\n--\n\nCREATE TEMPORARY TABLE `SESSION_STATUS` (\n `VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `VARIABLE_VALUE` varchar(1024) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `SESSION_STATUS`\n--\n\nINSERT INTO `SESSION_STATUS` (`VARIABLE_NAME`, `VARIABLE_VALUE`) VALUES\n('ABORTED_CLIENTS', '1'),\n('ABORTED_CONNECTS', '114'),\n('BINLOG_CACHE_DISK_USE', '0'),\n('BINLOG_CACHE_USE', '0'),\n('BINLOG_STMT_CACHE_DISK_USE', '0'),\n('BINLOG_STMT_CACHE_USE', '0'),\n('BYTES_RECEIVED', '7694'),\n('BYTES_SENT', '224235'),\n('COM_ADMIN_COMMANDS', '0'),\n('COM_ASSIGN_TO_KEYCACHE', '0'),\n('COM_ALTER_DB', '0'),\n('COM_ALTER_DB_UPGRADE', '0'),\n('COM_ALTER_EVENT', '0'),\n('COM_ALTER_FUNCTION', '0'),\n('COM_ALTER_PROCEDURE', 
'0'),\n('COM_ALTER_SERVER', '0'),\n('COM_ALTER_TABLE', '0'),\n('COM_ALTER_TABLESPACE', '0'),\n('COM_ANALYZE', '0'),\n('COM_BEGIN', '0'),\n('COM_BINLOG', '0'),\n('COM_CALL_PROCEDURE', '0'),\n('COM_CHANGE_DB', '0'),\n('COM_CHANGE_MASTER', '0'),\n('COM_CHECK', '0'),\n('COM_CHECKSUM', '0'),\n('COM_COMMIT', '0'),\n('COM_CREATE_DB', '0'),\n('COM_CREATE_EVENT', '0'),\n('COM_CREATE_FUNCTION', '0'),\n('COM_CREATE_INDEX', '0'),\n('COM_CREATE_PROCEDURE', '0'),\n('COM_CREATE_SERVER', '0'),\n('COM_CREATE_TABLE', '0'),\n('COM_CREATE_TRIGGER', '0'),\n('COM_CREATE_UDF', '0'),\n('COM_CREATE_USER', '0'),\n('COM_CREATE_VIEW', '0'),\n('COM_DEALLOC_SQL', '0'),\n('COM_DELETE', '0'),\n('COM_DELETE_MULTI', '0'),\n('COM_DO', '0'),\n('COM_DROP_DB', '0'),\n('COM_DROP_EVENT', '0'),\n('COM_DROP_FUNCTION', '0'),\n('COM_DROP_INDEX', '0'),\n('COM_DROP_PROCEDURE', '0'),\n('COM_DROP_SERVER', '0'),\n('COM_DROP_TABLE', '0'),\n('COM_DROP_TRIGGER', '0'),\n('COM_DROP_USER', '0'),\n('COM_DROP_VIEW', '0'),\n('COM_EMPTY_QUERY', '0'),\n('COM_EXECUTE_SQL', '0'),\n('COM_FLUSH', '0'),\n('COM_GRANT', '0'),\n('COM_HA_CLOSE', '0'),\n('COM_HA_OPEN', '0'),\n('COM_HA_READ', '0'),\n('COM_HELP', '0'),\n('COM_INSERT', '0'),\n('COM_INSERT_SELECT', '0'),\n('COM_INSTALL_PLUGIN', '0'),\n('COM_KILL', '0'),\n('COM_LOAD', '0'),\n('COM_LOCK_TABLES', '0'),\n('COM_OPTIMIZE', '0'),\n('COM_PRELOAD_KEYS', '0'),\n('COM_PREPARE_SQL', '0'),\n('COM_PURGE', '0'),\n('COM_PURGE_BEFORE_DATE', '0'),\n('COM_RELEASE_SAVEPOINT', '0'),\n('COM_RENAME_TABLE', '0'),\n('COM_RENAME_USER', '0'),\n('COM_REPAIR', '0'),\n('COM_REPLACE', '0'),\n('COM_REPLACE_SELECT', '0'),\n('COM_RESET', '0'),\n('COM_RESIGNAL', '0'),\n('COM_REVOKE', '0'),\n('COM_REVOKE_ALL', '0'),\n('COM_ROLLBACK', '0'),\n('COM_ROLLBACK_TO_SAVEPOINT', '0'),\n('COM_SAVEPOINT', '0'),\n('COM_SELECT', '22'),\n('COM_SET_OPTION', '25'),\n('COM_SIGNAL', '0'),\n('COM_SHOW_AUTHORS', '0'),\n('COM_SHOW_BINLOG_EVENTS', '0'),\n('COM_SHOW_BINLOGS', '0'),\n('COM_SHOW_CHARSETS', '0'),\n('COM_SHOW_COLLATIONS', '0'),\n('COM_SHOW_CONTRIBUTORS', '0'),\n('COM_SHOW_CREATE_DB', '0'),\n('COM_SHOW_CREATE_EVENT', '0'),\n('COM_SHOW_CREATE_FUNC', '0'),\n('COM_SHOW_CREATE_PROC', '0'),\n('COM_SHOW_CREATE_TABLE', '21'),\n('COM_SHOW_CREATE_TRIGGER', '0'),\n('COM_SHOW_DATABASES', '1'),\n('COM_SHOW_ENGINE_LOGS', '0'),\n('COM_SHOW_ENGINE_MUTEX', '0'),\n('COM_SHOW_ENGINE_STATUS', '0'),\n('COM_SHOW_EVENTS', '0'),\n('COM_SHOW_ERRORS', '0'),\n('COM_SHOW_FIELDS', '0'),\n('COM_SHOW_FUNCTION_STATUS', '1'),\n('COM_SHOW_GRANTS', '0'),\n('COM_SHOW_KEYS', '0'),\n('COM_SHOW_MASTER_STATUS', '0'),\n('COM_SHOW_OPEN_TABLES', '0'),\n('COM_SHOW_PLUGINS', '1'),\n('COM_SHOW_PRIVILEGES', '0'),\n('COM_SHOW_PROCEDURE_STATUS', '1'),\n('COM_SHOW_PROCESSLIST', '0'),\n('COM_SHOW_PROFILE', '0'),\n('COM_SHOW_PROFILES', '0'),\n('COM_SHOW_RELAYLOG_EVENTS', '0'),\n('COM_SHOW_SLAVE_HOSTS', '0'),\n('COM_SHOW_SLAVE_STATUS', '0'),\n('COM_SHOW_STATUS', '0'),\n('COM_SHOW_STORAGE_ENGINES', '0'),\n('COM_SHOW_TABLE_STATUS', '42'),\n('COM_SHOW_TABLES', '1'),\n('COM_SHOW_TRIGGERS', '20'),\n('COM_SHOW_VARIABLES', '0'),\n('COM_SHOW_WARNINGS', '0'),\n('COM_SLAVE_START', '0'),\n('COM_SLAVE_STOP', '0'),\n('COM_STMT_CLOSE', '0'),\n('COM_STMT_EXECUTE', '0'),\n('COM_STMT_FETCH', '0'),\n('COM_STMT_PREPARE', '0'),\n('COM_STMT_REPREPARE', '0'),\n('COM_STMT_RESET', '0'),\n('COM_STMT_SEND_LONG_DATA', '0'),\n('COM_TRUNCATE', '0'),\n('COM_UNINSTALL_PLUGIN', '0'),\n('COM_UNLOCK_TABLES', '0'),\n('COM_UPDATE', '0'),\n('COM_UPDATE_MULTI', '0'),\n('COM_XA_COMMIT', '0'),\n('COM_XA_END', 
'0'),\n('COM_XA_PREPARE', '0'),\n('COM_XA_RECOVER', '0'),\n('COM_XA_ROLLBACK', '0'),\n('COM_XA_START', '0'),\n('COMPRESSION', 'OFF'),\n('CONNECTIONS', '8341'),\n('CREATED_TMP_DISK_TABLES', '94'),\n('CREATED_TMP_FILES', '5'),\n('CREATED_TMP_TABLES', '331'),\n('DELAYED_ERRORS', '0'),\n('DELAYED_INSERT_THREADS', '0'),\n('DELAYED_WRITES', '0'),\n('FLUSH_COMMANDS', '1'),\n('HANDLER_COMMIT', '0'),\n('HANDLER_DELETE', '0'),\n('HANDLER_DISCOVER', '0'),\n('HANDLER_PREPARE', '0'),\n('HANDLER_READ_FIRST', '4'),\n('HANDLER_READ_KEY', '0'),\n('HANDLER_READ_LAST', '0'),\n('HANDLER_READ_NEXT', '0'),\n('HANDLER_READ_PREV', '0'),\n('HANDLER_READ_RND', '0'),\n('HANDLER_READ_RND_NEXT', '1851'),\n('HANDLER_ROLLBACK', '0'),\n('HANDLER_SAVEPOINT', '0'),\n('HANDLER_SAVEPOINT_ROLLBACK', '0'),\n('HANDLER_UPDATE', '0'),\n('HANDLER_WRITE', '1934'),\n('INNODB_BUFFER_POOL_PAGES_DATA', '2274'),\n('INNODB_BUFFER_POOL_BYTES_DATA', '37257216'),\n('INNODB_BUFFER_POOL_PAGES_DIRTY', '0'),\n('INNODB_BUFFER_POOL_BYTES_DIRTY', '0'),\n('INNODB_BUFFER_POOL_PAGES_FLUSHED', '16045'),\n('INNODB_BUFFER_POOL_PAGES_FREE', '5908'),\n('INNODB_BUFFER_POOL_PAGES_MISC', '10'),\n('INNODB_BUFFER_POOL_PAGES_TOTAL', '8192'),\n('INNODB_BUFFER_POOL_READ_AHEAD_RND', '0'),\n('INNODB_BUFFER_POOL_READ_AHEAD', '0'),\n('INNODB_BUFFER_POOL_READ_AHEAD_EVICTED', '0'),\n('INNODB_BUFFER_POOL_READ_REQUESTS', '1115511'),\n('INNODB_BUFFER_POOL_READS', '2156'),\n('INNODB_BUFFER_POOL_WAIT_FREE', '0'),\n('INNODB_BUFFER_POOL_WRITE_REQUESTS', '33637'),\n('INNODB_DATA_FSYNCS', '16044'),\n('INNODB_DATA_PENDING_FSYNCS', '0'),\n('INNODB_DATA_PENDING_READS', '0'),\n('INNODB_DATA_PENDING_WRITES', '0'),\n('INNODB_DATA_READ', '37507072'),\n('INNODB_DATA_READS', '2166'),\n('INNODB_DATA_WRITES', '27353'),\n('INNODB_DATA_WRITTEN', '538588672'),\n('INNODB_DBLWR_PAGES_WRITTEN', '16045'),\n('INNODB_DBLWR_WRITES', '3847'),\n('INNODB_HAVE_ATOMIC_BUILTINS', 'ON'),\n('INNODB_LOG_WAITS', '0'),\n('INNODB_LOG_WRITE_REQUESTS', '18477'),\n('INNODB_LOG_WRITES', '5010'),\n('INNODB_OS_LOG_FSYNCS', '8350'),\n('INNODB_OS_LOG_PENDING_FSYNCS', '0'),\n('INNODB_OS_LOG_PENDING_WRITES', '0'),\n('INNODB_OS_LOG_WRITTEN', '11115520'),\n('INNODB_PAGE_SIZE', '16384'),\n('INNODB_PAGES_CREATED', '119'),\n('INNODB_PAGES_READ', '2155'),\n('INNODB_PAGES_WRITTEN', '16045'),\n('INNODB_ROW_LOCK_CURRENT_WAITS', '0'),\n('INNODB_ROW_LOCK_TIME', '0'),\n('INNODB_ROW_LOCK_TIME_AVG', '0'),\n('INNODB_ROW_LOCK_TIME_MAX', '0'),\n('INNODB_ROW_LOCK_WAITS', '0'),\n('INNODB_ROWS_DELETED', '127'),\n('INNODB_ROWS_INSERTED', '901'),\n('INNODB_ROWS_READ', '459398'),\n('INNODB_ROWS_UPDATED', '3746'),\n('INNODB_TRUNCATED_STATUS_WRITES', '0'),\n('KEY_BLOCKS_NOT_FLUSHED', '0'),\n('KEY_BLOCKS_UNUSED', '13316'),\n('KEY_BLOCKS_USED', '83'),\n('KEY_READ_REQUESTS', '14926002'),\n('KEY_READS', '71'),\n('KEY_WRITE_REQUESTS', '12646'),\n('KEY_WRITES', '405'),\n('LAST_QUERY_COST', '10.499000'),\n('MAX_USED_CONNECTIONS', '7'),\n('NOT_FLUSHED_DELAYED_ROWS', '0'),\n('OPEN_FILES', '92'),\n('OPEN_STREAMS', '0'),\n('OPEN_TABLE_DEFINITIONS', '278'),\n('OPEN_TABLES', '297'),\n('OPENED_FILES', '44063'),\n('OPENED_TABLE_DEFINITIONS', '0'),\n('OPENED_TABLES', '5'),\n('PERFORMANCE_SCHEMA_COND_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_COND_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_HANDLES_LOST', '0'),\n('PERFORMANCE_SCHEMA_FILE_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_LOCKER_LOST', '0'),\n('PERFORMANCE_SCHEMA_MUTEX_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_MUTEX_INSTANCES_LOST', 
'0'),\n('PERFORMANCE_SCHEMA_RWLOCK_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_RWLOCK_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_TABLE_HANDLES_LOST', '0'),\n('PERFORMANCE_SCHEMA_TABLE_INSTANCES_LOST', '0'),\n('PERFORMANCE_SCHEMA_THREAD_CLASSES_LOST', '0'),\n('PERFORMANCE_SCHEMA_THREAD_INSTANCES_LOST', '0'),\n('PREPARED_STMT_COUNT', '0'),\n('QCACHE_FREE_BLOCKS', '206'),\n('QCACHE_FREE_MEMORY', '12584640'),\n('QCACHE_HITS', '42433'),\n('QCACHE_INSERTS', '15500'),\n('QCACHE_LOWMEM_PRUNES', '0'),\n('QCACHE_NOT_CACHED', '27556'),\n('QCACHE_QUERIES_IN_CACHE', '2084'),\n('QCACHE_TOTAL_BLOCKS', '4487'),\n('QUERIES', '205990'),\n('QUESTIONS', '135'),\n('RPL_STATUS', 'AUTH_MASTER'),\n('SELECT_FULL_JOIN', '0'),\n('SELECT_FULL_RANGE_JOIN', '0'),\n('SELECT_RANGE', '0'),\n('SELECT_RANGE_CHECK', '0'),\n('SELECT_SCAN', '88'),\n('SLAVE_HEARTBEAT_PERIOD', '0.000'),\n('SLAVE_OPEN_TEMP_TABLES', '0'),\n('SLAVE_RECEIVED_HEARTBEATS', '0'),\n('SLAVE_RETRIED_TRANSACTIONS', '0'),\n('SLAVE_RUNNING', 'OFF'),\n('SLOW_LAUNCH_THREADS', '0'),\n('SLOW_QUERIES', '0'),\n('SORT_MERGE_PASSES', '0'),\n('SORT_RANGE', '0'),\n('SORT_ROWS', '0'),\n('SORT_SCAN', '0'),\n('SSL_ACCEPT_RENEGOTIATES', '0'),\n('SSL_ACCEPTS', '0'),\n('SSL_CALLBACK_CACHE_HITS', '0'),\n('SSL_CIPHER', ''),\n('SSL_CIPHER_LIST', ''),\n('SSL_CLIENT_CONNECTS', '0'),\n('SSL_CONNECT_RENEGOTIATES', '0'),\n('SSL_CTX_VERIFY_DEPTH', '0'),\n('SSL_CTX_VERIFY_MODE', '0'),\n('SSL_DEFAULT_TIMEOUT', '0'),\n('SSL_FINISHED_ACCEPTS', '0'),\n('SSL_FINISHED_CONNECTS', '0'),\n('SSL_SESSION_CACHE_HITS', '0'),\n('SSL_SESSION_CACHE_MISSES', '0'),\n('SSL_SESSION_CACHE_MODE', 'NONE'),\n('SSL_SESSION_CACHE_OVERFLOWS', '0'),\n('SSL_SESSION_CACHE_SIZE', '0'),\n('SSL_SESSION_CACHE_TIMEOUTS', '0'),\n('SSL_SESSIONS_REUSED', '0'),\n('SSL_USED_SESSION_CACHE_ENTRIES', '0'),\n('SSL_VERIFY_DEPTH', '0'),\n('SSL_VERIFY_MODE', '0'),\n('SSL_VERSION', ''),\n('TABLE_LOCKS_IMMEDIATE', '69137'),\n('TABLE_LOCKS_WAITED', '0'),\n('TC_LOG_MAX_PAGES_USED', '0'),\n('TC_LOG_PAGE_SIZE', '0'),\n('TC_LOG_PAGE_WAITS', '0'),\n('THREADS_CACHED', '5'),\n('THREADS_CONNECTED', '2'),\n('THREADS_CREATED', '7'),\n('THREADS_RUNNING', '1'),\n('UPTIME', '1532568'),\n('UPTIME_SINCE_FLUSH_STATUS', '1532568');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `SESSION_VARIABLES`\n--\n\nCREATE TEMPORARY TABLE `SESSION_VARIABLES` (\n `VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `VARIABLE_VALUE` varchar(1024) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `SESSION_VARIABLES`\n--\n\nINSERT INTO `SESSION_VARIABLES` (`VARIABLE_NAME`, `VARIABLE_VALUE`) VALUES\n('MAX_PREPARED_STMT_COUNT', '16382'),\n('INNODB_BUFFER_POOL_SIZE', '134217728'),\n('HAVE_CRYPT', 'YES'),\n('PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_LONG_SIZE', '10000'),\n('INNODB_VERSION', '5.5.40'),\n('QUERY_PREALLOC_SIZE', '8192'),\n('DELAYED_QUEUE_SIZE', '1000'),\n('PERFORMANCE_SCHEMA_MAX_COND_INSTANCES', '1000'),\n('SSL_CIPHER', ''),\n('COLLATION_SERVER', 'latin1_swedish_ci'),\n('SECURE_FILE_PRIV', ''),\n('TIMED_MUTEXES', 'OFF'),\n('DELAYED_INSERT_TIMEOUT', '300'),\n('PERFORMANCE_SCHEMA_MAX_MUTEX_INSTANCES', '1000000'),\n('LC_TIME_NAMES', 'en_US'),\n('PERFORMANCE_SCHEMA_MAX_RWLOCK_INSTANCES', '1000000'),\n('TIME_FORMAT', '%H:%i:%s'),\n('PERFORMANCE_SCHEMA_MAX_RWLOCK_CLASSES', '30'),\n('BASEDIR', '/usr'),\n('PERFORMANCE_SCHEMA_MAX_MUTEX_CLASSES', '200'),\n('UPDATABLE_VIEWS_WITH_LIMIT', 'YES'),\n('BACK_LOG', '50'),\n('SLOW_LAUNCH_TIME', '2'),\n('EVENT_SCHEDULER', 
'OFF'),\n('MAX_SEEKS_FOR_KEY', '18446744073709551615'),\n('PERFORMANCE_SCHEMA_MAX_THREAD_CLASSES', '50'),\n('RELAY_LOG_INDEX', ''),\n('FT_STOPWORD_FILE', '(built-in)'),\n('SQL_QUOTE_SHOW_CREATE', 'ON'),\n('PERFORMANCE_SCHEMA', 'OFF'),\n('QUERY_CACHE_SIZE', '16777216'),\n('BINLOG_FORMAT', 'STATEMENT'),\n('WAIT_TIMEOUT', '28800'),\n('LONG_QUERY_TIME', '10.000000'),\n('PERFORMANCE_SCHEMA_MAX_TABLE_HANDLES', '100000'),\n('CHARACTER_SETS_DIR', '/usr/share/mysql/charsets/'),\n('LOWER_CASE_TABLE_NAMES', '0'),\n('BINLOG_CACHE_SIZE', '32768'),\n('REPORT_HOST', ''),\n('CHARACTER_SET_RESULTS', 'utf8'),\n('MYISAM_SORT_BUFFER_SIZE', '8388608'),\n('CHARACTER_SET_CONNECTION', 'utf8'),\n('INNODB_ROLLBACK_SEGMENTS', '128'),\n('PRELOAD_BUFFER_SIZE', '32768'),\n('LARGE_FILES_SUPPORT', 'ON'),\n('MAX_WRITE_LOCK_COUNT', '18446744073709551615'),\n('SQL_SAFE_UPDATES', 'OFF'),\n('MAX_JOIN_SIZE', '18446744073709551615'),\n('NET_BUFFER_LENGTH', '16384'),\n('FT_QUERY_EXPANSION_LIMIT', '20'),\n('SKIP_SHOW_DATABASE', 'OFF'),\n('FT_MAX_WORD_LEN', '84'),\n('GROUP_CONCAT_MAX_LEN', '1024'),\n('MAX_SP_RECURSION_DEPTH', '0'),\n('RANGE_ALLOC_BLOCK_SIZE', '4096'),\n('SYNC_RELAY_LOG', '0'),\n('OPTIMIZER_PRUNE_LEVEL', '1'),\n('HAVE_QUERY_CACHE', 'YES'),\n('INNODB_LOG_FILE_SIZE', '5242880'),\n('DELAY_KEY_WRITE', 'ON'),\n('TRANSACTION_PREALLOC_SIZE', '4096'),\n('INTERACTIVE_TIMEOUT', '28800'),\n('MYISAM_RECOVER_OPTIONS', 'BACKUP'),\n('AUTOMATIC_SP_PRIVILEGES', 'ON'),\n('PROTOCOL_VERSION', '10'),\n('DELAYED_INSERT_LIMIT', '100'),\n('LOW_PRIORITY_UPDATES', 'OFF'),\n('COMPLETION_TYPE', 'NO_CHAIN'),\n('REPORT_PASSWORD', ''),\n('BINLOG_DIRECT_NON_TRANSACTIONAL_UPDATES', 'OFF'),\n('MAX_INSERT_DELAYED_THREADS', '20'),\n('VERSION_COMMENT', '(Ubuntu)'),\n('SQL_BIG_SELECTS', 'ON'),\n('AUTO_INCREMENT_OFFSET', '1'),\n('TRANSACTION_ALLOC_BLOCK_SIZE', '8192'),\n('JOIN_BUFFER_SIZE', '131072'),\n('THREAD_CACHE_SIZE', '8'),\n('CONNECT_TIMEOUT', '10'),\n('INNODB_DOUBLEWRITE', 'ON'),\n('SQL_LOW_PRIORITY_UPDATES', 'OFF'),\n('IGNORE_BUILTIN_INNODB', 'OFF'),\n('INIT_FILE', ''),\n('DEFAULT_WEEK_FORMAT', '0'),\n('LARGE_PAGES', 'OFF'),\n('LOG_OUTPUT', 'FILE'),\n('LARGE_PAGE_SIZE', '0'),\n('INNODB_IO_CAPACITY', '200'),\n('INIT_SLAVE', ''),\n('INNODB_USE_NATIVE_AIO', 'OFF'),\n('MAX_BINLOG_SIZE', '104857600'),\n('HAVE_SYMLINK', 'YES'),\n('MAX_ERROR_COUNT', '64'),\n('TIME_ZONE', '+00:00'),\n('MAX_CONNECTIONS', '151'),\n('INNODB_TABLE_LOCKS', 'ON'),\n('PROXY_USER', ''),\n('INNODB_AUTOEXTEND_INCREMENT', '8'),\n('READ_BUFFER_SIZE', '131072'),\n('MYISAM_DATA_POINTER_SIZE', '6'),\n('PSEUDO_THREAD_ID', '8340'),\n('INNODB_THREAD_SLEEP_DELAY', '10000'),\n('LOG_QUERIES_NOT_USING_INDEXES', 'OFF'),\n('SQL_AUTO_IS_NULL', 'OFF'),\n('LOWER_CASE_FILE_SYSTEM', 'OFF'),\n('SLAVE_TRANSACTION_RETRIES', '10'),\n('SORT_BUFFER_SIZE', '2097152'),\n('KEEP_FILES_ON_CREATE', 'OFF'),\n('MAX_HEAP_TABLE_SIZE', '16777216'),\n('SYNC_RELAY_LOG_INFO', '0'),\n('LOCK_WAIT_TIMEOUT', '31536000'),\n('INNODB_REPLICATION_DELAY', '0'),\n('KEY_CACHE_AGE_THRESHOLD', '300'),\n('QUERY_CACHE_MIN_RES_UNIT', '4096'),\n('NET_RETRY_COUNT', '10'),\n('INNODB_STATS_ON_METADATA', 'ON'),\n('LOG_WARNINGS', '1'),\n('INNODB_ROLLBACK_ON_TIMEOUT', 'OFF'),\n('FLUSH', 'OFF'),\n('PROFILING_HISTORY_SIZE', '15'),\n('MAX_LONG_DATA_SIZE', '16777216'),\n('INNODB_CHANGE_BUFFERING', 'all'),\n('CHARACTER_SET_SERVER', 'latin1'),\n('READ_RND_BUFFER_SIZE', '262144'),\n('SLAVE_MAX_ALLOWED_PACKET', '1073741824'),\n('INNODB_FILE_FORMAT', 'Antelope'),\n('FLUSH_TIME', '0'),\n('BIG_TABLES', 'OFF'),\n('CHARACTER_SET_DATABASE', 
'latin1'),\n('SQL_SELECT_LIMIT', '18446744073709551615'),\n('BULK_INSERT_BUFFER_SIZE', '8388608'),\n('DATE_FORMAT', '%Y-%m-%d'),\n('CHARACTER_SET_FILESYSTEM', 'binary'),\n('READ_ONLY', 'OFF'),\n('BINLOG_STMT_CACHE_SIZE', '32768'),\n('RAND_SEED1', '0'),\n('MAX_BINLOG_CACHE_SIZE', '18446744073709547520'),\n('INNODB_DATA_FILE_PATH', 'ibdata1:10M:autoextend'),\n('PERFORMANCE_SCHEMA_MAX_FILE_CLASSES', '50'),\n('INNODB_PURGE_THREADS', '0'),\n('MAX_SORT_LENGTH', '1024'),\n('PROFILING', 'OFF'),\n('PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_SIZE', '10'),\n('INNODB_STRICT_MODE', 'OFF'),\n('SLAVE_COMPRESSED_PROTOCOL', 'OFF'),\n('KEY_CACHE_DIVISION_LIMIT', '100'),\n('OLD_PASSWORDS', 'OFF'),\n('GENERAL_LOG_FILE', '/var/lib/mysql/Intranet.log'),\n('NET_WRITE_TIMEOUT', '60'),\n('PERFORMANCE_SCHEMA_MAX_COND_CLASSES', '80'),\n('QUERY_CACHE_TYPE', 'ON'),\n('AUTO_INCREMENT_INCREMENT', '1'),\n('METADATA_LOCKS_CACHE_SIZE', '1024'),\n('TMPDIR', '/tmp'),\n('QUERY_CACHE_LIMIT', '1048576'),\n('EXPIRE_LOGS_DAYS', '10'),\n('TX_ISOLATION', 'REPEATABLE-READ'),\n('HAVE_PARTITIONING', 'YES'),\n('LOG_ERROR', '/var/log/mysql/error.log'),\n('FOREIGN_KEY_CHECKS', 'ON'),\n('MAX_LENGTH_FOR_SORT_DATA', '1024'),\n('RELAY_LOG_INFO_FILE', 'relay-log.info'),\n('THREAD_STACK', '196608'),\n('INNODB_AUTOINC_LOCK_MODE', '1'),\n('NEW', 'OFF'),\n('INNODB_COMMIT_CONCURRENCY', '0'),\n('SKIP_NAME_RESOLVE', 'OFF'),\n('INNODB_MIRRORED_LOG_GROUPS', '1'),\n('PID_FILE', '/var/run/mysqld/mysqld.pid'),\n('INNODB_PURGE_BATCH_SIZE', '20'),\n('MAX_ALLOWED_PACKET', '16777216'),\n('VERSION', '5.5.40-0ubuntu0.12.04.1'),\n('CONCURRENT_INSERT', 'AUTO'),\n('INNODB_SUPPORT_XA', 'ON'),\n('TABLE_DEFINITION_CACHE', '400'),\n('INNODB_SYNC_SPIN_LOOPS', '30'),\n('QUERY_ALLOC_BLOCK_SIZE', '8192'),\n('COLLATION_CONNECTION', 'utf8_general_ci'),\n('MYISAM_REPAIR_THREADS', '1'),\n('INNODB_ADAPTIVE_FLUSHING', 'ON'),\n('FT_BOOLEAN_SYNTAX', '+ -><()~*:\"\"&|'),\n('INNODB_ADAPTIVE_HASH_INDEX', 'ON'),\n('VERSION_COMPILE_MACHINE', 'x86_64'),\n('SYSTEM_TIME_ZONE', 'EST'),\n('QUERY_CACHE_WLOCK_INVALIDATE', 'OFF'),\n('DIV_PRECISION_INCREMENT', '4'),\n('SYNC_FRM', 'ON'),\n('STORED_PROGRAM_CACHE', '256'),\n('TMP_TABLE_SIZE', '16777216'),\n('INNODB_DATA_HOME_DIR', ''),\n('PERFORMANCE_SCHEMA_MAX_THREAD_INSTANCES', '1000'),\n('INNODB_READ_IO_THREADS', '4'),\n('MULTI_RANGE_COUNT', '256'),\n('INNODB_WRITE_IO_THREADS', '4'),\n('SERVER_ID', '0'),\n('INNODB_BUFFER_POOL_INSTANCES', '1'),\n('SKIP_NETWORKING', 'OFF'),\n('INNODB_FORCE_RECOVERY', '0'),\n('CHARACTER_SET_SYSTEM', 'utf8'),\n('INNODB_LOG_FILES_IN_GROUP', '2'),\n('INIT_CONNECT', ''),\n('ERROR_COUNT', '0'),\n('OPTIMIZER_SEARCH_DEPTH', '62'),\n('HAVE_DYNAMIC_LOADING', 'YES'),\n('AUTOCOMMIT', 'ON'),\n('SYNC_BINLOG', '0'),\n('SSL_CAPATH', ''),\n('INNODB_PRINT_ALL_DEADLOCKS', 'OFF'),\n('SLAVE_EXEC_MODE', 'STRICT'),\n('INNODB_OPEN_FILES', '300'),\n('GENERAL_LOG', 'OFF'),\n('INNODB_FILE_FORMAT_CHECK', 'ON'),\n('LAST_INSERT_ID', '0'),\n('INNODB_READ_AHEAD_THRESHOLD', '56'),\n('HOSTNAME', 'Intranet'),\n('KEY_CACHE_BLOCK_SIZE', '1024'),\n('OLD', 'OFF'),\n('KEY_BUFFER_SIZE', '16777216'),\n('REPORT_PORT', '3306'),\n('HAVE_NDBCLUSTER', 'NO'),\n('SQL_LOG_BIN', 'ON'),\n('PSEUDO_SLAVE_MODE', 'OFF'),\n('THREAD_HANDLING', 'one-thread-per-connection'),\n('INNODB_STATS_METHOD', 'nulls_equal'),\n('LOG_BIN', 'OFF'),\n('INNODB_FAST_SHUTDOWN', '1'),\n('RELAY_LOG_SPACE_LIMIT', '0'),\n('SSL_CA', ''),\n('MAX_USER_CONNECTIONS', '0'),\n('INNODB_THREAD_CONCURRENCY', '0'),\n('SQL_MAX_JOIN_SIZE', '18446744073709551615'),\n('SLAVE_NET_TIMEOUT', 
'3600'),\n('TABLE_OPEN_CACHE', '400'),\n('INNODB_STATS_SAMPLE_PAGES', '8'),\n('SQL_BIG_TABLES', 'OFF'),\n('LOCAL_INFILE', 'ON'),\n('SQL_BUFFER_RESULT', 'OFF'),\n('HAVE_RTREE_KEYS', 'YES'),\n('ENGINE_CONDITION_PUSHDOWN', 'ON'),\n('HAVE_PROFILING', 'YES'),\n('LC_MESSAGES_DIR', '/usr/share/mysql/'),\n('OLD_ALTER_TABLE', 'OFF'),\n('HAVE_INNODB', 'YES'),\n('MYISAM_MMAP_SIZE', '18446744073709551615'),\n('SQL_MODE', ''),\n('PERFORMANCE_SCHEMA_MAX_FILE_HANDLES', '32768'),\n('TIMESTAMP', '1455544813'),\n('RELAY_LOG_RECOVERY', 'OFF'),\n('REPORT_USER', ''),\n('MAX_DELAYED_THREADS', '20'),\n('HAVE_GEOMETRY', 'YES'),\n('DATETIME_FORMAT', '%Y-%m-%d %H:%i:%s'),\n('SLOW_QUERY_LOG', 'OFF'),\n('INNODB_FLUSH_LOG_AT_TRX_COMMIT', '1'),\n('LC_MESSAGES', 'en_US'),\n('MAX_RELAY_LOG_SIZE', '0'),\n('LOG', 'OFF'),\n('INNODB_RANDOM_READ_AHEAD', 'OFF'),\n('OPEN_FILES_LIMIT', '1024'),\n('HAVE_CSV', 'YES'),\n('DATADIR', '/var/lib/mysql/'),\n('PORT', '3306'),\n('FT_MIN_WORD_LEN', '4'),\n('INNODB_CONCURRENCY_TICKETS', '500'),\n('VERSION_COMPILE_OS', 'debian-linux-gnu'),\n('LOG_BIN_TRUST_FUNCTION_CREATORS', 'OFF'),\n('INNODB_LOCKS_UNSAFE_FOR_BINLOG', 'OFF'),\n('INNODB_FORCE_LOAD_CORRUPTED', 'OFF'),\n('SQL_WARNINGS', 'OFF'),\n('HAVE_OPENSSL', 'DISABLED'),\n('RELAY_LOG', ''),\n('MAX_BINLOG_STMT_CACHE_SIZE', '18446744073709547520'),\n('PLUGIN_DIR', '/usr/lib/mysql/plugin/'),\n('PERFORMANCE_SCHEMA_MAX_FILE_INSTANCES', '10000'),\n('LOG_SLOW_QUERIES', 'OFF'),\n('INNODB_SPIN_WAIT_DELAY', '6'),\n('MAX_TMP_TABLES', '32'),\n('INNODB_FILE_FORMAT_MAX', 'Antelope'),\n('SQL_LOG_OFF', 'OFF'),\n('DEFAULT_STORAGE_ENGINE', 'InnoDB'),\n('SLOW_QUERY_LOG_FILE', '/var/lib/mysql/Intranet-slow.log'),\n('INNODB_LOCK_WAIT_TIMEOUT', '50'),\n('SQL_SLAVE_SKIP_COUNTER', '0'),\n('INNODB_OLD_BLOCKS_TIME', '0'),\n('SECURE_AUTH', 'OFF'),\n('RPL_RECOVERY_RANK', '0'),\n('NET_READ_TIMEOUT', '30'),\n('WARNING_COUNT', '0'),\n('MYISAM_STATS_METHOD', 'nulls_unequal'),\n('OPTIMIZER_SWITCH', 'index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on'),\n('MAX_CONNECT_ERRORS', '10'),\n('LOCKED_IN_MEMORY', 'OFF'),\n('INNODB_FLUSH_METHOD', ''),\n('EXTERNAL_USER', ''),\n('INNODB_LARGE_PREFIX', 'OFF'),\n('INNODB_CHECKSUMS', 'ON'),\n('STORAGE_ENGINE', 'InnoDB'),\n('SSL_KEY', ''),\n('HAVE_SSL', 'DISABLED'),\n('MYISAM_USE_MMAP', 'OFF'),\n('RAND_SEED2', '0'),\n('SLAVE_SKIP_ERRORS', 'OFF'),\n('MIN_EXAMINED_ROW_LIMIT', '0'),\n('INSERT_ID', '0'),\n('LOG_SLAVE_UPDATES', 'OFF'),\n('RELAY_LOG_PURGE', 'ON'),\n('SYNC_MASTER_INFO', '0'),\n('COLLATION_DATABASE', 'latin1_swedish_ci'),\n('INNODB_FILE_PER_TABLE', 'OFF'),\n('INNODB_LOG_GROUP_HOME_DIR', './'),\n('SSL_CERT', ''),\n('INNODB_LOG_BUFFER_SIZE', '8388608'),\n('SOCKET', '/var/run/mysqld/mysqld.sock'),\n('CHARACTER_SET_CLIENT', 'utf8'),\n('IDENTITY', '0'),\n('INNODB_MAX_PURGE_LAG', '0'),\n('SKIP_EXTERNAL_LOCKING', 'ON'),\n('MYISAM_MAX_SORT_FILE_SIZE', '9223372036853727232'),\n('SLAVE_LOAD_TMPDIR', '/tmp'),\n('SLAVE_TYPE_CONVERSIONS', ''),\n('INNODB_ADDITIONAL_MEM_POOL_SIZE', '8388608'),\n('SQL_NOTES', 'ON'),\n('INNODB_USE_SYS_MALLOC', 'ON'),\n('LICENSE', 'GPL'),\n('INNODB_MAX_DIRTY_PAGES_PCT', '75'),\n('PERFORMANCE_SCHEMA_MAX_TABLE_INSTANCES', '50000'),\n('THREAD_CONCURRENCY', '10'),\n('UNIQUE_CHECKS', 'ON'),\n('INNODB_OLD_BLOCKS_PCT', '37'),\n('HAVE_COMPRESS', 'YES');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `STATISTICS`\n--\n\nCREATE TEMPORARY TABLE `STATISTICS` (\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT 
'',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `NON_UNIQUE` bigint(1) NOT NULL DEFAULT '0',\n `INDEX_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `INDEX_NAME` varchar(64) NOT NULL DEFAULT '',\n `SEQ_IN_INDEX` bigint(2) NOT NULL DEFAULT '0',\n `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '',\n `COLLATION` varchar(1) DEFAULT NULL,\n `CARDINALITY` bigint(21) DEFAULT NULL,\n `SUB_PART` bigint(3) DEFAULT NULL,\n `PACKED` varchar(10) DEFAULT NULL,\n `NULLABLE` varchar(3) NOT NULL DEFAULT '',\n `INDEX_TYPE` varchar(16) NOT NULL DEFAULT '',\n `COMMENT` varchar(16) DEFAULT NULL,\n `INDEX_COMMENT` varchar(1024) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `STATISTICS`\n--\n\nINSERT INTO `STATISTICS` (`TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `NON_UNIQUE`, `INDEX_SCHEMA`, `INDEX_NAME`, `SEQ_IN_INDEX`, `COLUMN_NAME`, `COLLATION`, `CARDINALITY`, `SUB_PART`, `PACKED`, `NULLABLE`, `INDEX_TYPE`, `COMMENT`, `INDEX_COMMENT`) VALUES\n('def', 'inventory', 'Inout', 1, 'inventory', 'ID', 1, 'ID', 'A', 305, NULL, NULL, '', 'BTREE', '', ''),\n('def', 'inventory', 'Inventory', 0, 'inventory', 'PRIMARY', 1, 'ID', 'A', 373, NULL, NULL, '', 'BTREE', '', ''),\n('def', 'inventory', 'Sessions', 0, 'inventory', 'PRIMARY', 1, 'ID', 'A', 785, NULL, NULL, '', 'BTREE', '', ''),\n('def', 'inventory', 'Wiped', 1, 'inventory', 'ID', 1, 'ID', 'A', 1, NULL, NULL, '', 'BTREE', '', ''),\n('def', 'inventory', 'barcodes', 0, 'inventory', 'PRIMARY', 1, 'id', 'A', 361, NULL, NULL, '', 'BTREE', '', '');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `TABLES`\n--\n\nCREATE TEMPORARY TABLE `TABLES` (\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `TABLE_TYPE` varchar(64) NOT NULL DEFAULT '',\n `ENGINE` varchar(64) DEFAULT NULL,\n `VERSION` bigint(21) unsigned DEFAULT NULL,\n `ROW_FORMAT` varchar(10) DEFAULT NULL,\n `TABLE_ROWS` bigint(21) unsigned DEFAULT NULL,\n `AVG_ROW_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `DATA_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `MAX_DATA_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `INDEX_LENGTH` bigint(21) unsigned DEFAULT NULL,\n `DATA_FREE` bigint(21) unsigned DEFAULT NULL,\n `AUTO_INCREMENT` bigint(21) unsigned DEFAULT NULL,\n `CREATE_TIME` datetime DEFAULT NULL,\n `UPDATE_TIME` datetime DEFAULT NULL,\n `CHECK_TIME` datetime DEFAULT NULL,\n `TABLE_COLLATION` varchar(32) DEFAULT NULL,\n `CHECKSUM` bigint(21) unsigned DEFAULT NULL,\n `CREATE_OPTIONS` varchar(255) DEFAULT NULL,\n `TABLE_COMMENT` varchar(2048) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `TABLES`\n--\n\nINSERT INTO `TABLES` (`TABLE_CATALOG`, `TABLE_SCHEMA`, `TABLE_NAME`, `TABLE_TYPE`, `ENGINE`, `VERSION`, `ROW_FORMAT`, `TABLE_ROWS`, `AVG_ROW_LENGTH`, `DATA_LENGTH`, `MAX_DATA_LENGTH`, `INDEX_LENGTH`, `DATA_FREE`, `AUTO_INCREMENT`, `CREATE_TIME`, `UPDATE_TIME`, `CHECK_TIME`, `TABLE_COLLATION`, `CHECKSUM`, `CREATE_OPTIONS`, `TABLE_COMMENT`) VALUES\n('def', 'information_schema', 'CHARACTER_SETS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 384, 0, 16434816, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=43690', ''),\n('def', 'information_schema', 'COLLATIONS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 231, 0, 16704765, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 
'max_rows=72628', ''),\n('def', 'information_schema', 'COLLATION_CHARACTER_SET_APPLICABILITY', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 195, 0, 16357770, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=86037', ''),\n('def', 'information_schema', 'COLUMNS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=2802', ''),\n('def', 'information_schema', 'COLUMN_PRIVILEGES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 2565, 0, 16757145, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=6540', ''),\n('def', 'information_schema', 'ENGINES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 490, 0, 16574250, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=34239', ''),\n('def', 'information_schema', 'EVENTS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=618', ''),\n('def', 'information_schema', 'FILES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 2677, 0, 16758020, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=6267', ''),\n('def', 'information_schema', 'GLOBAL_STATUS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 3268, 0, 16755036, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=5133', ''),\n('def', 'information_schema', 'GLOBAL_VARIABLES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 3268, 0, 16755036, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=5133', ''),\n('def', 'information_schema', 'KEY_COLUMN_USAGE', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 4637, 0, 16762755, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=3618', ''),\n('def', 'information_schema', 'PARAMETERS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=6050', ''),\n('def', 'information_schema', 'PARTITIONS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=5579', ''),\n('def', 'information_schema', 'PLUGINS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=11328', ''),\n('def', 'information_schema', 'PROCESSLIST', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=23899', ''),\n('def', 'information_schema', 'PROFILING', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 308, 0, 16562084, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=54471', ''),\n('def', 'information_schema', 'REFERENTIAL_CONSTRAINTS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 4814, 0, 16767162, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=3485', ''),\n('def', 'information_schema', 'ROUTINES', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=583', ''),\n('def', 'information_schema', 'SCHEMATA', 'SYSTEM VIEW', 'MEMORY', 10, 
'Fixed', NULL, 3464, 0, 16738048, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=4843', ''),\n('def', 'information_schema', 'SCHEMA_PRIVILEGES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 2179, 0, 16736899, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=7699', ''),\n('def', 'information_schema', 'SESSION_STATUS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 3268, 0, 16755036, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=5133', ''),\n('def', 'information_schema', 'SESSION_VARIABLES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 3268, 0, 16755036, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=5133', ''),\n('def', 'information_schema', 'STATISTICS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 5753, 0, 16752736, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=2916', ''),\n('def', 'information_schema', 'TABLES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 9450, 0, 16764300, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=1775', ''),\n('def', 'information_schema', 'TABLESPACES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 6951, 0, 16772763, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=2413', ''),\n('def', 'information_schema', 'TABLE_CONSTRAINTS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 2504, 0, 16721712, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=6700', ''),\n('def', 'information_schema', 'TABLE_PRIVILEGES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 2372, 0, 16748692, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=7073', ''),\n('def', 'information_schema', 'TRIGGERS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=569', ''),\n('def', 'information_schema', 'USER_PRIVILEGES', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 1986, 0, 16726092, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=8447', ''),\n('def', 'information_schema', 'VIEWS', 'SYSTEM VIEW', 'MyISAM', 10, 'Dynamic', NULL, 0, 0, 281474976710655, 1024, 0, NULL, '2016-02-15 14:00:13', '2016-02-15 14:00:13', NULL, 'utf8_general_ci', NULL, 'max_rows=6935', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 6852, 0, 16766844, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=2448', ''),\n('def', 'information_schema', 'INNODB_TRX', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 4534, 0, 16766732, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=3700', ''),\n('def', 'information_schema', 'INNODB_BUFFER_POOL_STATS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 257, 0, 16332350, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=65280', ''),\n('def', 'information_schema', 'INNODB_LOCK_WAITS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 599, 0, 16749238, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=28008', ''),\n('def', 'information_schema', 'INNODB_CMPMEM', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 29, 0, 15204352, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=578524', ''),\n('def', 'information_schema', 'INNODB_CMP', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', 
NULL, 25, 0, 13107200, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=671088', ''),\n('def', 'information_schema', 'INNODB_LOCKS', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 31244, 0, 16746784, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=536', ''),\n('def', 'information_schema', 'INNODB_CMPMEM_RESET', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 29, 0, 15204352, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=578524', ''),\n('def', 'information_schema', 'INNODB_CMP_RESET', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 25, 0, 13107200, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=671088', ''),\n('def', 'information_schema', 'INNODB_BUFFER_PAGE_LRU', 'SYSTEM VIEW', 'MEMORY', 10, 'Fixed', NULL, 6669, 0, 16765866, 0, 0, NULL, '2016-02-15 14:00:13', NULL, NULL, 'utf8_general_ci', NULL, 'max_rows=2515', ''),\n('def', 'inventory', 'Inout', 'BASE TABLE', 'InnoDB', 10, 'Compact', 569, 172, 98304, 0, 16384, 7340032, NULL, '2014-10-31 19:54:48', NULL, NULL, 'latin1_swedish_ci', NULL, '', ''),\n('def', 'inventory', 'Inventory', 'BASE TABLE', 'InnoDB', 10, 'Compact', 373, 219, 81920, 0, 0, 7340032, 282, '2014-10-31 19:54:48', NULL, NULL, 'latin1_swedish_ci', NULL, '', ''),\n('def', 'inventory', 'Sessions', 'BASE TABLE', 'InnoDB', 10, 'Compact', 714, 160, 114688, 0, 0, 7340032, 1000, '2014-10-31 19:54:48', NULL, NULL, 'latin1_swedish_ci', NULL, '', ''),\n('def', 'inventory', 'Wiped', 'BASE TABLE', 'InnoDB', 10, 'Compact', 1, 16384, 16384, 0, 16384, 7340032, NULL, '2014-10-31 19:54:48', NULL, NULL, 'latin1_swedish_ci', NULL, '', ''),\n('def', 'inventory', 'barcodes', 'BASE TABLE', 'InnoDB', 10, 'Compact', 361, 45, 16384, 0, 0, 7340032, 362, '2014-10-31 19:54:48', NULL, NULL, 'latin1_swedish_ci', NULL, '', '');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `TABLESPACES`\n--\n\nCREATE TEMPORARY TABLE `TABLESPACES` (\n `TABLESPACE_NAME` varchar(64) NOT NULL DEFAULT '',\n `ENGINE` varchar(64) NOT NULL DEFAULT '',\n `TABLESPACE_TYPE` varchar(64) DEFAULT NULL,\n `LOGFILE_GROUP_NAME` varchar(64) DEFAULT NULL,\n `EXTENT_SIZE` bigint(21) unsigned DEFAULT NULL,\n `AUTOEXTEND_SIZE` bigint(21) unsigned DEFAULT NULL,\n `MAXIMUM_SIZE` bigint(21) unsigned DEFAULT NULL,\n `NODEGROUP_ID` bigint(21) unsigned DEFAULT NULL,\n `TABLESPACE_COMMENT` varchar(2048) DEFAULT NULL\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `TABLE_CONSTRAINTS`\n--\n\nCREATE TEMPORARY TABLE `TABLE_CONSTRAINTS` (\n `CONSTRAINT_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `CONSTRAINT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `CONSTRAINT_NAME` varchar(64) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `CONSTRAINT_TYPE` varchar(64) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `TABLE_CONSTRAINTS`\n--\n\nINSERT INTO `TABLE_CONSTRAINTS` (`CONSTRAINT_CATALOG`, `CONSTRAINT_SCHEMA`, `CONSTRAINT_NAME`, `TABLE_SCHEMA`, `TABLE_NAME`, `CONSTRAINT_TYPE`) VALUES\n('def', 'inventory', 'PRIMARY', 'inventory', 'Inventory', 'PRIMARY KEY'),\n('def', 'inventory', 'PRIMARY', 'inventory', 'Sessions', 'PRIMARY KEY'),\n('def', 'inventory', 'PRIMARY', 'inventory', 'barcodes', 'PRIMARY KEY');\n\n-- --------------------------------------------------------\n\n--\n-- Table 
structure for table `TABLE_PRIVILEGES`\n--\n\nCREATE TEMPORARY TABLE `TABLE_PRIVILEGES` (\n `GRANTEE` varchar(81) NOT NULL DEFAULT '',\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `PRIVILEGE_TYPE` varchar(64) NOT NULL DEFAULT '',\n `IS_GRANTABLE` varchar(3) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `TRIGGERS`\n--\n\nCREATE TEMPORARY TABLE `TRIGGERS` (\n `TRIGGER_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TRIGGER_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TRIGGER_NAME` varchar(64) NOT NULL DEFAULT '',\n `EVENT_MANIPULATION` varchar(6) NOT NULL DEFAULT '',\n `EVENT_OBJECT_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `EVENT_OBJECT_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `EVENT_OBJECT_TABLE` varchar(64) NOT NULL DEFAULT '',\n `ACTION_ORDER` bigint(4) NOT NULL DEFAULT '0',\n `ACTION_CONDITION` longtext,\n `ACTION_STATEMENT` longtext NOT NULL,\n `ACTION_ORIENTATION` varchar(9) NOT NULL DEFAULT '',\n `ACTION_TIMING` varchar(6) NOT NULL DEFAULT '',\n `ACTION_REFERENCE_OLD_TABLE` varchar(64) DEFAULT NULL,\n `ACTION_REFERENCE_NEW_TABLE` varchar(64) DEFAULT NULL,\n `ACTION_REFERENCE_OLD_ROW` varchar(3) NOT NULL DEFAULT '',\n `ACTION_REFERENCE_NEW_ROW` varchar(3) NOT NULL DEFAULT '',\n `CREATED` datetime DEFAULT NULL,\n `SQL_MODE` varchar(8192) NOT NULL DEFAULT '',\n `DEFINER` varchar(77) NOT NULL DEFAULT '',\n `CHARACTER_SET_CLIENT` varchar(32) NOT NULL DEFAULT '',\n `COLLATION_CONNECTION` varchar(32) NOT NULL DEFAULT '',\n `DATABASE_COLLATION` varchar(32) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `USER_PRIVILEGES`\n--\n\nCREATE TEMPORARY TABLE `USER_PRIVILEGES` (\n `GRANTEE` varchar(81) NOT NULL DEFAULT '',\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `PRIVILEGE_TYPE` varchar(64) NOT NULL DEFAULT '',\n `IS_GRANTABLE` varchar(3) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `USER_PRIVILEGES`\n--\n\nINSERT INTO `USER_PRIVILEGES` (`GRANTEE`, `TABLE_CATALOG`, `PRIVILEGE_TYPE`, `IS_GRANTABLE`) VALUES\n('''matt''@''localhost''', 'def', 'USAGE', 'NO');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `VIEWS`\n--\n\nCREATE TEMPORARY TABLE `VIEWS` (\n `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',\n `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',\n `TABLE_NAME` varchar(64) NOT NULL DEFAULT '',\n `VIEW_DEFINITION` longtext NOT NULL,\n `CHECK_OPTION` varchar(8) NOT NULL DEFAULT '',\n `IS_UPDATABLE` varchar(3) NOT NULL DEFAULT '',\n `DEFINER` varchar(77) NOT NULL DEFAULT '',\n `SECURITY_TYPE` varchar(7) NOT NULL DEFAULT '',\n `CHARACTER_SET_CLIENT` varchar(32) NOT NULL DEFAULT '',\n `COLLATION_CONNECTION` varchar(32) NOT NULL DEFAULT ''\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_BUFFER_PAGE`\n--\n\nCREATE TEMPORARY TABLE `INNODB_BUFFER_PAGE` (\n `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',\n `BLOCK_ID` bigint(21) unsigned NOT NULL DEFAULT '0',\n `SPACE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGE_NUMBER` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGE_TYPE` varchar(64) DEFAULT NULL,\n `FLUSH_TYPE` bigint(21) unsigned NOT NULL DEFAULT '0',\n 
`FIX_COUNT` bigint(21) unsigned NOT NULL DEFAULT '0',\n `IS_HASHED` varchar(3) DEFAULT NULL,\n `NEWEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `OLDEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `ACCESS_TIME` bigint(21) unsigned NOT NULL DEFAULT '0',\n `TABLE_NAME` varchar(1024) DEFAULT NULL,\n `INDEX_NAME` varchar(1024) DEFAULT NULL,\n `NUMBER_RECORDS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `DATA_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `COMPRESSED_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGE_STATE` varchar(64) DEFAULT NULL,\n `IO_FIX` varchar(64) DEFAULT NULL,\n `IS_OLD` varchar(3) DEFAULT NULL,\n `FREE_PAGE_CLOCK` bigint(21) unsigned NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_TRX`\n--\n\nCREATE TEMPORARY TABLE `INNODB_TRX` (\n `trx_id` varchar(18) NOT NULL DEFAULT '',\n `trx_state` varchar(13) NOT NULL DEFAULT '',\n `trx_started` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `trx_requested_lock_id` varchar(81) DEFAULT NULL,\n `trx_wait_started` datetime DEFAULT NULL,\n `trx_weight` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_mysql_thread_id` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_query` varchar(1024) DEFAULT NULL,\n `trx_operation_state` varchar(64) DEFAULT NULL,\n `trx_tables_in_use` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_tables_locked` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_lock_structs` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_lock_memory_bytes` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_rows_locked` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_rows_modified` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_concurrency_tickets` bigint(21) unsigned NOT NULL DEFAULT '0',\n `trx_isolation_level` varchar(16) NOT NULL DEFAULT '',\n `trx_unique_checks` int(1) NOT NULL DEFAULT '0',\n `trx_foreign_key_checks` int(1) NOT NULL DEFAULT '0',\n `trx_last_foreign_key_error` varchar(256) DEFAULT NULL,\n `trx_adaptive_hash_latched` int(1) NOT NULL DEFAULT '0',\n `trx_adaptive_hash_timeout` bigint(21) unsigned NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_BUFFER_POOL_STATS`\n--\n\nCREATE TEMPORARY TABLE `INNODB_BUFFER_POOL_STATS` (\n `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',\n `POOL_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `FREE_BUFFERS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',\n `OLD_DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',\n `MODIFIED_DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PENDING_DECOMPRESS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PENDING_READS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PENDING_FLUSH_LRU` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PENDING_FLUSH_LIST` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGES_MADE_YOUNG` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGES_NOT_MADE_YOUNG` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGES_MADE_YOUNG_RATE` double NOT NULL DEFAULT '0',\n `PAGES_MADE_NOT_YOUNG_RATE` double NOT NULL DEFAULT '0',\n `NUMBER_PAGES_READ` bigint(21) unsigned NOT NULL DEFAULT 
'0',\n `NUMBER_PAGES_CREATED` bigint(21) unsigned NOT NULL DEFAULT '0',\n `NUMBER_PAGES_WRITTEN` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGES_READ_RATE` double NOT NULL DEFAULT '0',\n `PAGES_CREATE_RATE` double NOT NULL DEFAULT '0',\n `PAGES_WRITTEN_RATE` double NOT NULL DEFAULT '0',\n `NUMBER_PAGES_GET` bigint(21) unsigned NOT NULL DEFAULT '0',\n `HIT_RATE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `YOUNG_MAKE_PER_THOUSAND_GETS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `NOT_YOUNG_MAKE_PER_THOUSAND_GETS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `NUMBER_PAGES_READ_AHEAD` bigint(21) unsigned NOT NULL DEFAULT '0',\n `NUMBER_READ_AHEAD_EVICTED` bigint(21) unsigned NOT NULL DEFAULT '0',\n `READ_AHEAD_RATE` double NOT NULL DEFAULT '0',\n `READ_AHEAD_EVICTED_RATE` double NOT NULL DEFAULT '0',\n `LRU_IO_TOTAL` bigint(21) unsigned NOT NULL DEFAULT '0',\n `LRU_IO_CURRENT` bigint(21) unsigned NOT NULL DEFAULT '0',\n `UNCOMPRESS_TOTAL` bigint(21) unsigned NOT NULL DEFAULT '0',\n `UNCOMPRESS_CURRENT` bigint(21) unsigned NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_LOCK_WAITS`\n--\n\nCREATE TEMPORARY TABLE `INNODB_LOCK_WAITS` (\n `requesting_trx_id` varchar(18) NOT NULL DEFAULT '',\n `requested_lock_id` varchar(81) NOT NULL DEFAULT '',\n `blocking_trx_id` varchar(18) NOT NULL DEFAULT '',\n `blocking_lock_id` varchar(81) NOT NULL DEFAULT ''\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_CMPMEM`\n--\n\nCREATE TEMPORARY TABLE `INNODB_CMPMEM` (\n `page_size` int(5) NOT NULL DEFAULT '0',\n `buffer_pool_instance` int(11) NOT NULL DEFAULT '0',\n `pages_used` int(11) NOT NULL DEFAULT '0',\n `pages_free` int(11) NOT NULL DEFAULT '0',\n `relocation_ops` bigint(21) NOT NULL DEFAULT '0',\n `relocation_time` int(11) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_CMP`\n--\n\nCREATE TEMPORARY TABLE `INNODB_CMP` (\n `page_size` int(5) NOT NULL DEFAULT '0',\n `compress_ops` int(11) NOT NULL DEFAULT '0',\n `compress_ops_ok` int(11) NOT NULL DEFAULT '0',\n `compress_time` int(11) NOT NULL DEFAULT '0',\n `uncompress_ops` int(11) NOT NULL DEFAULT '0',\n `uncompress_time` int(11) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_LOCKS`\n--\n\nCREATE TEMPORARY TABLE `INNODB_LOCKS` (\n `lock_id` varchar(81) NOT NULL DEFAULT '',\n `lock_trx_id` varchar(18) NOT NULL DEFAULT '',\n `lock_mode` varchar(32) NOT NULL DEFAULT '',\n `lock_type` varchar(32) NOT NULL DEFAULT '',\n `lock_table` varchar(1024) NOT NULL DEFAULT '',\n `lock_index` varchar(1024) DEFAULT NULL,\n `lock_space` bigint(21) unsigned DEFAULT NULL,\n `lock_page` bigint(21) unsigned DEFAULT NULL,\n `lock_rec` bigint(21) unsigned DEFAULT NULL,\n `lock_data` varchar(8192) DEFAULT NULL\n) 
ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_CMPMEM_RESET`\n--\n\nCREATE TEMPORARY TABLE `INNODB_CMPMEM_RESET` (\n `page_size` int(5) NOT NULL DEFAULT '0',\n `buffer_pool_instance` int(11) NOT NULL DEFAULT '0',\n `pages_used` int(11) NOT NULL DEFAULT '0',\n `pages_free` int(11) NOT NULL DEFAULT '0',\n `relocation_ops` bigint(21) NOT NULL DEFAULT '0',\n `relocation_time` int(11) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_CMP_RESET`\n--\n\nCREATE TEMPORARY TABLE `INNODB_CMP_RESET` (\n `page_size` int(5) NOT NULL DEFAULT '0',\n `compress_ops` int(11) NOT NULL DEFAULT '0',\n `compress_ops_ok` int(11) NOT NULL DEFAULT '0',\n `compress_time` int(11) NOT NULL DEFAULT '0',\n `uncompress_ops` int(11) NOT NULL DEFAULT '0',\n `uncompress_time` int(11) NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `INNODB_BUFFER_PAGE_LRU`\n--\n\nCREATE TEMPORARY TABLE `INNODB_BUFFER_PAGE_LRU` (\n `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',\n `LRU_POSITION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `SPACE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGE_NUMBER` bigint(21) unsigned NOT NULL DEFAULT '0',\n `PAGE_TYPE` varchar(64) DEFAULT NULL,\n `FLUSH_TYPE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `FIX_COUNT` bigint(21) unsigned NOT NULL DEFAULT '0',\n `IS_HASHED` varchar(3) DEFAULT NULL,\n `NEWEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `OLDEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',\n `ACCESS_TIME` bigint(21) unsigned NOT NULL DEFAULT '0',\n `TABLE_NAME` varchar(1024) DEFAULT NULL,\n `INDEX_NAME` varchar(1024) DEFAULT NULL,\n `NUMBER_RECORDS` bigint(21) unsigned NOT NULL DEFAULT '0',\n `DATA_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `COMPRESSED_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',\n `COMPRESSED` varchar(3) DEFAULT NULL,\n `IO_FIX` varchar(64) DEFAULT NULL,\n `IS_OLD` varchar(3) DEFAULT NULL,\n `FREE_PAGE_CLOCK` bigint(21) unsigned NOT NULL DEFAULT '0'\n) ENGINE=MEMORY DEFAULT CHARSET=utf8;\n-- in use (#1227 - Access denied; you need (at least one of) the PROCESS privilege(s) for this operation)\n--\n-- Database: `inventory`\n--\nCREATE DATABASE `inventory` DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci;\nUSE `inventory`;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `Inout`\n--\n\nCREATE TABLE IF NOT EXISTS `Inout` (\n `ID` int(255) NOT NULL,\n `StudentID` varchar(255) NOT NULL,\n `Use` varchar(255) NOT NULL,\n `DateIn` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `DateOut` datetime NOT NULL,\n `UserOut` varchar(255) NOT NULL,\n `UserIn` varchar(255) NOT NULL DEFAULT 'N/A',\n `Issues` text NOT NULL,\n KEY `ID` (`ID`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `Inout`\n--\n\nINSERT INTO `Inout` (`ID`, `StudentID`, `Use`, `DateIn`, `DateOut`, `UserOut`, `UserIn`, `Issues`) VALUES\n(1, '', 'test', '2014-10-31 
15:59:31', '2014-10-31 15:59:25', 'netadmin', 'netadmin', 'pickles'),\n(3, 'Jake', 'test', '2014-11-04 12:40:19', '2014-11-04 12:40:00', 'williams', 'williams', 'test2'),\n(20, 'Kayla Willford', '', '2014-11-04 19:53:38', '2014-11-04 16:03:01', 'jbonaccorsi', 'jbonaccorsi', ''),\n(19, 'Kayla Willford', '', '2014-11-04 19:53:22', '2014-11-04 16:04:11', 'jbonaccorsi', 'jbonaccorsi', ''),\n(84, '', '', '2014-11-04 16:05:15', '2014-11-04 16:04:45', 'jbonaccorsi', 'jbonaccorsi', ''),\n(84, '', '', '2014-11-04 19:52:09', '2014-11-04 16:05:37', 'jbonaccorsi', 'jbonaccorsi', ''),\n(12, 'Adam Sattler', 'DataGen', '2014-11-04 19:50:30', '2014-11-04 17:43:56', 'jbonaccorsi', 'jbonaccorsi', ''),\n(3, 'joe', 'test', '2014-11-05 08:57:04', '2014-11-05 08:56:44', 'williams', 'williams', 'asfads'),\n(3, 'joe', 'test', '2014-11-05 08:58:27', '2014-11-05 08:57:47', 'williams', 'williams', ''),\n(93, '', 'XRY Tutorial', '2014-11-05 15:26:45', '2014-11-05 11:26:22', 'williams', 'mfortier', ''),\n(92, '', 'Making PDFs', '2014-11-05 14:17:26', '2014-11-05 12:53:09', 'mfortier', 'mfortier', ''),\n(87, 'Andre MacCarone', 'NetAdmin', '2014-11-05 14:23:31', '2014-11-05 13:03:20', 'mfortier', 'mfortier', ''),\n(19, 'Kelsey', 'Mobile Extraction', '2014-11-05 17:48:34', '2014-11-05 15:56:28', 'mfortier', 'mgreen', 'None'),\n(20, '851339', '', '2014-11-05 15:59:55', '2014-11-05 15:56:59', 'mfortier', 'mfortier', ''),\n(84, '851339', '', '2014-11-05 15:58:58', '2014-11-05 15:57:31', 'mfortier', 'mfortier', ''),\n(84, 'Kelsey W.', '', '2014-11-05 18:11:39', '2014-11-05 15:59:15', 'mfortier', 'mgreen', 'None'),\n(20, 'Kelsey W.', '', '2014-11-05 18:02:44', '2014-11-05 16:00:03', 'mfortier', 'mgreen', 'None'),\n(19, '', 'iOS Handoff', '2014-11-05 18:01:34', '2014-11-05 17:48:53', 'mgreen', 'mgreen', 'oops wrong device'),\n(11, '', 'iOS Handoff', '2014-11-05 19:48:48', '2014-11-05 18:02:20', 'mgreen', 'mgreen', ''),\n(2, 'Ashley', 'testing', '2014-11-06 09:27:13', '2014-11-06 09:26:26', 'williams', 'williams', 'leaving for the day'),\n(2, 'Ashley', 'has drive', '2014-11-06 15:40:57', '2014-11-06 09:27:53', 'williams', 'williams', 'done'),\n(111, '', 'Transferring 2TB backups from forensics backup to NAS', '2014-11-06 12:11:45', '2014-11-06 11:48:05', 'adible', 'adible', 'Leaving for the day'),\n(105, '', 'Research project to install and test some software.', '2014-11-06 11:53:16', '2014-11-06 11:52:36', 'adible', 'adible', 'Leaving for the day'),\n(105, '', 'Research Project to install and test some software.', '2014-11-10 13:04:47', '2014-11-06 12:11:11', 'adible', 'ddeloge', ''),\n(111, '', 'Transferring 2TB backups from forensics backup to NAS', '2014-12-02 12:10:49', '2014-11-06 12:11:55', 'adible', 'jbonaccorsi', 'case'),\n(66, 'Austin', 'Project', '2014-11-06 14:47:25', '2014-11-06 13:36:12', 'bcampbell', 'bcampbell', 'Done.'),\n(20, 'Kayla', 'Project', '2014-11-06 17:24:32', '2014-11-06 15:41:10', 'williams', 'mgreen', 'None'),\n(98, 'Olivia H.', '', '2014-11-07 14:08:36', '2014-11-07 11:26:07', 'ascallion', 'ddeloge', ''),\n(93, '', 'XRY', '2014-11-07 15:24:34', '2014-11-07 13:21:01', 'ddeloge', 'ddeloge', ''),\n(83, '', 'XRY', '2014-11-07 15:23:48', '2014-11-07 13:21:46', 'ddeloge', 'ddeloge', ''),\n(10, '882783', 'Mobile Device Apps', '2014-11-07 14:44:29', '2014-11-07 13:34:46', 'ddeloge', 'ddeloge', ''),\n(9, '882783', 'Mobile Device Apps', '2014-11-07 14:39:53', '2014-11-07 13:40:08', 'ddeloge', 'ddeloge', ''),\n(5, '', 'Access Equipment Cabinet', '2014-11-07 16:16:13', '2014-11-07 14:09:42', 'ddeloge', 
'mfortier', ''),\n(92, '882783', 'Mobile Device Apps', '2014-11-07 14:44:53', '2014-11-07 14:13:45', 'ddeloge', 'ddeloge', ''),\n(86, 'Elizabeth', 'LCDI 1 on 1', '2014-11-07 15:44:28', '2014-11-07 15:22:27', 'ddeloge', 'ddeloge', ''),\n(1, 'rawr', 'rawrrrr', '2014-11-07 15:50:49', '2014-11-07 15:50:00', 'netadmin', 'netadmin', 'still rawring'),\n(9, 'Chris A', 'project', '2014-11-10 15:36:33', '2014-11-10 12:36:58', 'williams', 'ddeloge', ''),\n(10, 'Chris A', 'project', '2014-11-10 15:36:15', '2014-11-10 12:37:39', 'williams', 'ddeloge', ''),\n(105, '', '', '2014-11-10 13:05:20', '2014-11-10 13:05:00', 'ddeloge', 'ddeloge', ''),\n(5, '', 'Access Equipment Cabinet', '2014-11-10 15:48:32', '2014-11-10 13:08:19', 'ddeloge', 'ddeloge', ''),\n(86, '', 'Add Items to Inventory', '2014-11-10 13:27:28', '2014-11-10 13:08:50', 'ddeloge', 'ddeloge', ''),\n(89, '', 'Add Items to Inventory', '2014-11-10 13:27:08', '2014-11-10 13:13:51', 'ddeloge', 'ddeloge', ''),\n(83, 'Micheal C.', 'Project Use', '2014-11-11 12:50:54', '2014-11-11 10:12:10', 'ascallion', 'jbonaccorsi', ''),\n(100, 'Micheal C.', 'Project Use', '2014-11-11 12:51:50', '2014-11-11 10:18:12', 'ascallion', 'jbonaccorsi', ''),\n(81, 'Brent', '', '2014-11-11 15:49:12', '2014-11-11 15:38:15', 'jbonaccorsi', 'jbonaccorsi', ''),\n(100, 'Micheal C', '', '2014-11-11 19:13:53', '2014-11-11 15:55:43', 'jbonaccorsi', 'jbonaccorsi', ''),\n(9, '851339', '', '2014-11-11 19:05:14', '2014-11-11 16:01:37', 'jbonaccorsi', 'jbonaccorsi', ''),\n(84, '', '', '2014-11-11 19:51:13', '2014-11-11 16:38:18', 'jbonaccorsi', 'jbonaccorsi', ''),\n(84, '', '', '2014-11-19 09:37:50', '2014-11-11 16:38:18', 'jbonaccorsi', 'mgreen', 'None'),\n(17, '', '', '2014-11-11 18:39:45', '2014-11-11 16:39:10', 'jbonaccorsi', 'jbonaccorsi', ''),\n(83, 'Micheal C', '', '2014-11-11 19:05:29', '2014-11-11 16:49:58', 'jbonaccorsi', 'jbonaccorsi', ''),\n(83, 'Parker D.', 'XRY Tutorial', '2014-11-12 14:29:33', '2014-11-12 10:59:25', 'mgreen', 'mfortier', 'Returned'),\n(92, 'Parker D.', 'XRY Tutorial', '2014-11-12 14:46:43', '2014-11-12 11:00:30', 'mgreen', 'mfortier', ''),\n(116, 'Parker D.', 'XRY', '2014-11-12 14:30:10', '2014-11-12 13:58:36', 'mfortier', 'mfortier', 'Returned'),\n(84, 'Kelsey W.', 'Mobile Extraction', '2014-11-19 09:42:11', '2014-11-12 15:44:43', 'mfortier', 'mgreen', 'None'),\n(20, 'Kelsey W.', 'Mobile extraction', '2014-11-12 19:47:28', '2014-11-12 15:45:53', 'mfortier', 'mgreen', 'None'),\n(67, '', 'Joes Class', '2014-11-13 11:58:37', '2014-11-13 09:25:54', 'adible', 'adible', 'Leaving for the day'),\n(69, '', 'Joes Class', '2014-11-13 09:37:43', '2014-11-13 09:26:59', 'adible', 'adible', 'Missing parts'),\n(109, '', 'Joes Class', '2014-11-13 11:59:23', '2014-11-13 09:27:30', 'adible', 'adible', 'Leaving for the day'),\n(68, 'Shane', 'Joes Class', '2014-11-13 09:38:20', '2014-11-13 09:38:10', 'adible', '', 'Leaving for the day'),\n(67, 'Joe', 'For Joes Class', '2014-11-18 16:18:29', '2014-11-13 11:58:49', 'adible', 'jbonaccorsi', ''),\n(109, 'Joe', 'Joes Class', '2014-11-19 09:42:29', '2014-11-13 11:59:37', 'adible', 'mgreen', 'NONE'),\n(6, 'Dillon', 'Guest access to front room', '2014-11-14 17:18:48', '2014-11-14 12:45:12', 'ddeloge', 'mfortier', ''),\n(5, 'Dillon', '', '2014-11-14 15:55:46', '2014-11-14 14:18:30', 'ddeloge', 'ddeloge', ''),\n(89, 'Dillon', 'Add Items to Inventory', '2014-11-14 15:42:45', '2014-11-14 14:18:56', 'ddeloge', 'ddeloge', ''),\n(119, 'Chris A', 'Forensic Case', '2014-11-14 14:55:27', '2014-11-14 14:55:09', 'ddeloge', 'ddeloge', 
''),\n(120, 'Kyle', '', '2014-11-19 10:01:46', '2014-11-17 14:06:54', 'ddeloge', 'mgreen', 'None'),\n(5, '', 'Access Equipment Cabinet', '2014-11-17 15:55:47', '2014-11-17 14:10:23', 'ddeloge', 'ddeloge', ''),\n(118, '693587', '', '2014-11-19 10:02:01', '2014-11-18 10:59:03', 'ascallion', 'mgreen', 'None'),\n(91, '693587', '', '2014-11-19 10:10:35', '2014-11-18 11:04:03', 'ascallion', 'mgreen', 'None'),\n(71, 'Adam Sattler', '', '2014-11-19 08:25:16', '2014-11-18 15:54:08', 'jbonaccorsi', 'mgreen', 'None'),\n(17, '', 'mobile ext', '2014-11-18 20:04:08', '2014-11-18 16:10:16', 'jbonaccorsi', 'jbonaccorsi', ''),\n(9, '851339', 'Mobile ext', '2014-11-18 20:03:46', '2014-11-18 16:11:08', 'jbonaccorsi', 'jbonaccorsi', ''),\n(67, 'adam sattler', '', '2014-11-19 08:26:41', '2014-11-18 16:18:41', 'jbonaccorsi', 'mgreen', 'None'),\n(88, 'Adam Sattler', '', '2014-11-19 09:34:06', '2014-11-18 16:36:15', 'jbonaccorsi', 'mgreen', 'None'),\n(12, 'Adam Sattler', '', '2014-11-19 08:24:31', '2014-11-18 17:42:08', 'jbonaccorsi', 'mgreen', 'None'),\n(83, 'Mitch Green', 'Digital Signage Software Test ', '2014-11-19 18:48:04', '2014-11-19 11:19:26', 'mgreen', 'mgreen', 'None'),\n(134, 'Andre M.', '', '2014-11-19 16:39:52', '2014-11-19 12:31:25', 'mfortier', 'williams', 'Decommissioned'),\n(135, 'Andre M.', '', '2014-11-19 16:39:49', '2014-11-19 12:31:39', 'mfortier', 'williams', 'Decommissioned'),\n(43, 'Andre M.', '', '2014-11-19 16:39:50', '2014-11-19 12:31:53', 'mfortier', 'williams', 'Decommissioned'),\n(116, 'Parker D.', 'XRY', '2014-11-19 13:27:03', '2014-11-19 12:32:27', 'mfortier', 'mfortier', ''),\n(18, 'Parker D.', 'XRY', '2014-11-19 13:27:20', '2014-11-19 12:33:01', 'mfortier', 'mfortier', ''),\n(116, 'Parker D.', '', '2014-11-19 13:36:14', '2014-11-19 13:36:05', 'mfortier', 'mfortier', ''),\n(86, 'Parker D.', '', '2014-11-19 14:33:38', '2014-11-19 13:43:59', 'mfortier', 'mfortier', ''),\n(121, 'Kyle T.', '', '2014-11-20 09:47:06', '2014-11-19 14:09:55', 'mfortier', 'adible', ''),\n(9, 'Kelsey W.', '', '2014-11-19 18:47:47', '2014-11-19 15:28:53', 'mfortier', 'mgreen', 'None'),\n(89, 'Kyle T.', '', '2014-11-19 18:49:08', '2014-11-19 15:52:03', 'mfortier', 'mgreen', 'None'),\n(120, '693587', 'None Given', '2014-11-20 09:46:00', '2014-11-19 16:30:08', 'mgreen', 'adible', 'Joe gave them to OA to sign back in'),\n(11, 'Zach S. 
', 'Handoff', '2014-11-19 19:03:59', '2014-11-19 16:51:58', 'mgreen', 'mgreen', 'None'),\n(91, 'Jon', 'Mobile Forensics Class', '2014-11-19 18:57:45', '2014-11-19 17:43:34', 'mgreen', 'mgreen', '1=1'),\n(86, '693587', 'Case', '2014-11-19 18:49:53', '2014-11-19 17:44:10', 'mgreen', 'mgreen', 'None'),\n(117, '693587', 'Case', '2014-11-19 18:54:33', '2014-11-19 17:45:20', 'mgreen', 'mgreen', 'Charging Cable'),\n(118, '693587', 'Case', '2014-11-19 18:54:13', '2014-11-19 17:46:15', 'mgreen', 'mgreen', 'None'),\n(91, 'Jon', 'Mobile Forensics Class', '2014-11-19 18:58:25', '2014-11-19 18:58:05', 'mgreen', 'mgreen', 'Left on Jons door'),\n(121, '', '', '2014-11-20 12:34:40', '2014-11-20 09:47:12', '', 'bcampbell', 'All Done'),\n(9, 'Kayla W.', 'Mobile Extraction ', '2014-11-20 18:39:10', '2014-11-20 17:03:12', 'mgreen', 'mgreen', 'None'),\n(5, 'Dillon', 'Access Equipment Cabinet', '2014-11-21 11:58:12', '2014-11-21 11:56:38', 'ddeloge', '', ''),\n(118, 'Chris P', '', '2014-11-24 15:08:48', '2014-11-24 14:37:59', 'ddeloge', 'ddeloge', ''),\n(5, 'Dillon', 'Access Equipment Cabinet', '2014-11-24 15:53:52', '2014-11-24 14:38:34', 'ddeloge', 'ddeloge', ''),\n(5, 'Dillon', 'Access Equipment Cabinet', '2014-11-25 08:18:34', '2014-11-25 08:18:10', 'ddeloge', '', ''),\n(6, 'SHOW databases', '', '2014-11-25 10:36:55', '2014-11-25 10:36:50', 'netadmin', 'netadmin', 'testing for vulnerabilities'),\n(6, 'OR SHOW databases -- ', '', '2014-11-25 10:38:25', '2014-11-25 10:38:13', 'netadmin', 'netadmin', ''),\n(6, 'SHOW tables', '', '2014-11-25 10:39:42', '2014-11-25 10:38:41', 'netadmin', 'netadmin', ''),\n(6, 'SHOW tables;', '', '2014-11-25 10:40:38', '2014-11-25 10:40:23', 'netadmin', 'netadmin', ''),\n(121, '693587', 'Case', '2014-12-02 13:05:55', '2014-11-25 12:42:36', 'williams', 'jbonaccorsi', ''),\n(114, '693587', 'Case', '2015-04-16 14:48:18', '2014-11-25 13:24:33', 'williams', 'adible', ''),\n(91, 'Lexi S', 'Case', '2014-12-09 10:33:29', '2014-11-25 13:50:50', 'williams', 'ascallion', ''),\n(118, 'Lexi S', 'Case', '2014-12-05 15:42:38', '2014-11-25 13:56:57', 'williams', 'ddeloge', ''),\n(78, 'Chris A', 'Case', '2015-09-02 14:48:29', '2014-12-01 13:43:18', 'ddeloge', 'bcampbell', ''),\n(111, 'chapin b', 'case', '2014-12-09 14:39:37', '2014-12-02 12:11:20', 'jbonaccorsi', 'ohatalsky', 'item was recorded twice - this duplicated record was decommissioned '),\n(116, 'micheal c', '', '2014-12-02 12:50:43', '2014-12-02 12:12:09', 'jbonaccorsi', 'jbonaccorsi', ''),\n(92, 'Brent F', '', '2014-12-03 09:35:57', '2014-12-02 15:43:57', 'jbonaccorsi', 'mgreen', 'None'),\n(100, 'scott b', '', '2014-12-03 15:31:23', '2014-12-02 16:48:54', 'jbonaccorsi', 'mfortier', ''),\n(123, 'Adam S', '', '2014-12-03 08:45:36', '2014-12-02 17:12:09', 'jbonaccorsi', 'mgreen', 'None'),\n(72, 'adam s', '', '2014-12-03 08:44:53', '2014-12-02 17:14:56', 'jbonaccorsi', 'mgreen', ''),\n(84, 'Kelsey w', '', '2014-12-03 08:44:34', '2014-12-02 17:15:40', 'jbonaccorsi', 'mgreen', 'None'),\n(93, 'Austin T.', 'iOS & OSX Forensics', '2014-12-09 09:01:35', '2014-12-03 09:33:41', 'mgreen', 'ascallion', ''),\n(77, 'test', 'test', '2014-12-03 14:42:19', '2014-12-03 14:41:02', 'netadmin', 'netadmin', 'test'),\n(77, 'test', ''' test', '2014-12-03 14:44:53', '2014-12-03 14:44:37', 'netadmin', 'netadmin', 'test'),\n(77, 'test', '''', '2014-12-03 14:46:31', '2014-12-03 14:45:28', 'netadmin', 'netadmin', 'test'),\n(77, '''', 'test', '2014-12-03 14:53:15', '2014-12-03 14:47:32', 'netadmin', 'netadmin', ''''),\n(77, '''', '''', '2014-12-03 14:53:35', 
'2014-12-03 14:53:24', 'netadmin', 'netadmin', ''''),\n(77, 'Kyle T.', 'Windows 8', '2014-12-03 16:53:12', '2014-12-03 15:57:40', 'mfortier', 'mgreen', 'None'),\n(84, 'Kelsey W.', '', '2014-12-09 08:58:03', '2014-12-03 16:04:31', 'mfortier', 'ascallion', ''),\n(11, 'Zach S.', 'Handoff', '2014-12-03 19:44:02', '2014-12-03 16:26:55', 'mgreen', 'mgreen', 'None'),\n(116, 'Zach S.', 'Handoff', '2014-12-03 19:39:57', '2014-12-03 18:57:06', 'mgreen', 'mgreen', 'None'),\n(90, 'Cooper D.', 'Data Gen', '2014-12-09 10:48:31', '2014-12-03 18:57:28', 'mgreen', 'ascallion', ''),\n(73, 'Austin', 'Project', '2014-12-05 13:22:27', '2014-12-04 13:06:00', 'bcampbell', 'ddeloge', ''),\n(66, 'Austin', 'Project', '2014-12-09 09:02:08', '2014-12-04 13:06:24', 'bcampbell', 'ascallion', ''),\n(29, 'Chapin', '', '2015-01-13 11:25:34', '2014-12-04 15:23:37', 'bcampbell', 'jallibone', ''),\n(28, 'Chapin', '', '2015-01-13 11:26:13', '2014-12-04 15:23:59', 'bcampbell', 'jallibone', ''),\n(123, 'David P', 'Data Recovery', '2014-12-06 14:33:46', '2014-12-06 14:31:17', 'twright', '', ''),\n(124, 'Chapin Bryce', 'Case', '2015-04-20 14:05:00', '2014-12-09 14:34:55', 'ohatalsky', 'bcampbell', ''),\n(123, 'Kyle T', 'imaging', '2014-12-09 19:07:50', '2014-12-09 14:45:45', 'ohatalsky', 'OHATALSKY', ''),\n(118, 'Kyle T', 'case', '2015-01-13 09:52:08', '2014-12-09 14:47:42', 'ohatalsky', 'jallibone', 'Net Admin Test'),\n(117, 'Kyle T', 'Case', '2015-01-13 13:08:50', '2014-12-09 14:48:32', 'ohatalsky', 'twright', ''),\n(119, 'Chapin Bryce', 'Case', '2015-01-16 11:04:51', '2014-12-09 14:48:59', 'ohatalsky', 'ddeloge', ''),\n(121, 'Kyle T', 'Case', '2014-12-10 17:17:16', '2014-12-09 15:00:27', 'ohatalsky', 'mgreen', 'None'),\n(88, 'Adam', 'imaging', '2015-01-13 11:54:12', '2014-12-09 15:12:44', 'ohatalsky', 'jallibone', 'The the equipment drawer'),\n(20, 'Kelsey ', '', '2014-12-09 19:08:41', '2014-12-09 15:41:50', 'ohatalsky', 'OHATALSKY', ''),\n(120, 'Kelsey ', 'Data Recovery', '2014-12-09 19:08:13', '2014-12-09 18:06:32', 'ohatalsky', 'OHATALSKY', ''),\n(130, 'Kyle T.', 'Investigation', '2015-01-13 13:19:42', '2014-12-10 15:36:32', 'mfortier', 'twright', ''),\n(142, 'Andre M', '', '2015-01-20 14:28:48', '2015-01-12 13:29:01', 'bcampbell', 'twright', ''),\n(144, 'Andre M', '', '2015-01-23 16:21:54', '2015-01-12 13:29:18', 'bcampbell', 'ddeloge', ''),\n(143, 'Andre M', '', '2015-01-23 09:52:54', '2015-01-12 13:29:31', 'bcampbell', 'ddeloge', ''),\n(43, 'Andre M', '', '2015-01-26 18:11:11', '2015-01-12 13:30:02', 'bcampbell', 'adible', 'was not signed back in, in use for FOR 490 project now'),\n(126, 'Andre M', '', '2015-01-19 15:41:51', '2015-01-12 13:30:40', 'bcampbell', 'bcampbell', ''),\n(5, '693587', '', '2015-01-13 10:37:24', '2015-01-12 14:09:29', 'bcampbell', 'williams', 'kyle has'),\n(5, '693587', 'using', '2015-01-14 14:12:29', '2015-01-13 10:37:40', 'williams', 'jbonaccorsi', ''),\n(29, 'Chapin', 'Case 40', '2015-04-16 14:47:12', '2015-01-13 11:25:48', 'jallibone', 'adible', ''),\n(28, 'Chapin', 'Case 40', '2015-04-16 14:47:35', '2015-01-13 11:26:23', 'jallibone', 'adible', ''),\n(134, 'Chapin', 'Case 40', '2015-01-28 09:18:21', '2015-01-13 11:26:50', 'jallibone', 'jdiorio', 'Decommisioned'),\n(5, 'jake bonaccorsi', '', '2015-01-19 10:35:41', '2015-01-14 14:12:38', 'jbonaccorsi', 'ascallion', ''),\n(9, 'chris p', 'setting up project ', '2015-01-15 16:49:45', '2015-01-15 15:55:12', 'adible', 'tchermely', 'Turning iPhone Back In'),\n(1, 'chris p', 'setting up project ', '2015-01-19 10:36:24', '2015-01-15 15:55:52', 'adible', 
'ascallion', 'Removing sign ins for Joe'),\n(9, 'Chris', 'For project', '2015-01-19 10:51:32', '2015-01-15 16:56:31', 'tchermely', 'ascallion', ''),\n(10, 'Chris P', 'Mobile Extraction', '2015-01-19 10:52:13', '2015-01-16 08:36:32', 'ddeloge', 'ascallion', ''),\n(93, '', '', '2015-01-19 10:51:03', '2015-01-19 09:09:52', 'ascallion', 'ascallion', ''),\n(123, '693587', '', '2015-03-17 10:29:28', '2015-01-19 15:15:36', 'bcampbell', 'jallibone', 'Turned in'),\n(17, '', 'Mobile Extraction Project', '2015-01-19 18:00:55', '2015-01-19 16:37:04', 'adible', 'adible', ''),\n(11, '851339', 'Mobile Extraction Project', '2015-01-19 18:24:50', '2015-01-19 16:37:43', 'adible', 'adible', ''),\n(141, '', 'Mobile Extraction Project', '2015-01-19 18:03:22', '2015-01-19 16:38:28', 'adible', 'adible', ''),\n(164, 'Alex', 'Windows 10', '2015-01-19 20:00:57', '2015-01-19 16:46:39', 'adible', 'adible', ''),\n(161, 'Alex', 'Windows 10', '2015-01-19 20:01:14', '2015-01-19 16:49:40', 'adible', 'adible', ''),\n(102, 'Kevin D', '', '2015-01-20 16:40:26', '2015-01-20 13:41:56', 'twright', 'twright', ''),\n(10, 'Chris P', '', '2015-01-20 18:36:29', '2015-01-20 15:41:45', 'twright', 'jbonaccorsi', ''),\n(9, 'Chris P', 'Project', '2015-01-20 18:35:28', '2015-01-20 15:42:08', 'twright', 'jbonaccorsi', ''),\n(163, 'Chris P', 'Project', '2015-01-20 18:41:26', '2015-01-20 16:42:19', 'twright', 'jbonaccorsi', ''),\n(162, 'Chris P', '', '2015-01-20 18:42:54', '2015-01-20 18:42:33', 'jbonaccorsi', 'jbonaccorsi', ''),\n(9, 'Chris P', 'Mobile Project', '2015-01-21 10:27:18', '2015-01-21 09:06:25', 'jdiorio', 'jdiorio', 'Mobile Project'),\n(162, 'Chris P', 'Mobile Project', '2015-01-21 10:28:07', '2015-01-21 09:07:18', 'jdiorio', 'jdiorio', 'Mobile Project'),\n(163, 'Chris P', 'Mobile Project', '2015-01-21 10:28:54', '2015-01-21 09:07:46', 'jdiorio', 'jdiorio', 'Mobile Project'),\n(93, 'Austin', 'project', '2015-01-21 14:01:19', '2015-01-21 11:50:19', 'williams', 'jbonaccorsi', ''),\n(6, 'Adam Sattler', '', '2015-01-26 10:39:44', '2015-01-21 13:30:24', 'jbonaccorsi', 'ascallion', ''),\n(9, 'Nick M', '', '2015-01-22 12:00:41', '2015-01-22 08:31:08', 'ascallion', 'adible', 'Passcode 0001'),\n(162, 'Nick M', '', '2015-01-22 10:17:48', '2015-01-22 08:31:40', 'ascallion', 'ascallion', ''),\n(163, 'Nick M', '', '2015-01-22 12:21:12', '2015-01-22 10:18:10', 'ascallion', 'adible', ''),\n(90, 'Mitch Green', 'EnCase Project', '2015-01-22 16:42:55', '2015-01-22 12:35:03', 'adible', 'tchermely', 'turned in'),\n(9, 'Chris pazden', 'Mobile Device Apps', '2015-01-22 18:23:01', '2015-01-22 15:42:06', 'adible', 'tchermely', 'signing in phone'),\n(91, 'Chris', 'for boot drive', '2015-01-22 16:58:18', '2015-01-22 16:09:06', 'tchermely', 'tchermely', 'signed in'),\n(118, 'Chris', 'Mobile device project', '2015-01-22 18:24:15', '2015-01-22 16:52:03', 'tchermely', 'tchermely', 'Signing in usb Dongle'),\n(91, 'Chris', 'Needs again for project', '2015-01-22 18:24:49', '2015-01-22 17:00:30', 'tchermely', 'tchermely', 'signing flash drive in again'),\n(162, 'Chris', 'Mobile device project', '2015-01-22 18:23:40', '2015-01-22 17:20:58', 'tchermely', 'tchermely', 'Signing in pw Cracker'),\n(143, 'Matt Baptiste', '', '2015-01-23 15:12:07', '2015-01-23 09:53:02', 'ddeloge', 'ascallion', ''),\n(90, 'Chris A', '', '2015-01-23 14:16:24', '2015-01-23 12:50:28', 'ascallion', 'ascallion', ''),\n(142, 'Kyle T', '', '2015-01-23 15:30:40', '2015-01-23 14:05:30', 'ascallion', 'ascallion', ''),\n(144, '886779', 'FOR 490', '2015-01-23 18:09:52', '2015-01-23 16:22:00', 
'ddeloge', 'ddeloge', ''),\n(93, 'Shane C', '', '2015-01-30 13:44:50', '2015-01-26 09:22:27', 'ascallion', 'jdiorio', ''),\n(92, 'John R', '', '2015-01-26 14:17:51', '2015-01-26 10:41:37', 'ascallion', 'bcampbell', ''),\n(105, 'Tyler P', 'Presentation Work', '2015-02-17 16:38:41', '2015-01-26 11:46:52', 'ascallion', 'twright', ''),\n(118, 'Scott B', '', '2015-01-26 13:41:21', '2015-01-26 12:00:37', 'bcampbell', 'bcampbell', ''),\n(126, 'Chris P', '', '2015-01-30 10:51:08', '2015-01-26 12:43:02', 'bcampbell', 'ddeloge', ''),\n(161, 'Alex', 'Windows 10', '2015-01-26 19:59:09', '2015-01-26 16:13:42', 'adible', 'adible', ''),\n(164, 'Alex', 'Windows 10', '2015-01-26 19:58:47', '2015-01-26 16:14:08', 'adible', 'adible', ''),\n(87, 'Alex', 'Windows 10', '2015-01-26 19:59:29', '2015-01-26 17:03:28', 'adible', 'adible', ''),\n(92, 'Alex', 'Windows 10', '2015-01-26 19:59:52', '2015-01-26 17:03:46', 'adible', 'adible', ''),\n(142, 'Kyle T', 'FOR 490', '2015-01-26 19:07:26', '2015-01-26 17:21:38', 'adible', 'adible', ''),\n(43, '', 'FOR 490', '2015-01-27 09:08:05', '2015-01-26 18:11:40', 'adible', 'jallibone', ''),\n(101, 'Tyler W', 'Personal', '2015-01-26 19:58:20', '2015-01-26 18:15:01', 'adible', 'adible', ''),\n(162, 'Nick Micallaf', 'iPhone Crack', '2015-01-27 10:26:16', '2015-01-27 08:21:56', 'jallibone', 'jallibone', 'iPhone Crack'),\n(9, 'Nick Micallaf', 'iPhone Crack', '2015-01-27 10:27:04', '2015-01-27 08:22:47', 'jallibone', 'jallibone', 'iPhone Crack'),\n(90, 'Mitch Green', 'Wiki', '2015-01-27 12:20:03', '2015-01-27 10:35:16', 'jallibone', 'twright', ''),\n(162, 'Nick Micallaf', 'iPhone Crack', '2015-01-27 11:50:29', '2015-01-27 10:35:41', 'jallibone', 'jallibone', ''),\n(8, 'Nick', 'Micallaf', '2015-01-27 11:08:55', '2015-01-27 10:37:11', 'jallibone', 'jallibone', 'Signed back from Nick'),\n(11, 'Nick Micallaf', 'iPhone Crack', '2015-01-27 11:50:04', '2015-01-27 11:10:46', 'jallibone', 'jallibone', ''),\n(19, 'Kelsey', 'Project', '2015-01-27 15:51:06', '2015-01-27 13:38:28', 'twright', 'twright', ''),\n(20, 'Kayla W', 'Project', '2015-01-27 15:51:35', '2015-01-27 13:39:10', 'twright', 'twright', ''),\n(9, 'Chris P', 'Project', '2015-01-28 08:56:02', '2015-01-27 16:08:10', 'twright', 'jdiorio', 'Project'),\n(162, 'Chris P', 'Project', '2015-01-28 10:50:28', '2015-01-27 16:08:32', 'twright', 'jdiorio', ''),\n(17, 'Chris P', 'Project', '2015-01-28 08:55:06', '2015-01-27 17:10:42', 'twright', 'jdiorio', 'Project'),\n(11, 'Chris P', 'Project', '2015-01-28 08:55:33', '2015-01-27 17:11:10', 'twright', 'jdiorio', 'Project'),\n(139, 'Chris P', 'Project', '2015-01-28 10:49:44', '2015-01-27 17:11:32', 'twright', 'jdiorio', 'Project'),\n(118, 'Chris P', 'Project', '2015-01-28 11:58:23', '2015-01-28 09:02:38', 'jdiorio', 'jdiorio', ''),\n(143, 'Matt Baptist', '', '2015-01-28 15:03:41', '2015-01-28 10:52:38', 'jdiorio', 'williams', 'Internship'),\n(92, '', 'Data Generation', '2015-01-29 08:12:25', '2015-01-28 12:07:32', 'jdiorio', 'ascallion', ''),\n(149, 'Lexi S', 'Capstone', '2015-02-12 10:36:22', '2015-01-28 14:27:10', 'williams', 'williams', 'capstone'),\n(166, 'Lexi S', 'Capstone', '2015-09-02 14:50:56', '2015-01-28 14:33:38', 'williams', 'bcampbell', ''),\n(165, 'Lexi S', 'Capstone', '2015-09-02 14:51:35', '2015-01-28 14:33:54', 'williams', 'bcampbell', ''),\n(139, 'Nick Micallef', '', '2015-01-29 12:10:22', '2015-01-29 08:19:41', 'ascallion', 'adible', ''),\n(162, 'Nick Micallef', '', '2015-01-29 11:59:54', '2015-01-29 08:20:12', 'ascallion', 'ascallion', ''),\n(148, '', 'Capstone', '2015-02-04 
13:17:21', '2015-01-29 13:12:16', 'adible', 'williams', 'OUT capstone'),\n(90, 'Mitch Green', 'EnCase', '2015-01-29 16:40:03', '2015-01-29 13:16:11', 'adible', 'tchermely', 'DO NOT USE'),\n(139, 'chris p', 'project', '2015-02-05 12:03:15', '2015-01-29 15:46:07', 'adible', 'adible', ''),\n(143, 'Matt Baptiste', 'FOR 490', '2015-01-30 14:57:04', '2015-01-30 10:01:00', 'ddeloge', 'jdiorio', 'n/a'),\n(126, 'Chris P', '', '2015-01-30 14:49:25', '2015-01-30 12:59:11', 'jdiorio', 'jdiorio', ''),\n(93, 'Chris P', 'Project', '2015-01-30 14:58:37', '2015-01-30 13:45:31', 'jdiorio', 'jdiorio', 'n/a'),\n(86, 'Chapin', '', '2015-01-30 18:37:17', '2015-01-30 17:33:25', 'ddeloge', 'ddeloge', ''),\n(100, 'Matt F', '', '2015-02-02 15:58:53', '2015-02-02 14:00:56', 'bcampbell', 'bcampbell', ''),\n(90, 'Mitch Green', 'User Experience/EnCase', '2015-02-03 12:08:22', '2015-02-03 08:49:16', 'mgreen', 'mgreen', 'DO NOT USE'),\n(16, 'Nick MiCallaf', 'Kik Messanger', '2015-02-03 11:58:19', '2015-02-03 08:58:40', 'jallibone', 'jallibone', ''),\n(9, 'Nick MiCallaf', 'Kik Messanger', '2015-02-03 11:57:30', '2015-02-03 08:59:18', 'jallibone', 'jallibone', ''),\n(4, 'Nick Micallaf', 'Kik Messanger', '2015-02-03 11:57:11', '2015-02-03 09:00:02', 'jallibone', 'jallibone', ''),\n(117, '', '', '2015-02-03 10:09:41', '2015-02-03 10:09:32', 'jallibone', 'jallibone', ''),\n(117, 'Nick Micallaf', 'Kik Device Apps', '2015-02-03 11:08:47', '2015-02-03 10:09:46', 'jallibone', 'jallibone', ''),\n(11, 'austin truax', 'Mac OS X project', '2015-02-03 12:36:47', '2015-02-03 10:23:43', 'jallibone', 'twright', ''),\n(92, 'Autin Truax', 'Mac OS X project', '2015-02-03 12:35:25', '2015-02-03 10:24:19', 'jallibone', 'twright', ''),\n(92, 'Kevin D', '', '2015-02-04 10:57:30', '2015-02-03 13:19:37', 'twright', 'williams', 'proejct'),\n(6, 'Chris A', 'access', '2015-02-03 18:51:00', '2015-02-03 15:52:19', 'twright', 'jbonaccorsi', ''),\n(118, 'Chris P', 'Project', '2015-02-03 17:33:23', '2015-02-03 16:11:39', 'twright', 'jbonaccorsi', ''),\n(91, 'Chris A', '', '2015-02-05 18:39:43', '2015-02-03 18:11:07', 'jbonaccorsi', 'tchermely', ''),\n(10, 'Chris P ', 'Project', '2015-02-04 10:56:56', '2015-02-04 08:51:56', 'jdiorio', 'williams', 'done'),\n(9, 'Chris P', 'Project', '2015-02-04 10:56:25', '2015-02-04 08:52:30', 'jdiorio', 'williams', 'done'),\n(143, 'Matt Baptiste', '', '2015-02-04 15:08:01', '2015-02-04 10:06:03', 'jdiorio', 'jbonaccorsi', ''),\n(118, 'Chris P', 'Project', '2015-02-04 13:15:03', '2015-02-04 10:20:52', 'jdiorio', 'jbonaccorsi', ''),\n(92, 'Kevin D', 'Projects', '2015-02-04 14:03:06', '2015-02-04 10:57:49', 'williams', 'williams', 'testing'),\n(126, 'Chris P', 'Out for FOR490', '2015-02-06 12:38:19', '2015-02-04 11:13:00', 'williams', 'jdiorio', ''),\n(148, 'Kevin D', 'Out for Capstone', '2015-02-12 10:37:08', '2015-02-04 13:18:01', 'williams', 'williams', 'Capstone'),\n(92, 'Kevin D', 'Project', '2015-02-05 13:53:14', '2015-02-04 14:03:35', 'williams', 'adible', ''),\n(6, 'Adam Sattler', '', '2015-02-04 15:09:30', '2015-02-04 15:09:19', 'jbonaccorsi', '', ''),\n(8, 'Nick Mi', '', '2015-02-05 08:24:37', '2015-02-05 08:23:10', 'ascallion', 'ascallion', ''),\n(86, 'Nick Mi', '', '2015-02-05 12:03:04', '2015-02-05 08:23:43', 'ascallion', 'adible', ''),\n(162, 'Nick Mi', '', '2015-02-05 12:02:55', '2015-02-05 08:25:14', 'ascallion', 'adible', ''),\n(89, 'Nick Mi', '', '2015-02-05 12:04:01', '2015-02-05 08:26:18', 'ascallion', 'adible', ''),\n(167, '', 'Out- Capstone, long-term', '2015-04-23 15:20:15', '2015-02-05 13:28:31', 
'adible', 'adible', ''),\n(92, 'Kevin D', 'F-Response', '2015-02-05 16:58:51', '2015-02-05 13:53:26', 'adible', 'tchermely', ''),\n(90, 'Mitch', 'User Experience/EnCase', '2015-02-05 14:31:41', '2015-02-05 14:31:28', 'mgreen', '', ''),\n(90, 'Mitch', 'User Experience/EnCase', '2015-02-10 12:03:54', '2015-02-05 14:31:28', 'mgreen', 'jallibone', ''),\n(93, 'Chris A', '', '2015-02-05 18:39:22', '2015-02-05 15:41:24', 'adible', 'tchermely', ''),\n(160, 'Cameron', '', '2015-02-11 16:14:54', '2015-02-05 17:00:21', 'tchermely', 'ascallion', ''),\n(10, 'Chris P', '', '2015-02-05 17:27:23', '2015-02-05 17:02:18', 'tchermely', 'tchermely', ''),\n(9, 'Andrew', 'Needs charger', '2015-02-05 18:58:39', '2015-02-05 17:16:05', 'tchermely', 'tchermely', ''),\n(92, 'Tyler W', 'file transfers', '2015-02-05 18:13:30', '2015-02-05 18:01:06', 'tchermely', 'tchermely', ''),\n(83, 'Hunter', '', '2015-02-09 11:33:12', '2015-02-06 08:46:17', 'ddeloge', 'ascallion', ''),\n(143, 'Matt', 'FOR 490', '2015-02-06 15:04:12', '2015-02-06 10:00:17', 'ddeloge', 'jdiorio', 'In for FOR490'),\n(126, 'Chris P ', '', '2015-02-06 14:48:26', '2015-02-06 12:38:28', 'jdiorio', 'jdiorio', 'in for FOR490'),\n(142, 'Ykle T', '', '2015-02-07 15:58:45', '2015-02-06 14:03:01', 'jdiorio', 'bcampbell', ''),\n(119, 'Kyle', '', '2015-02-06 14:20:21', '2015-02-06 14:20:13', 'jdiorio', 'jdiorio', ''),\n(119, 'Kyle', 'out capstone', '2015-02-06 14:20:45', '2015-02-06 14:20:28', 'jdiorio', 'jdiorio', 'in capstone'),\n(119, 'Kyle', 'Long Term', '2015-02-07 15:56:55', '2015-02-06 14:21:35', 'jdiorio', 'bcampbell', 'All Done.'),\n(6, 'Adam Sattler', '', '2015-02-09 11:32:34', '2015-02-09 09:46:57', 'ascallion', 'ascallion', ''),\n(147, 'Hunter G.', '', '2015-02-09 11:32:53', '2015-02-09 09:50:50', 'ascallion', 'ascallion', ''),\n(5, 'Alex Caron', 'Out longterm', '2015-02-10 15:29:46', '2015-02-09 12:33:08', 'bcampbell', 'twright', ''),\n(126, 'Chris P', 'Out for FOR490', '2015-02-09 15:04:54', '2015-02-09 13:12:38', 'bcampbell', 'bcampbell', ''),\n(83, '693587', 'remote digital forensics law enforcement project, long term.', '2015-02-09 18:45:46', '2015-02-09 13:36:41', 'bcampbell', 'adible', 'No longer in use for project'),\n(40, 'Andrew S.', 'Training Video', '2015-02-09 17:11:24', '2015-02-09 16:15:55', 'adible', 'adible', ''),\n(89, 'Andrew S.', 'Training Video', '2015-02-10 13:50:01', '2015-02-09 16:16:22', 'adible', 'twright', ''),\n(93, 'Alex P', '', '2015-02-09 19:58:25', '2015-02-09 17:35:33', 'adible', 'adible', ''),\n(91, 'Tyler', 'Project', '2015-02-09 17:58:02', '2015-02-09 17:47:24', 'adible', 'adible', ''),\n(140, 'Andrew S.', 'Video', '2015-02-09 19:58:00', '2015-02-09 17:47:52', 'adible', 'adible', ''),\n(92, 'Alex P', '', '2015-02-09 19:57:33', '2015-02-09 18:49:12', 'adible', 'adible', ''),\n(90, 'Mitch Green', 'Project', '2015-02-10 13:50:36', '2015-02-10 10:35:22', 'jallibone', 'twright', ''),\n(104, 'Nick MiCallaf', '', '2015-02-10 12:02:57', '2015-02-10 11:33:03', 'jallibone', 'jallibone', ''),\n(118, 'Samantha K.', 'Capstone', '2015-02-10 17:17:01', '2015-02-10 15:40:04', 'twright', 'jbonaccorsi', ''),\n(104, 'Alex M.', 'Video Editing', '2015-02-10 19:42:24', '2015-02-10 16:30:04', 'twright', 'jbonaccorsi', ''),\n(118, 'Mary', 'Project', '2015-02-12 17:05:41', '2015-02-11 08:17:34', 'jdiorio', 'tchermely', 'Was not signed in'),\n(9, 'Chris P', 'Project', '2015-02-11 10:54:28', '2015-02-11 08:57:27', 'jdiorio', 'jdiorio', 'In from project'),\n(9, 'Chris P', 'Project', '2015-02-12 10:31:49', '2015-02-11 08:57:27', 'jdiorio', 
'williams', 'In iOS HAndoff'),\n(9, 'Chris P', 'Project', '2015-02-12 10:32:27', '2015-02-11 08:57:27', 'jdiorio', 'williams', 'IN-iOS HAndoff'),\n(10, 'Chris P', 'Project', '2015-02-11 10:55:43', '2015-02-11 08:58:07', 'jdiorio', 'jdiorio', 'In from Project All set'),\n(98, 'Chris P', 'Project', '2015-02-11 11:02:29', '2015-02-11 09:01:32', 'jdiorio', 'jdiorio', 'Project'),\n(143, 'Matt B', 'FOR 490', '2015-02-13 10:13:35', '2015-02-11 09:52:53', 'jdiorio', 'ddeloge', ''),\n(126, 'Chris P', 'Out for FOR490', '2015-02-13 08:22:04', '2015-02-11 11:01:41', 'jdiorio', 'ddeloge', ''),\n(140, 'Austin T', 'Project', '2015-02-18 17:52:29', '2015-02-11 11:36:20', 'jdiorio', 'ascallion', ''),\n(89, 'Cameron M', 'Video Training', '2015-02-12 10:35:31', '2015-02-11 16:14:17', 'ascallion', 'williams', 'IN-Project FOR190'),\n(160, 'Kyle T', 'DR-021115-49', '2015-03-23 15:42:58', '2015-02-11 16:17:52', 'ascallion', 'bcampbell', ''),\n(9, 'Adam Sattler', 'IOS Handoff Project', '2015-02-12 10:33:16', '2015-02-11 16:27:42', 'ascallion', 'williams', 'IN-iOS HAndoff'),\n(86, 'Cameron M.', 'Transfer Video Files', '2015-02-12 10:35:05', '2015-02-11 16:42:48', 'ascallion', 'williams', 'In-project'),\n(43, 'Seth J', 'Internship', '2015-02-12 10:33:51', '2015-02-11 18:08:00', 'ascallion', 'williams', 'In - Internship'),\n(144, 'Scott B.', 'Internship', '2015-02-19 12:31:10', '2015-02-12 10:27:03', 'ascallion', 'ascallion', ''),\n(149, 'Lexi S', 'Out Capstone Longterm', '2015-09-02 14:51:52', '2015-02-12 10:36:39', 'williams', 'bcampbell', ''),\n(148, 'Kevin D', 'Out Capstone Longterm', '2015-04-23 15:21:18', '2015-02-12 10:37:25', 'williams', 'adible', ''),\n(86, 'Nick Micallef', 'Transfering audio data', '2015-02-17 09:56:36', '2015-02-12 11:55:41', 'ascallion', 'jallibone', ''),\n(91, 'Kevin', '', '2015-02-12 16:45:29', '2015-02-12 16:28:28', 'tchermely', 'tchermely', ''),\n(89, 'Elizabeth', '', '2015-02-12 16:34:33', '2015-02-12 16:34:21', 'tchermely', 'tchermely', ''),\n(89, 'Elizabeth', '', '2015-02-12 17:03:29', '2015-02-12 16:46:30', 'tchermely', 'tchermely', ''),\n(118, 'Chris P', '', '2015-02-12 18:47:05', '2015-02-12 17:05:59', 'tchermely', 'tchermely', ''),\n(143, 'Matt B', 'FOR 490', '2015-02-13 15:34:09', '2015-02-13 10:13:45', 'ddeloge', 'jdiorio', ''),\n(119, 'Matt B', 'Project', '2015-02-13 15:34:35', '2015-02-13 12:19:15', 'jdiorio', 'jdiorio', 'project'),\n(126, 'CHRIS P', 'For FOR490', '2015-02-13 14:15:34', '2015-02-13 12:47:55', 'jdiorio', 'jdiorio', 'in from for490'),\n(156, 'Parker D', '', '2015-02-16 13:28:21', '2015-02-16 12:50:50', 'bcampbell', 'bcampbell', ''),\n(89, '', 'Filming', '2015-02-16 17:30:11', '2015-02-16 15:57:05', 'bcampbell', 'adible', ''),\n(121, '', 'Testing Prototype box', '2015-02-16 20:13:12', '2015-02-16 18:58:53', 'adible', 'adible', ''),\n(142, 'Kyle', '', '2015-02-16 19:37:54', '2015-02-16 19:34:41', 'adible', 'adible', ''),\n(121, 'John', 'Lazarus', '2015-02-17 11:54:12', '2015-02-17 09:33:24', 'jallibone', 'jallibone', ''),\n(90, 'Mitch Green', 'Wiki', '2015-02-17 13:20:11', '2015-02-17 09:45:57', 'jallibone', 'twright', ''),\n(86, 'Nick MiCallaf', 'iPhone forensics', '2015-02-17 11:14:24', '2015-02-17 09:57:04', 'jallibone', 'jallibone', ''),\n(93, 'Zach Smith', '', '2015-02-17 11:54:38', '2015-02-17 10:09:31', 'jallibone', 'jallibone', ''),\n(118, 'Chris P', 'Project', '2015-02-17 19:19:55', '2015-02-17 15:51:23', 'twright', 'jbonaccorsi', ''),\n(118, 'Chris P', 'Project', '2015-02-24 17:13:31', '2015-02-17 15:51:23', 'twright', 'twright', ''),\n(118, 'Chris 
P', 'Project', '2015-02-24 17:13:31', '2015-02-17 15:51:23', 'twright', 'twright', ''),\n(118, 'Chris P', 'Project', '2015-02-24 17:13:50', '2015-02-17 15:51:23', 'twright', 'twright', ''),\n(118, 'Chris P', 'Project', '2015-02-24 17:14:12', '2015-02-17 15:51:23', 'twright', 'twright', ''),\n(89, 'Elizabeth H', '', '2015-02-17 17:46:00', '2015-02-17 17:30:40', 'jbonaccorsi', 'jbonaccorsi', ''),\n(86, 'liz H', '', '2015-02-17 17:56:55', '2015-02-17 17:56:40', 'jbonaccorsi', 'jbonaccorsi', ''),\n(118, 'Mary R', 'Project', '2015-02-24 17:14:59', '2015-02-18 10:03:49', 'jdiorio', 'twright', ''),\n(118, 'Mary R', 'Project', '2015-02-24 18:20:28', '2015-02-18 10:03:49', 'jdiorio', 'jbonaccorsi', ''),\n(118, 'Mary R', 'Project', '2015-02-24 19:04:33', '2015-02-18 10:03:49', 'jdiorio', 'jbonaccorsi', ''),\n(118, 'Mary R', 'Project', '2015-02-24 19:04:33', '2015-02-18 10:03:49', 'jdiorio', 'jbonaccorsi', ''),\n(143, 'Matt B', 'FOR 490', '2015-02-18 15:39:11', '2015-02-18 10:09:55', 'jdiorio', 'jbonaccorsi', ''),\n(126, 'Chris P', 'FOR 490', '2015-02-18 12:47:21', '2015-02-18 10:59:10', 'jdiorio', 'williams', 'In: Intern'),\n(126, 'Chris P', 'FOR 490', '2015-02-25 11:07:15', '2015-02-18 10:59:10', 'jdiorio', 'jdiorio', 'internship'),\n(9, 'Austin T', 'Out: LCDI Handoff', '2015-02-18 13:46:34', '2015-02-18 11:54:23', 'williams', 'williams', 'In: Handoff'),\n(150, 'Shane C', 'Out: Mobile Pro', '2015-02-19 14:44:50', '2015-02-18 12:32:00', 'williams', 'adible', ''),\n(88, 'Adam S', 'Out: LCDI Handoff', '2015-02-18 15:39:36', '2015-02-18 13:41:54', 'williams', 'jbonaccorsi', ''),\n(9, 'Adam S', 'Out: LCDI Handoff', '2015-02-18 15:39:58', '2015-02-18 13:46:55', 'williams', 'jbonaccorsi', ''),\n(121, '', '', '2015-02-18 18:25:25', '2015-02-18 16:17:00', 'jbonaccorsi', 'ascallion', ''),\n(140, 'Kiosk', 'Will be placed in kiosk semi-permanately', '2015-03-17 11:09:08', '2015-02-18 17:52:33', 'ascallion', 'jallibone', 'In'),\n(86, '', 'Video File Transfer', '2015-02-18 19:12:51', '2015-02-18 17:58:48', 'ascallion', 'ascallion', ''),\n(162, 'Nick Micallef', 'IP Box work', '2015-02-19 12:08:02', '2015-02-19 09:11:29', 'ascallion', 'ascallion', ''),\n(9, 'Nick Micallef', 'IP Box Work', '2015-02-19 12:07:26', '2015-02-19 09:17:33', 'ascallion', 'ascallion', ''),\n(86, 'Nick Micallef', 'File Transfer', '2015-02-19 12:08:26', '2015-02-19 09:37:00', 'ascallion', 'ascallion', ''),\n(150, 'Kevin D', 'project', '2015-02-20 08:58:44', '2015-02-19 14:46:44', 'adible', 'williams', 'project'),\n(169, 'Chris', 'Mobile memory analysis', '2015-02-20 16:17:40', '2015-02-19 17:38:05', 'tchermely', 'ascallion', ''),\n(143, 'Matt B', 'FOR 490', '2015-02-20 15:27:39', '2015-02-20 10:11:54', 'williams', 'ascallion', ''),\n(119, 'Matt B', 'FOR 490', '2015-02-20 15:27:19', '2015-02-20 10:20:06', 'williams', 'ascallion', ''),\n(147, 'Chris A', '', '2015-03-20 14:42:50', '2015-02-20 13:44:03', 'ascallion', 'jdiorio', ''),\n(142, 'Kyle T', '', '2015-02-21 14:43:31', '2015-02-21 14:43:01', 'bcampbell', 'bcampbell', ''),\n(150, '', '', '2015-02-23 11:05:35', '2015-02-23 08:54:36', 'ascallion', 'ascallion', ''),\n(130, '886779', 'Internship-Long Term', '2015-02-26 12:11:30', '2015-02-23 10:09:56', 'ascallion', 'ascallion', ''),\n(126, 'Chris P', 'internship', '2015-02-25 11:08:10', '2015-02-23 13:24:34', 'williams', 'jdiorio', ''),\n(20, '851339', 'Tutorial for Mobile Extraction', '2015-02-23 18:18:31', '2015-02-23 16:24:50', 'adible', 'adible', ''),\n(9, 'Nick MiCallaf', 'IP BOX', '2015-02-24 11:02:48', '2015-02-24 09:10:25', 
'jallibone', 'jallibone', ''),\n(150, 'Kevin D', 'Mobile Forensics', '2015-02-24 16:10:57', '2015-02-24 12:15:44', 'twright', 'twright', ''),\n(20, 'Kayla W', 'Mobile Extraction', '2015-02-24 16:22:49', '2015-02-24 13:52:52', 'twright', 'twright', ''),\n(169, 'Chris A', 'Google Time', '2015-02-24 19:06:27', '2015-02-24 15:55:12', 'twright', 'jbonaccorsi', ''),\n(10, 'Chris P', 'Project', '2015-02-24 18:41:14', '2015-02-24 16:15:38', 'twright', 'jbonaccorsi', ''),\n(117, 'Samantha', 'Capstone', '2015-02-24 18:20:02', '2015-02-24 16:34:55', 'twright', 'jbonaccorsi', ''),\n(118, 'Samantha', 'Capstone', '2015-02-24 19:04:57', '2015-02-24 17:15:10', 'twright', 'jbonaccorsi', ''),\n(16, 'Chris A', '', '2015-02-24 19:04:16', '2015-02-24 18:45:01', 'jbonaccorsi', 'jbonaccorsi', ''),\n(145, 'Chris P', 'Project', '2015-02-25 11:58:30', '2015-02-25 09:06:59', 'jdiorio', 'jdiorio', ''),\n(146, 'Chris P', 'Project', '2015-02-25 11:59:06', '2015-02-25 09:07:28', 'jdiorio', 'jdiorio', ''),\n(143, 'Matt B', 'For FOR490', '2015-02-25 15:24:24', '2015-02-25 10:12:17', 'jdiorio', 'jbonaccorsi', ''),\n(126, 'Chris P', 'For FOR490', '2015-02-25 13:29:12', '2015-02-25 11:08:23', 'jdiorio', 'jbonaccorsi', ''),\n(77, 'Austin', 'Project', '2015-03-03 12:53:39', '2015-02-25 11:47:50', 'jdiorio', 'twright', ''),\n(119, '886779', '', '2015-02-26 12:11:15', '2015-02-25 16:41:50', 'jbonaccorsi', 'ascallion', ''),\n(4, 'Nick Micallef', '', '2015-02-26 12:23:15', '2015-02-26 10:25:32', 'ascallion', 'ascallion', ''),\n(163, 'Nick Micallef', '', '2015-02-26 12:13:56', '2015-02-26 10:26:04', 'ascallion', 'ascallion', ''),\n(10, 'Nick Micallef', 'Charging the Tablet', '2015-02-26 12:22:53', '2015-02-26 10:30:36', 'ascallion', 'ascallion', ''),\n(93, 'MITCH GREEN', 'PHOTOS', '2015-02-26 18:28:40', '2015-02-26 14:27:53', 'adible', 'tchermely', ''),\n(150, 'Chris', 'Long term project', '2015-02-26 19:18:35', '2015-02-26 16:01:53', 'tchermely', 'tchermely', ''),\n(10, 'Chris p>', 'Forensics Project', '2015-02-26 17:24:49', '2015-02-26 17:24:28', 'tchermely', 'tchermely', ''),\n(10, 'Chris P', 'Project', '2015-02-26 19:17:34', '2015-02-26 17:25:29', 'tchermely', 'tchermely', ''),\n(126, 'Chris P', 'Project', '2015-02-26 18:49:54', '2015-02-26 18:49:24', 'tchermely', 'tchermely', ''),\n(126, 'Chris P', 'For490', '2015-02-27 10:08:13', '2015-02-26 18:50:15', 'tchermely', 'ddeloge', ''),\n(143, 'Matt B', 'FOR 490', '2015-02-27 15:18:31', '2015-02-27 10:09:17', 'ddeloge', 'jdiorio', 'FOR 490'),\n(150, 'Chris A', 'Project', '2015-02-27 14:46:31', '2015-02-27 12:47:04', 'jdiorio', 'jdiorio', 'Project'),\n(10, 'Chris A', 'Project', '2015-02-27 14:45:59', '2015-02-27 13:07:51', 'jdiorio', 'jdiorio', 'Project'),\n(10, 'Chris A', 'Project', '2015-03-03 12:09:25', '2015-02-27 13:07:51', 'jdiorio', 'jallibone', ''),\n(126, 'Chris P', 'For FOR490', '2015-02-27 15:19:20', '2015-02-27 13:08:29', 'jdiorio', 'jdiorio', ''),\n(126, 'Chris P', 'For FOR490', '2015-03-04 13:33:00', '2015-02-27 13:08:29', 'jdiorio', 'jbonaccorsi', ''),\n(119, 'Chris P', 'Project For490', '2015-02-27 15:19:52', '2015-02-27 13:32:38', 'jdiorio', 'jdiorio', 'FOR 490'),\n(144, 'scott b', '', '2015-03-02 14:10:24', '2015-03-02 14:10:11', 'bcampbell', 'bcampbell', ''),\n(9, 'Austin', '', '2015-03-02 16:07:38', '2015-03-02 14:52:53', 'bcampbell', 'bcampbell', ''),\n(9, 'Austin', '', '2015-03-17 10:02:02', '2015-03-02 14:52:53', 'bcampbell', 'jallibone', 'signed in from after break'),\n(88, 'Austin', '', '2015-03-02 16:08:10', '2015-03-02 14:53:50', 'bcampbell', 'bcampbell', 
''),\n(164, 'Alex P', 'Windows 10 Project', '2015-03-02 20:21:14', '2015-03-02 16:48:25', 'adible', 'adible', ''),\n(171, 'Alex P', 'Windows 10 Project', '2015-03-02 20:20:55', '2015-03-02 16:48:50', 'adible', 'adible', ''),\n(170, 'Alex P', 'Windows 10 Project', '2015-03-02 20:20:43', '2015-03-02 16:49:06', 'adible', 'adible', ''),\n(4, 'Nick MiCallaf', 'Android Crack', '2015-03-03 12:10:26', '2015-03-03 08:54:08', 'jallibone', 'jallibone', ''),\n(10, 'Nick MiCallaf', 'Android Crack', '2015-03-03 15:35:11', '2015-03-03 08:55:22', 'jallibone', 'twright', ''),\n(159, 'Cam Dumont', 'DataGen', '2015-03-17 10:30:01', '2015-03-03 10:28:09', 'jallibone', 'jallibone', 'Turned In'),\n(119, 'Alex Caron', '', '2015-03-03 15:45:18', '2015-03-03 10:38:37', 'jallibone', 'twright', ''),\n(104, 'Shane C', '', '2015-03-03 12:47:05', '2015-03-03 11:47:51', 'jallibone', 'twright', ''),\n(150, 'Kevin D', 'Mobile Forensics', '2015-03-03 16:38:33', '2015-03-03 13:09:20', 'twright', 'twright', ''),\n(10, 'Chris A', '', '2015-03-03 18:47:17', '2015-03-03 15:35:42', 'twright', 'jbonaccorsi', ''),\n(162, 'Chris P', 'Project', '2015-03-04 09:20:32', '2015-03-04 09:05:48', 'jdiorio', 'jdiorio', 'Project'),\n(91, 'Alex', 'Project', '2015-03-04 15:12:50', '2015-03-04 09:07:12', 'jdiorio', 'jbonaccorsi', ''),\n(118, 'Samantha', 'Capston', '2015-03-04 10:53:07', '2015-03-04 09:24:34', 'jdiorio', 'jdiorio', 'Capstone'),\n(169, 'Chris', 'Movie', '2015-03-05 18:24:50', '2015-03-04 09:52:41', 'jdiorio', 'tchermely', ''),\n(143, 'matt B', 'For FOR490', '2015-03-04 15:13:55', '2015-03-04 10:08:18', 'jdiorio', 'jbonaccorsi', ''),\n(126, 'Chris P', 'For490', '2015-03-04 13:33:22', '2015-03-04 11:41:25', 'jdiorio', 'jbonaccorsi', ''),\n(76, 'Cam Dumont', '', '2015-03-04 14:21:33', '2015-03-04 13:31:23', 'jbonaccorsi', 'jbonaccorsi', ''),\n(76, 'Cam Dumont', '', '2015-03-04 13:32:00', '2015-03-04 13:31:23', 'jbonaccorsi', 'N/A', ''),\n(76, 'Cam Dumont', '', '2015-03-04 13:32:00', '2015-03-04 13:31:23', 'jbonaccorsi', 'N/A', ''),\n(93, 'Alex Caron', '', '2015-03-24 15:34:56', '2015-03-05 10:43:11', 'ascallion', 'twright', ''),\n(93, 'Alex Caron', '', '2015-03-27 14:14:56', '2015-03-05 10:43:11', 'ascallion', 'jdiorio', ''),\n(6, '', '', '2015-03-16 12:15:12', '2015-03-05 10:51:04', 'ascallion', 'bcampbell', ''),\n(144, 'Scott B', 'Internship', '2015-03-05 14:18:03', '2015-03-05 11:36:05', 'ascallion', 'adible', ''),\n(144, 'Scott B', 'Internship', '2015-03-05 14:18:03', '2015-03-05 11:36:05', 'ascallion', 'adible', ''),\n(150, 'Chris', '', '2015-03-05 18:26:13', '2015-03-05 15:42:12', 'adible', 'tchermely', 'N/A'),\n(118, 'Sam K', 'Capstone', '2015-03-06 11:05:40', '2015-03-06 09:39:59', 'ddeloge', 'ddeloge', ''),\n(118, 'Sam K', 'Capstone', '2015-03-19 15:37:23', '2015-03-06 09:39:59', 'ddeloge', 'adible', ''),\n(118, 'Sam', '', '2015-03-19 15:37:23', '2015-03-06 09:39:59', 'ddeloge', 'adible', ''),\n(143, 'Matt B', 'FOR 490', '2015-03-06 13:59:17', '2015-03-06 09:50:52', 'ddeloge', 'netadmin', 'signed in by hunter'),\n(86, 'Micheal', 'Film', '2015-03-06 16:33:01', '2015-03-06 16:32:34', 'ddeloge', 'ddeloge', ''),\n(172, '951058', 'Netadmin brah', '2015-03-16 10:06:51', '2015-03-16 10:06:36', 'netadmin', '', ''),\n(172, 'hunter', '<script>alert(''We_Do_It_Live'');</script>', '2015-03-16 11:08:35', '2015-03-16 11:07:43', 'netadmin', 'netadmin', ''),\n(173, 'Andre', '', '2015-03-20 14:45:55', '2015-03-16 12:14:42', 'bcampbell', 'jdiorio', ''),\n(126, 'Chris P', '', '2015-03-16 14:55:39', '2015-03-16 12:48:13', 'bcampbell', 
'bcampbell', ''),\n(144, 'Scott B', '', '2015-03-16 13:29:52', '2015-03-16 12:49:02', 'bcampbell', 'bcampbell', ''),\n(43, '', 'for490', '2015-03-30 18:03:31', '2015-03-16 17:56:07', 'adible', 'adible', ''),\n(43, '', 'for490', '2015-09-02 14:48:14', '2015-03-16 17:56:07', 'adible', 'bcampbell', ''),\n(9, '', '', '2015-03-17 10:02:53', '2015-03-17 10:01:25', 'jallibone', 'jallibone', 'signed in from after break'),\n(9, 'Austin', '', '2015-03-18 10:54:57', '2015-03-17 10:01:25', 'jallibone', 'jdiorio', 'in yo'),\n(9, '', '', '2015-03-18 11:11:51', '2015-03-17 10:03:20', 'jallibone', 'jdiorio', ''),\n(9, 'Nick MiCallaf', 'IPBox', '2015-03-18 11:11:51', '2015-03-17 10:03:20', 'jallibone', 'jdiorio', ''),\n(140, 'Austin Traux', 'DataGen', '2015-03-17 12:17:27', '2015-03-17 11:09:24', 'jallibone', 'twright', ''),\n(135, '', 'FOR490', '2015-03-20 15:03:59', '2015-03-17 14:14:09', 'twright', 'bfagersten', 'decom'),\n(1, 'test', 'rawring', '2015-03-17 14:23:14', '2015-03-17 14:18:27', 'netadmin', 'netadmin', 'rawr'),\n(150, 'Kevin D', 'Mobile Forensics', '2015-03-17 16:14:35', '2015-03-17 15:41:20', 'twright', 'twright', ''),\n(150, 'Chris A', 'Mobile Forensics', '2015-03-17 18:27:47', '2015-03-17 16:14:49', 'twright', 'jbonaccorsi', ''),\n(10, 'chris P', 'Project', '2015-03-18 10:55:30', '2015-03-18 09:30:10', 'JDIORIO', 'jdiorio', 'in'),\n(10, 'chris P', 'Project', '2015-03-18 09:40:00', '2015-03-18 09:30:10', 'JDIORIO', 'N/A', ''),\n(118, 'Chris P', 'PRoject', '2015-03-19 15:38:01', '2015-03-18 09:30:41', 'JDIORIO', 'adible', ''),\n(118, 'Chris P', 'PRoject', '2015-03-20 14:00:19', '2015-03-18 09:30:41', 'JDIORIO', 'jdiorio', ''),\n(126, 'Chris P', '', '2015-03-18 13:57:57', '2015-03-18 09:52:50', 'JDIORIO', 'jbonaccorsi', ''),\n(143, 'Matt B', 'FOR FOR490', '2015-03-18 15:12:46', '2015-03-18 09:53:27', 'JDIORIO', 'jbonaccorsi', ''),\n(9, 'austin T', 'Project', '2015-03-18 13:56:48', '2015-03-18 11:14:10', 'jdiorio', 'jbonaccorsi', ''),\n(140, 'Scott', '', '2015-03-18 17:00:41', '2015-03-18 14:40:52', 'jbonaccorsi', 'ascallion', ''),\n(88, 'Scott', '', '2015-03-23 12:12:16', '2015-03-18 14:44:25', 'jbonaccorsi', 'bcampbell', ''),\n(150, 'Kevin D', 'Mobile Forensics', '2015-03-19 18:18:03', '2015-03-19 14:40:14', 'adible', 'tchermely', ''),\n(118, 'Chris', 'Project', '2015-03-20 14:00:19', '2015-03-19 15:38:11', 'adible', 'jdiorio', ''),\n(143, 'Matt B', 'FOR 490', '2015-03-20 15:11:37', '2015-03-20 10:01:22', 'lcdioa', 'jdiorio', 'For 490'),\n(150, 'Chris A', 'Project', '2015-03-20 15:14:08', '2015-03-20 12:37:40', 'jdiorio', 'jdiorio', 'Project'),\n(91, 'Zac R', 'Project', '2015-09-02 14:49:25', '2015-03-20 13:50:40', 'jdiorio', 'bcampbell', ''),\n(15, '', '', '2015-09-02 14:48:08', '2015-03-23 08:46:03', 'ascallion', 'bcampbell', ''),\n(140, 'Scott', '', '2015-03-23 12:11:25', '2015-03-23 09:56:44', 'ascallion', 'bcampbell', ''),\n(164, 'Alex P', 'Windows 10 project', '2015-03-31 19:36:25', '2015-03-23 19:29:07', 'adible', 'jbonaccorsi', ''),\n(12, 'Nick MiCallaf', '', '2015-09-02 14:47:56', '2015-03-24 10:02:35', 'lcdioa', 'bcampbell', ''),\n(139, 'Nick Micallaf', '', '2015-09-02 14:50:06', '2015-03-24 10:03:08', 'lcdioa', 'bcampbell', ''),\n(93, 'Chris A', 'Project', '2015-03-31 13:59:52', '2015-03-24 15:35:09', 'twright', 'twright', ''),\n(170, 'Alex P', 'Windows 10 Project', '2015-03-31 19:28:56', '2015-03-24 16:17:06', 'twright', 'jbonaccorsi', ''),\n(171, 'Alex P', 'Windows 10 Project', '2015-03-31 19:27:08', '2015-03-24 16:17:35', 'twright', 'jbonaccorsi', ''),\n(87, 'tyler', '', 
'2015-03-30 17:25:56', '2015-03-24 17:37:34', 'jbonaccorsi', 'adible', ''),\n(140, 'Scott', '', '2015-03-25 16:02:18', '2015-03-25 14:15:08', 'jbonaccorsi', 'sbarrett', 'Put back in the equipment drawer'),\n(88, 'scott', '', '2015-03-25 16:03:38', '2015-03-25 14:15:56', 'jbonaccorsi', 'sbarrett', 'Returned to the equipment drawer'),\n(86, 'Nick Micallef', '', '2015-03-26 11:48:53', '2015-03-26 09:58:51', 'ascallion', 'ascallion', ''),\n(89, 'Nick Micallef', '', '2015-03-26 11:49:15', '2015-03-26 11:29:03', 'ascallion', 'ascallion', ''),\n(140, 'Paul', '', '2015-03-26 17:29:21', '2015-03-26 16:09:23', 'tchermely', 'tchermely', ''),\n(118, 'Chris P', '', '2015-03-26 18:02:40', '2015-03-26 16:12:02', 'tchermely', 'tchermely', 'Project'),\n(160, 'Paul', '', '2015-03-26 17:29:42', '2015-03-26 16:21:20', 'tchermely', 'tchermely', ''),\n(150, 'Chris A', '', '2015-03-26 18:42:19', '2015-03-26 16:30:34', 'tchermely', 'tchermely', ''),\n(73, 'Paul', '', '2015-04-01 15:37:01', '2015-03-26 16:50:06', 'tchermely', 'jbonaccorsi', ''),\n(156, '', 'Project', '2015-03-26 18:41:48', '2015-03-26 17:31:26', 'tchermely', 'tchermely', ''),\n(143, 'Matt B', 'FOR 490', '2015-04-01 09:57:14', '2015-03-27 10:00:12', 'ddeloge', 'jdiorio', ''),\n(5, 'Joe', 'Access to back room for someone else', '2015-04-07 12:40:29', '2015-03-27 11:16:55', 'ddeloge', '', ''),\n(9, 'Austin T.', 'MAC OS Forensics', '2015-03-27 13:28:22', '2015-03-27 11:37:03', 'ktellers', 'jdiorio', 'in'),\n(140, 'Austin T', 'For Mac Forensics ', '2015-03-27 13:47:38', '2015-03-27 13:32:11', 'jdiorio', 'jdiorio', ''),\n(135, 'Andrea', 'FOR for490', '2015-09-02 14:47:31', '2015-03-27 13:46:47', 'jdiorio', 'bcampbell', ''),\n(88, 'SCott', 'project', '2015-04-01 15:37:33', '2015-03-27 14:59:34', 'jdiorio', 'jbonaccorsi', ''),\n(88, 'SCott', 'project', '2015-03-27 15:00:00', '2015-03-27 14:59:34', 'jdiorio', 'N/A', ''),\n(66, 'Scott', 'Project', '2015-04-17 15:34:46', '2015-03-27 15:10:46', 'jdiorio', 'jdiorio', ''),\n(121, 'Joe', '', '2015-09-02 14:50:50', '2015-03-29 12:32:26', 'twright', 'bcampbell', ''),\n(9, 'Alex Caron', '', '2015-03-30 09:35:10', '2015-03-30 09:28:01', 'ascallion', 'ascallion', ''),\n(126, 'Chris', '', '2015-03-30 18:04:00', '2015-03-30 17:25:19', 'adible', 'adible', ''),\n(87, 'Alex P', '', '2015-03-31 19:34:54', '2015-03-30 17:26:03', 'adible', 'jbonaccorsi', ''),\n(43, 'Seth J', '', '2015-09-02 14:48:21', '2015-03-30 18:03:39', 'adible', 'bcampbell', ''),\n(118, 'Nick Mcallaf', '', '2015-03-31 12:02:58', '2015-03-31 08:42:11', 'lcdioa', 'twright', ''),\n(86, 'Ben K', 'Two-Man Project', '2015-03-31 13:29:03', '2015-03-31 13:28:45', 'twright', 'twright', ''),\n(93, 'Lawrence S', 'Two-Man Project', '2015-03-31 19:35:31', '2015-03-31 14:00:09', 'twright', 'jbonaccorsi', '');\nINSERT INTO `Inout` (`ID`, `StudentID`, `Use`, `DateIn`, `DateOut`, `UserOut`, `UserIn`, `Issues`) VALUES\n(118, 'Sam K', 'Capstone', '2015-03-31 17:24:19', '2015-03-31 15:04:37', 'twright', 'twright', ''),\n(93, 'Mary R', 'Project', '2015-04-01 09:38:12', '2015-04-01 09:37:30', 'jdiorio', 'jdiorio', 'Proejct'),\n(143, 'Matt B', 'FOR490', '2015-04-01 17:25:04', '2015-04-01 09:57:35', 'jdiorio', 'ascallion', ''),\n(118, 'Mary R', 'Project', '2015-04-02 08:50:08', '2015-04-01 11:22:28', 'jdiorio', 'ascallion', ''),\n(162, 'Alex C', 'Data Project', '2015-09-02 14:49:48', '2015-04-01 12:04:40', 'jdiorio', 'bcampbell', ''),\n(162, 'Alex C', 'Data Project', '2015-04-01 12:05:00', '2015-04-01 12:04:40', 'jdiorio', 'N/A', ''),\n(93, 'Austin T', 'Project', '2015-04-01 
14:28:29', '2015-04-01 12:08:24', 'jdiorio', 'jbonaccorsi', ''),\n(72, 'Alex C', '', '2015-09-02 14:48:38', '2015-04-01 15:51:24', 'ascallion', 'bcampbell', ''),\n(34, 'Zach Smith ', '', '2015-04-01 16:20:24', '2015-04-01 15:58:05', 'ascallion', 'ascallion', ''),\n(123, 'Zach Smith', '', '2015-04-01 16:20:39', '2015-04-01 16:00:37', 'ascallion', 'ascallion', ''),\n(86, 'Parker', 'File Transfer', '2015-04-02 12:35:23', '2015-04-02 11:32:18', 'ascallion', 'adible', ''),\n(156, 'Michael C.', 'Project', '2015-04-02 19:25:29', '2015-04-02 16:54:09', 'tchermely', 'tchermely', ''),\n(118, 'Sam K', 'Capstone', '2015-04-07 12:39:14', '2015-04-03 10:02:23', 'ddeloge', 'twright', ''),\n(117, 'Sam K', 'Capstone', '2015-09-02 14:50:26', '2015-04-03 10:03:11', 'ddeloge', 'bcampbell', ''),\n(143, 'Matt B', 'FOR 490', '2015-04-03 15:10:06', '2015-04-03 10:03:47', 'ddeloge', 'jdiorio', ''),\n(86, 'Parker D', '', '2015-04-06 12:27:46', '2015-04-06 11:39:35', 'ascallion', 'bcampbell', ''),\n(102, 'Lawrence', '', '2015-04-06 16:09:07', '2015-04-06 15:55:19', 'bcampbell', 'adible', ''),\n(93, '', '', '2015-04-10 08:44:40', '2015-04-06 17:17:58', 'adible', 'ddeloge', ''),\n(118, 'Sam K', 'Capstone', '2015-04-07 16:45:37', '2015-04-07 15:25:01', 'twright', 'twright', ''),\n(118, 'Chris P', 'Report', '2015-04-08 17:34:53', '2015-04-07 16:45:52', 'twright', 'ascallion', ''),\n(156, 'Michael C', 'Project', '2015-09-02 14:51:58', '2015-04-07 16:48:35', 'twright', 'bcampbell', ''),\n(143, 'Matt B', 'FOR490', '2015-04-10 10:03:49', '2015-04-08 09:57:45', 'jdiorio', 'ddeloge', ''),\n(143, 'Matt B', 'FOR 490', '2015-04-10 14:07:28', '2015-04-10 10:04:02', 'ddeloge', 'jdiorio', ''),\n(123, 'Alex', 'Forensics Job', '2015-04-10 13:41:02', '2015-04-10 13:40:38', 'jdiorio', 'jdiorio', ''),\n(93, 'Scott B.', '', '2015-09-02 14:49:39', '2015-04-13 10:56:07', 'ascallion', 'bcampbell', ''),\n(86, 'Scott B.', '', '2015-04-27 14:58:02', '2015-04-13 11:40:51', 'ascallion', 'bcampbell', ''),\n(118, 'Nick MiCallaf', 'App Forensics', '2015-09-02 14:50:34', '2015-04-14 10:59:23', 'lcdioa', 'bcampbell', ''),\n(118, 'Nick MiCallaf', 'App Forensics', '2015-09-15 14:45:25', '2015-04-14 10:59:23', 'lcdioa', 'dparadise', ''),\n(109, 'Kevin D', 'Project', '2015-04-14 13:14:42', '2015-04-14 13:14:13', 'twright', 'twright', 'Decommissioned'),\n(157, '', '', '2015-09-02 14:51:07', '2015-04-16 14:46:57', 'adible', 'bcampbell', ''),\n(119, 'Hannah', 'Test for Jon''s Class', '2015-04-28 10:08:57', '2015-04-17 12:16:28', 'ddeloge', 'acaron', ''),\n(145, 'Dillon D', 'GoPro Remote', '2015-09-02 14:51:19', '2015-04-24 08:58:17', 'ddeloge', 'bcampbell', ''),\n(147, 'Dillon D', 'GoPro Remote', '2015-09-02 14:51:26', '2015-04-24 09:19:34', 'ddeloge', 'bcampbell', ''),\n(122, 'Alex', '', '2015-04-28 10:07:30', '2015-04-27 14:16:10', 'bcampbell', 'acaron', ''),\n(86, 'Alex', '', '2015-04-28 14:00:58', '2015-04-27 14:58:16', 'bcampbell', 'acaron', ''),\n(119, 'Alex Caron', 'Case Work', '2015-04-28 13:59:14', '2015-04-28 10:09:11', 'acaron', 'acaron', ''),\n(92, 'Andre M', '', '2015-09-02 14:48:02', '2015-09-02 11:23:19', 'ascallion', 'bcampbell', ''),\n(6, 'Andre M', 'Forgot Key Card', '2015-09-15 14:49:08', '2015-09-14 12:55:01', 'ascallion', 'dparadise', ''),\n(86, '', 'Transfer Bio Photos', '2015-09-14 13:42:25', '2015-09-14 13:42:01', 'ascallion', 'ascallion', ''),\n(86, '', 'Transfer Bio Photos', '2015-09-23 08:08:44', '2015-09-14 13:42:01', 'ascallion', 'ascallion', ''),\n(9, 'Nick', 'Projects', '2015-09-15 14:44:27', '2015-09-15 11:03:24', 
'tchermely', 'dparadise', ''),\n(148, 'Nick', 'Projects', '2015-09-15 14:45:37', '2015-09-15 11:04:18', 'tchermely', 'dparadise', ''),\n(118, 'Nick', 'Projects', '2015-09-15 14:45:51', '2015-09-15 11:08:23', 'tchermely', 'dparadise', ''),\n(117, 'Nick', 'Projects', '2015-09-15 14:45:11', '2015-09-15 11:08:56', 'tchermely', 'dparadise', ''),\n(69, '', '', '2015-09-16 10:53:22', '2015-09-16 08:40:37', 'ascallion', 'ascallion', ''),\n(119, 'Alex C', 'Forensics ', '2015-09-17 11:01:19', '2015-09-16 09:59:35', 'ascallion', 'acaron', ''),\n(83, '', 'Incognito Project', '2015-09-16 11:59:24', '2015-09-16 11:47:41', 'ascallion', 'ascallion', ''),\n(97, 'Cameron D', 'Incognito Project', '2015-09-16 14:11:05', '2015-09-16 11:59:53', 'ascallion', 'bcampbell', ''),\n(86, 'Alex C.', 'Windows 10 Project', '2015-09-23 15:50:28', '2015-09-17 11:02:34', 'acaron', 'jwilliams', ''),\n(66, 'Alex C.', 'Research Windows Images', '2015-09-17 13:51:28', '2015-09-17 13:03:52', 'acaron', 'acaron', ''),\n(119, '', 'Forensic Case', '2015-09-17 16:31:12', '2015-09-17 14:07:31', 'acaron', 'acaron', ''),\n(6, 'Amanda J', 'missing id', '2015-09-21 11:25:43', '2015-09-18 16:02:59', 'dparadise', 'dparadise', ''),\n(67, '', '', '2015-09-21 16:46:26', '2015-09-21 14:18:28', 'ascallion', 'bfagersten', ''),\n(9, 'Nick Micallef', 'Mobile Apps', '2015-09-21 17:36:29', '2015-09-21 14:26:44', 'ascallion', 'bfagersten', ''),\n(150, 'Nick M', 'Mobile Apps', '2015-09-21 17:36:44', '2015-09-21 14:27:14', 'ascallion', 'bfagersten', ''),\n(117, 'Nick M', 'Project Work', '2015-09-21 17:36:32', '2015-09-21 15:05:21', 'ascallion', 'bfagersten', ''),\n(92, 'Nick M', 'Project Work', '2015-09-21 17:36:30', '2015-09-21 15:05:56', 'ascallion', 'bfagersten', ''),\n(1, 'netadmin', 'test', '2015-09-23 08:03:14', '2015-09-23 08:03:04', 'amaccarone', 'amaccarone', 'test'),\n(1, '', 'Test', '2015-09-23 08:04:28', '2015-09-23 08:04:15', 'ascallion', 'ascallion', ''),\n(200, '', 'Rasp Pi Project', '2015-09-23 11:08:43', '2015-09-23 10:06:35', 'ascallion', 'ascallion', ''),\n(203, 'Brandon M.', 'Rasp Pi Project', '2015-09-23 11:11:22', '2015-09-23 10:07:08', 'ascallion', 'ascallion', ''),\n(202, 'Nancy C', 'Rasp Pi Project', '2015-09-23 11:08:29', '2015-09-23 10:10:06', 'ascallion', 'ascallion', ''),\n(204, 'Ethan B', 'Rasp Pi Project', '2015-09-23 11:11:04', '2015-09-23 10:10:27', 'ascallion', 'ascallion', ''),\n(119, 'Alex C', 'Forensics Work', '2015-09-23 15:50:45', '2015-09-23 10:14:53', 'ascallion', 'jwilliams', ''),\n(196, '', 'Rasp Pi Project', '2015-09-23 15:50:00', '2015-09-23 10:15:26', 'ascallion', 'jwilliams', ''),\n(197, 'Mathew Fortier', 'Rasp Pi Project', '2015-09-23 15:49:49', '2015-09-23 10:15:57', 'ascallion', 'jwilliams', ''),\n(192, '', '', '2015-09-23 15:49:31', '2015-09-23 11:17:07', 'ascallion', 'jwilliams', ''),\n(46, 'Tyler D', 'Rasp Pi Project', '2015-09-23 15:51:05', '2015-09-23 11:53:36', 'ascallion', 'jwilliams', ''),\n(25, 'Tyler D', 'Rasp Pi Project', '2015-09-23 11:55:00', '2015-09-23 11:54:39', 'ascallion', 'N/A', ''),\n(76, 'CMcGonnigal', 'Malware Project', '2015-03-04 13:32:00', '2015-09-24 16:31:22', 'acaron', 'N/A', ''),\n(42, '', 'inventory script check', '2015-09-25 14:35:34', '2015-09-25 14:35:10', 'dyost', 'dyost', 'inventory script check'),\n(205, '', 'Rasp Pi Project', '2015-09-29 11:10:03', '2015-09-28 14:24:30', 'ascallion', 'jwilliams', ''),\n(203, 'Brandon M.', 'Rasp Pi Project', '2015-09-29 11:10:01', '2015-09-28 14:29:39', 'ascallion', 'jwilliams', ''),\n(204, 'Ethan B', 'Rasp Pi Project', '2015-09-28 
15:24:21', '2015-09-28 14:29:55', 'ascallion', 'ascallion', ''),\n(192, '', 'Project Work', '2015-09-28 17:32:54', '2015-09-28 14:30:26', 'ascallion', 'bfagersten', ''),\n(199, 'Tyler Nettleson', 'Rasp Pi Project', '2015-09-29 11:10:04', '2015-09-28 14:31:02', 'ascallion', 'jwilliams', ''),\n(83, 'Tyler D', 'Rasp Pi Project', '2015-09-30 08:31:12', '2015-09-28 14:53:04', 'ascallion', 'ascallion', ''),\n(148, 'Nick M', 'Project Work', '2015-09-28 17:31:13', '2015-09-28 14:57:19', 'ascallion', 'bfagersten', ''),\n(9, 'Nick M', 'Project Work', '2015-09-28 17:31:11', '2015-09-28 14:58:01', 'ascallion', 'bfagersten', ''),\n(46, 'Tyler D', 'Rasp Pi', '2015-09-30 12:20:54', '2015-09-28 15:18:45', 'ascallion', 'dparadise', 'Drive is dead'),\n(122, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:19:47', '2015-09-28 15:19:20', 'ascallion', 'dparadise', ''),\n(140, 'Tyler D', 'Rasp Pi Project', '2015-10-05 17:08:39', '2015-09-28 15:19:55', 'ascallion', 'jwilliams', 'back in'),\n(81, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:19:34', '2015-09-28 15:20:28', 'ascallion', 'dparadise', ''),\n(77, 'Forensic Kit', 'Permanently Assigned', '2016-01-19 14:09:08', '2015-09-29 09:07:23', 'sbarrett', 'dparadise', 'Permanently signed out by FI.'),\n(213, 'Forensic Kit', 'Permanently Assigned', '2016-01-19 14:07:48', '2015-09-29 09:11:28', 'acaron', 'dparadise', 'Permanently signed out by FI.'),\n(87, 'Forensic Kit', 'Permanently Assigned', '2016-01-19 14:18:20', '2015-09-29 09:18:32', 'sbarrett', 'dparadise', 'Permanently signed out by FI.'),\n(117, 'Forensic Kit', 'Permanently Assigned', '2015-09-29 11:06:16', '2015-09-29 09:22:23', 'sbarrett', 'jwilliams', ''),\n(118, 'Forensic Kit', 'Permanently Assigned', '2015-09-29 11:07:30', '2015-09-29 09:23:16', 'sbarrett', 'jwilliams', ''),\n(148, 'NIck', 'Mobile Project', '2015-09-29 14:30:55', '2015-09-29 11:04:50', 'jwilliams', 'ohatalsky', ''),\n(117, 'NIck', 'Mobile Project', '2015-09-29 14:23:20', '2015-09-29 11:07:06', 'jwilliams', 'ohatalsky', ''),\n(118, 'NIck', 'Mobile Project', '2015-09-29 14:23:38', '2015-09-29 11:07:48', 'jwilliams', 'ohatalsky', ''),\n(9, 'NIck', 'Mobile Project', '2015-09-29 14:31:10', '2015-09-29 11:08:14', 'jwilliams', 'ohatalsky', ''),\n(197, 'NIck', 'Mobile Project', '2015-09-29 14:31:45', '2015-09-29 11:09:48', 'jwilliams', 'ohatalsky', ''),\n(114, 'Scott', 'Permanently signed out', '2016-01-19 14:19:54', '2015-09-29 11:15:01', 'jwilliams', 'dparadise', 'Permanently signed out by FI.'),\n(216, 'Scott', 'Permanently signed out', '2016-01-19 14:07:35', '2015-09-29 11:15:56', 'jwilliams', 'dparadise', 'Permanently signed out by FI.'),\n(118, 'David P', 'Test', '2015-09-29 15:16:32', '2015-09-29 15:15:05', 'dparadise', 'dparadise', 'Test'),\n(206, 'Chris A', 'IoT', '2015-09-30 08:25:00', '2015-09-29 15:32:37', 'dparadise', 'ascallion', ''),\n(212, 'Chris A', 'IoT', '2015-09-30 08:25:32', '2015-09-29 15:34:14', 'dparadise', 'ascallion', ''),\n(198, 'Matt F', 'Pi Forensics', '2015-09-30 08:24:42', '2015-09-29 16:11:00', 'dparadise', 'ascallion', ''),\n(205, '', 'Rasp Pi Project', '2015-09-30 10:55:19', '2015-09-30 08:08:18', 'ascallion', 'ascallion', ''),\n(201, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:20:11', '2015-09-30 08:30:24', 'ascallion', 'dparadise', ''),\n(217, '', 'Project Work', '2015-09-30 10:26:57', '2015-09-30 09:16:26', 'ascallion', 'ascallion', ''),\n(197, '', 'Rasp Pi Project', '2015-09-30 10:55:35', '2015-09-30 09:41:12', 'ascallion', 'ascallion', ''),\n(196, '', 'Rasp Pi Project', '2015-09-30 10:55:46', '2015-09-30 
09:41:47', 'ascallion', 'ascallion', ''),\n(199, 'Tyler Nettleson', 'Rasp Pi Project', '2015-09-30 11:45:04', '2015-09-30 10:14:48', 'ascallion', 'ascallion', ''),\n(6, 'Tyler D', 'Missing ID', '2015-09-30 12:16:50', '2015-09-30 10:49:02', 'ascallion', 'dparadise', ''),\n(83, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:19:59', '2015-09-30 10:51:11', 'ascallion', 'dparadise', ''),\n(196, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:18:32', '2015-09-30 11:19:50', 'ascallion', 'dparadise', ''),\n(197, 'Tyler D', 'Rasp Pi Project', '2015-09-30 12:18:13', '2015-09-30 11:20:26', 'ascallion', 'dparadise', ''),\n(169, 'Jon', 'FOR310', '0000-00-00 00:00:00', '2015-09-30 13:53:01', 'bcampbell', 'N/A', ''),\n(147, 'Jon', 'FOR310', '2015-12-09 09:52:02', '2015-09-30 13:53:25', 'bcampbell', 'ascallion', ''),\n(146, 'Jon', 'FOR310', '2015-12-09 10:25:26', '2015-09-30 13:53:40', 'bcampbell', 'ascallion', ''),\n(149, 'Jon', 'FOR310', '2015-12-08 17:13:00', '2015-09-30 13:53:54', 'bcampbell', 'mfortier', 'Returned from class member'),\n(192, '', 'Project Work', '2015-10-01 12:51:04', '2015-09-30 14:41:34', 'bcampbell', 'jwilliams', ''),\n(192, 'Cameron', 'Win 10', '2015-10-05 17:23:35', '2015-10-01 12:51:18', 'jwilliams', 'nmicallef', ''),\n(109, 'M. Green', 'Cyber Pi', '2015-10-26 09:17:21', '2015-10-05 09:33:23', 'dhartman', 'dhartman', ''),\n(9, 'Nick Micallef', 'Mobile Apps', '2015-10-05 18:12:22', '2015-10-05 14:35:52', 'ascallion', 'nmicallef', ''),\n(148, 'Nick Micallef', 'Mobile Apps', '2015-10-05 18:13:18', '2015-10-05 14:36:33', 'ascallion', 'nmicallef', ''),\n(117, 'Nick Micallef', 'Mobile Apps', '2015-10-05 18:14:14', '2015-10-05 14:36:59', 'ascallion', 'nmicallef', ''),\n(118, 'Nick Micallef', 'Mobile Apps', '2015-10-05 18:13:55', '2015-10-05 14:37:18', 'ascallion', 'nmicallef', ''),\n(92, 'Nick Micallef', 'Mobile Apps', '2015-10-05 18:12:57', '2015-10-05 14:37:49', 'ascallion', 'nmicallef', ''),\n(92, 'Nick Micallef', 'Mobile Apps', '2015-11-05 10:09:14', '2015-10-05 14:37:49', 'ascallion', 'vchaudhari', ''),\n(214, 'Cameron D', 'Project Work', '2016-02-01 08:29:52', '2015-10-05 14:38:21', 'ascallion', 'ascallion', ''),\n(201, 'Tyler D', 'Rasp Pi Project', '2015-10-20 18:46:41', '2015-10-05 15:02:57', 'ascallion', 'bfagersten', ''),\n(89, 'Tyler D', '', '2015-10-07 10:35:42', '2015-10-05 15:28:23', 'ascallion', 'ascallion', ''),\n(86, 'Tyler D', '', '2015-10-05 17:10:12', '2015-10-05 15:28:58', 'ascallion', 'jwilliams', 'back in'),\n(119, 'Mary R.', 'Forensics', '2015-10-06 15:43:10', '2015-10-06 13:53:53', 'dhartman', 'dhartman', ''),\n(117, 'Mary R.', 'Forensics', '2015-10-20 14:10:03', '2015-10-06 15:27:37', 'dhartman', 'dhartman', ''),\n(118, 'Mary R.', 'Mobile Forensics', '2015-10-20 15:02:27', '2015-10-06 15:45:44', 'dhartman', 'dhartman', ''),\n(83, 'Tyler D', 'Raspberry Pi Forensics ', '2015-10-20 10:40:28', '2015-10-06 17:16:12', 'mfortier', 'jfernandez', 'Found'),\n(83, 'Tyler D', 'Raspberry Pi Forensics ', '2015-10-22 09:51:27', '2015-10-06 17:16:12', 'mfortier', '', ''),\n(218, '', 'Mac Forensics', '2015-10-07 08:08:17', '2015-10-06 17:32:20', 'mfortier', 'amaccarone', ''),\n(122, 'Emily S', 'Forensics Pi''', '2015-10-21 11:23:20', '2015-10-06 18:31:35', 'mfortier', 'ascallion', ''),\n(123, 'Tyler N', 'Forensics Pi', '2015-10-14 11:02:05', '2015-10-06 18:32:11', 'mfortier', 'ascallion', ''),\n(200, '', 'Rasp Pi Project', '2015-12-09 09:04:46', '2015-10-07 08:36:53', 'ascallion', 'ascallion', ''),\n(204, 'Ethan B', 'Rasp Pi Project', '2015-12-09 09:04:23', '2015-10-07 08:58:29', 
'ascallion', 'ascallion', ''),\n(202, 'Nancy C', 'Rasp Pi Project', '2015-12-09 09:05:02', '2015-10-07 08:58:48', 'ascallion', 'ascallion', ''),\n(203, 'Brandon M', 'Rasp Pi Project', '2015-12-09 09:05:56', '2015-10-07 09:21:25', 'ascallion', 'ascallion', ''),\n(205, 'Emily S', 'Rasp Pi Project', '2015-10-14 10:31:05', '2015-10-07 09:40:09', 'ascallion', 'ascallion', ''),\n(67, 'Emily S', 'Rasp Pi Project', '2015-10-07 10:13:38', '2015-10-07 09:41:29', 'ascallion', 'ascallion', ''),\n(121, 'Emily S', 'Rasp Pi Project', '2015-11-02 12:04:02', '2015-10-07 10:12:57', 'ascallion', 'ascallion', ''),\n(206, 'Jon R', 'Rigging ', '2015-10-07 17:05:45', '2015-10-07 10:47:41', 'ascallion', 'jnicastro', 'project'),\n(9, 'TylerW', '', '2015-10-07 13:13:45', '2015-10-07 12:18:59', 'dparadise', 'dparadise', ''),\n(192, 'Cameron', '', '2015-10-19 15:51:32', '2015-10-07 13:08:28', 'dparadise', 'cdumont', ''),\n(206, 'ChrisA', 'IoT', '2015-10-07 17:06:13', '2015-10-07 17:06:01', 'jnicastro', '', 'IoT'),\n(206, 'Chris A', 'Research', '2015-10-08 14:21:28', '2015-10-08 14:21:04', 'vchaudhari', 'vchaudhari', ''),\n(6, 'Murphy', '1 Day, Forgot ID Card', '2015-10-14 12:54:39', '2015-10-08 14:32:46', 'vchaudhari', 'bcampbell', ''),\n(76, '', 'Mac Fornesics', '2015-10-09 13:23:14', '2015-10-09 13:19:37', 'ohatalsky', 'ohatalsky', 'Drive had other project info on it, did not use'),\n(159, '', 'Mac Fornesics', '2015-10-09 13:23:11', '2015-10-09 13:21:45', 'ohatalsky', 'ohatalsky', 'incorrect info on signout'),\n(159, '', 'Mac Fornesics', '0000-00-00 00:00:00', '2015-10-09 13:24:10', 'ohatalsky', 'N/A', ''),\n(218, 'Emily S', 'Project Use', '2015-10-28 08:01:41', '2015-10-14 09:56:10', 'ascallion', 'ascallion', ''),\n(86, 'Tyler D', 'File Transfer', '2015-10-14 12:36:16', '2015-10-14 10:05:15', 'ascallion', 'bcampbell', ''),\n(140, 'Tyler D', 'Rasp Pi Project', '2015-10-20 17:28:22', '2015-10-14 10:30:24', 'ascallion', 'tpeyton', 'Returned'),\n(68, 'Tyler W', 'Project work', '2015-10-14 13:50:38', '2015-10-14 11:14:36', 'ascallion', 'bcampbell', ''),\n(123, 'Tyler D', 'Rasp Pi Project', '2015-10-21 08:14:23', '2015-10-14 11:56:18', 'ascallion', 'ascallion', ''),\n(6, 'Mary-Braden', '1 Day, lost ID', '2015-10-14 15:27:04', '2015-10-14 12:54:47', 'bcampbell', 'bcampbell', ''),\n(209, 'Zach', '', '2015-10-19 11:50:33', '2015-10-19 11:46:49', 'dparadise', 'dparadise', ''),\n(91, 'David D''Amico', 'Practicing FTK', '2015-10-19 15:53:26', '2015-10-19 13:13:09', 'acaron', 'ohatalsky', 'signed in'),\n(52, 'Emily S', 'Rasp Pi', '2015-10-28 07:59:58', '2015-10-19 14:25:08', 'ohatalsky', 'ascallion', ''),\n(9, 'Nick Micallef', 'Mobile App''s', '2015-10-19 18:19:03', '2015-10-19 14:57:39', 'nmicallef', 'nmicallef', ''),\n(9, 'Nick Micallef', 'Mobile App''s', '2015-10-26 18:18:47', '2015-10-19 14:57:39', 'nmicallef', 'nmicallef', ''),\n(9, 'Nick Micallef', 'Mobile App''s', '2015-10-26 18:19:19', '2015-10-19 14:57:39', 'nmicallef', 'nmicallef', ''),\n(192, 'Justin', '', '2015-10-19 17:27:46', '2015-10-19 15:52:36', 'cdumont', 'bfagersten', ''),\n(11, 'Austin T.', 'Sync w/ Computer', '2015-10-20 12:02:49', '2015-10-20 10:39:13', 'jfernandez', 'jfernandez', 'Turned In'),\n(148, 'Nicholas M.', '', '2015-10-20 14:59:31', '2015-10-20 12:11:53', 'jfernandez', 'dhartman', ''),\n(117, 'Nicholas M.', 'Mobile Apps', '2015-10-20 15:00:33', '2015-10-20 14:11:46', 'dhartman', 'dhartman', ''),\n(209, 'ChrisA', '', '2015-10-20 17:39:55', '2015-10-20 16:37:09', 'dparadise', 'tpeyton', 'Returned'),\n(123, 'Emily S', 'Rasp Pi Project', '2015-10-21 
10:46:55', '2015-10-21 08:14:30', 'ascallion', 'ascallion', ''),\n(89, 'Mitch G', 'Photos for Rasp Pi Porject', '2015-10-21 09:47:47', '2015-10-21 08:57:17', 'ascallion', 'ascallion', ''),\n(86, 'Ethan B', 'File Transfer', '2015-10-21 09:47:16', '2015-10-21 08:57:59', 'ascallion', 'ascallion', ''),\n(192, 'Cameron D', 'Windows 10 Project', '2015-10-21 14:30:29', '2015-10-21 11:16:44', 'ascallion', 'cdumont', ''),\n(123, 'Tyler D', 'Rasp Pi Project', '2015-10-21 11:50:01', '2015-10-21 11:20:36', 'ascallion', 'ascallion', ''),\n(122, 'Tyler D', 'Rasp Pi Project', '2015-10-28 08:00:58', '2015-10-21 11:23:28', 'ascallion', 'ascallion', ''),\n(118, 'TylerW', '', '2015-10-26 14:31:42', '2015-10-21 13:20:00', 'dparadise', 'ascallion', ''),\n(18, 'Cameron', 'Project', '2015-10-21 14:30:09', '2015-10-21 13:27:27', 'bcampbell', 'cdumont', ''),\n(192, 'Justin', '', '2015-10-26 17:27:21', '2015-10-21 14:36:46', 'bcampbell', 'nmicallef', ''),\n(206, 'Chris A', '', '0000-00-00 00:00:00', '2015-10-21 15:15:48', 'bcampbell', 'N/A', ''),\n(83, 'Michael', 'Research', '2015-10-26 09:50:32', '2015-10-22 09:51:11', 'vchaudhari', 'dhartman', ''),\n(1, 'brent ', 'testing', '2015-10-23 12:16:25', '2015-10-23 12:15:35', 'bfagersten', 'bfagersten', 'no notes'),\n(109, 'M. Green', 'Cyber Pi', '2015-12-09 09:03:48', '2015-10-26 09:17:48', 'dhartman', 'ascallion', ''),\n(119, 'Parker ', 'Game Chat', '2015-10-27 11:52:57', '2015-10-26 10:20:17', 'dhartman', 'jfernandez', ''),\n(166, 'Cameron D', '', '2015-11-02 14:42:52', '2015-10-26 14:26:18', 'ascallion', 'ascallion', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-10-27 15:06:33', '2015-10-26 14:30:57', 'ascallion', 'dhartman', ''),\n(118, 'Nicholas M.', 'Mobile Apps', '2015-10-26 18:20:56', '2015-10-26 14:31:55', 'ascallion', 'nmicallef', ''),\n(148, 'Nicholas M.', 'Mobile Apps', '2015-10-26 18:20:15', '2015-10-26 14:32:38', 'ascallion', 'nmicallef', ''),\n(88, 'Tyler D', 'Pi Forensics', '2015-10-26 17:05:39', '2015-10-26 14:38:48', 'ascallion', 'nmicallef', ''),\n(148, '', '', '2015-10-27 15:07:10', '2015-10-27 11:52:10', 'jfernandez', 'dhartman', ''),\n(117, 'Nicholas M', '', '2015-10-27 15:05:52', '2015-10-27 12:11:05', 'jfernandez', 'dhartman', ''),\n(88, 'Tyler D', 'Raspberry Pi Forensics', '2015-10-28 09:51:41', '2015-10-27 17:14:59', 'mfortier', 'ascallion', 'Signed in to be signed out'),\n(52, 'Emily S', 'Rasp Pi Project', '2015-11-09 16:35:19', '2015-10-28 08:00:07', 'ascallion', 'ohatalsky', ''),\n(205, 'Emily S', 'Rasp Pi Project', '2015-12-09 09:04:11', '2015-10-28 08:00:39', 'ascallion', 'ascallion', ''),\n(122, 'Emily S', 'Rasp Pi Project', '2015-11-09 16:34:31', '2015-10-28 08:01:06', 'ascallion', 'ohatalsky', ''),\n(218, 'Emily S', 'Rasp Pi Project', '2015-10-28 09:54:36', '2015-10-28 08:01:49', 'ascallion', 'ascallion', ''),\n(88, 'Tyler D', 'Rasp Pi Project', '2015-10-28 12:10:56', '2015-10-28 09:51:56', 'ascallion', 'dparadise', ''),\n(139, '', 'Tech Jam Use', '2015-10-28 13:02:04', '2015-10-28 09:52:42', 'ascallion', 'bcampbell', ''),\n(86, 'Tyler D', 'Rasp Pi Project', '2015-10-28 11:25:08', '2015-10-28 09:53:36', 'ascallion', 'ascallion', 'GIven to me to use for file transfer then put away'),\n(81, 'Emily S', 'Rasp Pi Project', '2015-11-09 16:34:57', '2015-10-28 09:55:25', 'ascallion', 'ohatalsky', ''),\n(123, 'Tyler N', 'Rasp Pi Project', '2015-10-28 11:56:30', '2015-10-28 10:18:20', 'ascallion', 'ascallion', ''),\n(111, 'Tyler N', 'Rasp Pi Project', '2015-10-28 11:56:15', '2015-10-28 10:18:42', 'ascallion', 'ascallion', ''),\n(80, 'Joe W', 
'Give to Tyler N for Rasp Pi', '2015-10-28 11:52:31', '2015-10-28 10:27:15', 'ascallion', 'ascallion', ''),\n(124, 'Tyler N', 'Did not take full drive, only power cable.', '2015-11-02 07:52:18', '2015-10-28 11:10:21', 'ascallion', 'dhartman', ''),\n(73, 'Austin', 'Mac Forensics', '0000-00-00 00:00:00', '2015-10-28 18:06:45', 'jnicastro', 'N/A', ''),\n(1, 'Brent', 'Im testing the dirve', '2015-10-30 12:33:55', '2015-10-30 12:09:14', 'bfagersten', 'bfagersten', ''),\n(124, 'Parker', 'Permanently signed out for Game Chat', '2015-12-15 11:03:40', '2015-11-02 09:10:06', 'dhartman', 'jfernandez', ''),\n(121, 'Aaron L', 'Data Transfer', '2015-11-02 14:15:35', '2015-11-02 12:04:11', 'ascallion', 'ascallion', ''),\n(192, 'Cameron D', 'Project Use', '2015-11-02 14:54:31', '2015-11-02 12:39:00', 'ascallion', 'ascallion', ''),\n(66, 'Tyler D', 'Rasp Pi Project', '2015-12-16 15:35:41', '2015-11-02 14:14:52', 'ascallion', 'administrator', 'In Cabinet, Returned'),\n(201, 'Tyler D', 'Rasp Pi Project', '2015-12-09 09:05:48', '2015-11-02 14:15:10', 'ascallion', 'ascallion', ''),\n(118, 'Nicholas M.', 'Mobile Apps', '2015-11-02 17:51:36', '2015-11-02 14:18:57', 'ascallion', 'nmicallef', ''),\n(148, 'Nicholas M.', 'Mobile Apps', '2015-11-02 17:51:11', '2015-11-02 14:19:18', 'ascallion', 'nmicallef', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-11-02 17:50:47', '2015-11-02 14:19:38', 'ascallion', 'nmicallef', ''),\n(86, 'Tyler D', 'Rasp Pi Project', '2015-11-02 17:07:02', '2015-11-02 14:20:25', 'ascallion', 'nmicallef', ''),\n(121, 'Tyler D', 'Rasp Pi Project', '2015-11-20 15:25:38', '2015-11-02 14:39:48', 'ascallion', 'dparadise', ''),\n(97, 'Tyler D', '', '2015-11-02 14:40:21', '2015-11-02 14:40:09', 'ohatalsky', 'ohatalsky', ''),\n(97, 'David P', '', '2015-11-02 16:45:38', '2015-11-02 14:41:28', 'ohatalsky', 'dparadise', ''),\n(118, 'Nicholas M', '', '2015-11-03 14:45:04', '2015-11-03 11:06:25', 'jfernandez', 'dhartman', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-11-03 14:45:28', '2015-11-03 12:54:12', 'dhartman', 'dhartman', ''),\n(123, 'Tyler N', 'Rasp Pi Project', '2015-11-04 11:57:34', '2015-11-04 10:00:48', 'ascallion', 'ascallion', ''),\n(199, 'Tyler N', 'Rasp Pi Project', '2015-11-04 11:57:11', '2015-11-04 10:01:16', 'ascallion', 'ascallion', ''),\n(92, '', '', '0000-00-00 00:00:00', '2015-11-05 10:08:54', 'vchaudhari', 'N/A', ''),\n(218, '', 'Image Mac HDD', '2015-12-02 10:14:43', '2015-11-05 10:57:55', 'vchaudhari', 'ascallion', ''),\n(9, 'Tyler', '', '2015-11-09 14:30:14', '2015-11-06 11:51:09', 'cgreen', 'ascallion', ''),\n(139, 'Olivia H', '', '0000-00-00 00:00:00', '2015-11-09 14:28:45', 'ascallion', 'N/A', ''),\n(118, 'Nicholas M.', '', '2015-11-09 18:20:02', '2015-11-09 14:29:33', 'ascallion', 'nmicallef', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-11-09 18:20:28', '2015-11-09 14:30:25', 'ascallion', 'nmicallef', ''),\n(118, 'Nicholas M.', '', '2015-11-10 14:49:22', '2015-11-10 11:02:59', 'jfernandez', 'dhartman', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-11-10 14:49:57', '2015-11-10 11:04:18', 'jfernandez', 'dhartman', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-11-17 14:59:04', '2015-11-17 11:16:45', 'jfernandez', 'dhartman', ''),\n(117, 'Nicholas M.', 'Mobile Apps', '2015-11-17 14:21:32', '2015-11-17 11:19:45', 'jfernandez', 'dhartman', ''),\n(6, '', '1 Day, lost id', '2015-11-18 10:51:13', '2015-11-18 08:03:02', 'amaccarone', 'amaccarone', 'Returned'),\n(86, 'Mitch G', '', '2015-12-02 09:21:33', '2015-11-18 10:04:46', 'ascallion', 'ascallion', ''),\n(6, 'Tyler W', 'Temp Lab 
Access (Lost Card)', '2015-12-07 13:20:46', '2015-11-18 10:55:44', 'ascallion', 'ascallion', ''),\n(148, 'Nicholas M.', '', '2015-11-30 17:57:35', '2015-11-30 14:37:59', 'ascallion', 'nmicallef', ''),\n(118, 'Nicholas M.', '', '2015-11-30 17:56:56', '2015-11-30 14:39:13', 'ascallion', 'nmicallef', ''),\n(118, 'Nicholas M.', 'Mobile Apps', '2015-12-01 14:59:35', '2015-12-01 11:11:50', 'jfernandez', 'dhartman', ''),\n(118, 'Nicholas M.', 'Mobile Apps', '2015-12-08 15:15:35', '2015-12-01 11:11:50', 'jfernandez', 'dhartman', ''),\n(237, 'Mary R.', 'Mobile Device Certification', '2015-12-01 16:00:08', '2015-12-01 15:03:22', 'dhartman', 'dhartman', ''),\n(1, 'joe', 'i want this', '2015-12-01 18:00:40', '2015-12-01 17:59:47', 'jwilliams', 'jwilliams', ''),\n(157, 'Austin T.', '', '0000-00-00 00:00:00', '2015-12-02 11:18:20', 'ascallion', 'N/A', ''),\n(6, 'Andre M. ', '', '2015-12-07 15:43:55', '2015-12-07 13:20:56', 'ascallion', 'amaccarone', ''),\n(118, 'Nick Micallef', '', '2015-12-08 15:15:54', '2015-12-07 14:26:48', 'ascallion', 'dhartman', ''),\n(118, 'Nick Micallef', '', '2016-02-01 08:46:39', '2015-12-07 14:26:48', 'ascallion', 'ascallion', ''),\n(118, 'Nick Micallef', '', '2016-02-01 08:46:39', '2015-12-07 14:26:48', 'ascallion', 'ascallion', ''),\n(118, 'Nick Micallef', '', '2016-02-01 08:46:39', '2015-12-07 14:26:48', 'ascallion', 'ascallion', ''),\n(118, 'Nick Micallef', '', '2016-02-01 08:47:01', '2015-12-07 14:26:48', 'ascallion', 'ascallion', ''),\n(118, 'Nick Micallef', '', '2016-02-01 08:47:34', '2015-12-07 14:26:48', 'ascallion', 'ascallion', ''),\n(9, 'Nicholas M.', 'Mobile Apps', '2015-12-08 15:14:19', '2015-12-08 11:38:11', 'jfernandez', 'dhartman', ''),\n(148, 'Nicholas M.', 'Mobile Apps', '2015-12-08 15:14:56', '2015-12-08 11:40:41', 'jfernandez', 'dhartman', ''),\n(86, 'Mitch G', '', '2015-12-09 08:12:33', '2015-12-09 08:12:22', 'ascallion', '', ''),\n(66, 'A. 
Caron', 'Forensics', '2016-01-19 14:08:39', '2015-12-16 15:36:07', 'administrator', 'dparadise', 'Permanently signed out by FI.'),\n(51, '', 'Intake Documentation Cert', '2016-01-19 14:19:00', '2016-01-14 11:45:54', 'sbarrett', 'dparadise', 'Permanently signed out by FI.'),\n(263, 'Jakob Bonaccorsi', 'FOR 490', '2016-01-19 13:59:54', '2016-01-19 08:43:26', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(260, 'Kelsey Ward', 'FOR 490', '2016-01-19 14:00:49', '2016-01-19 08:44:54', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(265, 'FOR 490 Spare Drive', 'FOR 490', '2016-01-19 13:59:32', '2016-01-19 08:45:43', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(264, 'Michael Albrecht', 'FOR 490', '2016-01-19 13:59:42', '2016-01-19 08:46:17', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(262, 'Sean Boyle', 'FOR 490', '2016-01-19 14:00:17', '2016-01-19 08:46:55', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(266, 'Matthew Lantagne', 'FOR 490', '2016-01-19 13:59:19', '2016-01-19 08:47:20', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(267, 'FOR 490 Spare Drive', 'FOR 490', '2016-01-19 13:58:01', '2016-01-19 08:47:51', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(261, 'FOR 490 Spare Drive', 'FOR 490', '2016-01-19 14:00:37', '2016-01-19 08:48:35', 'sbarrett', 'dparadise', 'FOR490 - Permanent Sign-out.'),\n(234, 'LCDI Leadership', 'F-Response Test Computer 1', '2016-01-19 14:30:16', '2016-01-19 11:21:03', 'sbarrett', 'dparadise', 'Permanently signed out by FI.'),\n(112, 'LCDI Leadership', 'F-Response Test Computer 2', '2016-01-19 14:19:30', '2016-01-19 11:26:56', 'sbarrett', 'dparadise', 'Permanently signed out by FI.'),\n(252, 'David P', 'Capstone - Sign out til end of semester', '0000-00-00 00:00:00', '2016-01-19 13:34:43', 'dparadise', 'N/A', ''),\n(260, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:31:33', 'dparadise', 'N/A', ''),\n(261, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:32:15', 'dparadise', 'N/A', ''),\n(262, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:32:40', 'dparadise', 'N/A', ''),\n(263, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:32:51', 'dparadise', 'N/A', ''),\n(264, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:33:03', 'dparadise', 'N/A', ''),\n(265, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:33:14', 'dparadise', 'N/A', ''),\n(266, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:33:26', 'dparadise', 'N/A', ''),\n(267, 'David P', 'FOR490 - Permanent Sign-out.', '0000-00-00 00:00:00', '2016-01-19 14:33:37', 'dparadise', 'N/A', ''),\n(255, 'Talon', '', '0000-00-00 00:00:00', '2016-01-19 15:22:38', 'cgreen', 'N/A', ''),\n(254, 'Talon', '', '0000-00-00 00:00:00', '2016-01-19 15:23:25', 'cgreen', 'N/A', ''),\n(146, 'Talon', '', '0000-00-00 00:00:00', '2016-01-19 15:26:03', 'cgreen', 'N/A', ''),\n(273, 'Mitch', 'Cloud Forensics', '2016-01-25 12:16:01', '2016-01-19 15:27:06', 'cgreen', 'ascallion', ''),\n(258, 'Alex LaFleur', 'Capstone Permanent sign-out', '0000-00-00 00:00:00', '2016-01-21 13:19:15', 'dparadise', 'N/A', ''),\n(145, 'Mary Riley', '', '0000-00-00 00:00:00', '2016-01-22 14:17:35', 'cgreen', 'N/A', ''),\n(149, 'Mary Riley', 'Capstone', '0000-00-00 00:00:00', '2016-01-22 14:18:13', 'cgreen', 'N/A', ''),\n(83, '', 'Echo', '0000-00-00 
00:00:00', '2016-01-22 16:03:14', 'amaccarone', 'N/A', ''),\n(274, 'Chris P', 'Capstone', '0000-00-00 00:00:00', '2016-01-25 09:16:06', 'ascallion', 'N/A', ''),\n(257, 'Micth G.', 'Cloud Forensics', '2016-01-25 12:15:28', '2016-01-25 09:18:51', 'ascallion', 'ascallion', ''),\n(237, 'Mitch G', 'Cloud Forensics', '2016-01-25 12:15:45', '2016-01-25 09:19:40', 'ascallion', 'ascallion', ''),\n(250, 'Justin W.', '', '2016-01-26 11:02:23', '2016-01-26 08:39:47', 'jfernandez', 'jfernandez', ''),\n(9, 'Justin W.', '', '2016-01-26 11:01:52', '2016-01-26 08:40:49', 'jfernandez', 'jfernandez', ''),\n(253, 'Kelsey W.', '', '2016-01-26 11:35:11', '2016-01-26 08:41:31', 'jfernandez', 'jfernandez', ''),\n(117, 'Cameron D.', '', '2016-02-01 08:30:29', '2016-01-26 12:20:47', 'jfernandez', 'ascallion', ''),\n(118, 'Cam', 'Moble device extraction', '2016-02-01 08:47:34', '2016-01-26 12:45:16', 'jwilliams', 'ascallion', ''),\n(118, 'Cam', 'Moble device extraction', '2016-02-01 08:47:34', '2016-01-26 12:45:16', 'jwilliams', 'ascallion', ''),\n(118, 'Cam', 'Moble device extraction', '2016-02-01 08:47:34', '2016-01-26 12:45:16', 'jwilliams', 'ascallion', ''),\n(118, 'Cam', 'Moble device extraction', '2016-02-01 08:47:34', '2016-01-26 12:45:16', 'jwilliams', 'ascallion', ''),\n(118, 'Cam', 'Moble device extraction', '2016-02-01 08:47:34', '2016-01-26 12:45:16', 'jwilliams', 'ascallion', ''),\n(237, 'Mitch G', 'Cloud Forensics', '2016-02-04 15:13:52', '2016-01-26 15:03:27', 'cgreen', 'cgreen', ''),\n(257, 'mitch G', 'cloud forensics', '0000-00-00 00:00:00', '2016-01-26 15:06:05', 'cgreen', 'N/A', ''),\n(273, 'mitch G', 'Cloud Forensics', '2016-02-04 15:15:23', '2016-01-26 15:06:45', 'cgreen', 'cgreen', ''),\n(123, 'Michael C.', '', '2016-01-27 15:40:48', '2016-01-27 14:33:39', 'jfernandez', 'jfernandez', ''),\n(253, 'Matt L.', 'Amazon Echo', '2016-02-02 08:47:08', '2016-01-27 14:58:55', 'jfernandez', 'jfernandez', ''),\n(32, 'Tyler W', 'Splunk Team', '2016-01-28 13:32:19', '2016-01-28 11:40:39', 'ohatalsky', 'dparadise', ''),\n(122, 'Tyler W', 'Splunk Team', '2016-01-28 13:32:36', '2016-01-28 11:41:59', 'ohatalsky', 'dparadise', ''),\n(138, 'Chris P', '', '2016-02-01 12:03:13', '2016-02-01 08:29:37', 'ascallion', 'ascallion', ''),\n(214, 'Chris P', '', '2016-02-01 09:18:18', '2016-02-01 08:30:07', 'ascallion', 'ascallion', ''),\n(117, 'Chris P', '', '2016-02-01 12:02:23', '2016-02-01 08:30:43', 'ascallion', 'ascallion', ''),\n(241, 'Chris P', '', '0000-00-00 00:00:00', '2016-02-01 09:18:07', 'ascallion', 'N/A', ''),\n(214, '', '', '2016-02-01 11:57:42', '2016-02-01 11:57:32', 'ascallion', 'ascallion', ''),\n(214, 'Chris Pazden', '', '2016-02-01 12:02:52', '2016-02-01 11:57:46', 'ascallion', 'ascallion', ''),\n(250, 'Justin W.', 'Wearable ', '2016-02-01 15:37:11', '2016-02-01 14:00:19', 'jcastro', 'jwilliams', ''),\n(9, 'Justin W.', 'Wearable ', '2016-02-01 15:37:45', '2016-02-01 14:01:31', 'jcastro', 'jwilliams', ''),\n(209, '', 'IOT ', '2016-02-01 16:05:12', '2016-02-01 14:21:14', 'jcastro', 'jwilliams', ''),\n(214, 'Zach S', 'testing FI', '2016-02-01 16:58:12', '2016-02-01 15:27:26', 'jwilliams', 'acaron', 'Returned to Cabinet'),\n(138, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:44:35', 'jfernandez', 'N/A', ''),\n(151, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:45:04', 'jfernandez', 'N/A', ''),\n(20, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:45:23', 'jfernandez', 'N/A', ''),\n(20, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:45:23', 'jfernandez', 'N/A', ''),\n(253, 
'Kelsey W.', 'Amazon Echo', '2016-02-02 11:15:43', '2016-02-02 08:47:57', 'jfernandez', 'jfernandez', ''),\n(18, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:49:13', 'jfernandez', 'N/A', ''),\n(18, 'Scott B.', '', '0000-00-00 00:00:00', '2016-02-02 08:49:13', 'jfernandez', 'N/A', ''),\n(209, 'MaryBraden Murphy', 'IOT', '2016-02-03 11:46:49', '2016-02-03 10:40:39', 'svashaw', 'svashaw', 'ITO'),\n(117, 'Chris P.', '', '2016-02-03 15:37:53', '2016-02-03 12:41:02', 'jfernandez', 'jfernandez', ''),\n(118, 'Chris P.', '', '2016-02-03 15:38:31', '2016-02-03 12:41:28', 'jfernandez', 'jfernandez', ''),\n(253, 'Kelsey W.', 'Amazon Echo', '2016-02-09 11:49:01', '2016-02-03 14:13:08', 'jfernandez', 'jfernandez', ''),\n(253, 'Kelsey W.', 'Amazon Echo', '0000-00-00 00:00:00', '2016-02-03 14:13:08', 'jfernandez', 'N/A', ''),\n(116, 'Kelsey W.', 'Amazon Echo', '2016-02-03 15:39:02', '2016-02-03 15:22:33', 'jfernandez', 'jfernandez', ''),\n(117, 'Kelsey W.', 'Amazon Echo', '2016-02-09 11:48:11', '2016-02-09 10:43:30', 'jfernandez', 'jfernandez', ''),\n(117, 'Kelsey W.', 'Amazon Echo', '2016-02-11 13:58:34', '2016-02-09 10:43:30', 'jfernandez', 'dparadise', ''),\n(118, 'Kelsey W.', '', '2016-02-09 11:48:39', '2016-02-09 10:46:32', 'jfernandez', 'jfernandez', ''),\n(118, 'Kelsey W.', 'Amazon Echo', '2016-02-10 14:32:47', '2016-02-09 10:46:32', 'jfernandez', 'jfernandez', ''),\n(118, 'Kelsey W.', 'Amazon Echo', '2016-02-11 14:00:13', '2016-02-09 10:46:32', 'jfernandez', 'dparadise', ''),\n(117, 'Chris P.', '', '2016-02-11 13:59:00', '2016-02-09 12:12:55', 'jfernandez', 'dparadise', ''),\n(118, 'Chris P.', '', '2016-02-11 14:00:38', '2016-02-09 12:13:53', 'jfernandez', 'dparadise', ''),\n(97, '', 'Wearable Tech', '0000-00-00 00:00:00', '2016-02-09 12:17:02', 'cdumont', 'N/A', ''),\n(280, 'Kayla W', 'IOS9 Jailbreak', '2016-02-09 14:30:14', '2016-02-09 12:46:48', 'egoeben', 'cgreen', ''),\n(281, 'Michael', '', '0000-00-00 00:00:00', '2016-02-09 14:45:27', 'cgreen', 'N/A', ''),\n(280, 'Nicholas M.', '', '2016-02-10 14:32:12', '2016-02-10 13:53:26', 'jfernandez', 'jfernandez', ''),\n(280, 'Kayla W', 'iOS9', '2016-02-11 15:43:27', '2016-02-11 13:47:40', 'dparadise', 'bfagersten', 'looks good'),\n(117, 'Zack S', '', '0000-00-00 00:00:00', '2016-02-11 13:59:11', 'dparadise', 'N/A', ''),\n(118, 'Zack S', '', '0000-00-00 00:00:00', '2016-02-11 14:00:54', 'dparadise', 'N/A', ''),\n(280, 'Joe', 'Jailbreak', '2016-02-12 14:40:57', '2016-02-12 14:00:35', 'ohatalsky', 'mfortier', 'Done for the day');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `Inventory`\n--\n\nCREATE TABLE IF NOT EXISTS `Inventory` (\n `ID` int(255) NOT NULL AUTO_INCREMENT,\n `SerialNumber` varchar(255) NOT NULL,\n `DeviceSerial` varchar(255) NOT NULL,\n `Type` varchar(255) NOT NULL,\n `Description` text NOT NULL,\n `Issues` text NOT NULL,\n `PhotoName` varchar(255) NOT NULL,\n `State` varchar(255) NOT NULL,\n PRIMARY KEY (`ID`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=282 ;\n\n--\n-- Dumping data for table `Inventory`\n--\n\nINSERT INTO `Inventory` (`ID`, `SerialNumber`, `DeviceSerial`, `Type`, `Description`, `Issues`, `PhotoName`, `State`) VALUES\n(1, 'LCDI 06999', '0000-TEST', 'TEST', 'TEST', 'Decommissioned', 'images.jpg', 'decommissioned'),\n(2, 'LCDI07001', '', 'Google Glass', 'White Google Glasses', '', 'IMG_9778.JPG', 'operational'),\n(3, 'LCDI07000', '', 'Google Glass', 'Orange Google Glasses', '', 'IMG_9777.JPG', 'operational'),\n(4, 'LCDI07253', '0903b4a9', 'Nexus', 
'Black Nexus 7', '', 'LCDI07002.JPG', 'operational'),\n(5, 'LCDI07003', '04648 11153283-1', 'Key Card', 'White Access Key #2', '', 'LCDI07003.JPG', 'operational'),\n(6, 'LCDI07004', '04650 11153283-1', 'Key Card', 'White Key Card #1', '', 'IMG_9781.JPG', 'operational'),\n(7, 'LCDI07005', '', 'iPad', '16GB Apple iPad, Silver back', '', 'IMG_9785.JPG', 'operational'),\n(8, 'LCDI07006', 'FQ051EGAZ38', 'iPad', '16GB Apple iPad, Silver back', '', 'IMG_9786.JPG', 'operational'),\n(9, 'LCDI07007', 'C39K2FY8DTTP', 'iPhone', 'iPhone 5, White, 16GB', 'Part #: MD639LL/A\\r\\n', 'LCDI07007.JPG', 'operational'),\n(10, 'LCDI07008', '019d86de09115c0b', 'Nexus', 'Nexus 5, 32GB, black', '', 'IMG_9788.JPG', 'operational'),\n(11, 'LCDI07009', 'DMPHNN91DJ8T', 'iPad', '16GB Apple iPad, Silver back, Grey cover', '', 'IMG_9789.JPG', 'operational'),\n(12, 'LCDI07010', 'DMPHPT8QDVD1', 'iPad', '16GB iPad, silver back, green cover', '', 'IMG_9790.JPG', 'operational'),\n(13, 'LCDI07011', 'DMQHNATPDJ8T', 'iPad', '16GB iPad, silver back, ', '', 'IMG_9791.JPG', 'operational'),\n(14, 'LCDI07012', '', 'iPhone', 'iPhone 3, 32GB', 'Assumed Missing or Destroyed -A.S. 11/30', 'LCDI07012.JPG', 'decommissioned'),\n(15, 'LCDI07013', '', 'Phone', 'Windows (Dell) Phone Black back, slides up', '', 'IMG_9794.JPG', 'operational'),\n(16, 'LCDI07014', 'L6X7NB9340815087', 'Phone', 'Huawei Fusion 2, Black back', 'Assumed Missing or Destroyed -A.S. 11/30', 'LCDI07014.JPG', 'decommissioned'),\n(17, 'LCDI07015', '012659007580839', 'iPhone', '8GB iPhone 3, black w/box', '', 'IMG_9795.JPG', 'operational'),\n(18, 'LCDI07016', '351700051344109', 'Phone', 'Nokia Lumia 800, blue with blue case', '', 'IMG_9796.JPG', 'operational'),\n(19, 'LCDI07017', 'L6X7NB1280617943', 'Phone', 'Fusion 2 AT&T, black back', 'Assumed Missing or Destroyed. -A.S. 11/30', 'LCDI07017.JPG', 'decommissioned'),\n(20, 'LCDI07018', '', 'Phone', 'LG Optimus F7, black back', '', 'IMG_9797.JPG', 'operational'),\n(21, 'LCDI07019', '5NJ100ZW', 'Hard Drive', 'Laptop Hard Drive, Seagate Momentus 120 GB', '', 'IMG_6682.JPG', 'decommissioned'),\n(22, 'LCDI07020', 'NW54T6225L6J', 'Hard Drive', 'Laptop Hard Drive. Fujitsu Brand. 80GB', '', 'IMG_9798.JPG', 'operational'),\n(23, 'LCDI07023', 'NW54T6225L6P', 'Hard Drive', 'Laptop Hard Drive. Fujitsu Brand. 80 GB', '', 'IMG_9801.JPG', 'operational'),\n(24, 'LCDI07022', 'NW54T6526FET', 'Hard Drive', 'Laptop Hard Drive. Fujitsu Brand. 80 GB', '', 'IMG_9800.JPG', 'operational'),\n(25, 'LCDI07021', 'NW54T6225GD8', 'Hard Drive', 'Laptop Hard Drive. Fujitsu Brand. 
80 GB', '', 'IMG_9799.JPG', 'operational'),\n(26, 'LCDI07030', 'WMANS2363802', 'Hard Drive', 'Western Digital WD800ADFS 80GB Hard Drive', '', 'IMG_6690.JPG', 'operational'),\n(27, 'LCDI07032', 'WMANS2361411', 'Hard Drive', 'Western Digital WD800ADFS 80GB Hard Drive', '', 'IMG_6688.JPG', 'operational'),\n(28, 'LCDI07035', '6RYBF3B2', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6694.JPG', 'operational'),\n(29, 'LCDI07038', '6RYBF2WD', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6691.JPG', 'operational'),\n(30, 'LCDI07036', 'WMAM9P734470', 'Hard Drive', 'Western Digital WD800 80GB Hard Drive', '', 'IMG_6693.JPG', 'operational'),\n(31, 'LCDI07034', 'WMAM98486106', 'Hard Drive', 'Western Digital WD800 80GB Hard Drive', '', 'IMG_6695.JPG', 'operational'),\n(32, 'LCDI07037', 'WMAM98102320', 'Hard Drive', 'Western Digital WD800 80GB Hard Drive', '', 'IMG_6692.JPG', 'operational'),\n(33, 'LCDI07031', 'WCC1S2932576', 'Hard Drive', 'Western Digital WD5003AZEX 500GB Hard Drive', '', 'IMG_6689.JPG', 'operational'),\n(34, 'LCDI07033', 'WCANM2385066', 'Hard Drive', 'Western Digital WD1600 160 GB Hard Drive', '', 'IMG_6687.JPG', 'operational'),\n(35, 'LCDI07043', '6RYBF3RX', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6703.JPG', 'operational'),\n(36, 'LCDI07044', 'WCAU4C161834', 'Hard Drive', 'Western Digital WD10EAVS 1 TB Hard Drive', 'Failed to Wipe Numerous times, bad sectors.', 'IMG_6702.JPG', 'decommissioned'),\n(37, 'LCDI07039', 'WMAM97147404', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6699.JPG', 'operational'),\n(38, 'LCDI07042', 'WMAHL1687771', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6696.JPG', 'operational'),\n(39, 'LCDI07041', 'Y2G59ACE', 'Hard Drive', 'Maxtor DiamondMax Plus 9 80 GB Hard Drive', '', 'IMG_6697.JPG', 'operational'),\n(40, 'LCDI07046', 'WMAP92237644', 'Hard Drive', 'Western Digital WD1600AAJS 160 GB Hard Drive', '', 'IMG_6700.JPG', 'operational'),\n(41, 'LCDI07047', 'WMAM9RX17271', 'Hard Drive', 'Western Digital WD800JD 80 GB Hard Drive', '', 'IMG_6701.JPG', 'operational'),\n(42, 'LCDI07040', 'WMAM9Z202390', 'Hard Drive', 'Western Digital WD800JD 80 GB Hard Drive', '', 'IMG_6698.JPG', 'operational'),\n(43, 'LCDI07045', 'WMATV7508889', 'Hard Drive', 'Western Digital WD1001FALS 1 TB Hard Drive', '', 'IMG_6711.JPG', 'operational'),\n(44, 'LCDI07048', 'WCATRC351392', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_6710.JPG', 'decommissioned'),\n(45, 'LCDI07049', 'WMAM94955517', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6709.JPG', 'operational'),\n(46, 'LCDI07050', 'WMAM98485951', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6708.JPG', 'operational'),\n(47, 'LCDI07051', 'WMANS2364021', 'Hard Drive', 'Western Digital WD800ADFS 80 GB Hard Drive', '', 'IMG_6707.JPG', 'operational'),\n(48, 'LCDI07052', '6QZ2E7RY', 'Hard Drive', 'Seagate Barracuda 7200 80 GB Hard Drive', '', 'IMG_6706.JPG', 'decommissioned'),\n(49, 'LCDI07053', '6RYBF3EC', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6705.JPG', 'decommissioned'),\n(50, 'LCDI07056', '6RYBF2SP', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6704.JPG', 'decommissioned'),\n(51, 'LCDI07068', 'WMAM98102479', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6720.JPG', 'operational'),\n(52, 'LCDI07066', 'WMAM9N275565', 'Hard Drive', 'Western Digital WD800 80 GB Hard Drive', '', 'IMG_6712.JPG', 
'operational'),\n(53, 'LCDI07065', 'Y2G3ZE4E', 'Hard Drive', 'Maxtor DiamondMax Plus 9 80 GB Hard Drive', '', 'IMG_6713.JPG', 'decommissioned'),\n(54, 'LCDI07064', 'Y2D7TA9E', 'Hard Drive', 'Maxtor DiamondMax Plus 9 80 GB Hard Drive', '', 'IMG_6714.JPG', 'decommissioned'),\n(55, 'LCDI07061', 'Y2QCPQBE', 'Hard Drive', 'Maxtor DiamondMax Plus 9 80 GB Hard Drive', '', 'IMG_6913.JPG', 'decommissioned'),\n(56, 'LCDI07063', 'T6HD9YAC', 'Hard Drive', 'Maxtor 5T060H6 60 GB Hard Drive', '', 'IMG_6911.JPG', 'decommissioned'),\n(57, 'LCDI07060', 'WMAM9RS65933', 'Hard Drive', 'Western Digital WD800JB 80 GB Hard Drive', '', 'IMG_6718.JPG', 'decommissioned'),\n(58, 'LCDI07067', '5QE3PME8', 'Hard Drive', 'Seagate Barracuda 7200 250 GB Hard Drive', '', 'IMG_6719.JPG', 'decommissioned'),\n(59, 'LCDI07062', 'WMA6Y5182818', 'Hard Drive', 'Western Digital WD200 20 GB Hard Drive', '', 'IMG_6912.JPG', 'decommissioned'),\n(60, 'LCDI07069', 'L4JJNY6C', 'Hard Drive', 'Hitachi Travlestar 60 GB Laptop Hard Drive', '', 'IMG_6726.JPG', 'operational'),\n(61, 'LCDI07070', 'Y2NC6ESE', 'Hard Drive', 'Maxtor DiamondMax Plus 9 80 GB Hard Drive', '', 'IMG_6725.JPG', 'decommissioned'),\n(62, 'LCDI07071', 'WCAM9K265959', 'Hard Drive', 'Western Digital WD800JB 80 GB Hard Drive', '', 'IMG_6724.JPG', 'decommissioned'),\n(63, 'LCDI07072', 'WCAM9V418911', 'Hard Drive', 'Western Digital WD800JB 80 GB Hard Drive', '', 'IMG_6723.JPG', 'operational'),\n(64, 'LCDI07073', '3HS455FH', 'Hard Drive', 'Seagate Barracuda ATA IV 40 GB Hard Drive', '', 'IMG_6722.JPG', 'decommissioned'),\n(65, 'LCDI07074', 'WCAM9V415543', 'Hard Drive', 'Western Digital WD800JB 80 GB Hard Drive', '', 'IMG_6721.JPG', 'decommissioned'),\n(66, 'LCDI07075', 'WXE1A1309214', 'External Drive', '1 TB WD My Passport drive', 'Permanently signed out by FI.', 'IMG_9898.JPG', 'operational'),\n(67, 'LCDI07093', '', 'Write Blocker', 'Write Blocker # 9 ', '', 'IMG_9846.JPG', 'operational'),\n(68, 'LCDI07091', '', 'Write Blocker', 'Write Blocker # 13', '', 'IMG_9845.JPG', 'operational'),\n(69, 'LCDI07092', '', 'Write Blocker', 'Forensic Dock #12', '', 'IMG_9844.JPG', 'operational'),\n(70, 'LCDI07090', '', 'Tool kit', 'Fellowes Tool kit', '', 'IMG_9840.JPG', 'operational'),\n(71, 'LCDI07084', '', 'Tool kit', 'IFixIt Tool Kit', '', 'IMG_6636.JPG', 'operational'),\n(72, 'LCDI07083', '', 'Tool kit', 'IFixIt Tool Kit', '', 'IMG_6746.JPG', 'operational'),\n(73, 'LCDI07123', '', 'Cable', 'Apple Thunderbolt Cable (0.5 m)', 'Part NO. MD862ZM/A Model NO. 
A1410', 'LCDI07123.JPG', 'operational'),\n(74, 'LCDI07086', '', 'Cable', 'Thunderbolt to FireWire Adapter W/ Box', '', 'IMG_9837.jpg', 'operational'),\n(75, 'LCDI07078', '', 'Label Maker', 'Brother Label Maker', '', 'IMG_6730.JPG', 'operational'),\n(76, 'LCDI07085', 'WX11A3307541', 'External Drive', '1 TB WD My Passport External Drive', '', 'IMG_9836.jpg', 'operational'),\n(77, 'LCDI07081', 'WX41A331317', 'External Drive', '1 TB WD My Passport External Drive', 'Permanently signed out by FI.', 'IMG_6733.JPG', 'operational'),\n(78, 'LCDI07080', 'WX41A331859', 'External Drive', '1 TB WD My Passport External Drive', '', 'IMG_6732.JPG', 'operational'),\n(79, 'LCDI07076', 'WXC1EC2VV834', 'External Drive', '1 TB WD My Passport External Drive', '', 'IMG_6728.JPG', 'operational'),\n(80, 'LCDI07079', '11597800680', 'Wireless Router', 'TP-Link Wireless Router W/ Power Cable', '', 'IMG_6731.JPG', 'operational'),\n(81, 'LCDI07077', 'DS-US007U3', 'Hard Drive Docking Kit', 'Masscool Hard drive docking station', '', 'IMG_0023.JPG', 'operational'),\n(82, 'LCDI07089', '', 'Hard Drive Docking Kit', 'Star Tech mSATA to 2.5in SATA SSD Converter/Enclosure\\r\\nPart#: SAT2MSAT25', 'Converts an mSATA mini-SSD into a standard 2.5in SATA SSD', 'IMG_9832.jpg', 'operational'),\n(83, 'LCDI07088', 'CB22774616', 'Laptop', 'Lenovo Laptop 02', '', 'IMG_9825_.jpg', 'operational'),\n(84, 'LCDI07087', '', 'Laptop', 'Dell Precision Laptop', '', 'IMG_982.jpg', 'operational'),\n(85, 'LCDI07094', '', 'Head Phones', 'Black Logitech Head phones', '', 'IMG_6747.JPG', 'operational'),\n(86, 'LCDI07097', '', 'Card Reader', 'green IOGEAR card reader', '', 'IMG_9848.JPG', 'operational'),\n(87, 'LCDI07095', 'WX41A3H0119', 'External Drive', '1 TB WD My passport external drive', 'Permanently signed out by FI.', 'IMG_6748.JPG', 'operational'),\n(88, 'LCDI07098', 'C02mR137G085', 'Laptop', 'Apple MacBook Air', '1.3 GHz Intel Core i5 processor\\r\\n4GB RAM\\r\\n128GB SSD.', 'IMG_9826_.jpg', 'operational'),\n(89, 'LCDI07102', '', 'Camera', 'Canon Camera', '', 'None.gif', 'operational'),\n(90, 'LCDI07099', '', 'Flash Drive', 'Kingston 32 GB Flash Drive', '', 'IMG_9847.JPG', 'operational'),\n(91, 'LCDI07096', '', 'Flash Drive', 'Kingston 32 GB Flash Drive', '', 'IMG_6752.JPG', 'operational'),\n(92, 'LCDI070100', '', 'Flash Drive', 'Kingston 32 GB Flash Drive', '', 'IMG_6754.JPG', 'operational'),\n(93, 'LCDI07101', '', 'Flash Drive', 'Kingston 32 GB Flash Drive', '', 'IMG_6751.JPG', 'operational'),\n(94, 'LCDI07103', '', 'Laptop', 'Toshiba \\r\\nDisplay Laptop 1\\r\\nUsername: laptop-03', '', 'IMG_9850.JPG', 'operational'),\n(95, 'LCDI07104', '', 'Laptop', 'Toshiba \\r\\nDisplay laptop 3\\r\\n', '', 'None.gif', 'operational'),\n(96, 'LCDI07105', '', 'Laptop', 'Display Laptop 3', '', 'None.gif', 'operational'),\n(97, 'LCDI07106', '', 'Laptop', 'Toshiba Laptop 04', 'Permanent Sign-out - Cameron D', 'IMG_9828.jpg', 'operational'),\n(98, 'LCDI07107', '', 'Head Phones', 'Black Logitech Head phones', '', 'IMG_6755.JPG', 'operational'),\n(99, 'LCDI07108', '', 'Laptop', 'Silver MacBook Pro', '', 'IMG_9827_.jpg', 'operational'),\n(100, 'LCDI07109', '', 'Head Phones', 'Black Logitech Head phones', '', 'None.gif', 'operational'),\n(101, 'LCDI07110', '', 'Head Phones', 'Black Logitech Head phones', '', 'LCDI07110.JPG', 'operational'),\n(102, 'LCDI07111', '', 'Head Phones', 'Black Logitech Head phones', '', 'None.gif', 'operational'),\n(103, 'LCDI07112', '', 'Head Phones', 'Black Logitech Head phones', '', 'LCDI07112.JPG', 'operational'),\n(104, 'LCDI07113', 
'', 'Head Phones', 'Black Logitech Head phones', '', 'LCDI07113.JPG', 'operational'),\n(105, 'LCDI07120', '5E040146P', 'Laptop', 'Black, 15.6\"\\r\\nToshiba Satellite C55t-B5110\\r\\nPart NO. PSCMQU-00T005\\r\\nToshiba Laptop 08\\r\\n', 'Windows 8.1\\r\\n4GB RAM\\r\\n750 GB HDD', 'IMG_9853.JPG', 'operational'),\n(106, 'LCDI07082', '', 'Cable', 'Apple Thunderbolt Cable (0.5M) W/ box. ', 'Part NO. MD862ZM/A\\r\\nModel NO. A1410', 'IMG_9858.JPG', 'operational'),\n(107, 'LCDI07122', 'S1DKNEAF404800L', 'Solid State Drive', 'Samsung SSD 840 EVO\\r\\n500GB\\r\\nw/ box.', 'Part#: MZ7TE500HMHP\\r\\nModel#: MZ-7TE500', 'LCDI07122.JPG', 'operational'),\n(108, 'LCDI07124', 'S1DKNEAF404802Y', 'Solid State Drive', 'Samsung SSD 840 EVO\\r\\n500G\\r\\nw/ box.', 'Part#: MZ7TE500HMHP\\r\\nModel#: MZ-7TE500', 'LCDI07124.JPG', 'operational'),\n(109, 'LCDI-07159', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(9).jpg', 'operational'),\n(110, 'LCDI07160', 'UM1412A800185', 'Wireless Router', 'Green Trendnet wireless router', 'In box', 'IMG_9889.JPG', 'operational'),\n(111, 'LCDI07161', 'WCC4E1980439', 'Hard Drive', '4 TB', '', 'IMG_6807.JPG', 'decommissioned'),\n(112, 'LCDI07162', 'S1ATNEAD641901V', 'Solid State Drive', 'Samsung 840 Pro SSD - 256GB', 'Permanently signed out by FI.', 'IMG_6843.JPG', 'operational'),\n(113, 'LCDI07163', 'WMATV3981471', 'Hard Drive', 'Western Digital WD1001FALS 1 TB Hard Drive', '', 'IMG_9838.jpg', 'operational'),\n(114, 'LCDI07164', 'WCATR4954013', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', 'Permanently signed out by FI.', 'IMG_6841.JPG', 'operational'),\n(115, 'LCDI07165', 'WCAWFD575006', 'Hard Drive', 'Western Digital WD3200AAKS 320 GB Hard Drive', '', 'IMG_6840.JPG', 'operational'),\n(116, 'LCDI07166', '', 'XRY', 'XRY Complete Kit', '', 'IMG_9856.JPG', 'operational'),\n(117, 'LCDI07167', '', 'Cellebrite', 'Cellebrite Device Kit', '', 'IMG_6856.JPG', 'operational'),\n(118, 'LCDI07168', '', 'Cellebrite', 'Cellebrite USB Dongle', '', 'IMG_9859.JPG', 'operational'),\n(119, 'LCDI07190', '', 'Internet Evidence Finder', 'Flash Drive', 'Internet Evidence Finter v5 by Magnet Forensics (Decommissioned)', '14 - 1.jpg', 'decommissioned'),\n(120, 'LCDI07172', '', 'Write Blocker', 'weibeTECH Forensic UltraDock v5', 'Decommissioned\\r\\n', 'IMG_6893.JPG', 'decommissioned'),\n(121, 'LCDI07171', '', 'Write Blocker', 'weibeTECH Forensic UltraDock v5', '', 'IMG_9829.jpg', 'operational'),\n(122, 'LCDI07170', '', 'Write Blocker', 'weibeTECH Forensic UltraDock v5', '', 'IMG_9831.jpg', 'operational'),\n(123, 'LCDI07169', '', 'Write Blocker', 'weibeTECH Forensic UltraDock v5', '', 'IMG_9830.jpg', 'operational'),\n(124, 'LCDI07173', 'WCC4E1980439', 'External Drive', 'Western Digital My Book External Hard Drive 4 TB', '', 'IMG_9860.JPG', 'operational'),\n(125, 'LCDI00051', 'WCATRC340477', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_6910.JPG', 'decommissioned'),\n(126, 'LCDI07158', 'WCATR5064441', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_9839.jpg', 'operational'),\n(127, 'LCDI07157', 'WMATV7678660', 'Hard Drive', 'Western Digital WD1001FALS 1 TB Hard Drive', 'Sean Oliver', 'IMG_6897.JPG', 'operational'),\n(128, 'LCDI00106', 'WCATR5064582', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_6908.JPG', 'decommissioned'),\n(129, 'LCDI07176', 'WCATRC251333', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', 'Decommissioned\\r\\n', 'IMG_6909.JPG', 'decommissioned'),\n(130, 'LCDI07175', 
'WCATR5075414', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', ' LCDI-HDD-011 (Decommissioned)', 'IMG_6899.JPG', 'decommissioned'),\n(131, 'LCDI00005', 'WCATR48880404', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', 'Could not find, assumed destroyed. -A.S. 11/30', 'IMG_6901.JPG', 'decommissioned'),\n(132, 'LCDI00052', 'WCATRC302252', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', 'Decommissioned', 'None.gif', 'decommissioned'),\n(133, 'LCDI00144', 'WCC1S2827482', 'Hard Drive', 'Western Digital WD5003AZEX 500 GB Hard Drive', 'Chad Waibel', 'IMG_6903.JPG', 'decommissioned'),\n(134, 'LCDI07174', 'WCATRC341417', 'Hard Drive', 'Western Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_6904.JPG', 'decommissioned'),\n(135, 'LCDI00053', 'WCATRC287807', 'Hard Drive', 'WD Black 1TBWestern Digital WD1002FAEX 1 TB Hard Drive', '', 'IMG_6905.JPG', 'decommissioned'),\n(136, 'LCDI00145', 'WMATV5398016', 'Hard Drive', 'Western Digital WD1001FALS 1 TB Hard Drive', '', 'IMG_6906.JPG', 'decommissioned'),\n(137, '039534051547', '', 'Coffee', '12 OZ Coffee Cup', 'Warning: Contents may be hot. \\r\\nDecommissioned', 'None.gif', 'decommissioned'),\n(138, 'LCDI07156', 'DYVHQC7XDJ8T', 'iPad', 'Ipad with pink cover', '', 'IMG_9903.JPG', 'operational'),\n(139, 'LCDI07155', 'DMPHPQ6TDVD1', 'iPad', 'Ipad with green cover ', 'Has screen cracks', 'None.gif', 'operational'),\n(140, 'LCDI07154', 'DMQHNGYTDJ8T', 'iPad', 'Black Ipad', '', 'None.gif', ''),\n(141, 'LCDI07177', 'DMPHPNPYDVD1', 'iPad', 'White Ipad', '', 'IMG_9904.JPG', 'operational'),\n(142, 'LCDI07178', 'WMATV7674731', 'Hard Drive', 'Western Digital 1 TB Hard Drive', '', 'None.gif', 'decommissioned'),\n(143, 'LCDI07179', 'WCATR5064582', 'Hard Drive', '1 TB Western Digital Hard Drive', 'Decommissioned', 'None.gif', 'decommissioned'),\n(144, 'LCDI07150', 'WMATV5398016', 'Hard Drive', '1 TB Western Digital HD', '', 'None.gif', 'decommissioned'),\n(145, 'LCDI07300', 'D8OKBC769309', 'Nexus', 'Nexus 7 ASUS Black 16 GB', 'Missing charger brick', 'IMG_0031.JPG', 'operational'),\n(146, 'LCDI07301', 'D8OKBC769432', 'Nexus', 'Nexus 7 ASUS Black 16GB', '', 'IMG_0029.JPG', 'operational'),\n(147, 'LCDI07302', 'D8OKBC769275', 'Nexus', 'Nexus 7 ASUS Black 16GB', '', 'IMG_0030.JPG', 'operational'),\n(148, 'LCDI07303', 'D8OKBC758303', 'Nexus', 'Nexus7 ASUS Black 16 GB', '', 'IMG_9864.JPG', 'operational'),\n(149, 'LCDI07304', 'D8OKBC776678', 'Nexus', 'Nexus 7 ASUS Black 16 GB', '', 'IMG_7200.JPG', 'operational'),\n(150, 'LCDI07305', 'D8OKBC769433', 'Nexus', 'Nexus 7 ASUS Black 16 GB', '', 'IMG_7201.JPG', 'operational'),\n(151, 'LCDI00157', '871166KEEDG', 'iPhone', 'Apple iPhone 3GS 8GB Black Cell Phone', '', 'IMG_9884.JPG', 'operational'),\n(152, 'LCDI07308', '34BIP1EPTYN7', 'Hard Drive', 'TOSHIBA MQ01ABD075 750GB 5400RPM 8MB Cache Sata II 3.0Gb/s 2.5\" HDD', 'Decommissioned\\r\\n', 'IMG_7281.JPG', 'decommissioned'),\n(153, 'LCDI07306', '34H2PZGBTYN7', 'iPhone', 'TOSHIBA MQ01ABD075 750GB 5400RPM 8MB Cache Sata II 3.0Gb/s 2.5\" HDD\\r\\n', 'Decommissioned', 'LCDI07306.JPG', 'decommissioned'),\n(154, 'LCDI07307', '34CQTIWITYN7', 'iPhone', 'TOSHIBA MQ01ABD075 750GB 5400RPM 8MB Cache Sata II 3.0Gb/s 2.5\" HDD\\r\\n', 'Decommissioned', 'LCDI07307.JPG', 'decommissioned'),\n(155, '', '', 'Internet Evidence Finder', '', '', 'None.gif', 'operational'),\n(156, 'LCDI07309', '', 'Laptop', 'Toshiba Satellite C55t-B5110\\r\\nToshiba Laptop 6 ', '4GB RAM, 750GB Storage, Windows 8.1', 'satellite.JPG', 'operational'),\n(157, 'LCDI07182', 'WX41A4331859', 
'External Drive', 'My Passport External Drive', '', 'IMG_9833.jpg', 'operational'),\n(158, 'LCDI07153', 'WX31E1347531', 'External Drive', 'My Passport Drive', 'Decommissioned', 'IMG_7311.JPG', 'decommissioned'),\n(159, 'LCDI07126', 'WX31E1397399', 'External Drive', 'My Passport Drive', '', 'IMG_7313.JPG', 'operational'),\n(160, 'LCDI07129', 'WX41A43H0526', 'External Drive', 'My Passport Drive', 'Decommissioned', 'IMG_7314.JPG', 'decommissioned'),\n(161, 'LCDI07151', '', 'Phone', 'Black Nokia Lumia', '', 'IMG_9892.JPG', 'operational'),\n(162, 'LCDI07152', '', 'Password Cracker', '1 Black Iphone Password Cracker W/ Iphone 4 cable, Iphone 5 cable, and micro usb and cord attachments for device.', '', 'IMG_9835.jpg', 'operational'),\n(163, 'LCDI07180', '', 'Password Cracker', 'MFC Dongle with Android, iPhone 4 and iPhone 5 Cables. As well as a small 2 pin adapter cable.\\r\\nUSB with micro chip and small foam box with 3 pin adapter. ', 'Decommissioned', 'None.gif', 'decommissioned'),\n(164, 'LCDI07191', 'CNU443CD1H', 'Tablet', 'HP stream 7 black front in box', '', 'IMG_9883.JPG', 'operational'),\n(165, 'LCDI07183', 'HA1244M02597', 'Watch', 'NIke+ Sportswatch GPS', '', 'IMG_9866.JPG', 'operational'),\n(166, 'LCDI07181', 'XRAFB401', 'Watch', 'BLACK FITBIT FLEX', '', 'IMG_9861.JPG', 'operational'),\n(167, 'LCDI07192', 'P027xq34v3', 'Watch', 'Black Android moto 360 Smartwatch', 'With box', 'IMG_9870.JPG', 'operational'),\n(168, 'LCDI-07125', '', 'Tripod', 'Dolica', 'Decommissioned', 'None.gif', 'decommissioned'),\n(169, 'lcdi07146', 'D80KBC769292', 'Nexus', 'Black Nexus Tablet', 'Decommissioned', 'None.gif', 'decommissioned'),\n(170, 'LCDI07193', '60694164062075', 'USB 2.0 Hub with Power Switches', '4-Port USB 2.0 Hub with Power Switches\\r\\nBlack, Sabrent\\r\\nMODEL: HB-UMLS', '', 'LCDI07193.jpg', 'operational'),\n(171, 'LCDI07194', '', 'Cable', 'Black connecting cable', '', 'LCDI07194.jpg', 'operational'),\n(172, 'LCDI-07127', '', 'Keys', 'Netadmin Keys', '', 'None.gif', 'operational'),\n(173, 'LCDI07127', '', 'Keys', 'Net Admin Keys', '', 'LCDI07127.JPG', 'operational'),\n(174, 'LCDI07195', 'S1STNEAD633826L', 'Solid State Drive', 'Samsung 840 Pro SSD 256 GB', '', 'IMG_9854.JPG', 'operational'),\n(175, 'LCDI07196', '2N113C3K57ACD', 'Switch', 'NETGEAR\\r\\nProSafe 5 Port Gigabit Switch\\r\\nMODEL GS105', 'Blue', 'None.gif', 'operational'),\n(176, 'LCDI07145', 'WMC5D0D1WCYM', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8498.JPG', 'operational'),\n(177, 'LCDI07143', 'WMC5D0D7S0HM', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8497.JPG', 'operational'),\n(178, 'LCDI07142', 'WMC5D0D0WPPR', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8496.JPG', 'operational'),\n(179, 'LCDI07141', 'WMC5D0D1U4S1', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8495.JPG', 'operational'),\n(180, 'LCDI07140', 'WMC5D0D1THVT', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8494.JPG', 'operational'),\n(181, 'LCDI07139', 'WMC5D0D48NLE', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8493.JPG', 'operational'),\n(182, 'LCDI07137', 'WMC5D0D5JZYE', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8492.JPG', 'operational'),\n(183, 'LCDI07138', 'WMC5D0D84VLS', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8500.JPG', 'operational'),\n(184, 'LCDI07136', 'WMC5D0D3M4VK', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8490.JPG', 'operational'),\n(185, 'LCDI07135', 'WMC5D0DA1HSO', 
'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8489.JPG', 'operational'),\n(186, 'LCDI07134', 'WMC5D0D0164C', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8488.JPG', 'operational'),\n(187, 'LCDI07133', 'WMC5D0D5XZZ8', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8487.JPG', 'operational'),\n(188, 'LCDI07132', 'WMC5D0D5Z2A1', 'TEST', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8486.JPG', 'operational'),\n(189, 'LCDI07131', 'WMC5D0D7D4L9', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8485.JPG', 'operational'),\n(190, 'LCDI07130', 'WMC5D0D7RRY8', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8484.JPG', 'operational'),\n(191, 'LCDI07128', 'WMC5D0D95S48', 'Hard Drive', '4.0 TB WD Enterprise Class Hard Drive', '', 'IMG_8483.JPG', 'operational'),\n(192, 'LCDI07216', '001734650452', 'Tablet', 'MICROSOFT SURFACE 3', '', '2015-09-22 11.15.34.jpg', 'operational'),\n(193, 'LCDI07219', 'X000UL6KP9', 'IoT DEvice: Window/Door Sensors', '2 window/door sensors. Quirky, Wink', '', 'IMG_9876.jpg', 'operational'),\n(194, 'LCDI-07217', 'ABAA00027468', 'IoT Device: Egg Minder', 'egg carton, white holds 14 eggs. ', '', 'IMG_9875.jpg', 'operational'),\n(195, 'LCDI-07218', '18B4304E6DCE', 'Iot Device: NestCam', 'Nest Cam, black on edevice', '', '2015-09-22 13.23.05.jpg', 'operational'),\n(196, 'LCDI-07221', 'BM150925364B', 'Flash Drive', 'Scardisk USB 3.0 black 32GB', '', '2015-09-22 13.29.14.jpg', 'operational'),\n(197, 'LCDI07220', 'BM150925364B', 'Flash Drive', 'Scardisk USB 3.0 black 32GB', '', '2015-09-22 13.29.14 (1).jpg', 'operational'),\n(198, 'LCDI-07160', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123.jpg', 'operational'),\n(199, 'LCDI-07161', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 Gb', '', 'IMG_9123.jpg', 'operational'),\n(200, 'LCDI-07152', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(1).jpg', 'operational'),\n(201, 'LCDI-07154', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(2).jpg', 'operational'),\n(202, 'LCDI-07155', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(3).jpg', 'operational'),\n(203, 'LCDI-07156', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(4).jpg', 'operational'),\n(204, 'LCDI-07157', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(5).jpg', 'operational'),\n(205, 'LCDI-07158', '', 'Raspberry Pi', 'Raspberry Pi 2 Model B 1 GB', '', 'IMG_9123(7).jpg', 'operational'),\n(206, 'LCDI-07235', '02AA01AC1815010B', 'Nest Smart Device', 'Nest Smart Thermostat', '', 'IMG_9119.jpg', 'operational'),\n(207, 'LCDI-07224', '', 'IoT DEvice: Window/Door Sensors', 'Go Control Home Security Suite Motion Detector Devices', '', 'IMG_9120.jpg', 'operational'),\n(208, 'LCDI-07222', '', 'Wireless Lighting', 'Wireless lighting kit', '', 'IMG_9878.JPG', 'operational'),\n(209, 'LCDI-07163', '', 'Camera', 'Samsung Smart Cam', '', 'IMG_9879.JPG', 'operational'),\n(210, 'LCDI-07164', '', '', 'Kwikset Smart Code Lock', '', 'IMG_9880.JPG', 'operational'),\n(211, 'LCDI07165', '', 'Wireless Lighting', 'Levitron Plug-In Dimmer', '', 'IMG_9867.JPG', 'operational'),\n(212, 'LCDI-07166', '', 'Wireless Hub', 'Wink Wireless Hub', '', 'IMG_9877.JPG', 'operational'),\n(213, 'LCDI-07197', 'P/N: 0D3RK1', 'External CD/DVD Drive', 'Dell USB External DVD Drive', 'Permanently signed out by FI.', 'None.gif', 'operational'),\n(214, 'LCDI07201', 'WKC1EC2VT756', 'External Drive', 'Western Digital External Hard Drive 2TB', '', 
'IMG_9891.JPG', 'operational'),\n(215, 'LCDI-07168', 'WCATRC241266', 'Hard Drive', 'WD Black 1TB Internal Hard Drive', '', 'None.gif', 'operational'),\n(216, 'LCDI-07167', 'WCAW37609675', 'Hard Drive', 'WD Black 1TB Internal Hard Drive', 'Permanently signed out by FI.', 'None.gif', 'operational'),\n(217, 'LCDI-07169', '', 'Flash Drive', 'San Disk Ultra USB 3.0 32 GB Drive', '', 'IMG_9227.JPG', 'operational'),\n(218, 'LCDI-07202', 'WX91E13TKS87', 'External Drive', 'Western Digital 2 TB My Passport Drive ', '', 'None.gif', 'operational'),\n(219, 'LCDI07223', 'X00MKBEIL', '', 'Small, black, wireless keyboard&mouse. Looks like a remote control for a tv.\\r\\n\\r\\nAlso has a cable with it', '', 'IMG_9302.JPG', 'operational'),\n(220, 'LCDI07240', 'X000VA40BZ', '', 'Touchscreen Mini Kit', '', 'IMG_9303.JPG', 'operational'),\n(227, 'LCDI07241', 'WMAM9SC64936', 'Hard Drive', 'Western Digital WD800JD 80GB Hard Drive', '', 'IMG_9906.JPG', 'operational'),\n(228, 'LCDI07242', 'WCAT1J187788', 'Hard Drive', 'Dell 250GB HDD', '', 'IMG_9309.jpg', 'operational'),\n(229, 'LCDI07243', 'WCAT1J204800', 'Hard Drive', 'Dell 250GB HDD', '', 'IMG_9310.jpg', 'operational'),\n(230, 'LCDI07244', 'S1ATNEAD635307K', 'External Drive', 'Samsung 840 Pro 256GB SSD', '', 'IMG_9315.jpg', 'operational'),\n(231, 'LCDI07245', 'S1ATNEAD637379X', 'External Drive', 'Samsung 840 Pro 256GB SSD', '', 'IMG_9314.jpg', 'operational'),\n(232, 'LCDI07246', 'S1ATNEAD641849T', 'External Drive', 'Samsung 840 Pro 256GB SSD', '', 'IMG_9313.jpg', 'operational'),\n(233, 'LCDI07247', 'S1ATNEAD545518J', 'External Drive', 'Samsung 840 Pro 256GB SSD', '', 'IMG_9312.jpg', 'operational'),\n(234, 'LCDI07248', 'S14GNEACC15347X', 'External Drive', 'Samsung 840 Pro 256GB SSD', 'Permanently signed out by FI.', 'IMG_9311.jpg', 'operational'),\n(235, 'LCDI07251', '5910103YN078', 'Chromecast', 'Chromecast', '', 'cc1.jpg', 'operational'),\n(236, 'LCDI07252', '5911103YN0VD', 'Chromecast', 'Chromecast', '', 'cc2.jpg', 'operational'),\n(237, 'LCDI07253', '510KPWQ352617', 'Nexus', 'Nexus 5X 16GB', '', 'nexus.jpg', 'operational'),\n(238, 'LCDI07254', 'WX91E13RPA14', 'External Drive', '2 TB WD My Passport External Drive', 'Silver Front', 'IMG_9897.JPG', 'operational'),\n(239, 'LCDI07198', '', 'External Drive', 'Black Dell External DVD Drive', '', 'IMG_9862.JPG', 'operational'),\n(240, 'LCDI07249', 'X000N4F1MV', 'Cable', 'USB 3.0 to SATA 6G Converter', 'Still in box', 'IMG_9863.JPG', 'operational'),\n(241, 'LCDI07202', 'WX91E13TKS67', 'External Drive', 'WD My Passport Drive', '', 'IMG_9871.JPG', 'operational'),\n(242, 'LCDI07185', '', 'Camera', 'GO Pro with Accessories', 'Inside Blue Lowepro Case', 'IMG_9872.JPG', 'operational'),\n(243, 'LCDI07186', '', 'Camera', 'GO Pro with Accessories ', 'In Blue Lowepro Case', 'IMG_9874.JPG', 'operational'),\n(244, 'LCDI07184', '', 'Camera', 'GO Pro with Accessories', 'In Blue Lowepro Case', 'IMG_9873.JPG', 'operational'),\n(245, 'LCDI07200', 'IMEI: 359336043275069', 'Phone', 'Black Nokia Lumia 710', 'In Box', 'IMG_9888.JPG', 'operational'),\n(246, 'LCDI07199', 'D01EC0A015330ULV', 'Tablet', 'Black Kindle Fire', 'In Box', 'IMG_9890.JPG', 'operational'),\n(247, 'LCDI07188', 'B006A0A010331A0M', 'Tablet', 'Black Kindle Fire', 'With box', 'IMG_9894.JPG', 'operational'),\n(248, 'LCDI07189', 'B006A0A010230E57', 'Tablet', 'Black Kindle Fire', 'In Box', 'IMG_9896.JPG', 'operational'),\n(249, 'LCDI07310', '', 'Nexus', 'Black Nexus', 'No box', 'None.gif', 'operational'),\n(250, 'LCDI07311', 'SFH8QL0HMG9J6', 'Watch', 'Apple Watch \"Sport\"', 
'In white box with charger and extra wristband', 'None.gif', 'operational'),\n(251, 'LCDI07312', 'RFAGA03V0SZ', 'Watch', 'Samsung Gear 2 Watch', 'In round white box', 'None.gif', 'operational'),\n(252, 'LCDI07313', 'D9OKBC004610', 'Nexus', 'Black Nexus Tablet', 'Sign out til end of semester - David P.', 'IMG_0045.JPG', 'operational'),\n(253, 'LCDI07314', 'D8OKBC758481', 'Nexus', 'Black Nexus Tablet', 'In box', 'IMG_0046.JPG', 'operational'),\n(254, 'LCDI07315', 'D9OKBC250058', 'Nexus', 'Black Nexus Tablet', 'In box\\r\\nNo USB charging cable', 'IMG_0047.JPG', 'operational'),\n(255, 'LCDI07316', 'D8OKBC769308', 'Nexus', 'Black Nexus Tablet', 'In Box', 'IMG_0048.JPG', 'operational'),\n(256, 'LCDI07317', 'D8OKBC775383', 'Nexus', 'Black Nexus Tablet', 'In box', 'IMG_0049.JPG', 'operational'),\n(257, 'LCDI07318', 'D9OKBC004772', 'Nexus', 'Black Nexus Tablet', '', 'IMG_0050.JPG', 'operational'),\n(258, 'LCDI07319', 'D9OKBC250069', 'Nexus', 'Black nexus tablet', 'Sign-out until end of semester - Alex LaFleur', 'IMG_0051.JPG', 'operational'),\n(259, 'LCDI07320', 'D8OKBC769448', 'Nexus', 'Black Nexus Tablet', 'In box', 'IMG_0052.JPG', 'operational'),\n(260, 'LCDI07255', 'WCC3F4RRR4UR', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(261, 'LCDI07256', 'WCC3F3REPAUZ', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(262, 'LCDI07257', 'WCC3F3YP0EPS', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(263, 'LCDI07258', 'WCC3F4ZFLT6S', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(264, 'LCDI07259', 'WCC3F2XXAFZ1', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(265, 'LCDI07260', 'WCC3F4RRRPJ8', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(266, 'LCDI07261', 'WCC3F0DLUTXN', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(267, 'LCDI07262', 'WCC3F0DLU8ND', 'Hard Drive', 'Western Digital 1TB Black Hard Drive', 'FOR490 - Permanent Sign-out.', 'None.gif', 'operational'),\n(268, 'LCDI07321', 'CLNRPCDADR6330', 'Phone', 'HTC Rhyme (Purple)', 'In Box', 'IMG_0057.JPG', 'operational'),\n(269, 'LCDI07322', '', 'Phone', 'Black T-Mobile LG Optimus F6', 'In box', 'IMG_0058.JPG', 'operational'),\n(270, 'LCDI07323', 'R47ABW4Z7R', 'Phone', 'Black Motorola Moto G', 'In box', 'IMG_0059.JPG', 'operational'),\n(271, 'LCDI07324', '990002100523685', 'Phone', 'White Samsung Galaxy S 3', 'In box', 'IMG_0060.JPG', 'operational'),\n(272, 'LCDI07263', '8931010039', 'Changing Pad', 'Hatch Baby Smart Changing Pad', '', 'IMG_0062.JPG', 'operational'),\n(273, 'LCDI07328', 'DMPHPT8cdvd1', 'iPad', 'White 16GB iPad 3rd Gen Model: A1416', '', 'None.gif', 'operational'),\n(274, 'LCDI07325', 'D8OKBC776073', 'Tablet', 'Black 16GB Nexus Tablet', 'W/ Box', 'IMG_9054.JPG', 'operational'),\n(275, 'LCDI07326', 'B0F00715544703K4', 'Amazon Echo', 'Black Amazon Echo device with charging cables', 'In Box', 'IMG_9055.JPG', 'operational'),\n(276, 'LCDI07327', 'S309A025364', 'IoT Motion Device', 'Black, Leap Motion Sensor', 'In box', 'IMG_9056.JPG', 'operational'),\n(277, 'LCDI07279', '2159A47000929', 'Wireless Router', 'TP-Link 300mbps Wireless N Router', '', 'IMG_9096.JPG', 
'operational'),\n(278, 'LCDI07280', 'LQ7ZTQ', 'TEST', 'Anker Compact Bluetooth Keyboard', '', 'IMG_9098.JPG', 'operational'),\n(279, 'LCDI07281', 'BTD4001551069', 'TEST', 'Kinivo Bluetooth USB Adapter BTD-400', '', 'IMG_9099.JPG', 'operational'),\n(280, 'LCDI07282', 'FCC ID:BCG-E2816A', 'iPhone', 'iPhone 6', '', 'IMG_9103.JPG', 'operational'),\n(281, 'LCDI07284', '23219EE1D2', 'Jawbone', 'Jawbone Jambox Bluetooth Speaker Red', '', 'None.gif', 'operational');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `Sessions`\n--\n\nCREATE TABLE IF NOT EXISTS `Sessions` (\n `ID` int(255) NOT NULL AUTO_INCREMENT,\n `SessionID` varchar(255) NOT NULL,\n `UserName` varchar(255) NOT NULL,\n `IP` varchar(255) NOT NULL,\n `Token` varchar(255) NOT NULL,\n `Date` datetime NOT NULL,\n PRIMARY KEY (`ID`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=1000 ;\n\n--\n-- Dumping data for table `Sessions`\n--\n\nINSERT INTO `Sessions` (`ID`, `SessionID`, `UserName`, `IP`, `Token`, `Date`) VALUES\n(1, 'f0e275cf56f8272', 'netadmin', '192.168.10.24', 'fe8bc9d94d448234f9e6f36f26147fca', '2014-10-30 10:20:23'),\n(6, '2d55cfd1c06ada8', 'ddeloge', '192.168.10.33', '2319d879f3f644bce19cb04062fb1ed3', '2014-10-31 03:32:09'),\n(8, '08a54b6bcbf7bde', 'mfortier', '192.168.10.32', 'a3571684fe5c674ed2c03173925978b3', '2014-10-31 04:34:36'),\n(9, 'eccdc14cb51ff99', 'netadmin', '192.168.10.24', 'afcd33589d1cbab75bb0db1ee0fe2faf', '2014-11-03 08:35:02'),\n(10, 'aa7051b9a3bd6a2', 'williams', '192.168.10.15', '26e0722ca24c1ebbee448f4029a52784', '2014-11-03 08:35:56'),\n(11, 'c8511d8af663b8a', 'netadmin', '192.168.10.24', '90f5f0f846371ee2a89ac178a978c3b0', '2014-11-03 09:30:24'),\n(12, '354028cb56d0db7', 'bcampbell', '192.168.10.33', '520c7218bbdca0797ac16aa05e961946', '2014-11-03 09:44:13'),\n(13, '23d54824c58626e', 'bfagersten', '192.168.10.24', '2bf9d4825a36c32be77c0ea3f776d1d0', '2014-11-03 11:03:24'),\n(14, 'e15133a0c53d24d', 'williams', '192.168.10.15', '6a677e5bafc23537168445f2749c0a88', '2014-11-03 01:11:46'),\n(15, '17645267fc7097b', 'ddeloge', '192.168.10.33', 'a840c67167407d6a9ecf0819a54840e7', '2014-11-03 01:16:19'),\n(16, '754141495f57ac9', 'williams', '192.168.10.15', '76ca7518a7dbf2036ca6886a9f6bedea', '2014-11-03 03:03:13'),\n(17, '37158c854c6a3d7', 'ddeloge', '192.168.10.25', 'bfef43c76a5c10391d616f9bbb3feb88', '2014-11-03 03:20:17'),\n(19, '3da8dac07b3a516', 'ddeloge', '192.168.10.21', '92e472143b40c57fd32a6de28fe9a77e', '2014-11-03 03:26:47'),\n(20, '0e564e3b27fda63', 'ddeloge', '192.168.10.32', '79fa66bf4e72af04dde68f39b0d3b1dd', '2014-11-03 04:12:30'),\n(21, 'd7d1c7c6a91336f', 'williams', '192.168.10.15', 'fb63a6063e01dc61a04f4c8782129c96', '2014-11-04 08:21:23'),\n(22, '1414b3c31545935', 'ascallion', '192.168.10.1', '2fe5e8d44c12fa88a86f930d34822824', '2014-11-04 08:32:22'),\n(23, 'b6674b11ba8e20e', 'ascallion', '192.168.10.17', 'f0cdb51892e75d55cf3c6bf905e513df', '2014-11-04 10:30:17'),\n(24, '4d8ced87060d721', 'ascallion', '192.168.10.32', '714cd3a2b0cea13c2ef68cea91ca618d', '2014-11-04 11:01:23'),\n(25, '8efa4f450e59235', 'williams', '192.168.10.15', '5e29e0de1cdff2af4524e4888113d06c', '2014-11-04 12:38:07'),\n(26, '722dd2a0dc74bfa', 'jbonaccorsi', '192.168.10.32', 'dd48a920102cdef400c6fa3e11073b59', '2014-11-04 01:13:21'),\n(29, '2f7dbc5b0f47054', 'Jbonaccorsi', '192.168.10.32', 'ec4a9cbdecf70c0b73ace5999f0748dd', '2014-11-04 04:14:13'),\n(32, '0c4d51259db56ee', 'williams', '192.168.10.15', '4dae9c3a97672a558c4ab3a1c392e318', '2014-11-05 
08:55:33'),\n(33, '05140d0492c98b4', 'mgreen', '192.168.10.32', 'db5b65ff34393f70ecc909978b0a9acc', '2014-11-05 09:02:22'),\n(35, '78d6ebecfa4e6ce', 'williams', '192.168.10.30', '678baab3fc782d9c4d4a8c189b2576f6', '2014-11-05 09:29:48'),\n(36, '4d8e5a26a73da72', 'mgreen', '192.168.10.32', 'f4543d9e4d0a890bb99543e4817d0fd3', '2014-11-05 11:36:54'),\n(37, 'd1bb5e2af6e424b', 'mgreen', '192.168.10.32', 'f1b481e3d6175d8f78625a76809039c1', '2014-11-05 12:02:14'),\n(38, '00e48acbb243e6f', 'mfortier', '192.168.10.32', 'e2e72d7e894c2dfa8a679cd3656c2d33', '2014-11-05 12:36:49'),\n(40, 'c323b7dfa66c86e', 'williams', '192.168.10.15', '8cd8724381109f5d893a3abf0345672d', '2014-11-05 03:09:58'),\n(41, '54a07b89cec2511', 'netadmin', '192.168.10.24', '966b5f664a9306c2b7a5ca5bb77d8537', '2014-11-05 03:17:25'),\n(42, '3954f093bfb31c7', 'mfortier', '192.168.10.32', '8bf2fe21ea849a1769275afe878311aa', '2014-11-05 03:18:47'),\n(43, 'e6a93111296702c', 'mgreen', '192.168.10.32', '331cd96529a0fa9e9faac84694c14c9e', '2014-11-05 04:17:30'),\n(44, 'b0d2829fbe5bf29', 'mgreen', '192.168.10.22', 'fd1770f885cd5385ad7ad0186e15f028', '2014-11-05 04:44:43'),\n(45, 'b4dc4823c7a3aef', 'mgreen', '192.168.10.32', '0a73b275898033b91d17b330365acf3f', '2014-11-05 05:48:26'),\n(46, '68f1e823426c492', 'mgreen', '192.168.10.22', '350b320d300e39efefdb05807afa8ca1', '2014-11-05 06:39:59'),\n(47, '2aa5c2c8f8b9193', 'mgreen', '192.168.10.32', '93b1b0b5c46f37d3c2f2b779faa7f987', '2014-11-05 07:48:40'),\n(48, '18558bc9c5197ec', 'netadmin', '192.168.10.24', 'c7fb37a57c1980e929e728fdd1ff2228', '2014-11-06 09:20:03'),\n(49, '4716a4200c73c4e', 'williams', '192.168.10.15', '84271c1887834311e03f9a8f2f9eb761', '2014-11-06 09:25:51'),\n(50, '7396c1c17c25623', 'adible', '192.168.10.32', 'd3f9088437de95d907ce38ff19ae2a12', '2014-11-06 09:34:11'),\n(51, 'b6e95d7e0857b2e', 'adible', '192.168.10.30', '9d2e196e6638d6a655ca8c1cbf1f3a4d', '2014-11-06 09:54:44'),\n(52, 'fe8a57120bea344', 'adible', '192.168.10.32', 'e3a4ae7f372966462e823a5ee0867260', '2014-11-06 11:30:58'),\n(53, '455f1b84a95d428', 'bcampbell', '192.168.10.32', 'c96ef11233bbb73a7d64da0f8e71421d', '2014-11-06 12:17:13'),\n(54, '9190089e11aeaa6', 'bcampbell', '192.168.10.32', '1edf8356ceb661ca6b5ea66a8142cb2b', '2014-11-06 01:36:01'),\n(55, '7daa3fc8842911f', 'bcampbell', '192.168.10.32', '43d8216342667a848835a79bedb5d467', '2014-11-06 02:47:10'),\n(56, '190f7ded7c97280', 'williams', '192.168.10.15', 'ecde1eec7b690f823529dab5cee4f07f', '2014-11-06 03:40:38'),\n(57, '08bda37d3624c00', 'mgreen', '192.168.10.20', '3c85af53444590a310a035f426d244a0', '2014-11-06 05:24:26'),\n(58, '249622f402af7bf', 'ascallion', '192.168.10.32', '31d22d86b7a706d774cfa5eed6df501b', '2014-11-07 11:24:29'),\n(59, 'df9ef2432c94205', 'ddeloge', '192.168.10.32', '7f2f3b894d3be380c05835440fe31f8a', '2014-11-07 01:20:40'),\n(60, '1dc89393073bbc8', 'williams', '192.168.10.15', '88c58e18eea24b1ae6ddb6eab13dac39', '2014-11-07 02:34:02'),\n(61, '6561598bb97c2ae', 'ddeloge', '192.168.10.32', '884bf28db6580b545dd9aa38f68545d8', '2014-11-07 02:39:44'),\n(62, '2de9cf19761c291', 'netadmin', '192.168.10.24', '7987d7a76f6b610d88c8b56ca0f1cc8d', '2014-11-07 02:42:27'),\n(63, 'a386355be27e591', 'mfortier', '192.168.10.32', 'b863ef7a3781e8840c129a85e4a1ed77', '2014-11-07 04:15:57'),\n(64, '29657a4be6810e8', 'bcampbell', '192.168.10.32', '6760b749ada682acf3a703d75393d8aa', '2014-11-10 09:06:30'),\n(65, '12d1119e090907b', 'williams', '192.168.10.15', 'e02cdb3c2f1341906fc7a7ee8375e56f', '2014-11-10 12:35:19'),\n(66, '5e41b27a29c09c6', 
'ddeloge', '192.168.10.32', 'bc71d470c8c443c1e083014c2314729a', '2014-11-10 01:02:36'),\n(67, '686c31bbf7cdff9', 'ddeloge', '192.168.10.32', '50b4014bc2326e8c3ba62297ba5559c0', '2014-11-10 02:19:06'),\n(69, '1998de11f141227', 'williams', '192.168.10.15', '713b904337807b26c5f553a81ce314a5', '2014-11-11 10:08:56'),\n(70, '78fc64f89a0cd84', 'ascallion', '192.168.10.32', '6234abd62f2c1b3fc0dd43e042c939dc', '2014-11-11 10:11:59'),\n(71, '7ab390849b6554b', 'jbonaccorsi', '192.168.10.32', '65797aa5288bcd34d3bd8ebae0ffe94e', '2014-11-11 12:50:48'),\n(72, '7b200c749a14700', 'jbonaccorsi', '192.168.10.32', 'aaa5453ed8e22e94e7b8d4a3584dcdf7', '2014-11-11 03:37:42'),\n(73, '06c13621ddc2ff4', 'jbonaccorsi', '192.168.10.32', '69e24faf1e0b81dbdc77afb835ddb044', '2014-11-11 06:39:39'),\n(75, '4b65cb1410e63b8', 'mgreen', '192.168.10.32', '09037b2f3bdcc1fbbae4e1cc0be4e362', '2014-11-12 10:59:05'),\n(76, '49cf2d88ffd1285', 'mfortier', '192.168.10.32', 'c60d85d3597666fd26007216ad236bda', '2014-11-12 12:59:41'),\n(77, '70f9e38d22edc86', 'mfortier', '192.168.10.32', '066d89d51cc1c0f6969635bda0f512fc', '2014-11-12 01:14:14'),\n(78, '03aebbf80128009', 'pdesborough', '192.168.10.31', 'f53a009fcb177995aee59891763b62c5', '2014-11-12 01:35:53'),\n(79, 'ef6e6d944edfb5d', 'mfortier', '192.168.10.32', 'ccd03fae95fd964a67a99719df18198f', '2014-11-12 03:44:31'),\n(80, '646e3c0d3e07454', 'mgreen', '192.168.10.30', '0b5b29708d59cfcea4735156d585e7c9', '2014-11-12 07:47:16'),\n(81, '9736630a510d4ab', 'adible', '192.168.10.32', '8fb702801c96314b708b8c44f1a7477b', '2014-11-13 09:25:43'),\n(82, '1aa1dd80331d4b8', 'adible', '192.168.10.32', '307c74c31a850c342f2a6d78c236500a', '2014-11-13 11:58:30'),\n(83, '68e0da31be40e31', 'bcampbell', '192.168.10.32', '118c38c077c7a8d49f831afceb993b84', '2014-11-13 12:09:49'),\n(84, 'db57275c6b6d786', 'bcampbell', '192.168.10.32', '24fe56dbd2886280973ad7eb18a54a82', '2014-11-13 02:55:30'),\n(85, '797215be21e8179', 'ddeloge', '192.168.10.32', '3b083bb74bbc2802db14eafb0e8a155c', '2014-11-14 12:44:01'),\n(86, '5b27896f33f9865', 'ddeloge', '192.168.10.32', '8044343e0c182d3a5b419c3818285015', '2014-11-14 02:17:41'),\n(87, '229b23ede1b1f14', 'netadmin', '192.168.10.24', '8c799ca9f265cd13c7ef4729e42fa312', '2014-11-14 02:53:29'),\n(88, '490d118ae449a71', 'mfortier', '192.168.10.32', '478b6d7c4b815bd3cc02d6d027406464', '2014-11-14 05:18:30'),\n(89, '9c26da57296d99f', 'ddeloge', '192.168.10.32', '54e8f2eb37bd6f61cbc5cd6638c31300', '2014-11-17 01:46:27'),\n(90, '5c342a8c8f6d194', 'ddeloge', '192.168.10.32', 'dc8cfba8cb5b01b3aa02efc33feb3d7a', '2014-11-17 03:55:33'),\n(91, '54c890760f72199', 'ascallion', '192.168.10.32', 'd43c2f69f2ee1b07f58ef23232218119', '2014-11-18 10:58:53'),\n(92, '4fa5808c3bf007d', 'ascallion', '192.168.10.20', 'f025c39196ec35b99d44d46ed50eb933', '2014-11-18 11:03:47'),\n(93, 'd878f4aedd87be3', 'jbonaccorsi', '192.168.10.32', '7c3931ab5fdaec35e4c6fec0002982fc', '2014-11-18 01:02:49'),\n(94, 'd68adf79f01227e', 'netadmin', '192.168.10.24', '8fa9a6c219d5c2f6ad5011248060b8ab', '2014-11-18 01:11:28'),\n(95, '50f10bf612f3546', 'bfagersten', '192.168.10.24', '7c1af33f7be447939411cf19277e629c', '2014-11-18 03:21:49'),\n(96, '612f0b93d1b9b6f', 'jbonaccorsi', '192.168.10.32', '7bf029691e9d45d194e40b79b5d7643a', '2014-11-18 03:53:37'),\n(97, '8fa829390c29edb', 'jbonaccorsi', '192.168.10.32', '4df10fd20ff9b2328b01662348d96e98', '2014-11-18 05:29:15'),\n(99, '2f2df76bf7ee069', 'mgreen', '192.168.10.20', '98c76578f88888b6bf819d17e2184906', '2014-11-19 08:24:24'),\n(100, '4e0bbd5bd5e4529', 
'mgreen', '192.168.10.32', 'ce29992184e9e5757bfa0d75cf191751', '2014-11-19 08:50:44'),\n(101, '0726c5f2576fcf1', 'mgreen', '192.168.10.20', '019ee636b1b1242f6d0b841581646de1', '2014-11-19 09:31:03'),\n(102, 'f0343de27dbfa1b', 'mgreen', '192.168.10.20', '28665e3d2c537333da1678275c2e091c', '2014-11-19 11:18:48'),\n(103, 'ffb3d47bc55c085', 'mgreen', '192.168.10.30', 'ee7c376b8804d2ccdf25f31891c84dab', '2014-11-19 11:36:00'),\n(104, '69ff58700e55985', 'mfortier', '192.168.10.32', '90d50a503e76e415604065018e692540', '2014-11-19 12:31:14'),\n(105, '5c7e35678bf0d29', 'mfortier', '192.168.10.32', 'ac3af36fa50f0bc59020d7a082f20cb4', '2014-11-19 01:26:57'),\n(106, 'bbad0a149d4f2e9', 'mfortier', '192.168.10.32', 'ecd69d7ddce4b99282f8f9b2c6ed9932', '2014-11-19 02:09:31'),\n(107, '610f29fb48b0a17', 'mfortier', '192.168.10.32', '5a0516ca97b80a10d201b8465046666b', '2014-11-19 03:28:14'),\n(109, '1773f69c64e7edd', 'mgreen', '192.168.10.36', '008a8db0eefdeecc0bda35f10dd8a830', '2014-11-19 04:29:56'),\n(110, 'da90b0ffc33338d', 'williams', '192.168.10.15', '019aba5070f7525fd02b6cc582add5b1', '2014-11-19 04:35:09'),\n(111, 'a63f4a03a70d4f0', 'mgreen', '192.168.10.36', 'e6a58694b4da75e35986babf3aa3efeb', '2014-11-19 05:42:46'),\n(112, '01894fb62110a23', 'mgreen', '192.168.10.36', 'a62e88e4b1a4fe7354baf1655bf55ede', '2014-11-19 06:47:39'),\n(113, 'f603222020e6b17', 'adible', '192.168.10.32', '2ec5b5879e292d337781e6925d1bdac0', '2014-11-20 09:42:21'),\n(114, '98b21eae3987833', 'williams', '192.168.10.15', '2dec032d5951099e9f384c1d03fd74e2', '2014-11-20 10:19:40'),\n(115, 'e9fa01d09508d2a', 'adible', '192.168.10.32', '77f0dd584a15785d2bbc5cab6e8ebe6f', '2014-11-20 10:46:56'),\n(117, '0714e08327ff86f', 'mgreen', '192.168.10.32', '72a3bee8f6e84b3da97785b5b9a326c3', '2014-11-20 05:02:57'),\n(118, 'c5dbc47a4defb39', 'mgreen', '192.168.10.30', '27b945d01378f328b69512814c18d585', '2014-11-20 06:39:07'),\n(119, '0c60892132bed97', 'ddeloge', '192.168.10.32', '618ff63b67f78f2dfc21f9078f2037df', '2014-11-21 11:55:48'),\n(120, 'd2ea9e724f5e558', 'ddeloge', '192.168.10.32', '035cba05cc89c3a8a1a0c44e38a67537', '2014-11-21 03:52:08'),\n(121, '3b2a00fa8205895', 'ddeloge', '192.168.10.32', '3affb09bfdc06762e526c5f01422ed89', '2014-11-24 02:30:46'),\n(122, '5f4de74abf21b23', 'ddeloge', '192.168.10.32', '3e4e59fb2f73a8df391101ca35c0afeb', '2014-11-24 03:53:47'),\n(123, '5a2c1cec827156c', 'ddeloge', '192.168.10.32', 'c3210ce57f40070504124b35ccc4f0f1', '2014-11-25 08:18:00'),\n(124, 'dd66c84bb376d35', 'netadmin', '192.168.10.24', 'cda9a89bd1b1b626ac2d93357f0073c4', '2014-11-25 10:29:14'),\n(125, 'd8846c7edaf3d8b', 'williams', '192.168.10.15', 'c541db93d909829f9153a2c6192f862f', '2014-11-25 11:13:49'),\n(126, 'a8786956a1a35b9', 'ddeloge', '192.168.10.32', '28e506fae3cb3ecbba1ff9db41e031f1', '2014-11-25 11:47:08'),\n(127, '9e48e2cc2982296', 'williams', '192.168.10.15', 'f02968495e1bf726558b4f8f94c02857', '2014-11-25 12:42:28'),\n(128, '32b475c25a09ba1', 'williams', '192.168.10.15', 'fb779d3dce110c708e71de4600b21f2b', '2014-11-25 01:24:27'),\n(129, '7b1bba60b848977', 'bcampbell', '192.168.10.32', 'f044f7c0399e5fc9c8f10d3a9e8c3f7a', '2014-12-01 09:47:28'),\n(130, 'c7b04f928d46018', 'ddeloge', '192.168.10.32', '6b68312e261d50114388c3605ab73f59', '2014-12-01 01:39:06'),\n(131, '82e179b9b628211', 'williams', '192.168.10.9', '1fbb243456ae27ac9e975fd666d1702c', '2014-12-01 02:28:12'),\n(132, '0e04d2c26a89df1', 'ddeloge', '192.168.10.32', '2e8372bbcd7118704e5e2e7c323e80e6', '2014-12-01 02:29:45'),\n(133, '61e2bc4f0f87306', 'ascallion', 
'192.168.10.5', '1cd772a758e4db3e7915246f0bf075f4', '2014-12-02 08:56:24'),\n(134, 'f04a7b0eb7eba62', 'jbonaccorsi', '192.168.10.32', '3cdcc30d0eb0c77b6da1fe3b7c52a863', '2014-12-02 12:10:11'),\n(135, 'd6c36a5518515f0', 'jbonaccorsi', '192.168.10.32', '3e6594d55b3e648a86a21fd9e168fea3', '2014-12-02 12:50:03'),\n(136, 'b04155817501586', 'jbonaccorsi', '192.168.10.32', 'd41ed7b8cd4f2c6d0719fdfd6bf64a99', '2014-12-02 02:26:24'),\n(137, 'cce29fbddad9d1d', 'jbonaccorsi', '192.168.10.32', '78fc898cb0640cfaacf81a606ad6a0fd', '2014-12-02 03:43:48'),\n(138, 'c9d818a1f3141d5', 'jbonaccorsi', '192.168.10.32', '6401cabf570f5ad705a1978b1a0f7232', '2014-12-02 04:48:35'),\n(139, '8451b08da7f62a7', 'mgreen', '192.168.10.32', '868071ae4dd50d4728618fbbda1000fe', '2014-12-03 08:13:18'),\n(140, 'b702dcd57d7817f', 'netadmin', '192.168.10.24', 'b039539696f52fe94388f4c69454891c', '2014-12-03 02:40:22'),\n(141, '7d910c004297478', 'mfortier', '192.168.10.21', '2b77c7e9cbde2d05fbbc65870062ca3f', '2014-12-03 03:30:47'),\n(144, '6019a651c2011d7', 'mgreen', '192.168.10.19', '3b0611074c068e6b2b25658f7589a11b', '2014-12-03 04:26:33'),\n(145, '3799e3dbfa6980e', 'mgreen', '192.168.10.30', 'f05d3550009ef28fb88bfee706ddfa11', '2014-12-03 06:54:50'),\n(146, '061133a1529a56c', 'bcampbell', '192.168.10.32', '52502fd5d60640195b5e0da9216c1b18', '2014-12-04 12:13:55'),\n(147, '65e655b44089d91', 'bcampbell', '192.168.10.30', '8c7907bc20aec6989c1fb67817fa994d', '2014-12-04 03:23:26'),\n(148, '3501cf95f4a14bb', 'ascallion', '192.168.10.32', 'c13887ebaf298ccfc1b93dd0d3814268', '2014-12-05 08:58:41'),\n(149, '78c4f7cf0ab7255', 'ascallion', '192.168.10.38', '354c1d3e963d25c25738071e43aa43bb', '2014-12-05 09:10:35'),\n(150, 'fd09a591f0bb6c0', 'ascallion', '192.168.10.38', '8137cb9cb5527d01f37f57106b8fc008', '2014-12-05 09:16:02'),\n(151, 'd041dc5bee02461', 'ddeloge', '192.168.10.32', '7eca40843e2d4d2771b71659be1ed646', '2014-12-05 12:17:38'),\n(152, '0250780fab96c21', 'ddeloge', '192.168.10.32', '5fc7b11e6082d21f1e7210b7f9063c66', '2014-12-05 01:22:17'),\n(153, 'f577a042ff4d8de', 'williams', '192.168.10.4', 'b794e7021c21d96e1ffc323a5b6d10ea', '2014-12-05 02:24:28'),\n(154, 'fde5b9681625ec5', 'netadmin', '192.168.10.24', '8f6c1da4e56538b2decd5f722751d2d3', '2014-12-05 02:27:12'),\n(155, '74620aafa5adc5d', 'ddeloge', '192.168.10.32', '0e1170961e11f7e7523daefff6780c07', '2014-12-05 03:42:26'),\n(156, '1a30acf4355d16f', 'twright', '192.168.10.30', 'd83a9155ee96870bcb6474fad8d5b0a7', '2014-12-06 02:30:35'),\n(157, '3be9840066651e3', 'twright', '192.168.10.30', '8f5862266df16e8a17db4350f922e2e2', '2014-12-06 03:17:00'),\n(158, '4c91438f8d3c47a', 'ascallion', '192.168.10.32', '34aa6c4af5847ed83a3dd6a295e58cc3', '2014-12-09 08:06:25'),\n(159, '1e56593de4a43ac', 'ascallion', '192.168.10.30', 'f6d2605314997385ce68d716ffc36f45', '2014-12-09 08:43:37'),\n(160, '2822d91e7ea357d', 'ascallion', '192.168.10.32', '2605f2b81865f8bfdab4bb33024e54d1', '2014-12-09 08:57:58'),\n(161, '261a021042dfdff', 'ascallion', '192.168.10.32', 'e512dac5f2619748e23e868a04d96323', '2014-12-09 10:21:17'),\n(162, 'c9252ea701db694', 'ascallion', '192.168.10.38', '7b7387beb4a5b64763d0e17fd24011e3', '2014-12-09 10:30:34'),\n(163, 'babc21b4f5928d6', 'ohatalsky', '192.168.10.32', '304555a12987c4273545318e894eca54', '2014-12-09 12:16:30'),\n(164, 'e5903be8d0ca28e', 'bfagersten', '192.168.10.24', '495862be4f4a040d5bcf471a1a6fe32b', '2014-12-09 01:37:56'),\n(165, 'e00457f35ab3b62', 'williams', '192.168.10.9', '098be7811d55a9d6341fc1fb33bc7ee5', '2014-12-09 02:00:24'),\n(166, 
'0b595b316144e0b', 'ohatalsky', '192.168.10.32', '61285d6f9fb6fd26d1d49368638feffc', '2014-12-09 02:08:46'),\n(167, 'd191f3ff1c28e1e', 'ohatalsky', '192.168.10.32', '7d2c4ffb3ce8e6fca17c83f059bfd692', '2014-12-09 02:17:15'),\n(168, '35e5d2ec9f469d6', 'bfagersten', '192.168.10.24', '25b9b7e82d9f529e0380732ffe605b6e', '2014-12-09 03:47:02'),\n(169, '88c078fea8111a9', 'ohatalsky', '192.168.10.32', '2669122a90d5ad8994c3fa2646d7fe95', '2014-12-09 06:06:19'),\n(170, '0388fff3f6fd61a', 'OHATALSKY', '192.168.10.32', 'c535ca3ab629debe2a0805f48707c043', '2014-12-09 07:07:40'),\n(171, '8c7842591866d65', 'mgreen', '192.168.10.30', 'b0d936723a66a12227d93f623b1c7f16', '2014-12-10 09:11:13'),\n(172, '65ae2c9d4dcc7ad', 'mgreen', '192.168.10.32', '248f31933eac9d6adcc15d2619a872ca', '2014-12-10 09:13:50'),\n(173, '903fff9e157c982', 'mgreen', '192.168.10.30', '9cd13b39d9d71ccf42c16670cc25a0d8', '2014-12-10 09:45:26'),\n(174, '368e675be09a472', 'mgreen', '192.168.10.30', 'aa33a6c89b8365bea55b2e9a43bf62f1', '2014-12-10 09:49:43'),\n(175, '0eece35e9c0698d', 'williams', '192.168.10.9', '6af0f9372ee9ba8345987e00f49f1013', '2014-12-10 02:32:10'),\n(176, '09bf593b8bbc2e9', 'mfortier', '192.168.10.2', 'f6a46b2e74064aeedc82eb6b506634f8', '2014-12-10 03:35:53'),\n(177, 'b24a0aefe982812', 'mgreen', '192.168.10.32', '66c1e56194c8ffd12605943cdccbd34e', '2014-12-10 05:16:46'),\n(178, 'bbed050b663f10d', 'bcampbell', '192.168.10.32', '0d335f08a9757a579185c2fc0f4704af', '2015-01-12 12:02:08'),\n(179, '90342e04d939d2f', 'bcampbell', '192.168.10.32', 'f590ab6f5bd84aead19f7e4c242e20a8', '2015-01-12 01:28:49'),\n(180, 'be0e5b4ba3e7334', 'bcampbell', '192.168.10.32', '7f212cfc3a573287488016409cdd14f3', '2015-01-12 02:09:22'),\n(181, '2ff006211b2a5f4', 'adible', '192.168.10.32', '27ef62ac5c04db8dbc7b05c18bc615be', '2015-01-12 05:07:51'),\n(182, 'e4534bad84e785d', 'williams', '192.168.10.1', 'a51cec736cddd47c42d29b2f01347ebe', '2015-01-13 08:46:33'),\n(183, '85bd3dfb201477d', 'williams', '192.168.10.1', 'acce5acbaf3ade503c9b4ac62da9e023', '2015-01-13 09:49:35'),\n(184, '54508c80b632f40', 'jallibone', '192.168.10.32', 'ebbbf6dec0d7b2a831ac7232d448f763', '2015-01-13 09:51:31'),\n(185, '7b860d2b03a56eb', 'jallibone', '192.168.10.32', '99ec1229e55576c8ca468ca5d314b4d5', '2015-01-13 10:40:53'),\n(186, '519f4d03d4abdef', 'twright', '192.168.10.32', 'e3f8249fbe2e86736f70108a35d88dc2', '2015-01-13 01:08:32'),\n(187, 'e7e6f4866787fcf', 'jbonaccorsi', '192.168.10.32', 'fb984428c7f8e52e08321bbf9a813eeb', '2015-01-13 07:18:54'),\n(188, '00bb9a1606b74f8', 'jbonaccorsi', '192.168.10.32', '6dc9239b5df5d070c305a30a77e0aba9', '2015-01-14 02:12:23'),\n(189, '3c7b1e4ff76ccb8', 'nmazzamaro', '192.168.10.32', '7d8a79250be3010d3635bf6c11ca2e94', '2015-01-14 04:47:56'),\n(190, '0f18445c26438ef', 'adible', '192.168.10.32', '26a9b803e8caad0d12ab088dde0cd952', '2015-01-15 03:53:52'),\n(191, 'e82d19f05f04b9d', 'tchermely', '192.168.10.32', '968770e55035a28098a580609ece83ce', '2015-01-15 04:49:27'),\n(192, '7c8f8154521689d', 'tchermely', '192.168.10.32', '283e61a831d485e527763de3af8b24da', '2015-01-15 05:58:37'),\n(193, '44922b240271d35', 'ddeloge', '192.168.10.32', '395284e7688878aedfdc826e5860920f', '2015-01-16 08:36:09'),\n(194, 'b028d5870f48c95', 'ddeloge', '192.168.10.32', '0da0321a01c225aa642a3b3d8de95a6d', '2015-01-16 11:04:28'),\n(195, 'b0185cb794f5112', 'ascallion', '192.168.10.32', 'ce5ed82f1f192c7188f1cf0300404d15', '2015-01-19 08:20:50'),\n(196, '368e088c36810a5', 'ascallion', '192.168.10.32', '5153280376256199cdd0f8a3033bc0bc', '2015-01-19 
09:09:44'),\n(197, 'c86c1f3da6d5c39', 'ascallion', '192.168.10.32', '8fb39cc463691e525d73fb5c6cb7eb78', '2015-01-19 09:48:47'),\n(198, 'd78933d916036ce', 'bcampbell', '192.168.10.32', 'e56540a902743d65b026fa6ea538844d', '2015-01-19 12:35:27'),\n(199, '834f01b21510b4f', 'bcampbell', '192.168.10.12', '17829b6db3462d5d2c2bf3837b9604ff', '2015-01-19 01:08:46'),\n(201, '419e2e5f58fffbf', 'bcampbell', '192.168.10.32', 'ccb4a2aa87fea4896013e51f9de69d73', '2015-01-19 03:14:55'),\n(202, 'd7e6089ea693343', 'bcampbell', '192.168.10.32', 'e73da3b975e4b80139d0ccb9430d8f23', '2015-01-19 03:41:41'),\n(203, '72693d785b628bc', 'adible', '192.168.10.32', 'b0f341d271944a4528b2abd64bc1ae62', '2015-01-19 04:36:50'),\n(204, '3e31ed25990725f', 'adible', '192.168.10.32', '1ab1106f0176d5fd4e94dc87241ecde9', '2015-01-19 06:00:38'),\n(205, 'ae0c34f54a6b87d', 'adible', '192.168.10.32', '3889e3981dd727f091af01162ffd50a5', '2015-01-19 08:00:32'),\n(206, '7cfd6d114b3673d', 'mgreen', '192.168.10.6', 'fe65d0fd9fb0e45de1b2b5718034ee83', '2015-01-20 11:12:14'),\n(207, '95b9cae3c7a344e', 'cdumont', '192.168.10.25', 'c29c11e52e86c19f431c9cfee342af5d', '2015-01-20 11:49:11'),\n(208, 'd666c8c4f00c7e7', 'twright', '192.168.10.32', '1b54a73c426a13c24d1c961371440a77', '2015-01-20 01:39:07'),\n(209, 'f5e3c97c1abc977', 'twright', '192.168.10.32', '0793a20f91db6bf84f6738b5f8316cfa', '2015-01-20 02:27:51'),\n(210, 'ac45edc46b37ec3', 'twright', '192.168.10.32', '16193ef2fa5914d6cd8d18ad4a7ef909', '2015-01-20 03:41:39'),\n(211, 'e9597cc45ac9533', 'kdobrolet', '192.168.10.5', '48bf09a664bfaa318d914dd4e09fcd90', '2015-01-20 03:43:48'),\n(212, '667c2186b47cc46', 'twright', '192.168.10.32', 'f6a3d10ddc1729d0a428191f595bf7e9', '2015-01-20 04:22:29'),\n(213, '9138eda711c8bec', 'jbonaccorsi', '192.168.10.32', '4664ee835c20075a73e22a8075f790db', '2015-01-20 05:11:44'),\n(214, '8cd5fefa210b2a6', 'jbonaccorsi', '192.168.10.32', '5976286e35dfd75e2f65584b82b0e486', '2015-01-20 06:35:16'),\n(215, '2fdecb6d526bc3a', 'jdiorio', '192.168.10.32', '6e89dd4f6c3ddddafd74914606357a46', '2015-01-21 09:05:35'),\n(216, '2dad237fe823501', 'jdiorio', '192.168.10.32', '30dbdbccc7ced78b99d773f2080d6407', '2015-01-21 10:27:12'),\n(217, 'f1f3e036b46e4a2', 'williams', '192.168.10.1', '4867879daf3dae39256b52c979878010', '2015-01-21 11:50:11'),\n(218, '8f792575ae44774', 'jbonaccorsi', '192.168.10.32', 'b487a69e0e3a2a8bd82fd1e7bd61630a', '2015-01-21 01:30:08'),\n(219, '94a60534b05c9f2', 'ascallion', '192.168.10.19', '3fa9660d821d85ba25dae23fa59db7a0', '2015-01-22 08:30:55'),\n(220, '7aa557b06cd2797', 'ascallion', '192.168.10.32', '27ba7864268267fbff2af8c999a2a11b', '2015-01-22 10:17:23'),\n(221, '8812ae97586a684', 'adible', '192.168.10.32', '47acb78e10320f46b3bf26d3468f01c7', '2015-01-22 12:00:08'),\n(222, 'f55d2919e0b53e0', 'adible', '192.168.10.32', 'c5aad8934e3990ad745a9a27f3e94331', '2015-01-22 03:41:55'),\n(223, '135aca42405c811', 'tchermely', '192.168.10.32', '326aa1b99bd27bb7b585052f2380b78b', '2015-01-22 04:08:37'),\n(224, '94f1aca36fdf1f8', 'tchermely', '192.168.10.32', '77f7852cc9ec76b703291892b5f53ae3', '2015-01-22 04:42:43'),\n(225, '440c1e558d06e20', 'tchermely', '192.168.10.32', 'bf35e986fabae82daf8249b2d8fbf806', '2015-01-22 06:21:57'),\n(226, '15b1a818b262bdf', 'ddeloge', '192.168.10.32', 'fe42443b53a18e08c908970c95abbb1f', '2015-01-23 09:52:36'),\n(227, 'bca1550198bd44f', 'ascallion', '192.168.10.32', '329022a31338a3a0230934874920c37e', '2015-01-23 12:50:18'),\n(228, 'ba9e464c2454161', 'ascallion', '192.168.10.32', '64b9a429b76b5114d63423b2c442d225', 
'2015-01-23 02:05:20'),\n(229, '2a6073759c8009b', 'ascallion', '192.168.10.19', '784aab87ce4949694343e730ede04858', '2015-01-23 03:11:25'),\n(230, '2f004d9e584666f', 'ddeloge', '192.168.10.32', '678b6635ac96af640fedd3b312012db7', '2015-01-23 04:08:53'),\n(231, 'c94c5f6c6dd238d', 'bfagersten', '192.168.10.14', 'f5a48ab29c1885f195dc10eefb2a80f9', '2015-01-23 05:00:38'),\n(232, 'bf21b30edae22b8', 'ddeloge', '192.168.10.32', '47703a18b0a10e035ef9a445517e732a', '2015-01-23 06:09:43'),\n(233, 'badc56eec76fd10', 'ascallion', '192.168.10.32', '1e20fca77a3862e358b748a3bd294643', '2015-01-26 09:21:29'),\n(234, '589d87cdc0e4c8b', 'ascallion', '192.168.10.32', 'faa7c53c4dd8ce2c811ed22949d3ae4a', '2015-01-26 10:39:21'),\n(235, '97b1901505c3094', 'ascallion', '192.168.10.32', '899080e3ac24e219cc9663442692ec25', '2015-01-26 11:46:43'),\n(236, '9bdd8c3129942d0', 'bcampbell', '192.168.10.32', '49da61cb024553164407dfe2e54ac8f1', '2015-01-26 11:58:49'),\n(237, '1c4135fdca4b1b9', 'bcampbell', '192.168.10.32', 'f0bbfdcb7603de419dc394ac7259965c', '2015-01-26 12:42:54'),\n(238, '0c0499148f17db4', 'bcampbell', '192.168.10.32', 'cb0e8c9acffeed2f8ea40771fb43186e', '2015-01-26 01:41:09'),\n(239, 'cfc6ef09c280236', 'adible', '192.168.10.32', 'ad5d967bd563f268574f1a2c626f6dce', '2015-01-26 04:12:55'),\n(240, 'b86625ae9094a2d', 'adible', '192.168.10.32', '3f680295df77304976e7bc7953cba234', '2015-01-26 05:03:16'),\n(241, '4213b52c3c8f496', 'adible', '192.168.10.32', '0c79e0ff28596c447755e774c6714c0f', '2015-01-26 06:10:53'),\n(242, '1b7be944a60e754', 'adible', '192.168.10.32', '2fd22eda821f7c34efed7b99aa2172d4', '2015-01-26 07:58:14'),\n(243, '0b75565adfbfc8b', 'jallibone', '192.168.10.32', 'a28c21029dd97598572bc8562b661f68', '2015-01-27 08:21:45'),\n(244, '2f5319bd546d93c', 'jallibone', '192.168.10.32', 'b692a9e73f3a1e83db72449fef2f2225', '2015-01-27 10:26:03'),\n(245, '65e45f35ea5554a', 'jallibone', '192.168.10.32', '3131f556a1a0c3d8a86b4f395755749f', '2015-01-27 11:08:47'),\n(246, 'c2b0685041aee7a', 'jallibone', '192.168.10.32', '3ed9a883586af1a1ad5bc995480db366', '2015-01-27 11:49:43'),\n(247, '5b262a779b3bb2a', 'twright', '192.168.10.32', '50f56a0a45eb2e20d65e1fb9cd2ab00f', '2015-01-27 12:19:48'),\n(248, 'cf9f6778f2220a1', 'twright', '192.168.10.32', 'e8b76bdcfca150d2b297df0f8b486209', '2015-01-27 01:38:14'),\n(249, '86a3127d225eab3', 'twright', '192.168.10.32', 'fb5909b549bc1d79bb8d199350bb6b4f', '2015-01-27 03:50:53'),\n(250, '24c0d7bacc263e3', 'twright', '192.168.10.6', 'b950783ab4de267d8a66523d0429a77d', '2015-01-27 04:07:58'),\n(251, '80ff5e4199a04e9', 'twright', '192.168.10.6', 'aa52387f2723f81225f362b19cf9d068', '2015-01-27 05:10:32'),\n(252, '69fb164adfe5a8d', 'jdiorio', '192.168.10.32', '5a3fd4242f7802eb116154896f15dee1', '2015-01-28 07:56:52'),\n(253, 'ec9aec47d1c2a0f', 'williams', '192.168.10.1', 'fd5bc3d3be243c559eb1ef444ec7527d', '2015-01-28 08:49:38'),\n(254, '530ea34df216d96', 'williams', '192.168.10.1', '29fd1d42283e7879544d9747741e56d2', '2015-01-28 10:00:07'),\n(255, 'b903bc45299cd05', 'jdiorio', '192.168.10.32', 'c7b8fe56cacbb79f9a895aee53e0e387', '2015-01-28 10:49:32'),\n(257, '90d977528d3a6eb', 'aacebo', '192.168.10.26', '1fdd0b420b03a2f6e2f44b085eebde01', '2015-01-28 12:37:10'),\n(258, 'ab44e26cc5182cb', 'williams', '192.168.10.1', '7967f1fd1de3509f9be80a8bc979f012', '2015-01-28 02:18:42'),\n(259, 'ddf08f8196c4d20', 'nmazzamaro', '192.168.10.32', '6f5754088e1daf05330e672d34520a46', '2015-01-28 05:33:59'),\n(260, 'c522713a48d9e19', 'ascallion', '192.168.10.32', 
'c4f9bcb3d5aee5f8f3213796070ec0f0', '2015-01-29 08:12:10'),\n(261, 'b016a2176f86e31', 'ascallion', '192.168.10.6', 'a510712f0e0d68415430579a05e49d84', '2015-01-29 08:32:07'),\n(262, 'c6523fa5e0d5b61', 'ascallion', '192.168.10.32', '22afcaf79db594225855bdd639c16a8b', '2015-01-29 11:59:42'),\n(263, '22711b384d3a9b1', 'adible', '192.168.10.32', '7f6b56fc43fae75b8b145a4d0e0e28bd', '2015-01-29 12:09:52'),\n(264, '471c804f1bb29eb', 'kdobrolet', '192.168.10.9', 'ce18354d2d8c37220ed51bc5d8d46899', '2015-01-29 12:59:52'),\n(265, 'a5492d881382b61', 'mgreen', '192.168.10.12', '8cfd163aa3d7a683337d9bd28a49c899', '2015-01-29 01:06:25'),\n(266, '514d8a70ecee06b', 'adible', '192.168.10.32', '4746c316b10e3927653f25ae6ee11eb4', '2015-01-29 01:08:55'),\n(267, '1121aca5c7f48d2', 'adible', '192.168.10.32', '5c0ac46fa1ff1644872afb42382fab88', '2015-01-29 03:02:14'),\n(268, 'c57106a39d3622d', 'tchermely', '192.168.10.32', '58fd9f297ec85c3427c82d5be4a83cbc', '2015-01-29 04:13:01'),\n(269, '4e67e7d6fa06de5', 'tchermely', '192.168.10.32', '0db32d177e00458c9ade7947305e31d4', '2015-01-29 06:27:26'),\n(270, '87317849d659aed', 'ddeloge', '192.168.10.6', 'fb8eb7909063c6683c6e656a44b9d7cc', '2015-01-30 10:00:44'),\n(271, 'b4b3ea77502364b', 'ddeloge', '192.168.10.32', '980885f3f2f40fde151956a9c5801d2c', '2015-01-30 10:50:42'),\n(272, '8a150db8a7d3674', 'jdiorio', '192.168.10.32', '99e7d2f17b82709a61d0cd233bb8d831', '2015-01-30 12:30:57'),\n(273, '5d2a68d9f0cf0de', 'jdiorio', '192.168.10.32', 'fc72caf619cf6e5df3d34e8711dda6c2', '2015-01-30 12:58:56'),\n(274, '46c34dbba943d49', 'jdiorio', '192.168.10.32', 'ebcfe47cd4e5ef704a2550df1b5b7427', '2015-01-30 01:44:00'),\n(276, 'da6a01de142157d', 'ddeloge', '192.168.10.32', 'ae7894b9cfde9eb0be1d2195a33d013a', '2015-01-30 03:55:39'),\n(277, '628e4cefa73aa00', 'ddeloge', '192.168.10.32', '3299f481fad4cb2fafa9d17087337e00', '2015-01-30 05:33:17'),\n(278, '18c2948e44b14d0', 'ddeloge', '192.168.10.32', 'd5d7ad73df5a1ae82056b837ac9c9f48', '2015-01-30 06:37:09'),\n(279, '491a7fbfc777088', 'bcampbell', '192.168.10.32', 'c622aa5fc8129070d310868433c806a8', '2015-02-02 02:00:49'),\n(280, 'b4dfe2faee26f9c', 'bcampbell', '192.168.10.32', '64616d253610040de48be6ad3513d389', '2015-02-02 03:58:42'),\n(281, 'cb854eacc5d636e', 'tpeyton', '192.168.10.14', '17fc467bd908f2a708a6755a7185584b', '2015-02-02 05:42:47'),\n(283, 'c7c0428924221b0', 'twright', '192.168.10.6', '33c7b23626b5743b8f21b23e5675ec6e', '2015-02-02 06:53:51'),\n(284, 'ff53fc9a00e49bc', 'jallibone', '192.168.10.32', 'c2af935dd4754072f02000a5235c6ae9', '2015-02-03 08:47:59'),\n(285, 'f6b9505814cb09a', 'mgreen', '192.168.10.12', '31206d251139c9c92603b7e2d8e4e419', '2015-02-03 08:49:01'),\n(286, '0600c1583d21a88', 'jallibone', '192.168.10.32', '514ff0d4fca0927fecc0e7a2145d2c73', '2015-02-03 11:57:00'),\n(287, '086c9dd8f9dd338', 'mgreen', '192.168.10.12', 'ba3f93b22275215a75c105074a4c0658', '2015-02-03 12:08:13'),\n(288, 'f9a720f3d6f1d48', 'twright', '192.168.10.32', 'bad86676dc1b734d77719639771d3692', '2015-02-03 12:34:55'),\n(289, 'ecae0293f1fc62d', 'twright', '192.168.10.32', 'fd627cb4c9875faa4cac042b0d162f00', '2015-02-03 01:19:29'),\n(290, '105b640e27e3b45', 'twright', '192.168.10.32', '5c9dca55948c02c0a6709bcddcc342e8', '2015-02-03 03:45:41'),\n(291, '101179dd743913e', 'jbonaccorsi', '192.168.10.32', '50996bb793aecceda29e2b2c9c109826', '2015-02-03 05:33:12'),\n(292, '9188322be82a89a', 'jbonaccorsi', '192.168.10.32', '369ff0a40f8c3cca96732881647dfa0f', '2015-02-03 06:10:35'),\n(293, '7189f5e036e839d', 'jbonaccorsi', '192.168.10.32', 
'd0a8b1b0451237fd6a926d170d224b4b', '2015-02-03 06:50:45'),\n(294, '0c629c7b6256151', 'jbonaccorsi', '192.168.10.32', '39bd4a3c2b9f00bb13e8b60ff06f4905', '2015-02-03 07:44:51'),\n(295, '9f09314ea61c092', 'jdiorio', '192.168.10.32', '252afb48a98159126b892a1b9a644069', '2015-02-04 08:51:38'),\n(297, '3346072eff8a16e', 'williams', '192.168.10.1', '4e57c0ba50e9832fe1f4af279f30b345', '2015-02-04 10:56:16'),\n(300, '6ca18e55b1926ab', 'williams', '192.168.10.1', '6eb0224e543f1d9b5783bf1ab864c25e', '2015-02-04 01:26:10'),\n(301, '31fa6530b79997f', 'jbonaccorsi', '192.168.10.32', '0b28ee73477567758bce0692764dc7a6', '2015-02-04 03:07:48'),\n(302, '6e515e32903eb75', 'jbonaccorsi', '192.168.10.32', 'da3481cfcba1af9befffca8cf5e303a8', '2015-02-04 03:40:10'),\n(303, 'ea0260c77f2c080', 'ascallion', '192.168.10.19', '23be7a5b62d818618b90000b6c91fafd', '2015-02-05 08:22:59'),\n(304, '45e659f27e9660a', 'adible', '192.168.10.32', 'c273ce3e947c8b8dbf3a8aa6e221e744', '2015-02-05 12:02:26'),\n(305, '1f0333322516bdd', 'adible', '192.168.10.32', '042a2e6eea0225cc9cdd9453b621ecc4', '2015-02-05 01:23:34'),\n(306, 'dde4529fd2bb17e', 'mgreen', '192.168.10.12', '651b4a3480b75adf4bc433b9ecd0d729', '2015-02-05 02:31:20'),\n(307, '6568c01dfd6978a', 'adible', '192.168.10.32', 'd0549523b5ed231cd8fc755a87acbc9d', '2015-02-05 03:34:15'),\n(308, '804d723f8f2f38f', 'tchermely', '192.168.10.32', '8a17e9a0bc9c67c57a753c0fc02c17d2', '2015-02-05 04:58:04'),\n(309, '2a617bfd5b2f79e', 'ddeloge', '192.168.10.32', 'bc717d0a8060390d4091b9c92172b60f', '2015-02-06 08:46:06'),\n(310, '4ecf92850e02613', 'ddeloge', '192.168.10.32', '9d6c06fc333314704e9f3c67a2b6de4b', '2015-02-06 10:00:07'),\n(311, '76edd6fb22f3b72', 'jdiorio', '192.168.10.32', '7afc75fb94483e15be5f2bb00818fb82', '2015-02-06 12:26:48'),\n(312, '92c42cd49727670', 'jdiorio', '192.168.10.32', '7661ea57ffbb4ee408b89c80314d211e', '2015-02-06 02:02:50'),\n(315, '0714da60647fd11', 'bcampbell', '192.168.10.32', '2869459bd0611bfa81ff4311a4618635', '2015-02-07 04:00:49'),\n(316, 'c57cff438fefc58', 'ascallion', '192.168.10.32', '4c707a782435a74df12324e9ccf17be4', '2015-02-09 09:46:43'),\n(317, '0b99d5ca1efcb74', 'ascallion', '192.168.10.32', 'af5e2506bbf9e415ed82ee738d559cb0', '2015-02-09 11:32:25'),\n(318, 'bb4a47f1b8895ad', 'bcampbell', '192.168.10.32', 'f9e1f78a4dc6ac514efe60fdd31b123f', '2015-02-09 12:32:59'),\n(319, '11744a56d247cee', 'bcampbell', '192.168.10.32', 'b9889af67e0360c1f63feaed5f9091c0', '2015-02-09 01:12:30'),\n(320, 'a7cd885826162bd', 'bcampbell', '192.168.10.32', '79f1527199531274f8130f2803526aa4', '2015-02-09 03:04:48'),\n(321, '1b6cc68067df738', 'adible', '192.168.10.32', '0c0490a1495cdfc94b693783bea46778', '2015-02-09 04:15:44'),\n(322, '16a1d269e9ccc17', 'adible', '192.168.10.32', '61331feff1c9b58f83ed98e3d69e60d9', '2015-02-09 05:11:16'),\n(323, 'e2fe1eb7a69b05c', 'adible', '192.168.10.32', '3fa42de66a946361d58ce29c76bd170f', '2015-02-09 06:45:28'),\n(325, '591971950964993', 'adible', '192.168.10.32', '1f1b335e6a8f20c7ef21b1c4ffe73191', '2015-02-09 07:57:20'),\n(326, '878b4826c5d57d4', 'jallibone', '192.168.10.32', '35334ff6583d39b7782b21b53a1faec6', '2015-02-10 08:31:20'),\n(328, 'ec956320c8688c5', 'jallibone', '192.168.10.32', '19727178158ac2cdda0b84a8a35295ac', '2015-02-10 09:19:52'),\n(329, '90ec2f8085bf94c', 'jallibone', '192.168.10.32', 'a99b294423237b2f775fd0101fdd207a', '2015-02-10 10:15:16'),\n(330, '0f439f4177e8649', 'twright', '192.168.10.32', 'e2f0d656eba61d9c14165b0fcff0a1a5', '2015-02-10 01:30:31'),\n(331, '003c6bca25610c2', 'twright', 
'192.168.10.49', 'a95a4b936fdd21f236306b7fa16fd1a3', '2015-02-10 03:29:42'),\n(333, '16d9bd849a9593c', 'jbonaccorsi', '192.168.10.49', '0beeb7dbcf352c52ee87d4ad5e2bdd28', '2015-02-10 05:16:36'),\n(334, '13ebc79f351c15b', 'jbonaccorsi', '192.168.10.49', '55f6d1bea781d21a6a2a26cd44c77403', '2015-02-10 07:42:18'),\n(335, '9261bef92bd0a70', 'jdiorio', '192.168.10.49', '37853f9fc5b06140b0de31b7b2f9a830', '2015-02-11 08:16:01'),\n(336, 'd88f63a36fdb43d', 'jdiorio', '192.168.10.52', '8ac5b62fc0547308bc6e9cb4eb2c6da5', '2015-02-11 08:26:15'),\n(337, '03e9d3add41a650', 'jdiorio', '192.168.10.52', '7b9f1990889fc2692c577658251cbd9f', '2015-02-11 09:52:40'),\n(338, '00b7f79418b8669', 'jdiorio', '192.168.10.52', '41df465798a0bb15246f1bc3c4bddc6f', '2015-02-11 10:54:06'),\n(339, '83e9303f76ad6f1', 'ascallion', '192.168.10.49', 'deb375c1e45ed1a474102506495fd75a', '2015-02-11 04:14:05'),\n(340, '66b2d5cc5456a44', 'ascallion', '192.168.10.49', '7f357871f43e2c274ac371acaa6fc582', '2015-02-11 06:07:50'),\n(341, '1da52ad1f3a5548', 'ascallion', '192.168.10.6', 'f61515d89bc618af0194813717cdc122', '2015-02-12 10:26:54'),\n(342, 'd834eaeca156ccb', 'williams', '192.168.10.1', '615d097d877b39530671f66bb2812e2f', '2015-02-12 10:31:32'),\n(343, '9ca1ee98376300d', 'ascallion', '192.168.10.49', '12947fd7476de671e5d926946a942abe', '2015-02-12 11:54:23'),\n(344, '3e41e5879c76584', 'netadmin', '192.168.10.14', '82981b16bb25d7021692dce1abf5427a', '2015-02-12 01:39:58'),\n(345, '10cf40248fea72f', 'tchermely', '192.168.10.49', '13ba829743ff305b670e973ea211b089', '2015-02-12 04:28:16'),\n(346, '2c82c70c8a7b920', 'tchermely', '192.168.10.49', '766f999fd9269787bceda8cca3da7ee1', '2015-02-12 05:41:35'),\n(347, 'a5645b97119c6f7', 'tchermely', '192.168.10.49', '47b10303d125aeecb92d4e95a4a34594', '2015-02-12 06:46:50'),\n(348, '852cc1bf7a94488', 'ddeloge', '192.168.10.52', '49a8c8ee8a97b708ff94ff94f982b54b', '2015-02-13 08:21:49'),\n(349, 'f22a2bee360414f', 'ddeloge', '192.168.10.52', '1381175c0fc8a9e54d8724e4f560ef54', '2015-02-13 10:13:23'),\n(350, 'f9b271d2083b07f', 'jdiorio', '192.168.10.52', 'fef7d22fdbfbe01acc0ec1755f95ab78', '2015-02-13 12:18:59'),\n(351, '8520ee41e3eef61', 'jdiorio', '192.168.10.52', '83b9d3020d6077bce182bae7c672fc06', '2015-02-13 02:15:18'),\n(352, 'dc3c5f7ff779aa9', 'jdiorio', '192.168.10.52', '383a87c54197a2b9287f465809f7e458', '2015-02-13 03:33:55'),\n(353, '0a6e884be30e3f0', 'bcampbell', '192.168.10.49', '2b5f96dfca3ea7e7dc5e6136c6ab6d61', '2015-02-16 12:18:03'),\n(357, '53bbab7766901ea', 'adible', '192.168.10.49', 'aa73dfd968c5933581a9848845ce31cf', '2015-02-16 05:29:55'),\n(359, '3e46dcfae52bed9', 'adible', '192.168.10.49', '5a7acf3a2d8fcad6c2b44fb53459ae15', '2015-02-16 06:58:43'),\n(360, 'd38d048cefac524', 'adible', '192.168.10.49', '620d157ac4f0b61cae935cf9a6d76eff', '2015-02-16 08:12:55'),\n(361, '41942d6f2cca8e6', 'jallibone', '192.168.10.52', '5ee8543e62869674c31e1429715b571a', '2015-02-17 09:32:50'),\n(362, 'd88dd79aa3e76fb', 'jallibone', '192.168.10.52', 'f913272c8bd835e8982cc7a0a74dbe16', '2015-02-17 10:09:19'),\n(364, '1fb20c44ce335ee', 'jallibone', '192.168.10.52', 'ada939134c3ba8b1e2d59cfa2e83a59e', '2015-02-17 10:36:06'),\n(365, '4c4557fb5fb3b40', 'williams', '192.168.10.1', 'c1463cd59cb08c608130bbb732e0e1aa', '2015-02-17 10:41:47'),\n(366, 'e168469c679cdcf', 'jallibone', '192.168.10.52', '40da3455e7b91015040b1c4aaad7988c', '2015-02-17 11:13:58'),\n(367, 'bdb4ae1864b653b', 'jallibone', '192.168.10.52', '19a34609c7b7ba0f3a1d979b95c2f041', '2015-02-17 11:52:38'),\n(368, '05d3978a42d5a15', 
'jallibone', '192.168.10.52', '53944d01be69c604730b36bd74f97f5c', '2015-02-17 12:17:09'),\n(369, '57228217555f2a1', 'twright', '192.168.10.52', 'dd7704d64691254a6e6d2921838172d8', '2015-02-17 01:16:45'),\n(370, '2edb9f60aefe11e', 'twright', '192.168.10.52', 'd2da71afd5cd7d7e9251eb44a4e4c6ac', '2015-02-17 03:51:10'),\n(371, '3a5fbdc6493a02e', 'jbonaccorsi', '192.168.10.49', '9613925f6db97ddf8d7a4622a794a98d', '2015-02-17 05:29:58'),\n(372, 'f673ddd27d1e4bd', 'jbonaccorsi', '192.168.10.49', 'adad1a4a3c34c94a95c50a7530c96a32', '2015-02-17 07:19:46'),\n(373, 'a00bb3bb08fd312', 'jdiorio', '192.168.10.52', '08c071e889296c4561c33907c1e2bc89', '2015-02-18 10:03:28'),\n(374, 'b2482dda0d10bbe', 'jdiorio', '192.168.10.52', '78b593af9e7e755cffcaa4af7bc434bd', '2015-02-18 10:59:03'),\n(375, '26acddbe49f1c4e', 'williams', '192.168.10.52', 'd7d6631a0580b7b7c7a3ad83106a831a', '2015-02-18 11:54:11'),\n(376, 'ff38b0b97102bf6', 'williams', '192.168.10.1', 'd3a34f0d1a17f2a7978a01c120183c9f', '2015-02-18 12:47:07'),\n(377, '34b6d71bdb1bbad', 'williams', '192.168.10.52', 'f48fc99378f91660b708d53e4c2796d9', '2015-02-18 01:41:46'),\n(378, 'b23e71d535912c9', 'jbonaccorsi', '192.168.10.49', 'd9e3f4f5a5c9747c85d17c070cd872df', '2015-02-18 03:38:36'),\n(379, '96a6f695aa5a951', 'jbonaccorsi', '192.168.10.49', 'd6da9cb40fb2c51494b44b184273ebed', '2015-02-18 04:16:37'),\n(380, 'd6bf821bff005ea', 'ascallion', '192.168.10.49', 'bb06f9a40defba467ce0909bf3e5c133', '2015-02-18 05:48:40'),\n(381, '15d5597062e0fea', 'ascallion', '192.168.10.49', 'fd0b395eda535de84499c002007576f1', '2015-02-18 07:12:38'),\n(382, '8be226bbf79617b', 'ascallion', '192.168.10.49', 'f74b54efd10661d81642911bd610c2db', '2015-02-19 09:11:21'),\n(383, 'c04261091f7d92c', 'ascallion', '192.168.10.49', 'ad5661fd57d9db0219b9b27753c71a15', '2015-02-19 12:06:56'),\n(384, '7121d6bb3f7dfff', 'kdobrolet', '192.168.10.9', '684c8e632eba5cf2f485db5c3eee5527', '2015-02-19 02:21:51'),\n(385, '064ce458cdbf020', 'adible', '192.168.10.49', '677bcc99ee5529fffaf82da41d1b7ade', '2015-02-19 02:44:39'),\n(386, 'f2c84725004fecc', 'tchermely', '192.168.10.49', '6a3158b156ef13dbd641e12ec1223b65', '2015-02-19 05:36:14'),\n(387, '4ad07c2415e951e', 'williams', '192.168.10.52', '33cb1eae07c47843f91731527e4ce9af', '2015-02-20 08:58:12'),\n(388, '98cfcb28ed056dd', 'williams', '192.168.10.52', '10c74eb9c40a1343686d0f069619a39d', '2015-02-20 10:11:45'),\n(389, 'cb287ded31c892b', 'ascallion', '192.168.10.49', '8a6f12595eafa20ce01a288ac2ff6565', '2015-02-20 01:43:40'),\n(390, '0adcffeb74b4c4a', 'ascallion', '192.168.10.49', 'eac0a9117b2b96e741c5ca352cc634bb', '2015-02-20 03:27:10'),\n(391, '2698452131c1152', 'ascallion', '192.168.10.49', 'deec2acae4b6dc7420659d4d82605704', '2015-02-20 04:17:10'),\n(392, '465f04730fafef6', 'bcampbell', '192.168.10.49', '1a316dd93bc64a2c297d07b05295fa3c', '2015-02-21 02:42:51'),\n(393, '0fa7b8f08f63eb5', 'ascallion', '192.168.10.49', '703bdb1943cce39e3f576f52183e2d3c', '2015-02-23 08:54:29'),\n(394, '056d361039aed2c', 'ascallion', '192.168.10.49', 'a88b0ca33d87b99af48fd599361a3867', '2015-02-23 10:09:49'),\n(395, 'fb16d0a2a9d8bf4', 'ascallion', '192.168.10.49', 'a4df069bcb0634a7972ae53c5755623b', '2015-02-23 11:03:50'),\n(396, '97e35a891627166', 'williams', '192.168.10.1', '36bce780d40771daa2b73f8c10593667', '2015-02-23 01:24:27'),\n(397, '069c904f46b222d', 'adible', '192.168.10.49', '00d365a5b9151759ee0d8d5912940f0d', '2015-02-23 04:24:40'),\n(398, 'b3d1672828f77f6', 'adible', '192.168.10.49', 'b32566d7648abb6c9b9c4c61fc9bb03a', '2015-02-23 
06:18:21'),\n(399, '376be465bc03abb', 'jallibone', '192.168.10.52', 'b4e18ce817483886e81736ca19318fea', '2015-02-24 09:10:13'),\n(400, '92e6b9d6611c1d2', 'jallibone', '192.168.10.52', 'a5170efad81d8769ac5ea0637678cc07', '2015-02-24 11:02:36'),\n(401, 'bfd3dbe21d591a1', 'twright', '192.168.10.52', 'b47ea063b7b2ae32bc507ea2a83fa6c7', '2015-02-24 12:15:32'),\n(402, 'c26ebba5e365f94', 'twright', '192.168.10.52', '035970ae726edad9599feb83adf4a830', '2015-02-24 01:52:42'),\n(403, '9f786e4de5171b0', 'netadmin', '192.168.10.14', '79c0fa3165918cffe33ae9fffa349a15', '2015-02-24 03:15:37'),\n(404, '4e1963d8fea880e', 'twright', '192.168.10.52', '8494233a9d6bea114d0042e60e9cf9ee', '2015-02-24 03:54:53'),\n(406, 'a349a1c183f3598', 'twright', '192.168.10.52', '72a66a8ba705ffefbe7f3e8c569ada83', '2015-02-24 05:14:48'),\n(409, '0fba56871a150dd', 'jdiorio', '192.168.10.52', 'e72e4dacd885132c1fb36233b78075af', '2015-02-25 09:06:47'),\n(410, '555ea8dee6693cb', 'jdiorio', '192.168.10.52', 'f306cc00eaee8fe0ee0389e4e1242825', '2015-02-25 10:10:50'),\n(411, 'b6c13810165824c', 'jdiorio', '192.168.10.52', '93a65932b9b45803d6f820d4a5a0c220', '2015-02-25 11:07:05'),\n(412, 'c3274eeb888461e', 'jdiorio', '192.168.10.52', '6aca8d3ac411da164e01971045c46468', '2015-02-25 11:47:40'),\n(413, '9adb5a74fcdaa28', 'jbonaccorsi', '192.168.10.49', '46c0398b27d3e4dd84c7450119ca3ccc', '2015-02-25 01:28:39'),\n(414, '9608d946664f4d9', 'jbonaccorsi', '192.168.10.49', 'b654bcd091fbdd6ab4b20cb7a547cb16', '2015-02-25 03:23:55'),\n(415, 'cd7c0eed6fc1fb3', 'ascallion', '192.168.10.52', 'e9635bb6d54232c270ca39357ae779fb', '2015-02-25 04:25:17'),\n(416, '27d4ce2f0de840c', 'ascallion', '192.168.10.49', 'fe0b1d9712677932772e46c8ec99525f', '2015-02-25 06:30:07'),\n(417, '658c5804af52283', 'ascallion', '192.168.10.49', 'a576fd3758d9e825fc4aa7b39c9617fb', '2015-02-26 10:25:21'),\n(418, '7007aa2649f9a39', 'ascallion', '192.168.10.49', '901453c3d14c47927e7dc7b3cf832dde', '2015-02-26 12:11:04'),\n(419, '2de1dfc7841395a', 'adible', '192.168.10.49', '4c35efc918828f3e2d840ac1106b41fb', '2015-02-26 02:27:36'),\n(420, '80f72a682efd0f7', 'tchermely', '192.168.10.49', '70409529f617523ad46d9e1a38843f05', '2015-02-26 04:01:40'),\n(421, '80f0ccfcd7da701', 'tchermely', '192.168.10.49', '0c3814564152343573818de4c67c0820', '2015-02-26 05:24:20'),\n(422, '3ba8ddefec31abe', 'tchermely', '192.168.10.49', '0f22eb815f6165ad60e3102028176979', '2015-02-26 06:28:28'),\n(423, 'da6008970b3b2a9', 'ddeloge', '192.168.10.52', 'b33ed511ab1f585002f318660e8537ce', '2015-02-27 10:07:58'),\n(424, 'a8e5e30368c9ec0', 'ddeloge', '192.168.10.52', '7a35f29069cb78e49f62992e62471e86', '2015-02-27 11:25:00'),\n(425, '34e679f23798e99', 'jdiorio', '192.168.10.52', '268a5486b69f18922f05131ac0b49a9b', '2015-02-27 12:46:51'),\n(429, 'f95889ef21a6985', 'ascallion', '192.168.10.49', '456b1541c6079a91f2b66dd56c1bac14', '2015-03-02 12:05:00'),\n(434, '9cc3d0b9291bec9', 'adible', '192.168.10.49', '6a9253786100c710dafc48891fdf8d27', '2015-03-02 04:48:09'),\n(435, 'c6dab881889f862', 'adible', '192.168.10.49', '7a3d207de83b13352212551e2adef60b', '2015-03-02 08:20:30'),\n(436, '6005759f503dd3c', 'jallibone', '192.168.10.52', '3705eafcaa871489781a2616a7a75811', '2015-03-03 08:52:39'),\n(437, '32c08545a000f45', 'jallibone', '192.168.10.52', 'bfdf0c709b74d351661952d8ff9c802e', '2015-03-03 10:27:59'),\n(439, '69aa4e16a538d12', 'twright', '192.168.10.52', '3313e8e6509c4a0e1d532a0527efa46b', '2015-03-03 12:46:50'),\n(440, '92fa979c47e6ad2', 'twright', '192.168.10.52', 'ed0c1a453c93500c6408da09c22d8a7f', 
'2015-03-03 03:34:43'),\n(441, 'f33e8def2064ae5', 'jbonaccorsi', '192.168.10.49', '8262b28c5bd550f02111e2a3f90921eb', '2015-03-03 06:46:50'),\n(442, 'b917f3a0b1a9265', 'jdiorio', '192.168.10.52', 'f09c724af1786044c7aa77d0bf87a03a', '2015-03-04 09:05:18'),\n(443, '18ae981d90cbeee', 'jdiorio', '192.168.10.52', 'f04744cb247580f8460cf6b4a8b3cdd9', '2015-03-04 10:52:37'),\n(444, '23364f745952953', 'jdiorio', '192.168.10.49', '1ad474184931e6ae6c7776d036c94f7e', '2015-03-04 11:41:05'),\n(445, 'dfd00cef07456a8', 'jbonaccorsi', '192.168.10.49', '9926bedfb9e3e579277185bbd3e6e625', '2015-03-04 01:30:57'),\n(446, '0871d7ed20885d7', 'jbonaccorsi', '192.168.10.49', '487ca0342f9c6c51b78a3d215d77612c', '2015-03-04 03:12:38'),\n(447, 'e193843d3f07690', 'ascallion', '192.168.10.49', '189bd69fc703503bd07b5af1a5841285', '2015-03-05 10:43:02'),\n(448, '3034cc8d4a46764', 'adible', '192.168.10.49', '6358a7def606ae134cd4b3806cce3785', '2015-03-05 02:17:52'),\n(450, '1aa163d0d28a79b', 'tchermely', '192.168.10.49', 'f0d663a99d7924a653331d7154015888', '2015-03-05 04:06:56'),\n(451, '2777045c470a8eb', 'tchermely', '192.168.10.49', '1305105113f51efdaa053a93c9480b0b', '2015-03-05 06:24:13'),\n(452, '5ad70c045d23b7d', 'ddeloge', '192.168.10.49', '6dc4f230d4d2f968717dbd5eef646114', '2015-03-06 09:39:38'),\n(453, '5cc9c2c1a869e49', 'williams', '192.168.10.1', '93bae77a1b200d040d998eb862e7849c', '2015-03-06 09:41:29'),\n(454, '083697956833f7f', 'ddeloge', '192.168.10.49', '60f218a4c8841597ac292f1bd4838cb0', '2015-03-06 11:05:28'),\n(455, '5fcd91e02d9f26c', 'netadmin', '192.168.10.14', '47afa2c630621c0fadf026343422c37d', '2015-03-06 01:59:01'),\n(456, '87dab38bef3be47', 'ddeloge', '192.168.10.49', '9752aa57b591eca71e0a8e1abbed7002', '2015-03-06 04:32:22'),\n(457, '5c272dfcb087b45', 'netadmin', '192.168.10.19', '4e86eb9f334d13cdeed644ac70ba2385', '2015-03-16 10:01:10'),\n(458, 'af818a1d9f28ece', 'netadmin', '192.168.10.19', '34dedb6d5dfe576ac32f0629fe8417ed', '2015-03-16 11:07:03'),\n(460, '00042be10f53614', 'bcampbell', '192.168.10.49', 'a1324a2a5d7fcb252151ae57789e22f7', '2015-03-16 12:40:50'),\n(461, '9313bffb6f227eb', 'bcampbell', '192.168.10.33', '008cca8b19c896b642bec08b6d8caece', '2015-03-16 02:55:24'),\n(462, 'ce6206bf634a538', 'adible', '192.168.10.49', '95dfb207e9b4e87d217e6a4a7325c5a0', '2015-03-16 05:18:02'),\n(463, '765dec44b2cfb58', 'lcdioa', '192.168.10.49', 'c869a0726ccdc1ae5111edc7e9982574', '2015-03-17 08:34:53'),\n(464, 'c7c0919001b8fa8', 'netadmin', '192.168.10.19', 'ab29ed7527171239147da06e020e0fba', '2015-03-17 09:04:53'),\n(465, '34c6b84e58a3d5a', 'jallibone', '192.168.10.49', 'd0d57a262a5ccff5dfaedcc38262c5e1', '2015-03-17 09:40:21'),\n(466, '832d63c12c2f950', 'twright', '192.168.10.42', 'f91db130220ab67e5d5bca751513a4b4', '2015-03-17 12:17:08'),\n(467, 'b5c0c652abb2961', 'twright', '192.168.10.42', '9328bb9d09d3ffda1cba485aa9bd7705', '2015-03-17 02:13:43'),\n(468, 'abad42a21befef4', 'netadmin', '192.168.10.19', '876110bc37434e0cdb56ad00c799f77c', '2015-03-17 02:18:16'),\n(469, '45181ca19e98aea', 'twright', '192.168.10.42', 'fa8a41b2d387f201d774b0a43c75dcef', '2015-03-17 03:41:01'),\n(470, '53fa34694ceed48', 'twright', '192.168.10.49', 'e8ddfb9e820c6f9c59b2b38091ff037b', '2015-03-17 04:14:15'),\n(471, '86a4af2ce29a7de', 'jbonaccorsi', '192.168.10.49', '27edd1d094e72c18b619ba3843cdedf4', '2015-03-17 06:22:27'),\n(472, '40cc4c69de38e9b', 'JDIORIO', '192.168.10.42', 'db4236662a6c425a2a677e1f1252fd20', '2015-03-18 09:27:48'),\n(473, '77f4fc5113289fa', 'jdiorio', '192.168.10.42', 
'137722b8e15234e01a37eb9652f122ae', '2015-03-18 10:54:41'),\n(475, '4e28d2d456a5277', 'jdiorio', '192.168.10.42', 'b7775c7b100a10bc0283d1a259c71189', '2015-03-18 11:42:11'),\n(476, 'f9c6072c71584dc', 'jbonaccorsi', '192.168.10.49', '77b37140e5370b9dde81e996c5dacd58', '2015-03-18 01:56:36'),\n(477, '15c1085685b1220', 'jbonaccorsi', '192.168.10.49', '72129e3ab14eb279add83fa7bae9b6cb', '2015-03-18 02:40:26'),\n(478, 'd36c37fa6a6cf00', 'jbonaccorsi', '192.168.10.49', 'd0df6d5c38fc2b447a37472faacbd4d6', '2015-03-18 03:12:30'),\n(479, 'c75e4e7ee911c58', 'ascallion', '192.168.10.49', '5209296e041b275746d67127c6439172', '2015-03-18 05:00:19'),\n(480, '63ecd43bbc6d33c', 'adible', '192.168.10.49', 'a1dbf7ed43a554b4e6a02c9984f8fea2', '2015-03-19 02:39:53'),\n(483, '0e395ce3fffa4f2', 'tchermely', '192.168.10.49', '4223229146e42fa2e2dbd3162ace4f50', '2015-03-19 06:17:47'),\n(485, 'd7ff3ff1335f2f8', 'lcdioa', '192.168.10.49', '8cac51efeb49f9a9aea9403d8ce98393', '2015-03-20 09:49:39'),\n(486, 'ca98ee945d147ef', 'ddeloge', '192.168.10.42', '987b103568aba41f09637ec2fac5ade8', '2015-03-20 10:35:36'),\n(487, '51a8f7840e14c03', 'jdiorio', '192.168.10.42', 'b0fd9e00f002aa3e2c6aca690e980f9a', '2015-03-20 12:37:23'),\n(489, '548672219c68a30', 'jdiorio', '192.168.10.49', 'c94a52f5726e78acd505d957458c1d9d', '2015-03-20 02:21:21'),\n(491, 'b315c75f11478e7', 'bfagersten', '192.168.10.22', 'fbb50a2ba057159127701a78bd70474c', '2015-03-20 02:43:35'),\n(494, '7ee8a850ea228c4', 'bfagersten', '192.168.10.19', '4c28868dddbac50b5a796897f5c4690b', '2015-03-20 03:35:18'),\n(495, '4be8a5766f738ed', 'ddeloge', '192.168.10.49', '643869711c7ff693bc2c5405ec1bb488', '2015-03-20 04:47:05'),\n(496, 'ea6b0fe0b6b49a2', 'ascallion', '192.168.10.49', '8fcc489e284d909d30968a937f02e953', '2015-03-23 08:45:53'),\n(497, '1dd7e52721a05d8', 'ascallion', '192.168.10.49', '807615a2ead9f08142d54b6b1d876b93', '2015-03-23 09:56:34'),\n(498, '6d65ab8654e61a7', 'bcampbell', '192.168.10.49', '3a0c925dc259b21891f8b1d0dece8831', '2015-03-23 12:11:02'),\n(499, '1cdcf9f2da41a8a', 'bcampbell', '192.168.10.49', 'eb972ec4b0858da879b80932f1e17942', '2015-03-23 01:52:26'),\n(501, '9482925b337a2f7', 'adible', '192.168.10.49', '73ae9d8c80ee6c59798244fb0aac4e99', '2015-03-23 07:28:54'),\n(502, 'c48ac46742aba30', 'lcdioa', '192.168.10.49', '2b79268f6d4ec416120a7d42bc1536f2', '2015-03-24 10:00:54'),\n(503, '8204801f7c63918', 'twright', '192.168.10.49', '6ec8e3afc720faf2bb424556d36fde6b', '2015-03-24 03:34:43'),\n(505, '360e4db8feb3a69', 'jbonaccorsi', '192.168.10.49', 'c78f8cd5d8e9954dcf6b1e38f4bb8b34', '2015-03-24 05:37:16');\nINSERT INTO `Sessions` (`ID`, `SessionID`, `UserName`, `IP`, `Token`, `Date`) VALUES\n(506, '2f27d1939d8ab9f', 'jbonaccorsi', '192.168.10.49', 'a71a2c77b6611b031b0b50fbfde92e5b', '2015-03-25 01:15:35'),\n(507, '052e6fbfb787e58', 'jbonaccorsi', '192.168.10.49', '12e08e3d30a9c122a488ef80c2048510', '2015-03-25 02:14:52'),\n(508, 'da6981bba0fc702', 'jbonaccorsi', '192.168.10.49', '3763b6f8bd46feebc29498c6bb37c395', '2015-03-25 03:28:32'),\n(509, 'eeff1e0b36217f3', 'sbarrett', '192.168.10.4', '679d0c8459a6da8dab337a34fccda69f', '2015-03-25 04:01:40'),\n(510, 'a38278710696963', 'ascallion', '192.168.10.49', 'aaffbb9181d6f82d6434e41b7e078884', '2015-03-26 09:58:42'),\n(511, '096c6c2ef2a5f72', 'ascallion', '192.168.10.49', 'b060b449ac14f81f0983502eef579852', '2015-03-26 11:28:54'),\n(512, '03e2faa5eff4436', 'tchermely', '192.168.10.49', '07acbec618b842099e8d3c6cee3cd685', '2015-03-26 03:48:05'),\n(513, 'a257fcb174416c0', 'tchermely', 
'192.168.10.49', 'ad2ca4fc859b54ea8eacc94ff628f30e', '2015-03-26 06:41:40'),\n(514, 'a41a9197ab8156c', 'ddeloge', '192.168.10.49', 'b3e380ab71e3487226a005476196d927', '2015-03-27 09:58:50'),\n(515, 'be4211e327ad9f8', 'ddeloge', '192.168.10.49', 'e201450d15d8af27b7dff02fa1b75d80', '2015-03-27 11:16:45'),\n(516, '2453aed1fd3cc4a', 'ktellers', '192.168.10.8', '6deb8f264d444d392f878c89cb79a822', '2015-03-27 11:36:24'),\n(517, '520c6080e598e9a', 'jdiorio', '192.168.10.43', '36273bf76729be4e8d919fbfd90c23c7', '2015-03-27 01:28:03'),\n(518, '9b780a79b01f06e', 'twright', '192.168.10.49', '9d866233efe8c6fd902cb065d0eca4b2', '2015-03-29 12:32:08'),\n(519, '17fe88752295a6d', 'ascallion', '192.168.10.49', '0263cf3ffa41bf41554470399300c313', '2015-03-30 08:57:37'),\n(520, '0c42c2f2a8f8916', 'adible', '192.168.10.49', 'c65c53f6f37857d31ddc861cb083152b', '2015-03-30 04:07:12'),\n(521, 'faa7ebe744b1d93', 'adible', '192.168.10.49', 'a11dd27fb00f74655e9ae4324f81330f', '2015-03-30 05:25:08'),\n(522, '806cdb19fbcc075', 'lcdioa', '192.168.10.49', '0136cdd5a0f2efd1aacbb64b3c5c64cc', '2015-03-31 08:41:42'),\n(523, '72ce7286c9fc619', 'twright', '192.168.10.49', 'e024d11f387f28d0b30676422c4c00ad', '2015-03-31 12:02:46'),\n(524, 'cac84abf848c4b4', 'twright', '192.168.10.49', 'c5189162cee67fc22b8445f1af1e8384', '2015-03-31 01:20:24'),\n(525, '0783d29e4873503', 'twright', '192.168.10.49', '63f52b770519dc0bcf52f8da286d8da7', '2015-03-31 03:04:19'),\n(526, '59b88df24523ead', 'twright', '192.168.10.49', '2744e25aaed269f976da67d3d179e2da', '2015-03-31 05:01:18'),\n(527, '44c50415023bea5', 'jbonaccorsi', '192.168.10.49', 'd4b666e71b9369f6a12c6ee8ac73ed0d', '2015-03-31 07:22:58'),\n(528, '05377ab813baee5', 'jdiorio', '192.168.10.43', '280a881142472a15e94f65e0b3ce7225', '2015-04-01 08:29:44'),\n(529, 'a4f385a0c88cc71', 'jdiorio', '192.168.10.43', '15f539e5b191e3d11a8f400b066c14a9', '2015-04-01 09:37:16'),\n(532, '347d79c2d176750', 'jbonaccorsi', '192.168.10.49', '0cd85823e637d2981054cb2a82e340df', '2015-04-01 02:28:05'),\n(534, '300f005ccf004cc', 'ascallion', '192.168.10.49', '48a7eb857142ea34d8d3d41a2711b9b9', '2015-04-01 03:51:15'),\n(535, '05862fb3b7922f9', 'ascallion', '192.168.10.49', '1af8429a46180c0af5f14d812515ecbb', '2015-04-01 05:24:52'),\n(536, 'bd6c4a16254c2c3', 'ascallion', '192.168.10.9', 'ccf41916538915a13dbe2dbce2777201', '2015-04-02 08:49:35'),\n(537, 'db0357f374ebb17', 'ascallion', '192.168.10.49', '2bdedc427f08dc74c8252a7a151b19b0', '2015-04-02 11:31:46'),\n(538, '7f54446509c42f5', 'adible', '192.168.10.49', '5e1783cf4a1887305badfd07b3e9dd49', '2015-04-02 12:35:09'),\n(539, '1df7a7a61d289ba', 'tchermely', '192.168.10.49', '50fa2f84da8bc6703c7c3c74aba151ca', '2015-04-02 04:00:40'),\n(540, '51ff8ecdffa08ca', 'tchermely', '192.168.10.49', 'c49bba08d4dd29d4ec4adb0dd889631c', '2015-04-02 04:54:02'),\n(541, '0202b5a6fa407e7', 'tchermely', '192.168.10.49', '463ae60310d9da9d5f062eabc41151a3', '2015-04-02 07:25:22'),\n(542, '920cf8797a306f6', 'ddeloge', '192.168.10.49', 'db5d19490b42dd673899a9ef7a3bebb6', '2015-04-03 09:59:42'),\n(544, 'e7835fcd0f0be21', 'jdiorio', '192.168.10.43', 'f6c461daf6bff18c10fa2d99aad75351', '2015-04-03 03:07:14'),\n(545, 'd5e042531c5bf74', 'ascallion', '192.168.10.6', '285ca8a2fe4118e40d87fcfe3da2cad5', '2015-04-06 11:39:18'),\n(546, 'a2c3dd4dd707397', 'bcampbell', '192.168.10.49', 'cfda23777a6de0dbd10005824eda4890', '2015-04-06 12:27:27'),\n(547, 'cf145dc18d72749', 'bcampbell', '192.168.10.49', '400bd055ac7e5efbe0d40ad8142e81d5', '2015-04-06 01:37:23'),\n(549, 'b71d0e63abb9dee', 
'adible', '192.168.10.49', 'a0c9a8d79c85a42ef923c7636801c11c', '2015-04-06 04:08:46'),\n(550, '5c276b4fb54c038', 'adible', '192.168.10.49', '5e8c3de7b3ce52144d3df9c6e7b8676e', '2015-04-06 05:17:48'),\n(551, '8d3affebadf4168', 'twright', '192.168.10.49', '11888c5663858bd747e98ac22c74b02a', '2015-04-07 12:38:54'),\n(552, '954b07451704e57', 'twright', '192.168.10.49', '4008d5fdeac11a0a81eb07b69ad0cf2d', '2015-04-07 03:24:09'),\n(553, '48813855863aaa0', 'twright', '192.168.10.49', '3fbc2dbbacc1a1e088b45253502f8f54', '2015-04-07 04:45:17'),\n(554, '9f461817428189a', 'jdiorio', '192.168.10.7', '96510a7e0491c0ad58d2c5c664c09e9c', '2015-04-08 08:44:45'),\n(556, '37eab6bddbdf431', 'jdiorio', '192.168.10.7', '9c759cc2141eb7f6ede5d0cbd5dd5e06', '2015-04-08 09:57:35'),\n(557, '60b9c419f556fd5', 'ascallion', '192.168.10.49', 'd38cd74bab8acf0b6ab40ffc5f327cbe', '2015-04-08 05:34:33'),\n(558, 'db908f9f6bc3c2e', 'hgregal', '192.168.10.22', '97851dfa7736bcae24f3649be2be69cd', '2015-04-09 10:18:12'),\n(559, '003ebfc2b547413', 'adible', '192.168.10.49', 'ac7f18c74025ef68da244581a77ebbfb', '2015-04-09 12:07:08'),\n(560, 'ef62f49fa7f78a8', 'ddeloge', '192.168.10.7', '99950918601c65ce159cb85daa83e73f', '2015-04-10 08:44:17'),\n(561, 'bcf2846372bd52e', 'ddeloge', '192.168.10.49', '6593519753961feb3177223ddf91dc69', '2015-04-10 10:03:33'),\n(562, '333376a7b6f5cfa', 'jdiorio', '192.168.10.49', '2d2a8327d504f22679f57521d8d9e121', '2015-04-10 01:40:15'),\n(563, '3483df37be69a55', 'ascallion', '192.168.10.26', 'd3aad8a51e262872d5be4665cf3b1e7e', '2015-04-11 12:11:08'),\n(565, '30fa1416aa394ef', 'ascallion', '192.168.10.49', '2ced12b2a90aefb41a5469c997dd2b5f', '2015-04-13 10:55:52'),\n(566, '98a2918ea7a7502', 'ascallion', '192.168.10.49', '21d46f926bc8b0f246618d2f4ffdcad2', '2015-04-13 11:40:39'),\n(567, '64294e74f911aa3', 'adible', '192.168.10.49', 'b1fb1134a9664609b0560cd1a5f0770c', '2015-04-13 06:09:35'),\n(568, '8998ebdc594188a', 'lcdioa', '192.168.10.24', 'e734d6efda5b58bee6e0aa29d262793d', '2015-04-14 10:59:01'),\n(569, 'e714e7a5e277412', 'twright', '192.168.10.24', 'caf8425772bb77f5f733a11ac6e4c24c', '2015-04-14 01:14:03'),\n(570, 'e03f879304314c5', 'acaron', '192.168.10.51', '0ecfb523c3446781c400150210e83311', '2015-04-16 09:02:42'),\n(571, '6fa0ecc9dee87fd', 'acaron', '192.168.10.51', 'af6f87d63a0d922fd19f06292d49aa67', '2015-04-16 02:34:53'),\n(572, '1952556e3e9ed15', 'adible', '192.168.10.75', 'ad696b49e18de90265fe9bd74532d457', '2015-04-16 02:46:48'),\n(574, 'cb08f89f5c3bd28', 'jdiorio', '192.168.10.75', 'f84854845dbbd64de63d9af52ca06f1b', '2015-04-17 03:34:31'),\n(575, 'e6fdac733a97276', 'ascallion', '192.168.10.75', '8e20ccf99d186c9a2c82c1de17e259aa', '2015-04-20 11:14:00'),\n(578, '563c2a4ccd8729d', 'adible', '192.168.10.80', '3f811c483d609e1ea5d928815881754a', '2015-04-23 03:10:17'),\n(579, 'f45010e964a919d', 'ddeloge', '192.168.10.26', '7423ed78466ea7860b8ee119cde9d0bf', '2015-04-24 08:58:06'),\n(580, '11aaab180a12e24', 'bcampbell', '192.168.10.75', '64282a0247c8378f544c04760dc6be02', '2015-04-27 02:16:00'),\n(581, '6df821614f8a105', 'acaron', '192.168.10.51', '32b3f5564369c6d0e77c8f2ddb636041', '2015-04-28 10:06:56'),\n(582, '3dfbeb2c19d9ccd', 'acaron', '192.168.10.51', '8afecb98b2af5c7062d9ed3d5a00585f', '2015-04-28 01:56:55'),\n(583, '4ea55758936e2c6', 'williams', '192.168.10.3', '1212e2b56eb59e9507c91e5650c16e65', '2015-04-28 02:07:05'),\n(584, '363e573cf879639', 'williams', '192.168.10.3', 'e06d7a284a52ee8a39d259c123f2467b', '2015-04-29 10:01:29'),\n(587, '6a0a70a00028682', 'lcdioa', 
'192.168.10.3', 'ecbec6829a5f5b82e177444962520e90', '2015-06-01 02:55:17'),\n(588, '8de69bbe7c04505', 'acaron', '192.168.10.28', '0bfde78924d8b95fbaf9a29a71c53347', '2015-06-01 02:57:44'),\n(589, '6b3045039a1bc49', 'lcdioa', '192.168.10.4', '570a15ad67d64ae2aa38e4a548915c7f', '2015-06-01 03:01:07'),\n(591, '486fbc3eb5debc4', 'acaron', '192.168.10.8', 'dce892868dda09faf9d393d86eb2daca', '2015-06-02 08:20:01'),\n(592, '4ab213ed28ac762', 'williams', '192.168.10.1', '1f39eff8a3b0f3c770c6aa9bd3e65fff', '2015-06-18 04:05:16'),\n(594, '7b8071343576ae6', 'ascallion', '192.168.10.47', 'dc12f322f101d769078059500d60f725', '2015-09-02 07:41:10'),\n(595, '0ceeff506b6364e', 'ascallion', '192.168.10.47', '0dd4b3748086aa9ba0c68979c96a36f1', '2015-09-02 08:11:34'),\n(596, 'd4e702e807d2ad5', 'ascallion', '192.168.10.47', '4534cd247ffba32e9224c25bfd1bb65e', '2015-09-02 09:12:23'),\n(597, '077fa94570c1fa2', 'ascallion', '192.168.10.23', '33a2e32fbfaf3f84af78e0469d3eae3b', '2015-09-02 09:27:13'),\n(598, '6c478749faea92b', 'ascallion', '192.168.10.23', 'd5d7e630b7de38337fccd661001c537d', '2015-09-02 10:22:41'),\n(599, '998204536fc8fa8', 'ascallion', '192.168.10.23', '5eec3ace4da59f4d4ed34f2235090ac5', '2015-09-02 11:13:35'),\n(605, 'd6c3be914e3ba0f', 'ascallion', '192.168.10.47', '1b357447d2c27b54f27429f1ffa061af', '2015-09-14 12:54:23'),\n(606, '3eceb9a5da42da4', 'ascallion', '192.168.10.23', '745aea213fa78c2ed69ff7a0d9b45e18', '2015-09-14 01:41:40'),\n(609, '52a52c40fb91532', 'dparadise', '192.168.10.17', '5fae75539c41cd8c931ce766400adb49', '2015-09-15 02:43:24'),\n(610, 'a471d408d05d09a', 'ascallion', '192.168.10.47', '16ae83653067272c8c6390f362cadd56', '2015-09-16 08:40:19'),\n(611, 'ef6ca4d96679c6c', 'amaccarone', '192.168.10.38', '3190568159c6ddd3ad96f5ea7440f1c0', '2015-09-16 09:40:53'),\n(612, 'e4d2a929c2a071f', 'ascallion', '192.168.10.23', '5ba44add0c544243c9b3bf58db86727e', '2015-09-16 09:59:19'),\n(613, '1fcdac0f8586ebf', 'ascallion', '192.168.10.47', 'd3aec2abf401755e1538df889f558818', '2015-09-16 10:51:49'),\n(614, '3901bd9330f249d', 'ascallion', '192.168.10.47', '3f147b0757c76a495189903653fe6b8d', '2015-09-16 11:47:18'),\n(615, '2f7d73b5b38c7d8', 'cdumont', '192.168.10.20', '7a2da0703c0f5c6026f4a8019e9e6bc6', '2015-09-16 12:06:11'),\n(616, '8b4431d3d3cd1c7', 'bcampbell', '192.168.10.47', 'daff6402ffbdeb4c498a79baaec5cc96', '2015-09-16 01:42:30'),\n(617, '539aff405a859a8', 'bcampbell', '192.168.10.47', 'dfd5014fb86b5f10bf72b1cc436214fd', '2015-09-16 02:10:31'),\n(618, 'd8cc84480672655', 'acaron', '192.168.10.8', '9ffcea27d5b14aafad0b6535634a0126', '2015-09-17 11:00:51'),\n(620, '92261a0e7fa513d', 'acaron', '192.168.10.8', '88485eb514f7094f18eac2a5bcad57a8', '2015-09-17 01:51:11'),\n(621, 'fc10f6d75c23c44', 'acaron', '192.168.10.15', '55fccd73327c41fb76fb41990f0259ca', '2015-09-17 04:30:54'),\n(622, '8d31afa3a1e9ce1', 'ohatalsky', '192.168.10.17', 'cf3991ffd38d16940cb6a44c0311f21e', '2015-09-18 11:53:55'),\n(623, '02c5309bd594144', 'dparadise', '192.168.10.17', 'f35023f386f6916acd523198035a77e0', '2015-09-18 04:01:49'),\n(624, '8ec97f0f14a8e16', 'bfagersten', '192.168.10.6', '9baa82893eb24a461c4f81639c3f49ba', '2015-09-18 04:03:31'),\n(628, '503d1686297bb0f', 'dparadise', '192.168.10.17', '6c6760e552262be80016995d0718cc68', '2015-09-21 11:23:19'),\n(629, 'b02deeb75cef29b', 'amaccarone', '192.168.10.4', 'a63815effb60cc87f67b1dc2e6fd2d9c', '2015-09-21 02:02:57'),\n(630, 'cb5c5bed9880a8e', 'ascallion', '192.168.10.47', '79739ac234b1112a2eed02264bf52072', '2015-09-21 02:17:54'),\n(635, 
'e24afbf7de8ab90', 'bfagersten', '192.168.10.6', '4ff9f31d4da9e99c10c6790e2abbbc68', '2015-09-21 04:46:16'),\n(637, '4fafc753660ffe2', 'tchermely', '192.168.10.68', 'f5b8a658a8df4d019791d08ee8906d37', '2015-09-22 08:14:16'),\n(638, 'a4b857705897578', 'aleonard', '192.168.10.41', 'c76445ca9c134a65cf7da85f379f31c3', '2015-09-22 10:33:27'),\n(639, 'b63a49f8ea93d65', 'jwilliams', '192.168.10.10', '86c02560075d2ec009686c880af345db', '2015-09-22 11:12:52'),\n(640, '0d914ba5a6a48a6', 'jwilliams', '192.168.10.10', 'fb1637820a70c90c7eaabb88ad9ff474', '2015-09-22 01:25:51'),\n(641, '17caddb457b1f3e', 'amaccarone', '192.168.10.6', 'f7abaebfe38cc4147f4abfdf220409c2', '2015-09-23 08:02:36'),\n(642, '69253300834aae2', 'ascallion', '192.168.10.47', '11ac9a93eaa96d34e877105b4918083e', '2015-09-23 08:04:10'),\n(643, '6aaeeab7c27e193', 'mgreen', '192.168.10.4', '4a692266bbcd4052c5328eb016a65215', '2015-09-23 08:36:35'),\n(648, 'cab5c0934a0d960', 'ascallion', '192.168.10.47', '41c1e23b8d77e6e1d2d53d30141f4348', '2015-09-23 09:20:48'),\n(650, 'd24d58d4c24fb9b', 'dyost', '192.168.10.6', '60415d8c8e4e9ef693e786b7888f0d79', '2015-09-23 01:08:51'),\n(652, 'a9e79eb7eb91569', 'jwilliams', '192.168.10.10', '5255d6b52b720562ec2ba9be32551f25', '2015-09-23 03:49:20'),\n(653, '5883d48c8f74463', 'jwilliams', '192.168.10.10', 'f577ed50d85b0e5509ec228ee8f6f8c4', '2015-09-24 08:42:29'),\n(655, '723f7a0e282e195', 'dyost', '192.168.10.6', 'beab49e1207b141245cb03ad9c9155ee', '2015-09-25 02:05:04'),\n(656, '75c3c45ae241df7', 'ascallion', '192.168.10.47', '0964cf520f5e37812e07e2ebd14b8cd6', '2015-09-28 12:10:44'),\n(657, 'fcc21cff0a6ce20', 'ascallion', '192.168.10.47', '1db6fc1bc6bf9827c2778e873e27d28f', '2015-09-28 02:24:14'),\n(658, '97499cf23374c28', 'ascallion', '192.168.10.47', '3f6606f4928a56ebdcac9ac9cdf1304e', '2015-09-28 02:48:54'),\n(659, '132bd4fc486253f', 'ascallion', '192.168.10.47', 'f435cc74a40f777c59421f2944f14b10', '2015-09-28 02:52:39'),\n(660, '5701c328e40bd2d', 'ascallion', '192.168.10.17', 'ee1cc558e3d0311bed47b647b305dc63', '2015-09-28 03:15:40'),\n(662, 'ae056adbb2a92c5', 'ascallion', '192.168.10.47', 'e60c8aba489a1d216b2ce2b8767cd477', '2015-09-28 03:40:38'),\n(663, '735a41733f266fa', 'jwilliams', '192.168.10.17', '6b7c8b068a7fed25a6dc69a4f9048e4c', '2015-09-28 04:42:16'),\n(664, 'a3b0f67b6788919', 'bfagersten', '192.168.10.6', 'ed7f250c93640f423371b5e6efc4f15f', '2015-09-28 05:31:04'),\n(665, 'f89c75938cf30b7', 'sbarrett', '192.168.10.9', 'c813dd81768dd8ef1a5833ed74a22567', '2015-09-29 09:01:32'),\n(666, '0ab1ecafc39954c', 'sbarrett', '192.168.10.5', 'af9712d1c83e5d971b72787dd4ea234f', '2015-09-29 09:05:14'),\n(668, 'cb2616bb001f874', 'jwilliams', '192.168.10.10', '66ac943021190ec321255e14287e1d6c', '2015-09-29 11:02:27'),\n(670, '8809c28dc9758e3', 'ohatalsky', '192.168.10.17', 'd18c7e66797f5503c44870298f54e685', '2015-09-29 02:23:05'),\n(671, 'd761df1ca59c6a7', 'dparadise', '192.168.10.17', '4c47613d5570cc1ee4c3fd3f661b6654', '2015-09-29 03:04:53'),\n(672, '69713cd6e37c2d3', 'dparadise', '192.168.10.17', 'a837d22ba982869ca0c00de7b8d2203d', '2015-09-29 04:09:57'),\n(675, '6b2917bcf9e6688', 'cantonovich', '192.168.10.23', 'c6070858fd10a11760ec1225aa6ddafa', '2015-09-29 04:37:05'),\n(676, 'b0f8e159c10ebc9', 'tpeyton', '192.168.10.6', '3b7d1577bd69dc1f4a7744f1d4f7dc72', '2015-09-29 04:53:15'),\n(679, '163092003d0310b', 'jmorrill', '192.168.10.6', 'b51074be5046673b748725d5a3c316ff', '2015-09-29 05:09:51'),\n(680, 'c03068bc0bf1f20', 'tpeyton', '192.168.10.6', '0f3a1dc5bfd96a357a04818dc73237d4', '2015-09-29 
05:43:00'),\n(682, '2ed5147efcd55cd', 'ascallion', '192.168.10.47', 'fd15b0ca221dbc866344028173440e4f', '2015-09-30 08:07:34'),\n(683, 'bc9bc8299a73e71', 'ascallion', '192.168.10.47', '9299890c33cf36988745e524c34b8182', '2015-09-30 09:15:54'),\n(684, 'cf80d29d6949acc', 'ascallion', '192.168.10.47', '1ea2e0cee3a311792caff3da2d943f8e', '2015-09-30 10:14:37'),\n(685, 'f29169df31027b0', 'dparadise', '192.168.10.17', 'e67a306d3e318a8687a433b0685cb713', '2015-09-30 12:16:35'),\n(686, '46e14e9803ffdeb', 'bcampbell', '192.168.10.12', 'feff570423329dced5f986e7f3b70597', '2015-09-30 01:51:49'),\n(687, '632427342be599f', 'bcampbell', '192.168.10.41', '6178c01967bef34df3ed180e95d8463c', '2015-09-30 02:40:07'),\n(688, '49c048d4a90d880', 'jwilliams', '192.168.10.10', 'b258e4854e812ae20b311fdd9829bb67', '2015-09-30 02:42:02'),\n(689, 'e2916f7c2323ffd', 'bcampbell', '192.168.10.41', '4827200bc8080e849d9dd28e5779d889', '2015-09-30 03:19:28'),\n(690, 'b809915484998ec', 'jwilliams', '192.168.10.10', '3d519b62f93bd27768a1004faa5edb98', '2015-10-01 12:50:30'),\n(691, '4fe32293be916bb', 'dyost', '192.168.10.6', 'b7606f5bc80c72f357703af36d1b7c1c', '2015-10-01 01:22:29'),\n(692, '37a2cc50c47aa65', 'dhartman', '192.168.10.17', '2d38e1c3529557f3ce5c3a5bc7b28617', '2015-10-05 08:32:33'),\n(693, '6fd9be151a23a35', 'dhartman', '192.168.10.17', '837e4da731346b98aa3511ef1d4d1205', '2015-10-05 09:32:30'),\n(694, 'a5676a4081d9552', 'ascallion', '192.168.10.47', 'e353dba81e9e2e0ff32bbd9f5a7b16b8', '2015-10-05 01:54:24'),\n(695, 'c6803febebc374b', 'ascallion', '192.168.10.47', '4beaa5fb811dc61aea1aded6d42befd2', '2015-10-05 02:35:12'),\n(696, 'a9d80fbacf3b286', 'bfagersten', '192.168.10.6', '5c9db1c2d1a4ec3be1c74e9d5600532d', '2015-10-05 05:08:12'),\n(697, '6204abf0fb8d01a', 'jwilliams', '192.168.10.10', '923f1dc2483abecd49fd3e6a4760e164', '2015-10-05 05:08:25'),\n(698, 'ca8f18f3432d21a', 'nmicallef', '192.168.10.37', '7f584230f5d1c173f86eab2ccacaabc7', '2015-10-05 05:11:47'),\n(700, 'f62ff2f2f00d11a', 'jwilliams', '192.168.10.10', '318d68e245113a00b27b37377f0fac7a', '2015-10-06 12:44:15'),\n(701, '7bf6f837aa96630', 'dhartman', '192.168.10.47', 'e6ccb3e4feeeeb20aeca6daf3ea58d54', '2015-10-06 01:53:37'),\n(702, '7b7a6f58119887b', 'dhartman', '192.168.10.47', '6ff7f1db35d01150b8740fae56fdc4af', '2015-10-06 02:48:54'),\n(704, '8be910e297fde5c', 'acaron', '192.168.10.8', '100c74c4c4b58b4e9f9a09086aaad6e0', '2015-10-06 04:17:26'),\n(705, 'f574ddff241f647', 'bfagersten', '192.168.10.17', 'f72789dab186802826df7228c1e422fe', '2015-10-06 05:12:39'),\n(706, '9db05f14ebc98d3', 'mfortier', '192.168.10.11', '2a63712d46f80ef5c9e66db4be9f8e90', '2015-10-06 05:15:38'),\n(707, 'b01679928615256', 'amaccarone', '192.168.10.6', '9f491bccf0fed9c7eaf8d5032ebd67b2', '2015-10-07 08:08:06'),\n(708, '49509d579e38ab9', 'ascallion', '192.168.10.47', 'e7786148c9405c7b02dba25bf092a873', '2015-10-07 08:36:41'),\n(709, '21e0b45a1ee644b', 'ascallion', '192.168.10.64', 'd9633b04fab6006fc092e58ff1b3d592', '2015-10-07 08:57:57'),\n(710, '21bcc5dc4363de9', 'ascallion', '192.168.10.64', '30fde23262090bb3a119b97f0dd7fa7c', '2015-10-07 10:47:10'),\n(711, '277d25e44bd132e', 'dparadise', '192.168.10.17', '631144faeb3412c2f4c5bcedfcabc00f', '2015-10-07 12:18:42'),\n(712, '3e2fee159378c8d', 'jnicastro', '192.168.10.62', '6dd7d62d65b269c75851ddc2a92a5bbd', '2015-10-07 05:05:21'),\n(713, '6151cfd4d6d79bb', 'jnicastro', '192.168.10.62', 'b3058c3eb35c1165045f4d4058d30bbb', '2015-10-07 07:22:39'),\n(714, 'a37c771f6bc2aea', 'vchaudhari', '192.168.10.47', 
'2cc36d2d2c199dc0c3e0789a99642a20', '2015-10-08 01:17:56'),\n(715, 'eb67b3461b82862', 'vchaudhari', '192.168.10.47', 'd3799bab536f4cd591f3403fb3a90ab2', '2015-10-08 02:19:45'),\n(716, '21a9ada39316817', 'bfagersten', '192.168.10.6', 'c504711c090be41d631d2f207231fea1', '2015-10-09 11:01:39'),\n(717, 'ac6643d9a800ff6', 'ohatalsky', '192.168.10.17', '1ad9f5a8e3b622e4b36a8eadd31a498f', '2015-10-09 01:17:48'),\n(718, '4fa4c86f0aabaeb', 'jwilliams', '192.168.10.10', '64b22698ac3d7550f662059caed7c10f', '2015-10-13 09:21:05'),\n(719, '3cc7012ec72750f', 'bfagersten', '192.168.10.6', '501e24ddf4d8f1531412a241b40e98c0', '2015-10-13 02:08:20'),\n(720, 'd87c284d89ed9b2', 'ascallion', '192.168.10.64', 'f7dfc8a01577c2dda070012b3c22aad6', '2015-10-14 08:42:44'),\n(721, 'de2f717ea96d78f', 'ascallion', '192.168.10.64', '6a662b520722acbb06f78ebbf001b3f9', '2015-10-14 09:55:46'),\n(722, 'c55d11cccb3c751', 'ascallion', '192.168.10.47', '9dfe365939611dbe4ffef059daf252f8', '2015-10-14 11:14:19'),\n(723, '62399a889dee056', 'ascallion', '192.168.10.64', 'fea74436c27d834385e71f9bff85e847', '2015-10-14 11:55:45'),\n(724, 'e41a4061cfbb06a', 'bcampbell', '192.168.10.47', 'e5978de2996e92a5f0f899940157a61a', '2015-10-14 12:35:58'),\n(725, '0823fc02a43f20a', 'bcampbell', '192.168.10.47', '979de0fe34268b85f6b2b2cc51e05900', '2015-10-14 01:50:07'),\n(726, '7260e3c0e848827', 'bcampbell', '192.168.10.47', 'c82633cdd9468c7ba23bc587ad485225', '2015-10-14 03:26:55'),\n(727, '1842b8e2ca868f0', 'jnicastro', '192.168.10.41', '242b112fd6d5dbd056340d5837640bc5', '2015-10-14 07:33:52'),\n(728, '9aa0d71f0b7604a', 'dyost', '192.168.10.6', '19222d1708bfeced554d9701a3e1bd94', '2015-10-15 12:53:09'),\n(729, '08711b5fb31d32a', 'dyost', '192.168.10.6', '20a63d4c7f49d2d06692048850a81f8b', '2015-10-15 12:57:16'),\n(732, '7c553b01b410f00', 'cgreen', '192.168.10.47', 'ef825191c9b88f01ead8dc44aec772a7', '2015-10-16 11:05:08'),\n(733, '432481009fc42c0', 'dhartman', '192.168.10.47', 'a52679382fa5ae64fbfabfe41573cbc8', '2015-10-19 08:01:54'),\n(734, '826595906d0b35c', 'dhartman', '192.168.10.47', 'a21d568a4400e5a12b9222d0bb1b11f5', '2015-10-19 09:40:52'),\n(735, '0e4c38c0100a1c2', 'dhartman', '192.168.10.64', '5804a56c15da888189ac3814dff910be', '2015-10-19 10:12:40'),\n(736, 'e011df08dc84376', 'dparadise', '192.168.10.17', 'adba61ef599eea9956a3585e25dd67b9', '2015-10-19 11:46:35'),\n(739, 'a1c91f7dcbd5f59', 'ohatalsky', '192.168.10.17', 'e33fce679e92ec342c79b91fab13523a', '2015-10-19 02:24:26'),\n(740, '25c56829f2e2608', 'acaron', '192.168.10.8', '837dd36560b08b372a900584df16e1de', '2015-10-19 02:38:29'),\n(741, '1da4a68ba9f4c24', 'nmicallef', '192.168.10.62', '5634c1aaddcf3ad3f1d3fa3586961b9f', '2015-10-19 02:53:20'),\n(742, 'eeb95a161843f9a', 'cdumont', '192.168.10.20', '6649def3b3bfb15f1f654c30bb9680cb', '2015-10-19 03:51:17'),\n(743, 'adc2fab6a459cdd', 'ohatalsky', '192.168.10.17', '1cb41dc7b50be683d80a6721c1bab142', '2015-10-19 03:53:13'),\n(744, '515676fffa9ca64', 'bfagersten', '192.168.10.6', '2122209a727736ea09de2709d8a15bb0', '2015-10-19 05:27:17'),\n(745, 'f5c1a0fea4b1488', 'nmicallef', '192.168.10.62', '7a9579d822f7f46b400b593dddbef684', '2015-10-19 06:16:42'),\n(746, 'd3b3ff484f04f30', 'jfernandez', '192.168.10.47', '41e035b9476253221d34febf4a72b8db', '2015-10-20 07:40:49'),\n(747, '67ed88cecde7e4e', 'jfernandez', '192.168.10.47', '5bc6bed9b0a0a79917133da35c154e6e', '2015-10-20 08:10:15'),\n(748, 'e73deacbd471a2a', 'jwilliams', '192.168.10.10', '7818c1750e12ba2ae43529c5baeafad7', '2015-10-20 09:14:27'),\n(749, '42d6786ebc7f339', 
'jfernandez', '192.168.10.64', 'ad234a95f01b81a7063d030338ee48ab', '2015-10-20 09:16:25'),\n(750, '5a14da159dc2ac2', 'jfernandez', '192.168.10.47', '5cce64760ba034acb19f29db1442c4c7', '2015-10-20 09:39:35'),\n(751, '0138ce55bef0b77', 'jfernandez', '192.168.10.64', '96b94326a8ac777b1d3fe84c67af8fbe', '2015-10-20 10:38:33'),\n(752, '258ec6c1a6c10a6', 'jfernandez', '192.168.10.64', '37b825a3f0c3c2bd8d628afadd767b10', '2015-10-20 12:11:17'),\n(753, 'ab4fa576df52e1e', 'dhartman', '192.168.10.64', '00ac6e60b96b4d65a1bc314777e75d41', '2015-10-20 01:24:24'),\n(754, 'ec8a68f6fdf9eff', 'dparadise', '192.168.10.17', 'edafeb3d119ace5d05f4f2e8c38039e9', '2015-10-20 04:36:39'),\n(755, '3c79680bcb76cc3', 'tpeyton', '192.168.10.6', 'c29fa4dacc0eb591f9f2e31f4db3e1cc', '2015-10-20 05:25:59'),\n(756, '4be84d657921ed6', 'bfagersten', '192.168.10.6', '1d8b4c5855a2b83c4b8b6c23f74d9acc', '2015-10-20 06:46:26'),\n(757, 'e9558654c03fcb6', 'ascallion', '192.168.10.64', '56ec44b8580089f8f80df910cfa26e2b', '2015-10-21 08:13:58'),\n(758, '014c8a17eb78cd2', 'ascallion', '192.168.10.64', 'a3ac1ef96456f033ee03b15ffeb8ed74', '2015-10-21 08:57:02'),\n(759, '813cc74dd525b1e', 'ascallion', '192.168.10.64', '0cfb461367803838d43f387019da7154', '2015-10-21 09:46:52'),\n(760, '0610f1169ccb87f', 'ascallion', '192.168.10.23', 'e0f0177ea193509b18295954d19f62c6', '2015-10-21 10:46:46'),\n(761, 'f7f3a85b19c2d1f', 'cdumont', '192.168.10.20', 'cbc7630b834a75533737c2aa651d131e', '2015-10-21 11:13:36'),\n(762, '3a25462077fb88b', 'ascallion', '192.168.10.64', '68a983d17fcd6af7d78b5ebef12b443e', '2015-10-21 11:16:29'),\n(763, '2220764052c0c6f', 'bcampbell', '192.168.10.47', '05f17f456d308f4c034ceaf947ff0b80', '2015-10-21 12:48:27'),\n(764, 'b692f7ea0eaf354', 'dparadise', '192.168.10.17', 'bbc965bb86ed69fb0c7f5d291bf6a70d', '2015-10-21 01:19:40'),\n(765, 'e420ddca310960c', 'cdumont', '192.168.10.20', '5e2559e9e35fddc451f9b4db8211b4c7', '2015-10-21 01:30:45'),\n(766, '301d263b3bd75d3', 'cdumont', '192.168.10.20', 'bce3352d1ce77aa4a6a712788d02d78e', '2015-10-21 02:29:23'),\n(767, '9a6f88f47cb253b', 'bcampbell', '192.168.10.47', 'dd7e5d4f686a024a684b661019a2b617', '2015-10-21 02:36:39'),\n(768, '0be5425398871d9', 'bcampbell', '192.168.10.47', '3fcf4beffbd2630440e84972736a5ee9', '2015-10-21 03:15:13'),\n(769, 'd0839dc9aa01a45', 'vchaudhari', '192.168.10.23', '2fc82a3aa5de1b3507681522fe470b3a', '2015-10-22 09:50:46'),\n(770, 'd8c1ce8fc2bde27', 'vchaudhari', '192.168.10.23', 'a31ecffe8f9c827f684464c62fb158b5', '2015-10-22 12:15:34'),\n(771, 'ee8c4d0e5393487', 'vchaudhari', '192.168.10.23', '0843f1a4f6a8ffb5d5749eefb84d1fb4', '2015-10-22 02:03:11'),\n(772, '722a1bf355ea9e4', 'dyost', '192.168.10.35', '114a9e897697d6582fbe0ff398c549be', '2015-10-22 03:03:29'),\n(774, 'faff6b56b96a3f6', 'bfagersten', '192.168.10.6', 'b48ceb1f8a0519a0cdbd1ba88822cc88', '2015-10-23 11:59:15'),\n(775, '6d43dec29ec7978', 'bfagersten', '192.168.10.6', '255d700a0bd7d9fd77a6f7f4b8437b81', '2015-10-24 12:14:11'),\n(776, '12f8af08b75111d', 'cdumont', '192.168.10.20', '7800c2a5528e2e80cb4d1f1c9c2fbd2e', '2015-10-24 02:13:17'),\n(777, '110cf9fd015718e', 'dhartman', '192.168.10.23', '3f51d7f3c784390b93a644a4874f55d4', '2015-10-26 07:54:59'),\n(779, 'c37e79515781ac6', 'ascallion', '192.168.10.64', '88ce4c69fe00321a4c27694bf459fd69', '2015-10-26 02:25:42'),\n(780, '73d4eced9c4e330', 'cdumont', '192.168.10.20', 'a55d8744ee1051a3c37e5a36d445658d', '2015-10-26 03:03:14'),\n(781, '77bbfb71ef0e19d', 'nmicallef', '192.168.10.62', '5ba5c2896bf7bbb19712165b46ae4bc4', '2015-10-26 
05:05:30'),\n(782, '4df7e0c17410da6', 'nmicallef', '192.168.10.62', '4615815c6f2c90798a3cc6db83d9e413', '2015-10-26 06:18:35'),\n(783, '96eb4a6f4d44a09', 'jfernandez', '192.168.10.47', 'e1cf3156d065d13dcffa7733e1944378', '2015-10-27 09:37:40'),\n(784, 'b6ce143922c7dfa', 'jfernandez', '192.168.10.47', '3ad886fb579431614e7579d348a97439', '2015-10-27 10:09:25'),\n(788, '39a5fad06ee8a13', 'mfortier', '192.168.10.33', '7cd50f6f37e97b72f5e7f89b1598f635', '2015-10-27 05:14:35'),\n(789, '92740b2ddcc8687', 'ascallion', '192.168.10.64', 'e904d969dc9e7562458b4e7cb7a87eeb', '2015-10-28 07:59:32'),\n(790, '8b6bd2a35af57ac', 'ascallion', '192.168.10.64', '164c0474418ab7123df8bdcd604be9b8', '2015-10-28 09:51:25'),\n(791, '762a051ed1ba2c7', 'ascallion', '192.168.10.64', '014b81306d93200a3d0a99d9958f2a47', '2015-10-28 11:10:07'),\n(792, 'c19b09a06add33a', 'dparadise', '192.168.10.17', '91587568c5f701ae433b0f91ff2cd6ab', '2015-10-28 12:10:44'),\n(793, '5a9a92c95bde50e', 'bcampbell', '192.168.10.47', '1a9b0e504efa2dd5c599d1e8267a589d', '2015-10-28 12:58:13'),\n(794, '2ea9007e5ff18e5', 'jnicastro', '192.168.10.62', '5a04520c0de20c3f2e1cdaa6d26c0c89', '2015-10-28 06:05:59'),\n(795, '3a715063af48141', 'jnicastro', '192.168.10.62', '5761b5af2b52e0577c2537b6f67cafc6', '2015-10-28 07:05:43'),\n(796, '7e1a4dff2d89eb1', 'bfagersten', '192.168.10.6', 'fca314390b854f396685351f39d76a37', '2015-10-30 12:08:48'),\n(797, '7c4aa76f12b235f', 'cgreen', '192.168.10.47', '5ca4582df97eecdd8ec4edf8c90f5630', '2015-10-30 01:03:52'),\n(798, '7bd6629e20445b9', 'cgreen', '192.168.10.47', 'b2eae70308956ef0e894aff6c482803c', '2015-10-30 02:40:45'),\n(799, 'c42489229013bda', 'dhartman', '192.168.10.23', '8368b5a6c013c4018f2fe927e382925c', '2015-11-02 07:51:56'),\n(800, '7fa3aaef135ded8', 'jwilliams', '192.168.10.10', '7f90eef4a3458f4c22755b09f06e190b', '2015-11-02 07:59:09'),\n(803, '44be410dda9b132', 'aleonard', '192.168.10.20', 'f8fcb67d80c9a055fe35a692da7d42c7', '2015-11-02 11:55:29'),\n(804, '0ea42c18226ce29', 'ascallion', '192.168.10.64', '1fda0e567472cf5dd6422a9d1e5fc1a6', '2015-11-02 12:03:49'),\n(806, 'f4915b93c10d502', 'cdumont', '192.168.10.37', 'f7a70725919e956c9c1d7f70dbf4cb51', '2015-11-02 12:36:53'),\n(809, '1fb34210964c48a', 'ascallion', '192.168.10.47', 'f88ef08bb4128aff263f02a5562b7192', '2015-11-02 12:55:43'),\n(812, '8123342a47cdbdd', 'ascallion', '192.168.10.64', '117072f79862575451d76145004274c6', '2015-11-02 02:14:45'),\n(813, '2f164bb208e2868', 'ohatalsky', '192.168.10.17', 'd874fbe9eb17c2d0bb9316c3dd00a0f3', '2015-11-02 02:39:39'),\n(814, '8d89cc5415e8f66', 'ascallion', '192.168.10.47', '1f4884d9250fc27720897b98ef219270', '2015-11-02 02:54:20'),\n(815, '484e2eafba4cf83', 'dparadise', '192.168.10.35', '34a2521807bbe0da56ee7aed06ec6e0e', '2015-11-02 04:45:13'),\n(816, '027929e87447867', 'nmicallef', '192.168.10.70', '3d1b26ff30fe94902adfff0d78097bf4', '2015-11-02 05:06:50'),\n(817, '9443d6f0d5f74b7', 'nmicallef', '192.168.10.70', 'c79120ff71b7619b75156564750209f0', '2015-11-02 05:50:39'),\n(818, '470fd6d48e12777', 'jfernandez', '192.168.10.47', '28c5e641d3be8cb8a74d5698559daa11', '2015-11-03 07:40:22'),\n(819, 'f3e5e058d39ffc5', 'jfernandez', '192.168.10.47', '9720555c1bab9c9075ad9d128294f659', '2015-11-03 11:05:36'),\n(820, '819587e27215676', 'dhartman', '192.168.10.47', '2555d7f529933f6d3cc7f6b3adac70bc', '2015-11-03 12:53:30'),\n(822, '424f91e68414e96', 'ascallion', '192.168.10.64', '3291ff6051b595c435d4411d492c44d6', '2015-11-04 10:00:34'),\n(823, '0a93babaab75be4', 'ascallion', '192.168.10.64', 
'b3196585c389fb41a9c45b3246b8f3c0', '2015-11-04 11:56:33'),\n(824, '96d586d9fce62f5', 'bcampbell', '192.168.10.47', 'cfcf863dcc9e8fddbda0a7af0e42d3d8', '2015-11-04 01:13:27'),\n(825, '4b8c1429627fe71', 'vchaudhari', '192.168.10.47', 'c1ef60c4b46ede827c9c3f61fd6ed133', '2015-11-05 10:08:18'),\n(826, 'd6eb02ba0bf23c1', 'vchaudhari', '192.168.10.47', 'e5f2644dd046962df810f76216f20085', '2015-11-05 10:53:59'),\n(827, 'f75619b7fc8fb72', 'cgreen', '192.168.10.47', '8272ead30c1281ffb2871983d6d7d0df', '2015-11-06 10:50:23'),\n(828, '19a3c689fd43951', 'cgreen', '192.168.10.47', '8fd312b6e1e394cae06f9973f5d8b32a', '2015-11-06 11:50:21'),\n(830, '7e695328cc5b8ec', 'ascallion', '192.168.10.64', '6e339d9773484ccfacde429e7459e0cd', '2015-11-09 12:07:32'),\n(831, '1ef27e14d754249', 'tpeyton', '192.168.10.35', '0ace8f924282ed5970dfcc950ac61e56', '2015-11-09 12:12:24'),\n(832, '1010942c28be13b', 'ascallion', '192.168.10.64', '62b88659f972632dc117be92d2a6601b', '2015-11-09 01:05:18'),\n(833, '5265c0397e02991', 'ascallion', '192.168.10.64', 'ba2d4607f090633d34359e1e1372541b', '2015-11-09 02:15:38'),\n(834, '28ea847b37e9d1b', 'ohatalsky', '192.168.10.17', '6748c8caea6e18115688491a5ad3203d', '2015-11-09 04:34:12'),\n(836, 'dddb03c5c5ec9ed', 'jfernandez', '192.168.10.47', 'a7be53be34ab2c53d538bdfa026b606c', '2015-11-10 07:40:12'),\n(837, 'c04796accbcb308', 'jfernandez', '192.168.10.47', 'c5b82bea9bed21883bf09fe73ae24a8b', '2015-11-10 11:02:32'),\n(840, '8583c9c0ee68192', 'ascallion', '192.168.10.64', '1962450a98a38f5fedb0b8536254d84e', '2015-11-11 09:48:48'),\n(841, '5a9350081c3e825', 'ascallion', '192.168.10.64', '6697782bcffaafab18b5d240882e92df', '2015-11-11 10:49:24'),\n(842, 'd65869731191efe', 'ascallion', '192.168.10.64', '1c4d082c0127689cd149defee8d5262c', '2015-11-11 11:40:09'),\n(843, 'ac56b78e594dd9a', 'ascallion', '192.168.10.64', '03945b973349edabf0772e64f5de84a7', '2015-11-16 03:44:36'),\n(844, 'b8a5b4b9a5f0a69', 'jfernandez', '192.168.10.47', 'd56ad94367c232c35d3286314502f3f3', '2015-11-17 07:41:48'),\n(846, '3dbe3076cada160', 'jfernandez', '192.168.10.64', '1e684a3b4da8176ea105086ed703ff44', '2015-11-17 12:14:34'),\n(850, 'a9d92c663b2c514', 'amaccarone', '192.168.10.6', '25b4e59ea04c5f560a45d24f10117d0f', '2015-11-18 08:02:44'),\n(851, '81c39e210515803', 'ascallion', '192.168.10.64', 'b284c6545e03b85774d66880b195fcba', '2015-11-18 10:04:29'),\n(852, '043cab18655aa26', 'ascallion', '192.168.10.47', '87c588ddd5b258800ffcd73a901c6401', '2015-11-18 10:32:40'),\n(853, '799c263a0185dee', 'amaccarone', '192.168.10.6', '75a8ec657a98bf8e6462e5a6e8267e1e', '2015-11-18 10:50:51'),\n(854, '5200c509a5b4b1b', 'ascallion', '192.168.10.64', 'ff82e75f8a184a6dab9323f632ca7bca', '2015-11-18 10:55:26'),\n(855, '7c39227886ce15b', 'dparadise', '192.168.10.18', '595646a9fb45eb8da9c45128a2cd151b', '2015-11-20 03:25:25'),\n(856, '4f2b581f71efbe7', 'ascallion', '192.168.10.23', '6ee8fa56de02840b26d21d36ef528c69', '2015-11-30 12:19:31'),\n(857, '1275a7fd15eb4ac', 'ascallion', '192.168.10.64', 'e590198f46ec6c24d52dd11c4b7b8bd7', '2015-11-30 12:23:11'),\n(858, 'b5b53000aedce84', 'ascallion', '192.168.10.64', '965b373b685bd37f74e0c88c5a795d91', '2015-11-30 02:09:14'),\n(860, 'b9bea9397035587', 'jfernandez', '192.168.10.47', 'db55591d86d0ca27bbf40fc8d86fd2f5', '2015-12-01 10:42:10'),\n(861, '20cf9bf9a49016b', 'jfernandez', '192.168.10.64', '7f8648bb3016db1ebcc2cf89eb8a3613', '2015-12-01 11:11:13'),\n(862, '13a08e46743ce78', 'dhartman', '192.168.10.64', 'cf23ca3237d6bf88cac4ec218881a3ae', '2015-12-01 12:51:01'),\n(866, 
'c8c1f94824df8af', 'jwilliams', '192.168.10.10', '6b8a4295a642fd285942ab7f13b2cb35', '2015-12-01 05:59:27'),\n(867, 'df075b0b4d22b03', 'ascallion', '192.168.10.64', '60678a54dadd9f2c8c2a6ad44b85d2b3', '2015-12-02 07:49:52'),\n(868, '51402b6bf099297', 'ascallion', '192.168.10.64', 'b16f566caecc66bef9f3ec84c84ce6ef', '2015-12-02 11:18:12'),\n(869, 'c24e8497bd8ed47', 'dparadise', '192.168.10.18', '7fc1656c291ae3bc5b9230ab2b74e0d0', '2015-12-02 12:09:57'),\n(870, '5a39005ea485143', 'cgreen', '192.168.10.47', '9868e8de561150e92ac01b941b39f224', '2015-12-04 08:48:34'),\n(871, '39b95da0b2466e0', 'dhartman', '192.168.10.47', '98ae6f972231a731bb2490c0b92568c4', '2015-12-07 09:41:24'),\n(872, 'ed39a4a99b14c18', 'ascallion', '192.168.10.64', '5ee5e7bc13d0905a3bcbe1b4190b80a7', '2015-12-07 12:04:26'),\n(873, '1c1da213c35225b', 'ascallion', '192.168.10.47', '20facf278f4c5d4aa3a3f2430a79df2d', '2015-12-07 01:33:33'),\n(878, '6c4f75cf311b620', 'dhartman', '192.168.10.47', '4d7ca9f5f85ac72054bd99efbb477920', '2015-12-08 01:37:29'),\n(880, 'e0e7f6c2bc29494', 'mfortier', '192.168.10.6', '6677bbac3afd4faee5b0a0dc4269343c', '2015-12-08 03:56:54'),\n(881, '0cae41b8b15c834', 'mfortier', '192.168.10.6', '95de59677d5bf1f0e508c6efed9d677e', '2015-12-08 05:12:50'),\n(882, 'c9b2fc47c28fb22', 'ascallion', '192.168.10.64', '60514ad5d5504f1ad0d75856b9a977b5', '2015-12-09 08:12:14'),\n(883, 'aca496228c69472', 'ascallion', '192.168.10.64', '43d1ba3cba7a366a13cd49414247d374', '2015-12-09 09:03:38'),\n(884, '63a50aefc58abc7', 'ascallion', '192.168.10.64', 'e21545079fec54e4743827ece45cc8f8', '2015-12-09 09:51:03'),\n(885, 'b369041b1b4b5ad', 'bcampbell', '192.168.10.3', '357f638ec7b99c7e0f964d6ea0f1a23f', '2015-12-09 01:28:26'),\n(886, 'a6e8ea6ca591dd6', 'mroberts', '192.168.10.19', 'de1701cfbd87ca1b92e348094b2f72b4', '2015-12-12 04:53:29'),\n(888, 'cabd5c6cdff5eb8', 'jfernandez', '192.168.10.64', '2b058e146517ae2997d57447cb6518d5', '2015-12-15 07:58:55'),\n(889, '23b24f4fc86c589', 'jfernandez', '192.168.10.19', 'd86174f8119265d691656aaa2ac52735', '2015-12-15 09:18:40'),\n(892, 'baeda436fc90c94', 'administrator', '192.168.10.64', '55b9d16d3f5ef6c891c36d8134d74fb3', '2015-12-16 03:35:29'),\n(893, 'a7903e8dc2729e4', 'sbarrett', '192.168.10.32', '0c8cd400e93ba4177743922f58504b99', '2016-01-14 11:45:18'),\n(894, '0be350495b437e0', 'ascallion', '192.168.10.9', '540eef4b0819385106a7d0ddbc42a297', '2016-01-18 07:50:00'),\n(896, '5acaef5b51168fc', 'mfortier', '192.168.10.7', 'b78adccd97952533fde4f1c7aebc0e43', '2016-01-18 08:30:52'),\n(898, 'b47da9e20e0b89c', 'ascallion', '192.168.10.36', 'df72a8836c173f40010e60af2b4070a1', '2016-01-18 10:34:02'),\n(899, '9b6cdc7f5b687a1', 'ascallion', '192.168.10.36', 'edae4410bd1e8b2239d33e864794c656', '2016-01-18 11:55:56'),\n(901, 'd932f43f2df90e3', 'mgreen', '192.168.10.13', 'f82f5ba3b439cfaba937c400c5a1a669', '2016-01-18 12:11:46'),\n(902, 'f1269b69d4f03b0', 'sbarrett', '192.168.10.24', 'd04a66b9712a7a6193496faaa9f12357', '2016-01-19 08:43:02'),\n(903, 'e89a413b510df28', 'jfernandez', '192.168.10.47', '0b419dd42a93803487bb4f0b95db29c1', '2016-01-19 09:38:43'),\n(905, '02874c718171d45', 'sbarrett', '192.168.10.24', '1a0cae73870761fccec4e0065dab31e1', '2016-01-19 11:16:46'),\n(907, '26c215fbfd4df79', 'acaron', '192.168.10.25', 'c0ccf865066a66fcc2fb3b146cafaa30', '2016-01-19 11:24:57'),\n(908, '2222150de95eca0', 'dparadise', '192.168.10.18', 'f5c20fa056c0a2b9c6e98fc9eaab4b20', '2016-01-19 01:31:08'),\n(909, '822f0fd89acfb59', 'cdumont', '192.168.10.13', '8d21ba10763a619d8dd18ef6330c359f', 
'2016-01-19 01:34:05'),\n(910, '8e7f45d7cbe59b0', 'amaccarone', '192.168.10.16', '37cbce588899feb1b0a5b00bbe038723', '2016-01-19 01:37:18'),\n(911, '3cf98ba5b086f6a', 'cgreen', '192.168.10.47', '35ee12a521938361d54f701dd7113526', '2016-01-19 03:14:56'),\n(912, 'd78643acedc84e4', 'jfernandez', '192.168.10.36', 'f735eaf8250f54a3180c5c70707085a0', '2016-01-20 03:39:04'),\n(913, '10bbfb1dfbe8b8e', 'dparadise', '192.168.10.19', 'd5a4b47e5b3c88ddfc6424fd4a99865d', '2016-01-21 01:18:03'),\n(914, '2cc12b8cc3e49ba', 'cgreen', '192.168.10.43', 'b1a7de106fb6e4a0790de392eca6ec11', '2016-01-22 02:17:07'),\n(915, '9a369b5ef932772', 'cgreen', '192.168.10.43', 'eac5a8c6400ba8e5c3706a1c360b8cb1', '2016-01-22 03:38:35'),\n(917, 'cbe063a3657d129', 'ascallion', '192.168.10.9', '06ffbfb0bdc474af1b3141d3c7679b54', '2016-01-25 08:02:14'),\n(918, 'b63dbd30ea77468', 'ascallion', '192.168.10.9', '74429a8147663e9282d24401fe4c5167', '2016-01-25 08:29:56'),\n(919, 'da842a159ddc247', 'ascallion', '192.168.10.43', '16fb035f14a642e29bcd084739aaa9f5', '2016-01-25 08:37:51'),\n(920, '026bde647703fd0', 'ascallion', '192.168.10.9', 'c65a4136dcfd155bfa9a9ea4ea0f50de', '2016-01-25 09:15:58'),\n(921, '1f00f959070715b', 'ascallion', '192.168.10.43', 'eb0fccfcdbd05f738b954294157b1d1f', '2016-01-25 11:55:08'),\n(922, '4a81daf977a518e', 'ascallion', '192.168.10.6', '74763a2392061a148338bfa85220cb6c', '2016-01-25 12:15:18'),\n(923, '4eda937a8a2b48b', 'egoeben', '192.168.10.43', '87a7d2793d776bccc68513e771202525', '2016-01-25 03:01:44'),\n(924, '6bc85f0f5f3098c', 'mfortier', '192.168.10.7', 'af33766a4c16a4760d062b7109d4dc56', '2016-01-26 08:01:10'),\n(925, '79112198255447e', 'jfernandez', '192.168.10.36', '7308817ccddb1dd8311ce9dc773d7728', '2016-01-26 08:38:52'),\n(926, '3d65c526583bd5c', 'jfernandez', '192.168.10.36', 'b64ffa5b9f7fd170208d2d2e006bc85a', '2016-01-26 11:01:28'),\n(928, 'b70b1867d7a0e14', 'jwilliams', '192.168.10.21', '801df0b8b61822662a20be4b912d5335', '2016-01-26 12:44:24'),\n(929, '8ea1b2ddd9fcf65', 'dparadise', '192.168.10.18', '6cc8944bfc23a0df8534445e9a59ec1d', '2016-01-26 01:13:58'),\n(931, '5c6b3d1d3e074f5', 'jwilliams', '192.168.10.21', '29c3b34f9a667a644c6a066be3929092', '2016-01-26 02:41:47'),\n(932, '887ccafce8e0a0c', 'bcampbell', '192.168.10.19', '952737f6caaca6bf337076c37245cc66', '2016-01-26 02:42:20'),\n(934, '000329b2c89a7e0', 'cgreen', '192.168.10.43', 'b03bc73a197ab5f3b0e91a884b0a9d85', '2016-01-26 03:03:08'),\n(935, '2d90c2204c2c398', 'dyost', '192.168.10.13', '608862b990d7fbb1a117116da011ed86', '2016-01-27 02:28:32'),\n(936, '97f07a8e4322ddd', 'jfernandez', '192.168.10.36', '36d484eec85d48e63b9d784eb6b2ea7e', '2016-01-27 02:33:22'),\n(939, 'd8e8a46be49699d', 'ohatalsky', '192.168.10.18', '4a17845f23fb1f26f60d1a9fdcaade63', '2016-01-28 11:37:57'),\n(940, 'cb3275de22b2f1b', 'dparadise', '192.168.10.18', '1d5254ff905f7dcd1696159540884653', '2016-01-28 01:31:59'),\n(942, '5a0015cd88f7a2a', 'jwilliams', '192.168.10.21', '6b076f1497f823a5cc881e14f4aee820', '2016-01-28 02:54:36'),\n(944, '1e5bd6cf036627b', 'mfortier', '192.168.10.7', '7e48d610c7d0bd10f254f9621f3c2fd7', '2016-02-01 08:06:51'),\n(945, '773092e827ba3fc', 'ascallion', '192.168.10.9', '95952e1d9f9f18d20a4b79597313bfb3', '2016-02-01 08:29:30'),\n(946, '8d1b42f7eaa84ca', 'mfortier', '192.168.10.7', '41c2b850ed53421afeb760142096b133', '2016-02-01 08:57:06'),\n(947, '96ae281db8b722f', 'ascallion', '192.168.10.43', '2330c27b75c1305ffca9c773eac16765', '2016-02-01 11:57:20'),\n(948, '40e0b323674ace1', 'ascallion', '192.168.10.36', 
'0f93dcb7561684f3bd2f15e7bed7f944', '2016-02-01 12:02:15'),\n(949, '1d09bb72abe8837', 'jwilliams', '192.168.10.21', '7ca6640b5853fb873851a7f0498afbb5', '2016-02-01 12:54:41'),\n(950, '9f450dd68aa3b52', 'acaron', '10.0.8.6', '1a3f3756f2aee62e6364e416c0e023ef', '2016-02-01 01:02:53'),\n(954, 'e7ee9e61e0b67c6', 'jwilliams', '192.168.10.36', '6f66fb52885ccd6ffba58acf320ba594', '2016-02-01 03:27:15'),\n(956, 'e41d6d86ac57bfd', 'mfortier', '192.168.10.7', '9699809fd12b1393d082b3adab5f243c', '2016-02-02 08:03:03'),\n(958, 'a3fed690dd14db7', 'jfernandez', '192.168.10.43', '5c3c853a3ae85b40fc248032b82f6754', '2016-02-02 11:15:35'),\n(959, 'f5035a1e6d398c9', 'jfernandez', '192.168.10.43', 'da406b78f7f44d4347d411f7aa4bceb2', '2016-02-02 12:14:19'),\n(960, '2a13054dcefda22', 'cgreen', '192.168.10.43', 'b1e21007d7dd2e33db2b20e81287fece', '2016-02-02 02:53:47'),\n(961, '9e751b2d7d82c73', 'dyost', '192.168.10.16', '964ce8f8e99be1b8a328504c4a7b4d2d', '2016-02-02 03:24:42'),\n(962, 'c145010c9188406', 'svashaw', '192.168.10.43', 'bc302db449e83d2a7208327a01f396b6', '2016-02-03 10:38:51'),\n(963, '7f02c59e48fdb1c', 'svashaw', '192.168.10.43', 'f415b60f28a020eb25db7887aa883ecf', '2016-02-03 11:46:28'),\n(965, '318aa11dd0cad72', 'jfernandez', '192.168.10.36', '14926d7270aff349512308ed4cdeda79', '2016-02-03 02:12:59'),\n(966, 'dcde3142031bacc', 'jwilliams', '192.168.10.21', '779072b7c20e17e0efd589b5bd4abc98', '2016-02-03 02:43:09'),\n(968, 'a8ee8fd8180f5b5', 'jfernandez', '192.168.10.36', 'd1e74f4c50a72346896bfb895780a515', '2016-02-03 03:22:23'),\n(969, '6186239d58fbbd5', 'acaron', '192.168.10.27', 'dba8d8191cdc0e2c27764b4a7e701837', '2016-02-04 09:58:08'),\n(970, 'd83ea8b6b5b9cdc', 'jehlers', '192.168.10.6', '66432b3698df589cd9ceacc423f7d130', '2016-02-04 01:52:25'),\n(971, '21daf3544cc1d4e', 'cgreen', '192.168.10.43', '3efc35272b0494d80c9fd52c75bbac22', '2016-02-04 03:13:20'),\n(972, 'e1a6fadcb91be9b', 'acaron', '10.0.8.6', 'de7d500aada6ec6ef57e567eedc6ae2b', '2016-02-04 09:42:02'),\n(975, 'b244f61fea5fa6b', 'mfortier', '192.168.10.7', 'c71683aef1ab430e9c245a192d12627a', '2016-02-09 10:41:54'),\n(977, 'ec121fb89daa6fb', 'jfernandez', '192.168.10.36', '693d951a9c3a77d35064237dcdee1731', '2016-02-09 10:46:09'),\n(979, 'd144b9900801e65', 'cdumont', '192.168.10.13', '70ab46307973be2b47a7ad74a94c3bfc', '2016-02-09 11:36:58'),\n(981, 'c4ed9ba3c17752d', 'jfernandez', '192.168.10.36', '9275677d30077c810e3a7f0bd2778f81', '2016-02-09 11:47:57'),\n(982, 'c5765a0f430959a', 'mfortier', '192.168.10.7', 'bcb0b2a43721646d4fb3e0318ca3d4a1', '2016-02-09 11:48:16'),\n(984, '7b8c890b3ec5634', 'cdumont', '192.168.10.13', '61c7cfdd76c860c0c5ead4443d54eef3', '2016-02-09 12:16:27'),\n(985, '2e25eb98c0a091b', 'egoeben', '192.168.10.43', '1614c9b3f3c6e1b28d5a54a74a3b77da', '2016-02-09 12:45:54'),\n(986, '8bb90dbb2c3eadb', 'dparadise', '192.168.10.18', '98d5fb72b0bce0f8690f66ace787816d', '2016-02-09 02:23:44'),\n(987, 'c8ca83b6276916f', 'cgreen', '192.168.10.43', '720079b46c2574cc7010f3100e9dd1ee', '2016-02-09 02:30:01'),\n(988, '0641698c66d3dae', 'jfernandez', '192.168.10.43', '4b85267fb8f0bb0218598e3d58e25db7', '2016-02-10 01:53:16'),\n(989, '535bb151956faff', 'dyost', '192.168.10.4', '57376ed6d9bda8a7b6efde2460c1e5ad', '2016-02-10 02:07:31'),\n(990, 'fb6c33d072bca50', 'dyost', '192.168.10.4', 'fbfc939719110502c3c1372902cb907e', '2016-02-10 02:47:12'),\n(991, 'e3372a4ad4635b0', 'jfernandez', '192.168.10.43', '57225174aa67be8d2058ec9b9d54ac84', '2016-02-10 03:51:48'),\n(992, '7f4219f4f1195d3', 'jwilliams', '192.168.10.36', 
'b79d3fbc50655c7064d8e6c27145fcc3', '2016-02-11 08:29:54'),\n(994, 'f4debc64118bab6', 'dparadise', '192.168.10.18', '5d7f88eec51688c7c8c776db9c5f6f20', '2016-02-11 01:47:11'),\n(996, 'ac5ccccbebf3608', 'bfagersten', '192.168.10.15', '8d82b9849afa7c6b2c674e53a8e3679d', '2016-02-11 03:43:02'),\n(997, '40ee1eba8c02736', 'mfortier', '192.168.10.7', 'c85d0d4dbd794131d46b683d583c7a0e', '2016-02-12 12:56:13'),\n(998, 'a88e816c52aeb3f', 'ohatalsky', '192.168.10.18', '669438e165f032f92c137a29cb59d167', '2016-02-12 02:00:24'),\n(999, 'd8215861e74f0dc', 'mfortier', '192.168.10.7', 'd6f5fdd55ac30395987442aa43a690b5', '2016-02-12 02:40:45');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `Wiped`\n--\n\nCREATE TABLE IF NOT EXISTS `Wiped` (\n `ID` int(255) NOT NULL,\n `DeviceID` varchar(255) NOT NULL,\n `UserName` varchar(255) NOT NULL,\n `Date` datetime NOT NULL,\n KEY `ID` (`ID`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `Wiped`\n--\n\nINSERT INTO `Wiped` (`ID`, `DeviceID`, `UserName`, `Date`) VALUES\n(0, '1', 'dparadise', '2016-02-11 01:53:05');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `barcodes`\n--\n\nCREATE TABLE IF NOT EXISTS `barcodes` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `serials` varchar(10) NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=362 ;\n\n--\n-- Dumping data for table `barcodes`\n--\n\nINSERT INTO `barcodes` (`id`, `serials`) VALUES\n(1, '06999'),\n(2, 'LCDI07000'),\n(3, 'LCDI07001'),\n(4, 'LCDI07002'),\n(5, 'LCDI07003'),\n(6, 'LCDI07004'),\n(7, 'LCDI07005'),\n(8, 'LCDI07006'),\n(9, 'LCDI07007'),\n(10, 'LCDI07008'),\n(11, 'LCDI07009'),\n(12, 'LCDI07010'),\n(13, 'LCDI07011'),\n(14, 'LCDI07012'),\n(15, 'LCDI07013'),\n(16, 'LCDI07014'),\n(17, 'LCDI07015'),\n(18, 'LCDI07016'),\n(19, 'LCDI07017'),\n(20, 'LCDI07018'),\n(21, 'LCDI07019'),\n(22, 'LCDI07020'),\n(23, 'LCDI07021'),\n(24, 'LCDI07022'),\n(25, 'LCDI07023'),\n(26, 'LCDI07024'),\n(27, 'LCDI07025'),\n(28, 'LCDI07026'),\n(29, 'LCDI07027'),\n(30, 'LCDI07028'),\n(31, 'LCDI07029'),\n(32, 'LCDI07030'),\n(33, 'LCDI07031'),\n(34, 'LCDI07032'),\n(35, 'LCDI07033'),\n(36, 'LCDI07034'),\n(37, 'LCDI07035'),\n(38, 'LCDI07036'),\n(39, 'LCDI07037'),\n(40, 'LCDI07038'),\n(41, 'LCDI07039'),\n(42, 'LCDI07040'),\n(43, 'LCDI07041'),\n(44, 'LCDI07042'),\n(45, 'LCDI07043'),\n(46, 'LCDI07044'),\n(47, 'LCDI07045'),\n(48, 'LCDI07046'),\n(49, 'LCDI07047'),\n(50, 'LCDI07048'),\n(51, 'LCDI07049'),\n(52, 'LCDI07050'),\n(53, 'LCDI07051'),\n(54, 'LCDI07052'),\n(55, 'LCDI07053'),\n(56, 'LCDI07054'),\n(57, 'LCDI07055'),\n(58, 'LCDI07056'),\n(59, 'LCDI07057'),\n(60, 'LCDI07058'),\n(61, 'LCDI07059'),\n(62, 'LCDI07060'),\n(63, 'LCDI07061'),\n(64, 'LCDI07062'),\n(65, 'LCDI07063'),\n(66, 'LCDI07064'),\n(67, 'LCDI07065'),\n(68, 'LCDI07066'),\n(69, 'LCDI07067'),\n(70, 'LCDI07068'),\n(71, 'LCDI07069'),\n(72, 'LCDI07070'),\n(73, 'LCDI07071'),\n(74, 'LCDI07072'),\n(75, 'LCDI07073'),\n(76, 'LCDI07074'),\n(77, 'LCDI07075'),\n(78, 'LCDI07076'),\n(79, 'LCDI07077'),\n(80, 'LCDI07078'),\n(81, 'LCDI07079'),\n(82, 'LCDI07080'),\n(83, 'LCDI07081'),\n(84, 'LCDI07082'),\n(85, 'LCDI07083'),\n(86, 'LCDI07084'),\n(87, 'LCDI07085'),\n(88, 'LCDI07086'),\n(89, 'LCDI07087'),\n(90, 'LCDI07088'),\n(91, 'LCDI07089'),\n(92, 'LCDI07090'),\n(93, 'LCDI07091'),\n(94, 'LCDI07092'),\n(95, 'LCDI07093'),\n(96, 'LCDI07094'),\n(97, 'LCDI07095'),\n(98, 'LCDI07096'),\n(99, 'LCDI07097'),\n(100, 'LCDI07098'),\n(101, 
'LCDI07099'),\n(102, 'LCDI07100'),\n(103, 'LCDI07101'),\n(104, 'LCDI07102'),\n(105, 'LCDI07103'),\n(106, 'LCDI07104'),\n(107, 'LCDI07105'),\n(108, 'LCDI07106'),\n(109, 'LCDI07107'),\n(110, 'LCDI07108'),\n(111, 'LCDI07109'),\n(112, 'LCDI07110'),\n(113, 'LCDI07111'),\n(114, 'LCDI07112'),\n(115, 'LCDI07113'),\n(116, 'LCDI07114'),\n(117, 'LCDI07115'),\n(118, 'LCDI07116'),\n(119, 'LCDI07117'),\n(120, 'LCDI07118'),\n(121, 'LCDI07119'),\n(122, 'LCDI07120'),\n(123, 'LCDI07121'),\n(124, 'LCDI07122'),\n(125, 'LCDI07123'),\n(126, 'LCDI07124'),\n(127, 'LCDI07125'),\n(128, 'LCDI07126'),\n(129, 'LCDI07127'),\n(130, 'LCDI07128'),\n(131, 'LCDI07129'),\n(132, 'LCDI07130'),\n(133, 'LCDI07131'),\n(134, 'LCDI07132'),\n(135, 'LCDI07133'),\n(136, 'LCDI07134'),\n(137, 'LCDI07135'),\n(138, 'LCDI07136'),\n(139, 'LCDI07137'),\n(140, 'LCDI07138'),\n(141, 'LCDI07139'),\n(142, 'LCDI07140'),\n(143, 'LCDI07141'),\n(144, 'LCDI07142'),\n(145, 'LCDI07143'),\n(146, 'LCDI07144'),\n(147, 'LCDI07145'),\n(148, 'LCDI07146'),\n(149, 'LCDI07147'),\n(150, 'LCDI07148'),\n(151, 'LCDI07149'),\n(152, 'LCDI07150'),\n(153, 'LCDI07151'),\n(154, 'LCDI07152'),\n(155, 'LCDI07153'),\n(156, 'LCDI07154'),\n(157, 'LCDI07155'),\n(158, 'LCDI07156'),\n(159, 'LCDI07157'),\n(160, 'LCDI07158'),\n(161, 'LCDI07159'),\n(162, 'LCDI07160'),\n(163, 'LCDI07161'),\n(164, 'LCDI07162'),\n(165, 'LCDI07163'),\n(166, 'LCDI07164'),\n(167, 'LCDI07165'),\n(168, 'LCDI07166'),\n(169, 'LCDI07167'),\n(170, 'LCDI07168'),\n(171, 'LCDI07169'),\n(172, 'LCDI07170'),\n(173, 'LCDI07171'),\n(174, 'LCDI07172'),\n(175, 'LCDI07173'),\n(176, 'LCDI07174'),\n(177, 'LCDI07175'),\n(178, 'LCDI07176'),\n(179, 'LCDI07177'),\n(180, 'LCDI07178'),\n(181, 'LCDI07179'),\n(182, 'LCDI07180'),\n(183, 'LCDI07181'),\n(184, 'LCDI07182'),\n(185, 'LCDI07183'),\n(186, 'LCDI07184'),\n(187, 'LCDI07185'),\n(188, 'LCDI07186'),\n(189, 'LCDI07187'),\n(190, 'LCDI07188'),\n(191, 'LCDI07189'),\n(192, 'LCDI07190'),\n(193, 'LCDI07191'),\n(194, 'LCDI07192'),\n(195, 'LCDI07193'),\n(196, 'LCDI07194'),\n(197, 'LCDI07195'),\n(198, 'LCDI07196'),\n(199, 'LCDI07197'),\n(200, 'LCDI07198'),\n(201, 'LCDI07199'),\n(202, 'LCDI07200'),\n(203, 'LCDI07201'),\n(204, 'LCDI07202'),\n(205, 'LCDI07203'),\n(206, 'LCDI07204'),\n(207, 'LCDI07205'),\n(208, 'LCDI07206'),\n(209, 'LCDI07207'),\n(210, 'LCDI07208'),\n(211, 'LCDI07209'),\n(212, 'LCDI07210'),\n(213, 'LCDI07211'),\n(214, 'LCDI07212'),\n(215, 'LCDI07213'),\n(216, 'LCDI07214'),\n(217, 'LCDI07215'),\n(218, 'LCDI07216'),\n(219, 'LCDI07217'),\n(220, 'LCDI07218'),\n(221, 'LCDI07219'),\n(222, 'LCDI07220'),\n(223, 'LCDI07221'),\n(224, 'LCDI07222'),\n(225, 'LCDI07223'),\n(226, 'LCDI07224'),\n(227, 'LCDI07225'),\n(228, 'LCDI07226'),\n(229, 'LCDI07227'),\n(230, 'LCDI07228'),\n(231, 'LCDI07229'),\n(232, 'LCDI07230'),\n(233, 'LCDI07231'),\n(234, 'LCDI07232'),\n(235, 'LCDI07233'),\n(236, 'LCDI07234'),\n(237, 'LCDI07235'),\n(238, 'LCDI07236'),\n(239, 'LCDI07237'),\n(240, 'LCDI07238'),\n(241, 'LCDI07239'),\n(242, 'LCDI07240'),\n(243, 'LCDI07241'),\n(244, 'LCDI07242'),\n(245, 'LCDI07243'),\n(246, 'LCDI07244'),\n(247, 'LCDI07245'),\n(248, 'LCDI07246'),\n(249, 'LCDI07247'),\n(250, 'LCDI07248'),\n(251, 'LCDI07249'),\n(252, 'LCDI07250'),\n(253, 'LCDI07251'),\n(254, 'LCDI07252'),\n(255, 'LCDI07253'),\n(256, 'LCDI07254'),\n(257, 'LCDI07255'),\n(258, 'LCDI07256'),\n(259, 'LCDI07257'),\n(260, 'LCDI07258'),\n(261, 'LCDI07259'),\n(262, 'LCDI07260'),\n(263, 'LCDI07261'),\n(264, 'LCDI07262'),\n(265, 'LCDI07263'),\n(266, 'LCDI07264'),\n(267, 'LCDI07265'),\n(268, 'LCDI07266'),\n(269, 'LCDI07267'),\n(270, 
'LCDI07268'),\n(271, 'LCDI07269'),\n(272, 'LCDI07270'),\n(273, 'LCDI07271'),\n(274, 'LCDI07272'),\n(275, 'LCDI07273'),\n(276, 'LCDI07274'),\n(277, 'LCDI07275'),\n(278, 'LCDI07276'),\n(279, 'LCDI07277'),\n(280, 'LCDI07278'),\n(281, 'LCDI07279'),\n(282, 'LCDI07280'),\n(283, 'LCDI07281'),\n(284, 'LCDI07282'),\n(285, 'LCDI07283'),\n(286, 'LCDI07284'),\n(287, 'LCDI07285'),\n(288, 'LCDI07286'),\n(289, 'LCDI07287'),\n(290, 'LCDI07288'),\n(291, 'LCDI07289'),\n(292, 'LCDI07290'),\n(293, 'LCDI07291'),\n(294, 'LCDI07292'),\n(295, 'LCDI07293'),\n(296, 'LCDI07294'),\n(297, 'LCDI07295'),\n(298, 'LCDI07296'),\n(299, 'LCDI07297'),\n(300, 'LCDI07298'),\n(301, 'LCDI07299'),\n(302, 'LCDI07300'),\n(303, 'LCDI07301'),\n(304, 'LCDI07302'),\n(305, 'LCDI07303'),\n(306, 'LCDI07304'),\n(307, 'LCDI07305'),\n(308, 'LCDI07306'),\n(309, 'LCDI07307'),\n(310, 'LCDI07308'),\n(311, 'LCDI07309'),\n(312, 'LCDI07310'),\n(313, 'LCDI07311'),\n(314, 'LCDI07312'),\n(315, 'LCDI07313'),\n(316, 'LCDI07314'),\n(317, 'LCDI07315'),\n(318, 'LCDI07316'),\n(319, 'LCDI07317'),\n(320, 'LCDI07318'),\n(321, 'LCDI07319'),\n(322, 'LCDI07320'),\n(323, 'LCDI07321'),\n(324, 'LCDI07322'),\n(325, 'LCDI07323'),\n(326, 'LCDI07324'),\n(327, 'LCDI07325'),\n(328, 'LCDI07326'),\n(329, 'LCDI07327'),\n(330, 'LCDI07328'),\n(331, 'LCDI07329'),\n(332, 'LCDI07300'),\n(333, 'LCDI07301'),\n(334, 'LCDI07302'),\n(335, 'LCDI07303'),\n(336, 'LCDI07304'),\n(337, 'LCDI07305'),\n(338, 'LCDI07306'),\n(339, 'LCDI07307'),\n(340, 'LCDI07308'),\n(341, 'LCDI07309'),\n(342, 'LCDI07310'),\n(343, 'LCDI07311'),\n(344, 'LCDI07312'),\n(345, 'LCDI07313'),\n(346, 'LCDI07314'),\n(347, 'LCDI07315'),\n(348, 'LCDI07316'),\n(349, 'LCDI07317'),\n(350, 'LCDI07318'),\n(351, 'LCDI07319'),\n(352, 'LCDI07320'),\n(353, 'LCDI07321'),\n(354, 'LCDI07322'),\n(355, 'LCDI07323'),\n(356, 'LCDI07324'),\n(357, 'LCDI07325'),\n(358, 'LCDI07326'),\n(359, 'LCDI07327'),\n(360, 'LCDI07328'),\n(361, 'LCDI07329');\n--\n-- Database: `test`\n--\nCREATE DATABASE `test` DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci;\nUSE `test`;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n"
},
{
"alpha_fraction": 0.7710526585578918,
"alphanum_fraction": 0.7736842036247253,
"avg_line_length": 94.125,
"blob_id": "b1da00ce1ea8a90afe006a38f4e163d9f0d9de76",
"content_id": "14d1fad29e37c314cf129648a449a88f57c25630",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 760,
"license_type": "permissive",
"max_line_length": 214,
"num_lines": 8,
"path": "/README.md",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "[](https://waffle.io/lcdi/Inventory)\n# [LCDI Internal Inventory Management Site](http://www.champlain.edu/cybersecurity-and-digital-forensics/senator-patrick-leahy-center-for-digital-investigation-(lcdi))\n\n[SB Admin 2](http://startbootstrap.com/template-overviews/sb-admin-2/) is an open source, admin dashboard template for [Bootstrap](http://getbootstrap.com/) created by [Start Bootstrap](http://startbootstrap.com/).\nUsed for header and side bar\n\nUsing [Flask Framework](http://flask.pocoo.org/) for Python back end and HTML, CSS, [Bootstrap](http://getbootstrap.com/) for design and styles\nUsing MySQL through Python ORM [Peewee](http://docs.peewee-orm.com/en/latest/)"
},
{
"alpha_fraction": 0.5108637809753418,
"alphanum_fraction": 0.7064123153686523,
"avg_line_length": 16.154544830322266,
"blob_id": "14bf6df98890ec1edd18803c83a94b7f60012e19",
"content_id": "52cbdeb99fab0f64dd8e82a4f222baddde252177",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1887,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 110,
"path": "/requirements.txt",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "Babel==1.3\nBzrTools==2.6.0\nFlask==0.10.1\nFlask-JSGlue==0.3\nFlask-SQLAlchemy==2.1\nFlask-WhooshAlchemy==0.56\nFormEncode==1.2.6\nJinja2==2.7.2\nM2Crypto==0.21.1\nMarkupSafe==0.18\nMySQL-python==1.2.3\nPaste==1.7.5.1\nPasteDeploy==1.5.2\nPasteScript==1.7.5\nPillow==2.3.0\nPyYAML==3.10\nRoutes==2.0\nSQLAlchemy==0.8.4\nSecretStorage==2.0.0\nTempita==0.5.2\nWebOb==1.3.1\nWerkzeug==0.11.3\nWhoosh==2.7.2\namqp==1.3.3\namqplib==1.0.2\nanyjson==0.3.3\nargparse==1.2.1\nblinker==1.4\nboto==2.20.1\nbzr==2.7.0dev1\nchardet==2.0.1\ncliff==1.4.5\ncmd2==0.6.7\ncolorama==0.2.5\nconfigobj==4.7.2\ndecorator==3.4.0\nduplicity==0.6.23\neventlet==0.13.0\ngreenlet==0.4.2\ngunicorn==19.4.5\nhtml5lib==0.999\nhttplib2==0.8\nipython==1.2.1\niso8601==0.1.10\nitsdangerous==0.24\njsonpatch==1.3\njsonpointer==1.0\njsonschema==2.3.0\nkeyring==3.5\nkombu==3.0.7\nlaunchpadlib==1.10.2\nlazr.restfulclient==0.13.3\nlazr.uri==1.0.3\nlibrabbitmq==1.0.3\nlockfile==0.8\nlpthw.web==1.1\nlxml==3.3.3\nmatplotlib==1.3.1\nmercurial==2.8.2\nmock==1.0.1\nnetaddr==0.7.10\nnova==2014.1.5\nnumpy==1.8.2\noauth==1.0.1\noslo.config==1.2.1\noslo.messaging==1.3.0\noslo.rootwrap==1.2.0\nparamiko==1.10.1\npbr==0.7.0\npeewee==2.8.0\npexpect==3.1\nprettytable==0.7.2\npyOpenSSL==0.13\npyasn1==0.1.7\npycadf==0.4.1\npycrypto==2.6.1\npydns==2.3.6\npygobject==3.12.0\npygpgme==0.3\npyparsing==2.0.1\npython-apt==0.9.3.5ubuntu2\npython-cinderclient==1.0.8\npython-dateutil==1.5\npython-glanceclient==0.12.0\npython-keystoneclient==0.7.1\npython-ldap==2.4.10\npython-neutronclient==2.3.4\npython-novnc==0.4-dfsg-1-20130425-git4973b9cc80\npython-openid==2.2.5\npytz==2012c\nrepoze.lru==0.6\nrequests==2.2.1\nscgi==1.13\nscipy==0.13.3\nsimplegeneric==0.8.1\nsimplejson==3.3.1\nsix==1.5.2\nsqlalchemy-migrate==0.8.2\nstevedore==0.14.1\nsuds==0.4.1\nurllib3==1.7.1\nvirtualenv==1.11.4\nvirtualenv-clone==0.2.4\nvirtualenvwrapper==4.1.1\nwadllib==1.3.2\nwarlock==1.1.0\nwebsockify==0.5.1\nwheel==0.24.0\nwsgiref==0.1.2\nzope.interface==4.0.5\n"
},
{
"alpha_fraction": 0.6637285351753235,
"alphanum_fraction": 0.6667401194572449,
"avg_line_length": 27.012346267700195,
"blob_id": "beee9ea0a1ae5a41162a0197f407cbef781638f7",
"content_id": "e0f771903472725fa557b68eeea9023780ae00d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13614,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 486,
"path": "/lcdi.py",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "# Flask imports\nfrom flask import Flask, render_template, session, redirect, url_for, escape, request, jsonify, abort\n#from flask_jsglue import JSGlue\nfrom werkzeug import secure_filename\nimport flask.ext.whooshalchemy\nfrom functools import wraps\n\n# Peewee\nfrom peewee import *\n\n# Python\nimport sys\nimport os\nimport os.path\nimport time\nimport json\nimport logging\nimport shutil\n\n# Custom support files\nimport adLDAP\n\n# Paramaters\npagePostKey = 'functionID'\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nUPLOAD_FOLDER = os.path.join(APP_ROOT, 'static/item_photos')\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif',\n\t\t\t\t\t\t 'PNG', 'JPG', 'JPEG', 'GIF'])\n\n# ~~~~~~~~~~~~~~~~ Start Execution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\napp = Flask(__name__)\n#jsglue = JSGlue(app)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config.from_object('config')\n\n#lcdiLog = logging.getLogger('lcdi_logger')\n\n# TODO use a decorator for logins http://flask.pocoo.org/docs/0.10/patterns/viewdecorators/#login-required-decorator\n\n# ~~~~~~~~~~~~~~~~ Decorators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef login_required(f):\n\t@wraps(f)\n\tdef decorated_function(*args, **kwargs):\n\t\tif not 'username' in session:\n\t\t\treturn getLoginURL()\n\t\treturn f(*args, **kwargs)\n\treturn decorated_function\n\n# ~~~~~~~~~~~~~~~~ Startup Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n__all__ = ['getConfig']\n\ndef init():\n\t#lcdiLog.addHandler(logging.FileHandler(os.getcwd() + '/lcdi.log'))\n\t#lcdiLog.setLevel(logging.DEBUG)\n\t#logging.basicConfig(filename='lcdi.log',level=logging.DEBUG)\n\t# Generate secret key for session\n\tapp.secret_key = os.urandom(20)\n\ndef getIndexURL():\n\treturn redirect(url_for('index'))\n\ndef getLoginURL():\n\treturn redirect(url_for('login'))\n\ndef getName():\n\treturn session['displayName']\n\ndef getConfig(key):\n\treturn app.config[key]\n\nimport models\n\n# ~~~~~~~~~~~~~~~~ Page Render Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef renderHomepage():\n\treturn render_template(\"page/PageIndex_Homepage.html\")\n\ndef renderInventoryListings(itemType = 'ALL', status = 'ALL', quality = 'ALL', searchSerial = None, searchModal = None):\n\t\n\tdeviceList = models.getDevicesWithLog(itemType, status, quality)\n\tlength = models.getDevices()\n\t\n\terror = None\n\tif 'error' in session:\n\t\terror = session['error']\n\t\tsession.pop('error', None)\n\t\n\treturn render_template(\"page/PageIndex_Inventory.html\",\n\t\t\tfilter_Type = itemType,\n\t\t\tfilter_Status = status,\n\t\t\tfilter_quality = quality,\n\t\t\t\n\t\t\tquery = deviceList,\n\t\t\ttypes = models.getDeviceTypes(),\n\t\t\tstates = models.getStates(),\n\t\t\t\n\t\t\ttotalItems = len(length),\n\t\t\ttotalSignedOut = len(models.getDevicesWithLog('ALL', 'out', 'ALL')),\n\t\t\t\n\t\t\tdata_id = searchSerial,\n\t\t\tqueueModal = searchModal,\n\t\t\t\n\t\t\tname = escape(getName()),\n\t\t\terror = error\n\t\t)\n\ndef renderPage_View(serial, error = None):\n\tdevice = models.Device.select().where(models.Device.SerialNumber == serial).get()\n\tlog = models.getDeviceLog(serial)\n\t\n\tif len(log) > 0:\n\t\tdevice.statusIsOut = not log.get().DateIn\n\telse:\n\t\tdevice.statusIsOut = False\n\t\n\treturn render_template(\"page/PageViewItem.html\",\n\t\t\tdevice = device,\n\t\t\ttypes = models.getDeviceTypes(),\n\t\t\tstates = models.getStates(),\n\t\t\tlog = log,\n\t\t\terror = error\n\t\t)\n\n# ~~~~~~~~~~~~~~~~ Routing Functions 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n@app.route('/items/all')\n@login_required\ndef viewAll():\n\tsession['redirectSource'] = 'all'\n\treturn getIndexURL()\n\n@app.route('/items/out')\n@login_required\ndef viewOut():\n\tsession['redirectSource'] = 'outItems'\n\treturn getIndexURL()\n\n@app.route('/', methods=['GET', 'POST'])\n@login_required\ndef index():\n\t# http://flask.pocoo.org/snippets/15/\n\t\n\t# Render main page\n\tif request.method == 'POST':\n\t\tfunction = request.form[pagePostKey]\n\t\t\n\t\t#logging.info('[INDEX] Executing function: ' + function)\n\t\ttry:\n\t\t\tif function == 'addItem':\n\t\t\t\treturn addItem(\n\t\t\t\t\t\tserialDevice = request.form['device_serial'],\n\t\t\t\t\t\tdevice_type = request.form['device_types'],\n\t\t\t\t\t\tdevice_other = request.form['other'],\n\t\t\t\t\t\tdescription = request.form['device_desc'],\n\t\t\t\t\t\tnotes = request.form['device_notes'],\n\t\t\t\t\t\tquality = request.form['device_quality'],\n\t\t\t\t\t\tfile = request.files['file']\n\t\t\t\t\t)\n\t\t\telif function == 'deleteItem':\n\t\t\t\ttry:\n\t\t\t\t\tserial = request.form['serial']\n\t\t\t\t\titem = models.Device.select().where(models.Device.SerialNumber == serial).get()\n\t\t\t\t\tif item.PhotoName:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tos.remove(UPLOAD_FOLDER + '/' + item.PhotoName)\n\t\t\t\t\t\texcept OSError, e:\n\t\t\t\t\t\t\tprint e.errno\n\t\t\t\t\t\t\tprint e.filename\n\t\t\t\t\t\t\tprint e.strerror\n\t\t\t\t\titem.delete_instance()\n\t\t\t\texcept:\n\t\t\t\t\tprint(sys.exc_info()[0])\n\t\t\t\t\n\t\t\t\treturn getIndexURL()\n\t\t\telif function == 'filter':\n\t\t\t\treturn renderInventoryListings(itemType = request.form['type'], status = request.form['status'], quality = request.form['quality'])\n\t\texcept:\n\t\t\t#logging.error(sys.exc_info()[0])\n\t\t\t#flash(sys.exc_info()[0])\n\t\t\treturn renderInventoryListings()\n\t\t\n\telse:\n\t\tstatus = 'ALL'\n\t\tif 'redirectSource' in session:\n\t\t\tif session['redirectSource'] == 'outItems':\n\t\t\t\tstatus = 'out'\n\t\t\tsession.pop('redirectSource', None)\n\t\treturn renderInventoryListings(status = status)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\tif 'username' in session:\n\t\treturn getIndexURL()\n\telif request.method == 'POST':\n\t\ttry:\n\t\t\tuser = request.form['username']\n\t\t\tpw = request.form['password']\n\t\t\tvalid, hasEditAccess = adLDAP.checkCredentials(\n\t\t\t\tapp.config['LDAP_CONTROLLER'],\n\t\t\t\tapp.config['LDAP_DOMAIN_A'],\n\t\t\t\tapp.config['LDAP_DOMAIN_B'],\n\t\t\t\tuser, pw)\n\t\t\tif valid != True:\n\t\t\t\tsession[\"error\"] = valid\n\t\t\tif valid == True or getConfig('ALLOW_LOGINS'):\n\t\t\t\tif hasEditAccess == True or getConfig('ALLOW_LOGINS'):\n\t\t\t\t\t# Set username and displayName in session\n\t\t\t\t\tsession['username'] = user\n\t\t\t\t\tsession['displayName'] = session['username']\n\t\t\t\t\tsession['hasEditAccess'] = hasEditAccess or app.debug == True\n\t\t\t\t\tsession['redirectSource'] = 'outItems'\n\t\t\t\telse:\n\t\t\t\t\tsession[\"error\"] = \"You do not have access\"\n\t\t\t\n\t\t\t# Send user back to index page\n\t\t\t# (if username wasn't set, it will redirect back to login screen)\n\t\t\treturn getIndexURL()\n\t\t\t\n\t\texcept Exception as e:\n\t\t\treturn str(e)\n\telse:\n\t\t# Was not a POST, which means index or some other source sent user to login\n\t\tif 'error' in session:\n\t\t\terror = session['error']\n\t\t\tsession.pop('error', None)\n\t\t\treturn render_template(\"page/PageLogin.html\", error=error)
\n\telse:\n\t\treturn render_template(\"page/PageLogin.html\", error=None)\n\n@app.route('/logout')\ndef logout():\n\tsession.pop('username', None)\n\tsession.pop('displayName', None)\n\tsession.pop('hasEditAccess', None)\n\tsession.pop('redirectSource', None)\n\treturn getIndexURL()\n\n@app.route('/search', methods=['GET', 'POST'])\n@login_required\ndef search():\n\tif not request.method == 'POST':\n\t\treturn getIndexURL()\n\t\n\tsearchPhrase = str(request.form['searchField'])\n\t\n\tif searchPhrase.startswith(\"scan:\"):\n\t\tserial = searchPhrase[5:]\n\t\tquery = models.getDeviceLog(serial)\n\t\tisOut, obj = models.getStatus(query)\n\t\tif isOut:\n\t\t\tmodal = \"signIn\"\n\t\telse:\n\t\t\tmodal = \"signOut\"\n\t\treturn renderInventoryListings(searchSerial = serial, searchModal = modal)\n\t\t\n\tif models.isSearchUser(searchPhrase):\n\t\tlogs = models.Log.select().where(\n\t\t\t\tmodels.Log.AuthorizerIn.contains(searchPhrase) | \n\t\t\t\tmodels.Log.AuthorizerOut.contains(searchPhrase) | \n\t\t\t\tmodels.Log.UserIn.contains(searchPhrase) | \n\t\t\t\tmodels.Log.UserOut.contains(searchPhrase)\n\t\t\t).order_by(-models.Log.DateOut)\n\t\t\n\t\treturn render_template(\"page/PageUserLogs.html\", query = logs, searchPhrase = searchPhrase)\n\t\t\n\tif (len(models.Device.select().where(models.Device.SerialNumber == searchPhrase)) == 1):\n\t\treturn renderPage_View(searchPhrase)\n\t\n\tquery = models.Device.select().where(\n\t\tmodels.Device.SerialNumber.contains(searchPhrase) |\n\t\tmodels.Device.SerialDevice.contains(searchPhrase) |\n\t\tmodels.Device.Type.contains(searchPhrase) |\n\t\tmodels.Device.Description.contains(searchPhrase)\n\t)\n\tdeviceList = models.getDeviceAndLogListForQuery(query)\n\t\n\treturn render_template(\"page/PageSearchResults.html\",\n\t\t\tquery = deviceList,\n\t\t\ttypes = models.getDeviceTypes(),\n\t\t\tparams = searchPhrase,\n\t\t\tsearchPhrase = searchPhrase)\n\n@app.route('/signInOut', methods=['GET', 'POST'])\n@login_required\ndef signInOut():\n\tif not request.method == 'POST':\n\t\treturn getIndexURL()\n\t\n\tfunction = request.form[pagePostKey]\n\tserial = request.form['lcdi_serial']\n\t\n\tif function == 'out':\n\t\tmodels.Log.create(\n\t\t\tSerialNumber = serial,\n\t\t\tUserOut = request.form['userID'],\n\t\t\tPurpose = request.form['purpose'],\n\t\t\tDateOut = models.datetime.datetime.now(),\n\t\t\tAuthorizerOut = session['username']\n\t\t)\n\telif function == 'in':\n\t\tdeviceLog = models.getDeviceLog(serial).get()\n\t\tdeviceLog.UserIn = request.form['userID']\n\t\tdeviceLog.DateIn = models.datetime.datetime.now()\n\t\tdeviceLog.AuthorizerIn = session['username']\n\t\tdeviceLog.save()\n\t\t\n\treturn getIndexURL()\n\n@app.route('/users', methods=['GET', 'POST'])\n@login_required\ndef userLogsAll():\n\t\n\tquery = models.Log.select().order_by(-models.Log.DateOut)\n\t\n\tsearchPhrase = \"\"\n\tif request.method == 'POST':\n\t\tsearchPhrase = request.form['searchField']\n\t\tquery = (query\n\t\t\t.where(\n\t\t\t\tmodels.Log.UserOut.contains(searchPhrase) |\n\t\t\t\tmodels.Log.UserIn.contains(searchPhrase) |\n\t\t\t\tmodels.Log.AuthorizerIn.contains(searchPhrase) |\n\t\t\t\tmodels.Log.AuthorizerOut.contains(searchPhrase)\n\t\t\t)\n\t\t)\n\t\t\n\t\tif not 'isFormSubmission' in request.form:\n\t\t\ttable = render_template('page/PageUserLogs_Body.html', query = query)\n\t\t\treturn jsonify(tableBody = table)\n\t\t\n\treturn render_template(\"page/PageUserLogs.html\", query = query, types = models.getDeviceTypes(), searchPhrase = searchPhrase)
\n\n@app.route('/viewItem', methods=['POST'])\n@login_required\ndef viewItem():\n\tif request.method == 'POST':\n\t\treturn redirect(url_for('view', serial=request.form['lcdi_serial']))\n\n@app.route('/view/<string:serial>', methods=['GET', 'POST'])\n@login_required\ndef view(serial):\n\t\n\terror = None\n\t\n\ttry:\n\t\tif request.method == 'POST':\n\t\t\tif request.form[pagePostKey] == 'updateItem':\n\t\t\t\terror = updateItem(\n\t\t\t\t\toldSerial = serial,\n\t\t\t\t\tserialDevice = request.form['device_serial'],\n\t\t\t\t\tdescription = request.form['device_desc'],\n\t\t\t\t\tnotes = request.form['device_notes'],\n\t\t\t\t\tquality = request.form['device_quality'],\n\t\t\t\t\tfile = request.files['file']\n\t\t\t\t)\n\texcept models.DoesNotExist:\n\t\tabort(404)\n\texcept NameError, e:\n\t\terror = \"[view] \" + str(e)\n\texcept TypeError, e:\n\t\terror = \"[view] \" + str(e)\n\texcept:\n\t\terror = \"[view] \" + str(sys.exc_info()[0])\n\treturn renderPage_View(serial, error = error)\n\t\n@app.errorhandler(404)\ndef not_found(error):\n\treturn render_template('page/404.html'), 404\n\n# ~~~~~~~~~~~~~~~~ Utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef addItem(serialDevice, device_type, device_other, description, notes, quality, file):\n\t\n\tif device_type == 'Other':\n\t\tdevice_type = device_other\n\t\t\n\tserialNumber, error = models.generateSerial(device_type)\n\t\n\tif serialNumber == None:\n\t\tsession['error'] = error\n\t\treturn getIndexURL()\n\t\n\tfilename, error = uploadFile(serialNumber, file)\n\tif filename == None:\n\t\tif error != None:\n\t\t\tsession['error'] = error\n\t\t\treturn getIndexURL()\n\t\telse:\n\t\t\tstatic = os.getcwd() + \"/static/\"\n\t\t\tfilename = serialNumber + \".png\"\n\t\t\tshutil.copy(static + \"default.jpeg\", static + \"item_photos/\" + filename)\n\t\n\tif filename == None:\n\t\tfilename = \"\"\n\tmodels.Device.create(\n\t\t\tSerialNumber = serialNumber,\n\t\t\tSerialDevice = serialDevice,\n\t\t\tType = device_type,\n\t\t\tDescription = description,\n\t\t\tIssues = notes,\n\t\t\tPhotoName = filename,\n\t\t\tQuality = quality\n\t\t)\n\treturn renderPage_View(serialNumber, error = error)\n\ndef updateItem(oldSerial, serialDevice, description, notes, quality, file):\n\t\n\tdevice = models.Device.select().where(models.Device.SerialNumber == oldSerial).get()\n\t\n\tdevice.SerialNumber = oldSerial\n\tdevice.SerialDevice = serialDevice\n\tdevice.Description = description\n\tdevice.Issues = notes\n\tdevice.Quality = quality\n\t\n\tfilename, error = uploadFile(oldSerial, file)\n\tif filename != None:\n\t\tdevice.PhotoName = filename\n\t\n\tdevice.save()\n\t\n\treturn error\n\ndef uploadFile(serialNumber, file):\n\tif file and not file.filename:\n\t\treturn (None, None)\n\tif file and allowed_file(file.filename):\n\t\tfileList = file.filename.split(\".\")\n\t\tfilename = serialNumber + '.' + str(fileList[-1])\n\t\ttry:\n\t\t\tfile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\t\texcept IOError, e:\n\t\t\treturn (None, \"[uploadFile] \" + \"[Errno \" + str(e.errno) + \"] \" + str(e))\n\t\texcept NameError, e:\n\t\t\treturn (None, \"[uploadFile] \" + str(e))\n\t\texcept:\n\t\t\treturn (None, \"[uploadFile] \" + str(sys.exc_info()[0]))\n\telse:\n\t\tfilename = None\n\treturn (filename, None)\n\ndef allowed_file(filename):\n    return '.' in filename and \\
\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef signOutItem(serial, sname, use):\n\tidentifierItem = models.Log.select().order_by(models.Log.Identifier.desc())\n\t\n\tif len(identifierItem) == 0:\n\t\tidentifier = 1\n\telse:\n\t\t# Take the newest log row (highest Identifier) from the query\n\t\tidentifier = identifierItem.get().Identifier + 1\n\t\n\tmodels.Log.create(\n\t\t\tIdentifier = identifier,\n\t\t\tSerialNumber = serial,\n\t\t\tUserOut = escape(session['username']),\n\t\t\tAuthorizerOut = sname,\n\t\t\tPurpose = use,\n\t\t\tDateOut = models.datetime.datetime.now()\n\t\t)\n\t\n\treturn renderPage_View(serial)\n\n# ~~~~~~~~~~~~~~~~ Start page ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ninit()\n\nif __name__ == '__main__':\n\tctx = app.test_request_context()\n\tctx.push()\n\tapp.preprocess_request()\n\tport = int(os.getenv('PORT', 8080))\n\thost = os.getenv('IP', '0.0.0.0')\n\tapp.run(port=port, host=host)\n\t\n\tmodels.db.connect()\n\t\n\t#\"\"\"\n\t#models.Device.create_table()\n\t#models.Log.create_table()\n\t#\"\"\"\n\t\n\tmodels.db.close()\n"
},
{
"alpha_fraction": 0.7326732873916626,
"alphanum_fraction": 0.7425742745399475,
"avg_line_length": 24.25,
"blob_id": "35f8441059d83f248750afa3ddacf7deefec7036",
"content_id": "70c57bd9d106f1f400818117be0444d672a44325",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 4,
"path": "/lcdi.wsgi",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\nsys.path.insert(0, '/var/www/lcdi')\nfrom lcdi import app as application\n"
},
{
"alpha_fraction": 0.7638888955116272,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 17.25,
"blob_id": "a636d75184e77434277edc3a3f7f81aa968ffb53",
"content_id": "e376047cafc7465506db61f3ee59bf467523e385",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 72,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 4,
"path": "/restart.sh",
"repo_name": "lcdi/Inventory",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nbash update.sh\nbash ../renewPerms.sh\nservice apache2 restart"
}
] | 11 |
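
The lcdi/Inventory row above documents a small but complete pattern: Peewee models over MySQL, Flask routes guarded by a `login_required` decorator (lcdi.py), and a WSGI stub handed to Apache (lcdi.wsgi, restart.sh). The sketch below isolates the Flask + Peewee core. It is a minimal illustration only, not the project's real code: the repo's `models.py` is absent from this dump, so the `Device` fields are inferred from the queries in lcdi.py, and the database name, credentials, secret key, and demo `/login` route are placeholder assumptions.

```python
# Minimal sketch of the Flask + Peewee pattern seen in lcdi.py.
# Assumed, not taken from the repo: the database name and credentials,
# the secret key, the demo login, and the reduced Device field list.
from functools import wraps

from flask import Flask, redirect, session, url_for
from peewee import CharField, Model, MySQLDatabase

db = MySQLDatabase('inventory', user='inventory_user',
                   password='secret', host='localhost')

class Device(Model):
    # Field names inferred from lcdi.py's queries (models.py is not in the dump).
    SerialNumber = CharField(unique=True)   # e.g. 'LCDI07000'
    SerialDevice = CharField()
    Type = CharField()
    Description = CharField()

    class Meta:
        database = db

app = Flask(__name__)
app.secret_key = 'replace-me'  # lcdi.py generates one with os.urandom(20)

def login_required(f):
    # Same shape as the decorator in lcdi.py: bounce to the login
    # view unless a username is already stored in the session.
    @wraps(f)
    def decorated(*args, **kwargs):
        if 'username' not in session:
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return decorated

@app.route('/devices')
@login_required
def devices():
    # Peewee builds the SELECT lazily; iterating over it executes it.
    return '\n'.join(d.SerialNumber for d in Device.select())

@app.route('/login')
def login():
    session['username'] = 'demo'  # stand-in for adLDAP.checkCredentials()
    return redirect(url_for('devices'))
```

For local testing, the same `app` object that lcdi.wsgi hands to Apache's mod_wsgi can be served directly with the standard library, e.g. `wsgiref.simple_server.make_server('', 8080, app).serve_forever()`.
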
ChuyueSun/hangman-game | https://github.com/ChuyueSun/hangman-game | 0521ee5348d6ceacaa9f3191a3165c54f2a478d6 | b00dcf40319ddade9366ebdfccd711adc42ff685 | 89404c6e20d91ad922856d7078118a9725506100 | refs/heads/master | 2021-04-28T02:58:38.762496 | 2018-02-20T03:59:32 | 2018-02-20T03:59:32 | 122,128,666 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6126254200935364,
"alphanum_fraction": 0.6159992814064026,
"avg_line_length": 33.86996841430664,
"blob_id": "42e596d5d5365a94de561e35e283eb49ca4d8903",
"content_id": "56ce5355d37b03490887a79f60ff95836a491617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11263,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 323,
"path": "/hangman.py",
"repo_name": "ChuyueSun/hangman-game",
"src_encoding": "UTF-8",
"text": "# Problem Set 2, hangman.py\n# Name: Chuyue Sun\n# Collaborators: No\n# Time spent: 3hr\n\n# Hangman Game\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\nimport random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\n\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n \n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n# end of helper code\n\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = load_words()\n\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing; assumes all letters are\n lowercase\n letters_guessed: list (of letters), which letters have been guessed so far;\n assumes that all letters are lowercase\n returns: boolean, True if all the letters of secret_word are in letters_guessed;\n False otherwise\n '''\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n for i in secret_word:\n if (i not in letters_guessed):\n return False\n return True\n\n\n\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing; assumes the letters in\n secret_word are all lowercase.\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string, comprised of letters and asterisks (*) that represents\n which letters in secret_word have been guessed so far.\n '''\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n result=\"\"\n for i in secret_word:\n if i not in letters_guessed:\n result+=\"*\"\n else:\n result+=i\n return result\n\n\n\ndef get_available_letters(letters_guessed):\n '''\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string (of letters), comprised of letters that represents which \n letters have not yet been guessed. The letters should be returned in\n alphabetical order.\n '''\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n import string\n result=\"\"\n for i in string.ascii_lowercase:\n if i not in letters_guessed:\n result+=i\n return result\n \n \n\ndef hangman(secret_word):\n '''\n secret_word: string, the secret word to guess.\n \n Starts up an interactive game of Hangman.\n \n * At the start of the game, let the user know how many \n letters the secret_word contains and how many guesses they start with.\n \n * The user should start with 10 guesses.\n\n * Before each round, you should display to the user how many guesses\n they have left and the letters that the user has not yet guessed.\n \n * Ask the user to supply one guess per round. 
Remember to make\n    sure that the user puts in a letter!\n    \n    * The user should receive feedback immediately after each guess \n    about whether their guess appears in the computer's word.\n\n    * After each guess, you should display to the user the \n    partially guessed word so far.\n    \n    Follows the other limitations detailed in the problem write-up.\n    '''\n    # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n    guesses_remaining=10\n    letters_guessed=[]\n    print(\"Welcome to Hangman!\")\n    print(\"I am thinking of a word that is\",len(secret_word),\"letters long\")\n    print(\"------------------\")\n    while guesses_remaining>0:\n        \n        if guesses_remaining>1:\n            print(\"You have\",guesses_remaining,\"guesses left.\")\n        else:\n            print(\"You have\",guesses_remaining,\"guess left.\")\n        print(\"Available letters:\",get_available_letters(letters_guessed))\n        letter=input(\"Please guess a letter: \")\n        if letter==\"%\" and guesses_remaining>=2:\n            guesses_remaining-=2\n            hangman_with_help(secret_word)\n        elif letter==\"%\" and guesses_remaining<2:\n            print(\"Oops! Not enough guesses left:\",get_available_letters(letters_guessed))\n        elif str.isalpha(letter)==False:\n            print(\"Oops! That is not a valid letter. Please input a letter from the alphabet:\",get_guessed_word(secret_word,letters_guessed))\n        elif str.lower(letter) in letters_guessed:\n            print(\"Oops! You've already guessed that letter:\",get_guessed_word(secret_word,letters_guessed))\n        else:\n            letter=str.lower(letter)\n            letters_guessed.append(letter)\n\n            if letter in secret_word:\n                print(\"Good guess:\",get_guessed_word(secret_word,letters_guessed))\n            else:\n                if letter in [\"a\",\"e\",\"i\",\"o\",\"u\"]:\n                    guesses_remaining-=2\n                else:\n                    guesses_remaining-=1\n\n                print(\"Oops! That letter is not in my word:\",get_guessed_word(secret_word,letters_guessed))\n        print(\"------------------\")\n        if is_word_guessed(secret_word,letters_guessed):\n            \n            def get_unique (secret_word):\n                unique=0\n                for i in string.ascii_lowercase:\n                    if i in secret_word:\n                        unique+=1\n                return unique\n\n            score=guesses_remaining+2*get_unique(secret_word)*len(secret_word)\n            print(\"Congratulations, you won!\")\n            print(\"Your total score for this game is:\",score)\n            break\n        \n        if guesses_remaining<=0:\n            print(\"Sorry, you ran out of guesses. The word was\",secret_word,\".\")\n    \n    \n    \n\n\n\n# When you've completed your hangman function, scroll down to the bottom\n# of the file and uncomment the lines to test\n# (hint: you might want to pick your own\n# secret_word while you're doing your own testing)\n\n\n# -----------------------------------\n\ndef choose(secret_word,available_letters):\n    result=\"\"\n    for i in string.ascii_lowercase:\n        if i in secret_word and i in available_letters :\n            result+=i\n    return result\n    \n\n\ndef hangman_with_help(secret_word):\n    '''\n    secret_word: string, the secret word to guess.\n    \n    Starts up an interactive game of Hangman.\n    \n    * At the start of the game, let the user know how many \n    letters the secret_word contains and how many guesses they start with.\n    \n    * The user should start with 10 guesses.\n    \n    * Before each round, you should display to the user how many guesses\n    they have left and the letters that the user has not yet guessed.\n    \n    * Ask the user to supply one guess per round. Remember to make sure that
\n    the user puts in a letter.\n    \n    * The user should receive feedback immediately after each guess \n    about whether their guess appears in the computer's word.\n\n    * After each guess, you should display to the user the \n    partially guessed word so far.\n    \n    * If the guess is the symbol %, you should reveal to the user one of the \n    letters missing from the word at the cost of 2 guesses. If the user does \n    not have 2 guesses remaining, print a warning message. Otherwise, add \n    this letter to their guessed word and continue playing normally.\n    \n    Follows the other limitations detailed in the problem write-up.\n    '''\n    # FILL IN YOUR CODE HERE AND DELETE \"pass\"\n\n    \n    guesses_remaining=10\n    letters_guessed=[]\n    print(\"Welcome to Hangman!\")\n    print(\"I am thinking of a word that is\",len(secret_word),\"letters long\")\n    print(\"------------------\")\n    while guesses_remaining>0:\n        \n        if guesses_remaining>1:\n            print(\"You have\",guesses_remaining,\"guesses left.\")\n        else:\n            print(\"You have\",guesses_remaining,\"guess left.\")\n        print(\"Available letters:\",get_available_letters(letters_guessed))\n        letter=input(\"Please guess a letter: \")\n        if letter==\"%\" and guesses_remaining>=2:\n            guesses_remaining-=2\n            available_letters=get_available_letters(letters_guessed)\n            choose_from=choose(secret_word,available_letters)\n            new=random.randint(0,len(choose_from)-1)\n            exposed_letter=choose_from[new]\n            print(\"Letter revealed:\",exposed_letter)\n            letters_guessed.append(exposed_letter)\n            print(get_guessed_word(secret_word,letters_guessed))\n        elif letter==\"%\" and guesses_remaining<2:\n            print(\"Oops! Not enough guesses left:\",get_available_letters(letters_guessed))\n        elif str.isalpha(letter)==False:\n            print(\"Oops! That is not a valid letter. Please input a letter from the alphabet:\",get_guessed_word(secret_word,letters_guessed))\n        elif str.lower(letter) in letters_guessed:\n            print(\"Oops! You've already guessed that letter:\",get_guessed_word(secret_word,letters_guessed))\n        else:\n            letter=str.lower(letter)\n            letters_guessed.append(letter)\n\n            if letter in secret_word:\n                print(\"Good guess:\",get_guessed_word(secret_word,letters_guessed))\n            else:\n                if letter in [\"a\",\"e\",\"i\",\"o\",\"u\"]:\n                    guesses_remaining-=2\n                else:\n                    guesses_remaining-=1\n\n                print(\"Oops! That letter is not in my word:\",get_guessed_word(secret_word,letters_guessed))\n        print(\"------------------\")\n        if is_word_guessed(secret_word,letters_guessed):\n            \n            def get_unique (secret_word):\n                unique=0\n                for i in string.ascii_lowercase:\n                    if i in secret_word:\n                        unique+=1\n                return unique\n\n            score=guesses_remaining+2*get_unique(secret_word)*len(secret_word)\n            print(\"Congratulations, you won!\")\n            print(\"Your total score for this game is:\",score)\n            break\n        \n        if guesses_remaining<=0:\n            print(\"Sorry, you ran out of guesses. The word was\",secret_word,\".\")\n    \n    \n# When you've completed your hangman_with_help function, comment the two similar\n# lines below that were used to run the hangman function, and then uncomment\n# those two lines and run this file to test!\n# Hint: You might want to pick your own secret_word while you're testing.\n\nif __name__ == \"__main__\":\n    pass\n\n    # To test part 2, comment out the pass line above and\n    # uncomment the following two lines.\n    \n    #secret_word = choose_word(wordlist)\n    #hangman(secret_word)\n\n###############\n    \n    # To test part 3 re-comment out the above lines and \n    # uncomment the following two lines.
\n# \n# secret_word = choose_word(wordlist)\n# hangman_with_help(secret_word)\n"
},
{
"alpha_fraction": 0.7790973782539368,
"alphanum_fraction": 0.786223292350769,
"avg_line_length": 83.0999984741211,
"blob_id": "c4160b3641b65d99388ecc5f00d9fe1aff96677f",
"content_id": "7a40e48a74c91ec16d666a913beb994e841e7ea7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 249,
"num_lines": 10,
"path": "/README.md",
"repo_name": "ChuyueSun/hangman-game",
"src_encoding": "UTF-8",
"text": "\nThe general behavior we want to implement is described below. \n\n1. The computer must select a word at random from the list of available words provided in words.txt. Note that words.txt contains words in all lowercase letters .\n2. The user is given a certain number of guesses at the beginning.\n3. The game is interactive; the user inputs their guess and the computer either:\na. reveals the letter if it exists in the secret word\nb. penalizes the user and updates the number of guesses remaining\n4. The game ends when either the user guesses the secret word or the user runs out\nof guesses.\n5. If you guess the special character % the computer will provide you with one of the missing letters in the secret word at a cost of 2 guesses. If you don’t have two guesses still remaining, the computer will warn you of this and let you try again.\n"
}
] | 2 |
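
The hangman README above pins down the one non-obvious rule, the `%` hint: reveal one missing letter at a cost of 2 guesses, or warn the player when fewer than 2 remain. The sketch below isolates just that rule. It mirrors the `%` branch of `hangman_with_help()` in hangman.py, but the function name `reveal_hint` and the sample word are illustrative assumptions, not part of the assignment.

```python
import random

def reveal_hint(secret_word, letters_guessed, guesses_remaining):
    """Handle a '%' guess: reveal one unguessed letter for 2 guesses.

    Mirrors the '%' branch of hangman_with_help() above; mutates
    letters_guessed in place and returns the updated guess count.
    Assumes the word is not fully guessed yet (the game would have
    already ended in that case).
    """
    if guesses_remaining < 2:
        # Warning path: refuse the hint and charge nothing.
        print("Oops! Not enough guesses left.")
        return guesses_remaining
    unguessed = [c for c in secret_word if c not in letters_guessed]
    letter = random.choice(unguessed)
    letters_guessed.append(letter)
    print("Letter revealed:", letter)
    return guesses_remaining - 2

# Example: a hint with 10 guesses in hand costs exactly 2.
guessed = []
remaining = reveal_hint("tangerine", guessed, 10)
assert remaining == 8 and guessed[0] in "tangerine"
```
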
Azure/azure-linux-automation | https://github.com/Azure/azure-linux-automation | 334462d3917af56c47f5659a5d05cb577867ff44 | 8dc386ebc46873deb1c68832de7f65d36d7e0f90 | ee55fd5cccae0ee25e7aa547d9def4ccd367d655 | refs/heads/master | 2023-08-27T14:04:27.629094 | 2023-03-28T16:48:36 | 2023-03-28T16:48:36 | 21,183,815 | 63 | 153 | null | 2014-06-24T23:13:50 | 2022-11-03T03:21:04 | 2023-03-28T16:48:36 | PowerShell | [
{
"alpha_fraction": 0.6660124063491821,
"alphanum_fraction": 0.6772681474685669,
"avg_line_length": 41.385398864746094,
"blob_id": "27571f76806c16907e8de9640f3fb23c12981686",
"content_id": "e25fbcbf2075b267d28bbf7feee821c9d07ed902",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 49930,
"license_type": "permissive",
"max_line_length": 481,
"num_lines": 1178,
"path": "/remote-scripts/E2E-DAYTRADER-SETUP.py",
"repo_name": "Azure/azure-linux-automation",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport re\nimport time\nimport imp\nimport sys\nfrom azuremodules import *\n\n#OS independent variables\ndaytrader_db_root_password = \"daytrader_root_password\"\ndaytrader_db_name\t= \"tradedb\"\ndaytrader_db_hostname = \"localhost\" \ndaytrader_db_username = \"trade\"\ndaytrader_db_password = \"trade\"\nfront_endVM_ips\t= \"unknown\"\nvm_username\t\t= \"unknown\"\nvm_password\t\t= \"unknown\"\ncommon_packages_list = [\"php\",\"at\",\"php-mysql\",\"wget\",\"libstdc++*\",\"libstdc++.so.5\",\"elfutils.x86_64\",\"libstdc++33\",\"compat-libstdc++-296\",\"libXp.x86_64\",\"compat-libstdc++-33.x86_64\",\"compat-db.x86_64\",\"libXmu.x86_64\",\"gtk2.x86_64\",\"pam.x86_64\",\"libXft.x86_64\",\"libXtst.x86_64\",\"gtk2-engines.x86_64\",\"elfutils.x86_64\",\"elfutils-libs\",\"ksh\",\"bc\",\"xauth\"]\n\n#OS dependent variables\npexpect_pkg_name\t= \"unknown\"\nmysql_pkg_name\t\t= \"unknown\"\ncurrent_distro\t\t= \"unknown\"\ndistro_version\t\t= \"unknown\"\nservice_httpd_name\t= \"unknown\"\nservice_mysqld_name\t= \"unknown\"\nservice_command\t\t= \"unknown\"\nfrontend_packages_list = \"unknown\"\nsinglevm_packages_list = \"unknown\"\nstartup_file\t\t\t= \"/etc/rc.local\"\n\ndef DetectDistro():\n\tdistribution = 'unknown'\n\tversion = 'unknown'\n\t\n\tRunLog.info(\"Detecting Distro \")\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S cat /etc/*-release\")\n\toutputlist = re.split(\"\\n\", output)\n\t\n\tfor line in outputlist:\n\t\tline = re.sub('\"', '', line)\n\t\tif (re.match(r'^ID=(.*)',line,re.M|re.I) ):\n\t\t\tmatchObj = re.match( r'^ID=(.*)', line, re.M|re.I)\n\t\t\tdistribution = matchObj.group(1)\n\t\telif (re.match(r'^VERSION_ID=(.*)',line,re.M|re.I) ):\n\t\t\tmatchObj = re.match( r'^VERSION_ID=(.*)', line, re.M|re.I)\n\t\t\tversion = matchObj.group(1)\n\t\n\tif(distribution == 'unknown'):\n\t\t# Finding the Distro\n\t\tfor line in outputlist:\n\t\t\tif (re.match(r'.*Ubuntu.*',line,re.M|re.I) ):\n\t\t\t\tdistribution = 'ubuntu'\n\t\t\t\tbreak\n\t\t\telif (re.match(r'.*SUSE Linux.*',line,re.M|re.I)):\n\t\t\t\tdistribution = 'SUSE Linux'\n\t\t\t\tbreak\n\t\t\telif (re.match(r'.*openSUSE.*',line,re.M|re.I)):\n\t\t\t\tdistribution = 'openSUSE'\n\t\t\t\tbreak\n\t\t\telif (re.match(r'.*centos.*',line,re.M|re.I)):\n\t\t\t\tdistribution = 'centos'\n\t\t\t\tbreak\n\t\t\telif (re.match(r'.*Oracle.*',line,re.M|re.I)):\n\t\t\t\tdistribution = 'Oracle'\n\t\t\t\tbreak\n\t\t\telif (re.match(r'.*Red Hat.*',line,re.M|re.I)):\n\t\t\t\tdistribution = 'rhel'\n\t\t\t\tbreak\t\n\treturn [distribution, version]\n\ndef set_variables_OS_dependent():\n\tglobal current_distro\n\tglobal distro_version\n\tglobal pexpect_pkg_name\n\tglobal mysql_pkg_name\n\tglobal service_httpd_name\n\tglobal service_mysqld_name\n\tglobal service_command\n\tglobal common_packages_list\n\tglobal frontend_packages_list\n\tglobal singlevm_packages_list\n\tglobal startup_file\n\t\n\t[current_distro, distro_version] = DetectDistro()\n\tprint \"current_distro: \" +current_distro\n\tif(current_distro == \"unknown\"):\n\t\tRunLog.info(\"ERROR: Unknown linux distro...\\nExiting the Wordpress installation\\n\")\n\t\tend_the_script(\"ERROR: Unknown linux distro...\\nExiting the Wordpress installation\\n\")\n\telse:\n\t\tservice_command\t\t= \"service \" #space character after service is mandatory here.\n\n\t# Identify the Distro to Set OS Dependent Variables\n\tif ((current_distro == \"Oracle\") or (current_distro == \"ol\")):\n\t\tpexpect_pkg_name\t= \"pexpect\"\n\t\tservice_httpd_name\t= 
\"httpd\"\n\t\tservice_mysqld_name = \"mysqld\"\n\t\tmysql_pkg_name\t\t= \"mysql-server\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql.x86_64\",\"mysql-client\",\"httpd\"]\n\t\tif(distro_version == \"7\" or \"7.0\"):\n\t\t\tservice_mysqld_name\t= \"mariadb\"\n\t\t\tmysql_pkg_name\t\t= \"mariadb-server\"\n\t\t\tfrontend_packages_list = common_packages_list + [\"mariadb\",\"httpd\"]\n\telif ((current_distro == \"centos\")):\n\t\tpexpect_pkg_name\t= \"pexpect\"\n\t\tservice_httpd_name\t= \"httpd\"\n\t\tservice_mysqld_name = \"mysqld\"\n\t\tmysql_pkg_name\t\t= \"mysql-server\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql.x86_64\",\"mysql-client\",\"httpd\"]\n\t\tif(distro_version == \"7\" or \"7.0\"):\n\t\t\tservice_mysqld_name\t= \"mariadb\"\n\t\t\tmysql_pkg_name\t\t= \"mariadb-server\"\n\t\t\tfrontend_packages_list = common_packages_list + [\"mariadb\",\"httpd\"]\n\telif (current_distro == \"ubuntu\"):\n\t\tpexpect_pkg_name\t= \"python-pexpect\"\n\t\tservice_httpd_name\t= \"apache2\"\n\t\tservice_mysqld_name\t= \"mysql\"\n\t\tmysql_pkg_name\t\t= \"mysql-server\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql-client\",\"php5\",\"php5-mysql\",\"libstdc++6\",\"mysql\",\"libapache2-mod-php5\",\"apache2\"]\n\telif (current_distro == \"openSUSE\"):\n\t\tpexpect_pkg_name\t= \"python-pexpect\"\n\t\tservice_httpd_name\t= \"apache2\"\n\t\tservice_mysqld_name\t= \"mysql\"\n\t\tmysql_pkg_name\t\t= \"mysql-community-server\"\n\t\tservice_command = \"/etc/init.d/\"\n\t\tstartup_file\t\t\t= \"/etc/init.d/boot.local\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql-community-server-client\",\"php5\", \"php5-mysql\",\"apache2-mod_php5\",\"apache2\"]\n\telif (current_distro == \"SUSE Linux\"):\n\t\tpexpect_pkg_name\t= \"python-pexpect\"\t\t\t\t\t \n\t\tservice_httpd_name\t= \"apache2\"\n\t\tservice_mysqld_name\t= \"mysql\"\n\t\tmysql_pkg_name\t\t= \"mysql\"\n\t\tservice_command = \"/etc/init.d/\"\n\t\tstartup_file\t\t\t= \"/etc/init.d/boot.local\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql-client\",\"php5\",\"php5-mysql\",\"php53\",\"php53-mysql\",\"apache2-mod_php5\",\"apache2\"]\n\telif (current_distro == \"sles\"):\n\t\tpexpect_pkg_name\t= \"python-pexpect\"\t\t\t\t\t \n\t\tservice_httpd_name\t= \"apache2\"\n\t\tservice_mysqld_name\t= \"mysql\"\n\t\tmysql_pkg_name\t\t= \"mysql\"\n\t\tstartup_file\t\t\t= \"/etc/init.d/boot.local\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql-client\",\"php5\",\"php5-mysql\",\"php53\",\"php53-mysql\",\"apache2-mod_php5\",\"apache2\"]\n\t\tif(distro_version == \"12\"):\n\t\t\tservice_mysqld_name\t= \"mysql\"\n\t\t\tmysql_pkg_name\t\t= \"mariadb\"\n\t\t\tfrontend_packages_list = common_packages_list + [\"mariadb-client\",\"php5\",\"php5-mysql\",\"php53\",\"php53-mysql\",\"apache2-mod_php5\",\"apache2\"]\n\telif ((current_distro == \"Red Hat\") or (current_distro == \"rhel\")):\n\t\tpexpect_pkg_name\t= \"pexpect\"\t\t\t\t\t \n\t\tservice_httpd_name\t= \"httpd\"\n\t\tservice_mysqld_name\t= \"mysqld\"\n\t\tmysql_pkg_name\t\t= \"mysql-server\"\n\t\tfrontend_packages_list = common_packages_list + [\"mysql.x86_64\",\"httpd\"]\n\t\tif(distro_version == \"7.0\"):\n\t\t\tservice_mysqld_name\t= \"mariadb\"\n\t\t\tmysql_pkg_name\t\t= \"mariadb-server\"\n\t\t\tfrontend_packages_list = common_packages_list + [\"mariadb\",\"httpd\"]\n\n\tsinglevm_packages_list = frontend_packages_list + [mysql_pkg_name]\n\tRunLog.info( \"set_variables_OS_dependent .. 
[done]\")\n\ndef end_the_script():\t\n\tprint file_get_contents(\"/home/\"+vm_username+\"/Runtime.log\")\n\texit()\n\t\ndef file_get_contents(filename):\n with open(filename) as f:\n return f.read()\n\ndef exec_multi_cmds_local_sudo(cmd_list):\n\tf = open('/tmp/temp_script.sh','w')\n\tfor line in cmd_list:\n\t\tf.write(line+'\\n') \n\tf.close()\n\tRun (\"chmod +x /tmp/temp_script.sh\")\n\tRun (\"echo '\"+vm_password+\"' | sudo -S /tmp/temp_script.sh 2>&1 > /tmp/exec_multi_cmds_local_sudo.log\")\n\treturn file_get_contents(\"/tmp/exec_multi_cmds_local_sudo.log\")\n\t\ndef update_repos():\n\tRunLog.info( \"\\nUpdating the repositoriy information...\")\n\tif ((current_distro == \"ubuntu\") or (current_distro == \"Debian\")):\n\t\tRun(\"echo '\"+vm_password+\"' | sudo -S apt-get update\")\n\telif ((current_distro == \"rhel\") or (current_distro == \"Oracle\") or (current_distro == 'centos') or (current_distro == \"ol\")):\n\t\tRun(\"echo '\"+vm_password+\"' | sudo -S yum -y update\")\n\telif (current_distro == \"openSUSE\") or (current_distro == \"SUSE Linux\") or (current_distro == \"sles\"):\n\t\tRun(\"echo '\"+vm_password+\"' | sudo -S zypper --non-interactive --gpg-auto-import-keys update\")\n\telse:\n\t\tRunLog.error((\"Repo upgradation failed on:\"+current_distro))\n\t\n\tRunLog.info( \"Updating the repositoriy information... [done]\")\n\ndef disable_selinux():\n\tRunLog.info( \"\\nDiasabling selinux\")\n\tselinuxinfo = Run (\"echo '\"+vm_password+\"' | sudo -S cat /etc/selinux/config\")\n\tif (selinuxinfo.rfind('SELINUX=disabled') != -1):\n\t\tRunLog.info( \"selinux is already disabled\")\n\telse :\n\t\tselinux = Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config \")\n\t\tif (selinuxinfo.rfind('SELINUX=disabled') != -1):\n\t\t\tRunLog.info( \"selinux is disabled\")\n\tRunLog.info( \"Diasabling selinux... 
[done]\")\n\ndef disable_iptables():\n\tRunLog.info( \"\\n Disabling ip-tables..\")\n\tif (current_distro == 'ubuntu'):\n\t\tufw = Run (\"echo '\"+vm_password+\"' | sudo -S ufw disable\")\t\t\n\telif(current_distro == 'rhel' or current_distro == 'centos' or current_distro == \"Oracle\" or current_distro == \"ol\"):\n\t\tcmds = (\"service iptables save\",\"service iptables stop\",\"chkconfig iptables off\",\"service ip6tables save\",\"service ip6tables stop\",\"chkconfig ip6tables off\",\"iptables -nL\" ,\"systemctl stop iptables.service\",\"systemctl disable iptables.service\",\"systemctl stop firewalld.service\",\"systemctl disable firewalld.service\")\n\t\toutput = exec_multi_cmds_local_sudo(cmds)\n\t\tioutput = Run(\"echo '\"+vm_password+\"' | sudo -S service iptables status\")\n\t\tfoutput = Run(\"echo '\"+vm_password+\"' | sudo -S service firewalld status\")\n\t\t\n\t\tif(ioutput.find('dead') != -1 or foutput.find('dead') != -1):\n\t\t\tRunLog.info( \"Diasabling iptables and firewalls..[done]\")\n\t\telse:\n\t\t\tRunLog.info( \"Diasabling iptables and firewalls..[failed]\")\n\telif((current_distro == 'SUSE Linux')or(current_distro == 'sles')):\n\t\tcmds = (\"/sbin/yast2 firewall startup manual\",\"/sbin/rcSuSEfirewall2 stop\",\"chkconfig SuSEfirewall2_setup off\")\n\t\toutput = exec_multi_cmds_local_sudo(cmds)\n\t\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S /sbin/rcSuSEfirewall2 status\")\n\t\tif((output.find('unused') != -1) or (output.find('dead') != -1)):\n\t\t\tRunLog.info( \"Diasabling iptables..[done]\")\n\t\telse:\n\t\t\tRunLog.info( \"Diasabling iptables..[failed]\")\n\telse:\n\t\tRun (\"echo '\"+vm_password+\"' | sudo -S chkconfig iptables off\")\n\t\tRun (\"echo '\"+vm_password+\"' | sudo -S chkconfig ip6tables off\")\n\t\tRunLog.info( \"Disabling iptables..[done]\")\n\ndef easy_install(module):\n\tRunLog.info( \"Installing '\"+module+\"' using easy_install..\")\n\toutput = Run (\"echo '\"+vm_password+\"' | sudo -S easy_install '\"+module+\"'\")\n\tif(output.rfind('Installed /usr/lib/python2.7')or output.rfind('Finished processing')):\n\t\tRunLog.info( \" '\"+module+\"' module installation [done]..\")\n\telse:\n\t\tRunLog.info( \" '\"+module+\"' module installation [failed]..\")\n\t\tRunLog.info( \"Installing pexpect from source..\")\n\t\tupdate_python_and_install_pexpect()\n\t\tRunLog.info( \"\\n\\nInvoking the script with new python:....\")\n\t\tRunLog.info( Run(\"python \"+__file__+\" \"+' '.join(sys.argv[1:])))\n\t\tend_the_script()\n\t\t\n\t\t\ndef yum_package_install(package):\n\tRunLog.info((\"\\nyum_package_install: \" + package))\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S yum install -y \"+package)\n\toutputlist = re.split(\"\\n\", output)\n\n\tfor line in outputlist:\n\t\t#Package installed successfully\n\t\tif (re.match(r'Complete!', line, re.M|re.I)):\n\t\t\tRunLog.info((package+\": package installed successfully.\\n\"+line))\n\t\t\treturn True\n\t\t#package is already installed\n\t\telif (re.match(r'.* already installed and latest version', line, re.M|re.I)):\n\t\t\tRunLog.info((package + \": package is already installed.\\n\"+line))\n\t\t\treturn True\n\t\telif (re.match(r'^Nothing to do', line, re.M|re.I)):\n\t\t\tRunLog.info((package + \": package already installed.\\n\"+line))\n\t\t\treturn True\n\t\t#Package installation failed\n\t\telif (re.match(r'^Error: Nothing to do', line, re.M|re.I)):\n\t\t\tbreak\n\t\t#package is not found on the repository\n\t\telif (re.match(r'^No package '+ re.escape(package)+ r' available', line, 
\n\t\t#package is not found on the repository\n\t\telif (re.match(r'^No package '+ re.escape(package)+ r' available', line, re.M|re.I)):\n\t\t\tbreak\n\t\t\t\n\t#Consider package installation failed if none of the above matched.\n\tRunLog.error((package + \": package installation failed!\\n\" +output))\n\treturn False\n\ndef aptget_package_install(package):\n\tRunLog.info(\"Installing Package: \" + package)\n\t# Identify the package for Ubuntu\t\n\t# Note: mysql_secure_installation is not run on Ubuntu; the root password is preseeded via debconf instead.\n\tif (package == 'mysql-server'):\n\t\tRunLog.info( \"apt-get function package:\" + package) \n\t\t\n\t\tcmds = (\"export DEBIAN_FRONTEND=noninteractive\",\"echo mysql-server mysql-server/root_password select \" + daytrader_db_root_password + \" | debconf-set-selections\", \"echo mysql-server mysql-server/root_password_again select \" + daytrader_db_root_password + \"| debconf-set-selections\", \"echo '\"+vm_password+\"' | sudo -S apt-get install -y --force-yes mysql-server\")\n\t\toutput = exec_multi_cmds_local_sudo(cmds)\n\n\telse:\n\t\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S apt-get install -y --force-yes \"+package)\n\t\n\toutputlist = re.split(\"\\n\", output)\t\n \n\tunpacking = False\n\tsetting_up = False\n\n\tfor line in outputlist:\n\t\t#package is already installed\n\t\tif (re.match(re.escape(package) + r' is already the newest version', line, re.M|re.I)):\n\t\t\tRunLog.info(package + \": package is already installed.\"+line)\n\t\t\treturn True\n\t\t#package installation check 1\t\n\t\telif (re.match(r'Unpacking '+ re.escape(package) + r\" \\(.*\" , line, re.M|re.I)):\n\t\t\tunpacking = True\n\t\t#package installation check 2\n\t\telif (re.match(r'Setting up '+ re.escape(package) + r\" \\(.*\" , line, re.M|re.I)):\n\t\t\tsetting_up = True\n\t\t#Package installed successfully\n\t\tif (setting_up and unpacking):\n\t\t\tRunLog.info(package+\": package installed successfully.\")\n\t\t\treturn True\n\t\t#package is not found on the repository\n\t\telif (re.match(r'E: Unable to locate package '+ re.escape(package), line, re.M|re.I)):\n\t\t\tbreak\n\t\t#package installation failed due to server unavailability\n\t\telif (re.match(r'E: Unable to fetch some archives', line, re.M|re.I)):\n\t\t\tbreak\n\t\t\n\t#Consider package installation failed if none of the above matched.\n\tRunLog.info(package + \": package installation failed!\\n\")\n\tRunLog.info(\"Error log: \"+output)\n\treturn False\n\ndef zypper_package_install(package):\n\tRunLog.info( \"\\nzypper_package_install: \" + package)\n\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S zypper --non-interactive in \"+package)\n\toutputlist = re.split(\"\\n\", output)\n\t\t\n\tfor line in outputlist:\n\t\t#Package installed successfully\n\t\tif (re.match(r'.*Installing: '+re.escape(package)+r'.*done', line, re.M|re.I)):\n\t\t\tRunLog.info((package+\": package installed successfully.\\n\"+line))\n\t\t\treturn True\n\t\t#package is already installed\n\t\telif (re.match(r'\\''+re.escape(package)+r'\\' is already installed', line, re.M|re.I)):\n\t\t\tRunLog.info((package + \": package is already installed.\\n\"+line))\n\t\t\treturn True\n\t\t#package is not found on the repository\n\t\telif (re.match(r'^No provider of \\''+ re.escape(package) + r'\\' found', line, re.M|re.I)):\n\t\t\tbreak\n\n\t#Consider package installation failed if none of the above matched.\n\tRunLog.error((package + \": package installation failed!\\n\"+output))\n\treturn False\n\ndef install_deb(file_path):\n\tRunLog.info( \"\\nInstalling package: \"+file_path)\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S dpkg -i \"+file_path+\" 2>&1\")\n\tRunLog.info( 
output)\n\toutputlist = re.split(\"\\n\", output)\n\n\tfor line in outputlist:\n\t\t#package installed successfully\n\t\tif(re.match(\"installation successfully completed\", line, re.M|re.I)):\n\t\t\tRunLog.info(file_path + \": package installed successfully.\"+line)\n\t\t\treturn True\t\t\t\n\t\t\t\n\tRunLog.info(file_path+\": Installation failed\"+output)\n\treturn False\n\ndef install_rpm(file_path):\n\tRunLog.info( \"\\nInstalling package: \"+file_path)\n\t# the same rpm invocation works on all supported distros\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S rpm -ivh --nodeps \"+file_path+\" 2>&1\")\n\tRunLog.info( output)\n\toutputlist = re.split(\"\\n\", output)\n\tpackage = re.split(\"/\", file_path )[-1]\n\tmatchObj = re.match( r'(.*?)\\.rpm', package, re.M|re.I)\n\tpackage = matchObj.group(1)\n\t\n\tfor line in outputlist:\n\t\t#package is already installed\n\t\tif (re.match(r'.*package '+re.escape(package) + r'.*is already installed', line, re.M|re.I)):\n\t\t\tRunLog.info(file_path + \": package is already installed.\"+line)\n\t\t\treturn True\n\t\telif(re.match(re.escape(package) + r'.*######', line, re.M|re.I)):\n\t\t\tRunLog.info(package + \": package installed successfully.\"+line)\n\t\t\treturn True\n\t\t\t\n\tRunLog.info(file_path+\": Installation failed\"+output)\n\treturn False\n\t\t\ndef yum_package_uninstall(package):\n\tRunLog.info( \"\\nRemoving package: \"+package)\n\toutput = Run (\"echo '\"+vm_password+\"' | sudo -S yum remove -y \"+package)\n\treturn True\n\ndef zypper_package_uninstall(package):\n\tRunLog.info( \"\\nRemoving package: \"+package)\n\toutput = Run (\"echo '\"+vm_password+\"' | sudo -S zypper remove -y \"+package)\n\treturn True\n\t\ndef aptget_package_uninstall(package):\n\tRunLog.info( \"\\nRemoving package: \"+package)\n\toutput = Run (\"echo '\"+vm_password+\"' | sudo -S apt-get remove -y \"+package)\n\treturn True\n\t\ndef install_package(package):\n\tRunLog.info( \"\\nInstall_package: \"+package)\n\tif ((current_distro == \"ubuntu\") or (current_distro == \"Debian\")):\n\t\treturn aptget_package_install(package)\n\telif ((current_distro == \"rhel\") or (current_distro == \"Oracle\") or (current_distro == 'centos')or (current_distro == \"ol\")):\n\t\treturn yum_package_install(package)\n\telif (current_distro == \"SUSE Linux\") or (current_distro == \"openSUSE\") or (current_distro == \"sles\"):\n\t\treturn zypper_package_install(package)\n\telse:\n\t\tRunLog.error((package + \": package installation failed!\"))\n\t\tRunLog.info((current_distro + \": Unrecognised Distribution OS Linux found!\"))\n\t\treturn False\n\ndef install_package_file(file_path):\n\tRunLog.info( \"\\n Install_package_file: \"+file_path)\n\tif ((current_distro == \"ubuntu\") or (current_distro == \"Debian\")):\n\t\treturn install_deb(file_path)\n\telif ((current_distro == \"rhel\") or (current_distro == \"Oracle\") or (current_distro == 'centos')or (current_distro == \"ol\")):\n\t\treturn install_rpm(file_path)\n\telif (current_distro == \"SUSE Linux\") or (current_distro == \"openSUSE\") or (current_distro == \"sles\"):\n\t\treturn install_rpm(file_path)\n\telse:\n\t\tRunLog.error((file_path + \": package installation failed!\"))\n\t\tRunLog.info((current_distro + \": Unrecognised Distribution OS Linux found!\"))\n\t\treturn False\n\ndef uninstall_package(package):\n\tRunLog.info( \"\\nUninstall package: 
\"+package)\n\tif ((current_distro == \"ubuntu\") or (current_distro == \"Debian\")):\n\t\treturn aptget_package_uninstall(package)\n\telif ((current_distro == \"rhel\") or (current_distro == \"Oracle\") or (current_distro == 'centos')or (current_distro == \"ol\")):\n\t\treturn yum_package_uninstall(package)\n\telif (current_distro == \"SUSE Linux\") or (current_distro == \"openSUSE\") or (current_distro == \"sles\"):\n\t\treturn zypper_package_uninstall(package)\n\telse:\n\t\tRunLog.error((package + \": package installation failed!\"))\n\t\tRunLog.info((current_distro + \": Unrecognised Distribution OS Linux found!\"))\n\t\treturn False\n\ndef install_packages_singleVM():\n\tRunLog.info( \"\\nInstall packages singleVM ..\")\n\t\n\tfor package in singlevm_packages_list:\n\t\tif(install_package(package)):\n\t\t\tRunLog.info( package + \": installed successfully\")\n\t\telse:\n\t\t\tRunLog.error( package + \": installation Failed\")\n\tRunLog.info( \"Install packages singleVM ..[done]\")\n\treturn True\n\t\ndef install_packages_backend():\n\tRunLog.info(\"Installing Packages in Backend VM \")\n\t#Identify the packages list from \"packages_list\"\n\tfor package in singlevm_packages_list:\n\t\tif(install_package(package)):\n\t\t\tRunLog.info( package + \": installed successfully\")\n\t\telse:\n\t\t\tRunLog.error( package + \": installation Failed\")\t\t\n\treturn True\n\ndef install_packages_frontend():\n\tRunLog.info(\"Installing Packages in LoadBalancer Frontend VM\")\n\t#Identify the packages list from \"packages_list\"\n\tfor package in frontend_packages_list:\n\t\tif(install_package(package)):\n\t\t\tRunLog.info(package + \": installed successfully\")\n\t\telse:\n\t\t\tRunLog.info(package + \": installation Failed\")\t\t\t\n\tRunLog.info( \"Install packages singleVM ..[done]\")\n\treturn True\n\ndef exec_multi_cmds_local(cmd_list):\n\tf = open('/tmp/temp_script.sh','w')\n\tfor line in cmd_list:\n\t\tf.write(line+'\\n') \n\tf.close()\n\tRun (\"bash /tmp/temp_script.sh 2>&1 > /tmp/exec_multi_cmds_local.log\")\n\treturn file_get_contents(\"/tmp/exec_multi_cmds_local.log\")\n\ndef set_javapath():\n\tRunLog.info( \"\\nSetting Java path\")\n\t\n\tf = open('/tmp/setjavapath.sh','w')\n\tf.write('export PATH=$PATH:/opt/ibm/java-x86_64-60/jre/bin\\n') \n\tf.write('export JAVA_HOME=/opt/ibm/java-x86_64-60/jre\\n') \n\tf.write('export PATH=$PATH:/root/IBMWebSphere/apache-maven-2.2.1/bin\\n') \n\tf.write('export CLASSPATH=/root/IBMWebSphere/mysql-connector-java-5.1.18/mysql-connector-java-5.1.18.jar\\n') \n\tf.close()\n\tRun (\"echo '\"+vm_password+\"\\' | sudo -S mv /tmp/setjavapath.sh /etc/profile.d/\")\n\tRunLog.info( \"Setting Java path...[done]\")\n\ndef exec_multi_cmds_ssh(user_name, password, hostname, commands):\n\ttry:\n\t\ts = pxssh.pxssh()\n\t\tlog = \"\"\n\t\ts.login(hostname, user_name, password)\n\t\tfor line in commands:\n\t\t\ts.sendline(line)\n\t\t\ts.prompt()\n\t\t\tlog = log + s.before\n\t\t\t\n\texcept pxssh.ExceptionPxssh as e:\n\t\tRunLog.error((\"pxssh failed on login.\"))\n\t\tRunLog.error((e))\n\t\n\ts.logout()\n\treturn log\n\ndef exec_cmd_remote_ssh(user_name, password, ip, command):\n\tchild = pexpect.spawn (\"ssh -t \"+user_name+\"@\"+ip+\" \"+command)\n\tchild.logfile = open(\"/tmp/mylog\", \"w\")\n\n\tfor j in range(0,6):\n\t\tchild.timeout=6000\n\t\t#wait till expected pattern is found\n\t\ti = child.expect (['.assword', \"yes/no\",pexpect.EOF])\n\t\tif (i == 0):\n\t\t\tchild.sendline (password)\n\t\t\tRunLog.info( \"Password entered\")\n\t\telif (i == 
1):\n\t\t\tchild.sendline (\"yes\")\n\t\t\tRunLog.info( \"yes sent\")\n\t\telse:\n\t\t\tbreak\n\treturn file_get_contents(\"/tmp/mylog\")\n\ndef mvn_install():\n\tmvn_install_status = False\n\tRunLog.info( \"Installing Maven..\")\n\n\tcmds = (\"cd /root/IBMWebSphere/daytrader-2.2.1-source-release\", \\\n\t\"export CLASSPATH=/root/IBMWebSphere/mysql-connector-java-5.1.18/mysql-connector-java-5.1.18.jar\", \\\n\t\"export PATH=$PATH:/root/IBMWebSphere/apache-maven-2.2.1/bin\",\\\n\t\"export JAVA_HOME=/opt/ibm/java-x86_64-60/jre\",\\\n\t\"export PATH=$PATH:/opt/ibm/java-x86_64-60/jre/bin\",\\\n\t\"echo $PATH\",\"echo $JAVA_HOME\", \\\n\t\"echo $PWD\", \\\n\t\"mvn install 2>&1 > /tmp/mvn.log\")\n\t\n\tRunLog.info( exec_multi_cmds_local_sudo(cmds))\n\t\n\tfor i in range(0,5):\n\t\toutput = Run (\"echo '\"+vm_password+\"\\' | sudo -S tail -n 25 /tmp/mvn.log\")\n\t\tif \"BUILD SUCCESSFUL\" in output:\n\t\t\tRunLog.info(\"Installing Maven.. [done]\")\n\t\t\tmvn_install_status = True\n\t\t\tbreak\n\t\telse:\n\t\t\tRunLog.info(exec_multi_cmds_local_sudo(cmds))\n\t\t\t\n\tif mvn_install_status == False:\n\t\tRunLog.error( \"Installing Maven.. [failed]\")\n\t\tprint Run (\"echo '\"+vm_password+\"\\' | sudo -S cat /tmp/mvn.log\")\n\t\tend_the_script()\n\ndef setup_websphere():\n\tRunLog.info( \"\\nSetting up Websphere ..\")\n\tRunLog.info( \"Extracting /tmp/IBMWebSphere.tar.gz\")\n\tJustRun (\"echo '\"+vm_password+\"' | sudo -S tar -xvf /tmp/IBMWebSphere.tar.gz -C /root\")\n\tif (current_distro == \"ubuntu\"):\n\t\tinstall_package_file(\"/root/IBMWebSphere/ibm-java-x86-64-sdk_6.0-10.1_amd64.deb\")\n\telse:\n\t\tinstall_package_file(\"/root/IBMWebSphere/ibm-java-x86_64-sdk-6.0-9.1.x86_64.rpm\")\n\t\t\n\tset_javapath()\n\n\tRunLog.info( \"Installing Websphere\"\t)\n\tRunLog.info( exec_multi_cmds_local((\"export CLASSPATH=$CLASSPATH:/root/IBMWebSphere/mysql-connector-java-5.1.18/mysql-connector-java-5.1.18.jar\", \"export PATH=$PATH:/root/IBMWebSphere/apache-maven-2.2.1/bin\", \"export JAVA_HOME=/opt/ibm/java-x86_64-60/jre\", \"export PATH=$PATH:/opt/ibm/java-x86_64-60/jre/bin\", \"echo $PATH\", \"echo $CLASSPATH\", \"echo '\"+vm_password+\"' | sudo -S env PATH=$PATH /root/IBMWebSphere/wasce_setup-2.1.1.6-unix.bin -i silent -r responseFile.properties\")))\n\tRunLog.info( \"\\nSetting up Websphere ..[done]\")\n\ndef mysql_secure_install(db_root_password):\n\tRunLog.info( \"\\nStarting mysql_secure_install\")\n\tchild = pexpect.spawn (\"/usr/bin/mysql_secure_installation\")\n\t\n\t#wait till expected pattern is found\n\ti = child.expect (['enter for none', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"\")\n\t\tRunLog.info( \"'enter for none' command successful\\n\")\n\t\n\t#wait till expected pattern is found\n\ttry:\n\t\ti = child.expect (['\\? \\[Y\\/n\\]', pexpect.EOF])\n\t\tif (i == 0):\n\t\t\tchild.sendline (\"Y\")\t#send y\n\t\t\tRunLog.info( \"'Set root password' command successful\\n\"+child.before)\n\texcept:\n\t\tRunLog.error( \"exception:\" + str(i))\n\t\treturn\t\n\n\tfor x in range(0, 10):\n\t\t#wait till expected pattern is found\n\t\ttry:\n\t\t\ti = child.expect (['\\? 
\\[Y\\/n\\]', 'password:', pexpect.EOF])\n\t\t\tif (i == 0):\n\t\t\t\tchild.sendline (\"Y\")\t#send y\n\t\t\telif(i == 1):\n\t\t\t\tchild.sendline (db_root_password)\t#send y\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept:\n\t\t\tRunLog.error( \"exception:\" + str(i))\n\t\t\treturn\n\ndef create_db(db_name, db_root_password):\n\tRunLog.info( \"\\nCreating a database on MySQL with name \"+db_name)\n\tchild = pexpect.spawn ('mysql -uroot -p'+db_root_password)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline ('CREATE DATABASE '+db_name+\";\")\n\t\tRunLog.info( \"'CREATE DATABASE' command successful\\n\"+child.before)\n\t\t#wait till expected pattern is found -> Show Databases\n\t\ti = child.expect (['m*>', pexpect.EOF])\n\t\tif (i == 0):\n\t\t\tchild.sendline (\"show databases;\") #send y\n\t\tRunLog.info( \"'show databases' command successful\\n\"+child.before)\n\t\t#wait till expected pattern is found -> exit\n\t\ti = child.expect (['m*>', pexpect.EOF])\n\t\tif (i == 0):\n\t\t\tchild.sendline (\"exit\")\n\t\t\n\t\tRunLog.info( \"Creating a database on MySQL with name \"+db_name+\"..[done]\")\n\t\treturn True\n\n\tRunLog.error( \"Creating a database on MySQL with name \"+db_name+\"..[failed]\")\n\treturn False\n\ndef create_user_db(db_name, db_root_password, db_hostname, db_username, db_password):\n\tRunLog.info( \"\\nCreating user with username: \"+db_username+\", on MySQL database name: \"+db_name)\n\tchild = pexpect.spawn ('mysql -uroot -p'+db_root_password)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline ('CREATE USER '+db_username+\"@\"+db_hostname+\";\") #send y\n\t\tRunLog.info( \"'CREATE USER' command successful\\n\"+child.before)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"GRANT ALL PRIVILEGES ON \"+db_name+\".* TO '\"+db_username+\"'@'\"+db_hostname+\"' IDENTIFIED by '\"+db_password+\"' WITH GRANT OPTION;\")\n\t\tRunLog.info( \"'GRANT ALL PRIVILEGES' command successful\\n\"+child.before)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"FLUSH PRIVILEGES;\") #send y\n\t\tRunLog.info( \"'FLUSH PRIVILEGES' command successful\\n\"+child.before)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"show databases;\") #send y\n\t\tRunLog.info( \"'show databases' command successful\\n\"+child.before)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"select host,user from mysql.user;\") #send y\n\t\tRunLog.info( \"'select user' command successful\\n\"+child.before)\n\n\t#wait till expected pattern is found\n\ti = child.expect (['m*>', pexpect.EOF])\n\tif (i == 0):\n\t\tchild.sendline (\"exit\") #send y\n\t\tRunLog.info( \"'CREATE USER' command successful\\n\"+child.before)\n\t\n\tRunLog.info( \"Creating user with username: \"+db_username+\", on MySQL database name: \"+db_name+\"...[done]\")\n\t\ndef get_services_status(service):\n\tRunLog.info(\"Acquiring the status of services\")\n\tcurrent_status = \"unknown\"\n\n\tRunLog.info(\"get service func : \" + service)\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S \"+service_command+service+\" status\")\n\toutputlist = re.split(\"\\n\", output)\n\n\tfor line in outputlist:\n\t\t#start condition\n\t\tif 
(re.match(re.escape(service)+r'.*start\\/running', line, re.M|re.I) or \\\n\t\t\tre.match(r'.*'+re.escape(service)+r'.*is running.*', line, re.M|re.I) or \\\n\t\t\tre.match(r'Starting.*'+re.escape(service)+r'.*OK',line,re.M|re.I) or \\\n\t\t\tre.match(r'^Checking for.*running', line, re.M|re.I) or \\\n\t\t\tre.match(r'.*active \\(running\\).*', line, re.M|re.I)):\n\t\t\tRunLog.info(service+\": service is running\\n\"+line)\n\t\t\tcurrent_status = \"running\"\n\n\t\tif (re.match(re.escape(service)+r'.*Stopped.*',line,re.M|re.I) or \\\n\t\t\tre.match(r'.*'+re.escape(service)+r'.*is not running.*', line, re.M|re.I) or \\\n\t\t\tre.match(re.escape(service)+r'.*stop\\/waiting', line, re.M|re.I) or \\\n\t\t\tre.match(r'^Checking for.*unused', line, re.M|re.I) or \\\n\t\t\tre.match(r'.*inactive \\(dead\\).*', line, re.M|re.I)):\n\t\t\tRunLog.info(service+\": service is stopped\\n\"+line)\n\t\t\tcurrent_status = \"stopped\"\n\t\n\tif(current_status == \"unknown\"):\n\t\toutput = Run(\"pgrep \"+service+\" |wc -l\")\n\t\tif (int(output) > 0):\n\t\t\tRunLog.info(\"Found '\"+output+\"' instances of service: \"+service+\" running.\")\n\t\t\tRunLog.info(service+\": service is running\\n\")\n\t\t\tcurrent_status = \"running\"\n\t\telse:\n\t\t\tRunLog.info(\"No instances of service: \"+service+\" are running.\")\n\t\t\tRunLog.info(service+\": service is not running\\n\")\n\t\t\tcurrent_status = \"stopped\"\n\n\treturn (current_status)\n\ndef set_services_status(service, status):\n\tRunLog.info(\"Setting service status\")\n\tcurrent_status = \"unknown\"\n\tset_status = False\n\n\tRunLog.info(\"service :\" + service)\n\t\n\tRunLog.info(\"service status:\"+ status)\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S \"+service_command+service+\" \"+status)\n\tcurrent_status = get_services_status(service)\n\tRunLog.info(\"current_status -:\" + current_status)\n\n\tif((current_status == \"running\") and (status == \"restart\" or status == \"start\" )):\n\t\tset_status = True\n\telif((current_status == \"stopped\") and (status == \"stop\")):\n\t\tset_status = True\n\telse:\n\t\tRunLog.info(\"set_services_status failed\\nError log: \\n\" + output)\n\n\treturn (set_status, current_status)\n\t\ndef deploy_daytrader():\n\tcmds = (\"export CLASSPATH=/root/IBMWebSphere/mysql-connector-java-5.1.18/mysql-connector-java-5.1.18.jar\", \\\n\t\"export PATH=$PATH:/root/IBMWebSphere/apache-maven-2.2.1/bin\",\\\n\t\"export JAVA_HOME=/opt/ibm/java-x86_64-60/jre\",\\\n\t\"export PATH=$PATH:/opt/ibm/java-x86_64-60/jre/bin\",\\\n\t\"echo $PATH\",\"echo $JAVA_HOME\", \\\n\t\"/opt/IBM/WebSphere/AppServerCommunityEdition/bin/deploy.sh --user system --password manager deploy /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/daytrader-ear/target/daytrader-ear-2.2.1.ear /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\")\n\toutput = exec_multi_cmds_local_sudo(cmds)\n\tif(output.rfind(\"TradeJMS\")!= -1):\n\t\tRunLog.info('** Daytrader setup completed successfully **\\n ' + output)\n\t\tout = exec_multi_cmds_local_sudo([\"/opt/IBM/WebSphere/AppServerCommunityEdition/bin/deploy.sh --user system --password manager redeploy /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/daytrader-ear/target/daytrader-ear-2.2.1.ear /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"])\n\telse:\n\t\tRunLog.error('** Daytrader setup did not complete successfully **\\n ' + output)\n\treturn output
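\n\n# Note (illustrative): the marker string \"TradeJMS\" in deploy.sh's output is what\n# deploy_daytrader() above treats as success; the follow-up redeploy call simply\n# refreshes the EAR if it was already deployed.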
\n\ndef start_ibm_websphere():\n\tRunLog.info( \"\\nStarting websphere..\")\n\tset_services_status(service_httpd_name, \"stop\")\n\tRun(\"echo '\"+vm_password+\"' | sudo -S chkconfig '\"+service_httpd_name+\"' off\")\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S /opt/IBM/WebSphere/AppServerCommunityEdition/bin/startup.sh\")\n\tif \"Exception\" in output:\n\t\tRunLog.error(\"Failure Starting IBM Websphere\")\n\t\tresult = False\n\t\traise Exception\n\telse:\n\t\tRunLog.info(\"IBM Websphere Server Started....wait for 100 seconds to deploy application\")\n\ttime.sleep(100)\n\tRunLog.info( \"\\nStarting websphere.. [done]\")\n\treturn True\n\ndef stop_ibm_websphere():\n\tRunLog.info( \"\\nStopping websphere..\")\n\toutput = Run(\"echo '\"+vm_password+\"' | sudo -S /opt/IBM/WebSphere/AppServerCommunityEdition/bin/stop-server.sh\")\n\tif \"Exception\" in output:\n\t\tRunLog.error(\"Failure Stopping IBM Websphere\")\n\t\tresult=False\n\t\traise Exception\n\telse:\n\t\tRunLog.info(\"Successfully Stopped IBM WebSphere\")\n\tRunLog.info( \"Stopping websphere.. [done]\")\n\ndef install_ibm_mySql_connector():\n\tRunLog.info( \"\\nInstalling MySQL Java connector..\")\n\toutput = exec_multi_cmds_local_sudo((\"sh /opt/IBM/WebSphere/AppServerCommunityEdition/bin/deploy.sh --user system --password manager install-library --groupId mysql /root/IBMWebSphere/mysql-connector-java-5.1.18/mysql-connector-java-5.1.18.jar\",\"\\n\"))\n\t\n\tif (\"Installed mysql\" in output):\n\t\tRunLog.info( \"Mysql connector java jar installed successfully.\")\n\telse:\n\t\tRunLog.error( \"Mysql connector java jar installation failed\")\n\t\tend_the_script()\n\t\t\n\tRunLog.info( \"Installing MySQL Java connector.. [done]\")\n\ndef setup_daytrader():\n\tRunLog.info( \"\\nSetting up daytrader ..\")\n\tmvn_install()\n\t\n\tRunLog.info( \"\\nConfiguring daytrader-mysql-xa-plan.xml\")\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S rm -rf /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/daytrader-war/src/main/webapp/dbscripts/mysql/Table.ddl\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S cp /root/IBMWebSphere/Table.ddl /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/daytrader-war/src/main/webapp/dbscripts/mysql/\"))\n\tRunLog.info( exec_multi_cmds_local_sudo((\"mysql -u\"+daytrader_db_username+\" -p\"+daytrader_db_password+\" -h\"+daytrader_db_hostname+\" \"+ daytrader_db_name + \" </root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/daytrader-war/src/main/webapp/dbscripts/mysql/Table.ddl\",\"\\n\")))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/\\(.*<config-property-setting name=\\\"UserName\\\">\\).*\\(<\\/config-property-setting>\\)/\\\\1\"+daytrader_db_username+\"\\\\2/g' /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/\\(.*<config-property-setting name=\\\"Password\\\">\\).*\\(<\\/config-property-setting>\\)/\\\\1\"+daytrader_db_password+\"\\\\2/g' /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/\\(.*<config-property-setting name=\\\"ServerName\\\">\\).*\\(<\\/config-property-setting>\\)/\\\\1\"+daytrader_db_hostname+\"\\\\2/g' 
/root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/\\(.*<config-property-setting name=\\\"DatabaseName\\\">\\).*\\(<\\/config-property-setting>\\)/\\\\1\"+daytrader_db_name+\"\\\\2/g' /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/\\(.*<host>\\).*\\(<\\/host>\\)/\\\\1\"+daytrader_db_hostname+\"\\\\2/g' /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\tRunLog.info( Run (\"echo '\"+vm_password+\"' | sudo -S sed -i 's/<version>5.1.7<\\/version>/<version>5.1.18<\\/version>/g' /root/IBMWebSphere/daytrader-2.2.1-source-release/assemblies/javaee/plans/target/classes/daytrader-mysql-xa-plan.xml\"))\n\n\tstart_ibm_websphere()\n\tRunLog.info( install_ibm_mySql_connector())\n\tRunLog.info( deploy_daytrader())\n\tRunLog.info( \"Setting up daytrader .. [done]\")\n\ndef put_file_sftp(user_name, password, ip, file_name):\n\tchild = pexpect.spawn (\"sftp \"+user_name+\"@\"+ip)\n\tchild.logfile = open(\"/tmp/mylog\", \"w\")\n\tfile_sent = False\n\n\tfor j in range(0,6):\n\t\t#wait till expected pattern is found\n\t\ti = child.expect (['.assword', \".*>\", \"yes/no\",pexpect.EOF,pexpect.TIMEOUT], timeout=300)\n\t\tif (i == 0):\n\t\t\tchild.sendline (password)\n\t\t\tRunLog.info( \"Password entered\")\n\t\telif (i == 2):\n\t\t\tchild.sendline (\"yes\")\n\t\t\tRunLog.info( \"yes sent\")\n\t\telif (i == 1):\n\t\t\tif file_sent == True:\n\t\t\t\tchild.sendline (\"exit\")\n\t\t\t\tbreak\n\t\t\tchild.sendline (\"put \"+file_name)\n\t\t\tRunLog.info( \"put file successful\")\n\t\t\tfile_sent = True\n\t\telif (i == 4):\n\t\t\tcontinue\n\n\treturn file_get_contents( \"/tmp/mylog\")\n\t\ndef get_file_sftp(user_name, password, ip, file_name):\n\tchild = pexpect.spawn (\"sftp \"+user_name+\"@\"+ip)\n\tchild.logfile = open(\"/tmp/mylog\", \"w\")\n\tfile_sent = False\n\n\tfor j in range(0,6):\n\t\ti = child.expect (['.assword', \".*>\", \"yes/no\",pexpect.EOF,pexpect.TIMEOUT], timeout=300)\n\t\tif (i == 0):\n\t\t\tchild.sendline (password)\n\t\t\tRunLog.info( \"Password entered\")\n\t\telif (i == 2):\n\t\t\tchild.sendline (\"yes\")\n\t\t\tRunLog.info( \"yes sent\")\n\t\telif (i == 1):\n\t\t\tif file_sent == True:\n\t\t\t\tchild.sendline (\"exit\")\n\t\t\t\tbreak\n\t\t\tchild.sendline (\"get \"+file_name)\n\t\t\tRunLog.info( \"get file successful\")\n\t\t\tfile_sent = True\n\t\telif (i == 4):\n\t\t\tcontinue\n\n\treturn file_get_contents( \"/tmp/mylog\")\n\t\ndef verify_daytrader_installation():\n\tif (sys.argv[1] == 'loadbalancer_setup'):\n\t\tips = front_endVM_ips\n\telif (sys.argv[1] == \"singleVM_setup\"): \n\t\tips = [\"127.0.0.1\"]\n\telse:\n\t\treturn 1\n\t\t\n\tRun(\"mkdir /tmp/verify_dtr/\")\n\tfor ip in ips:\n\t\tdtr_url = \"http://\"+ip+\":8080/daytrader\"\n\t\tRun(\"wget -t 2 -T 3 \"+dtr_url+\" -O /tmp/verify_dtr/\"+ip+\".html\")\n\toutput = Run(\"grep -irun 'DayTrader' /tmp/verify_dtr/ | wc -l\")\n\tRun(\"rm -rf /tmp/verify_dtr/\")\n\toutput = output.rstrip('\\n')\n\n\tif( int(output) == len(ips)):\n\t\tprint \"DTR_INSTALL_PASS\" \n\t\tRun(\"echo 'DTR_INSTALL_PASS' > dtr_test.txt\")\n\t\treturn 0\n\telse:\n\t\tprint \"DTR_INSTALL_FAIL\" \n\t\tRun(\"echo 'DTR_INSTALL_FAIL' > dtr_test.txt\")\n\t\treturn 1\n\t\ndef collect_logs():\n\tRun(\"mkdir logs\")\n\tRun(\"cp -f 
/tmp/*.log logs/\")\n\tRun(\"cp -f *.XML logs/\")\n\tif (sys.argv[1] == 'loadbalancer_setup'):\n\t\tfor ip in front_endVM_ips:\n\t\t\texec_cmd_remote_ssh(vm_username, vm_password, ip, \"mv Runtime.log \"+ip+\"-Runtime.log\")\n\t\t\tget_file_sftp(vm_username, vm_password, ip, ip+\"-Runtime.log\")\n\tRun(\"cp -f *.log logs/\")\n\tRun(\"cp -f dtr_test.txt logs/\")\n\tRun(\"tar -czvf logs.tar.gz logs/\")\n\t\n\t\ndef setup_Daytrader_E2ELoadBalance_backend(front_end_users):\n\t# Installing packages in Backend VM Role\n\tif (not install_packages_backend()):\n\t\tRunLog.error( \"Failed to install packages for Backend VM Role\")\n\tsetup_websphere()\n\tset_services_status(service_mysqld_name, \"start\")\n\trtrn = get_services_status(service_mysqld_name)\n\tif (rtrn != \"running\"):\n\t\tRunLog.error( \"Failed to start '\"+service_mysqld_name+\"'\")\n\t\tend_the_script()\n\n\t# Bind MySQL to 0.0.0.0 so the backend accepts connections from other IPs\n\tbind = Run(\"echo '\"+vm_password+\"' | sudo -S sed -i 's/^bind-address\\s*=\\s*127.0.0.1/bind-address = 0.0.0.0/' /etc/mysql/my.cnf | grep bind\")\n\tRun(\"echo '\"+vm_password+\"' | sudo -S service '\"+service_mysqld_name+\"' restart\")\n\t\n\t# Run mysql_secure_installation on all distros except Ubuntu\n\tif (current_distro != 'ubuntu'):\n\t\tmysql_secure_install(daytrader_db_root_password)\n\t\t\t\n\t# Creating database using mysql\n\tcreate_db(daytrader_db_name, daytrader_db_root_password)\n\t\n\t# Creating users to access database from mysql\n\tcreate_user_db(daytrader_db_name, daytrader_db_root_password, \"%\", daytrader_db_username, daytrader_db_password)\n\tRunLog.info( \"Keeping '\"+service_mysqld_name+\"' service in startup..\")\n\tRun (\"echo '\"+vm_password+\"' | sudo -S /sbin/chkconfig --add '\"+service_mysqld_name+\"'\")\n\tRun (\"echo '\"+vm_password+\"' | sudo -S /sbin/chkconfig '\"+service_mysqld_name+\"' on\")\n\tRunLog.info( \"Keeping '\"+service_mysqld_name+\"' service in startup..[done]\")\n\t\ndef setup_Daytrader_E2ELoadBalance_frontend():\n\t# Installing packages in Front-end VM Role's\n\tif (not install_packages_frontend()):\n\t\tRunLog.error( \"Failed to install packages for Frontend VM Role\")\n\t\tend_the_script()\n\n\tset_services_status(service_httpd_name, \"start\")\n\trtrn = get_services_status(service_httpd_name)\n\tif (rtrn != \"running\"):\n\t\tRunLog.error( \"Failed to start :\" + service_httpd_name)\n\t\tend_the_script()\n\tsetup_websphere()\n\tsetup_daytrader()\n\ndef setup_Daytrader_singleVM():\n\tif(install_packages_singleVM() == False):\n\t\tprint \"Abort\"\n\t\tend_the_script()\n\t# Start and verify the MySQL service\n\tset_services_status(service_mysqld_name, \"start\")\n\trtrn = get_services_status(service_mysqld_name)\n\tif (rtrn != \"running\"):\n\t\tRunLog.error( \"Failed to start '\"+service_mysqld_name+\"'\")\n\t\tend_the_script()\n\n\t# Bind MySQL to 0.0.0.0 so the backend accepts connections from other IPs\n\tbind = Run(\"echo '\"+vm_password+\"' | sudo -S sed -i 's/^bind-address\\s*=\\s*127.0.0.1/bind-address = 0.0.0.0/' /etc/mysql/my.cnf | grep bind\")\n\tRun(\"echo '\"+vm_password+\"' | sudo -S service '\"+service_mysqld_name+\"' restart\")\n\t\n\t# Run mysql_secure_installation on all distros except Ubuntu\n\tif (current_distro != 'ubuntu'):\n\t\tmysql_secure_install(daytrader_db_root_password)\n\t\t\t\n\t# Creating database using mysql\n\tcreate_db(daytrader_db_name, daytrader_db_root_password)\n\t\n\t# Creating users to access database from 
mysql\n\tcreate_user_db(daytrader_db_name, daytrader_db_root_password, daytrader_db_hostname, daytrader_db_username, daytrader_db_password)\n\tRunLog.info( \"Keeping '\"+service_mysqld_name+\"' service in startup..\")\n\tRun (\"echo '\"+vm_password+\"' | sudo -S /sbin/chkconfig --add '\"+service_mysqld_name+\"'\")\n\tRun (\"echo '\"+vm_password+\"' | sudo -S /sbin/chkconfig '\"+service_mysqld_name+\"' on\")\n\tRunLog.info( \"Keeping '\"+service_mysqld_name+\"' service in startup..[done]\")\n\n\tsetup_websphere()\n\tsetup_daytrader()\t\n\t#Keeping the server in the startup.\n\toutput = Run('cat '+startup_file+' | grep \"^exit\"')\n\tif \"exit\" in output:\n\t\tRunLog.info( output)\n\t\toutput = exec_multi_cmds_local_sudo((\"sed -i 's_^exit 0_sh /opt/IBM/WebSphere/AppServerCommunityEdition/bin/startup.sh\\\\nexit 0_' \"+startup_file,\"\\n\"))\n\t\tRun(\"echo '\"+vm_password+\"' | sudo -S chmod 755 '\"+startup_file+\"'\")\n\telse:\n\t\tRunLog.info( \"exit not found\")\n\t\texec_multi_cmds_local_sudo(('echo \"sh /opt/IBM/WebSphere/AppServerCommunityEdition/bin/startup.sh\" >> '+startup_file,'\\n'))\n\t\tRun(\"echo '\"+vm_password+\"' | sudo -S chmod 755 '\"+startup_file+\"'\")\n\ndef update_python_and_install_pexpect():\n\tpython_install_commands = (\t\"wget --no-check-certificate http://python.org/ftp/python/2.7.2/Python-2.7.2.tgz\", \\\n\t\"tar -zxvf Python-2.7.2.tgz\", \\\n\t\"cd Python-2.7.2\", \\\n\t\"./configure --prefix=/opt/python2.7 --enable-shared\", \\\n\t\"make\", \\\n\t\"make altinstall\", \\\n\t'echo \"/opt/python2.7/lib\" >> /etc/ld.so.conf.d/opt-python2.7.conf', \\\n\t\"ldconfig\", \\\n\t\"cd ..\", \\\n\t'if [ -f \"/opt/python2.7/bin/python2.7\" ];then ln -fs /opt/python2.7/bin/python2.7 /usr/bin/python ; fi'\n\t)\n\n\tpexpect_install_commands = (\"wget http://kaz.dl.sourceforge.net/project/pexpect/pexpect/Release%202.3/pexpect-2.3.tar.gz\", \\\n\t\"tar -xvf pexpect-2.3.tar.gz\", \\\n\t\"cd pexpect-2.3\", \\\n\t\"python setup.py install\")\n\n\tpckg_list = (\"readline-devel\", \"openssl-devel\", \"gmp-devel\", \"ncurses-devel\", \"gdbm-devel\", \"zlib-devel\", \"expat-devel\",\\\n\t\"libGL-devel\", \"tk\", \"tix\", \"gcc-c++\", \"libX11-devel\", \"glibc-devel\", \"bzip2\", \"tar\", \"tcl-devel\", \"tk-devel\", \\\n\t\"pkgconfig\", \"tix-devel\", \"bzip2-devel\", \"sqlite-devel\", \"autoconf\", \"db4-devel\", \"libffi-devel\", \"valgrind-devel\")\n\n\tRunLog.info( \"Installing packages to build python..\")\n\tfor pkg in pckg_list:\n\t\tinstall_package(pkg)\n\t\t\n\tRunLog.info( \"Installing packages ..[done]\")\n\t\t\n\tRunLog.info( \"Installing python 2.7.2\")\n\tRunLog.info( exec_multi_cmds_local_sudo(python_install_commands))\n\n\toutput = Run (\"python -V 2>&1\")\n\tif \"2.7.2\" not in output:\n\t\tRunLog.error( \"Installing python 2.7.2 .. [failed!]\\nAborting the script..\\n\")\n\t\tend_the_script()\n\telse:\n\t\tRunLog.info( \"Installing python 2.7.2 .. [done]\")\n\t\t\n\tRunLog.info( \"Installing pexpect from source..\")\n\n\texec_multi_cmds_local_sudo(pexpect_install_commands)\n\tRunLog.info( \"Installing pexpect from source.. 
[done]\")\n\ndef get_username_password_from_xml():\n\tglobal vm_username\n\tglobal vm_password\n\tif(not os.path.isfile(\"Daytrader_install.XML\")):\n\t\tRunLog.error(\"File not found Daytrader_install.XML\")\n\t\tend_the_script()\n\toutput = file_get_contents(\"Daytrader_install.XML\")\n\toutputlist = re.split(\"\\n\", output)\n\n\tfor line in outputlist:\n\t\tif \"</username>\" in line:\n\t\t\tmatchObj = re.match( r'<username>(.*)</username>', line, re.M|re.I)\n\t\t\tvm_username = matchObj.group(1)\n\t\telif \"</password>\" in line:\n\t\t\tmatchObj = re.match( r'<password>(.*)</password>', line, re.M|re.I)\n\t\t\tvm_password = matchObj.group(1)\n\ndef show_usage():\n\tprint \"Error: Invalid usage\"\n\tprint \"Usage: \\\"python \"+__file__+\" singleVM_setup\\\" for single VM Daytrader Setup\"\n\tprint \"Usage: \\\"python \"+__file__+\" loadbalancer_setup\\\" for locagbalanced Daytrader Setup\"\n\tprint \"Usage: \\\"python \"+__file__+\" frontend_setup <back end vm ip>\\\" frontend setup for locadbalanced Daytrader Setup\"\n\tend_the_script()\n\ndef RunTest():\n\tip = \"127.0.0.1\"\n\tglobal daytrader_db_hostname\n\tglobal front_endVM_ips\n\tfront_endVM_username = vm_username\n\tfront_endVM_password = vm_password\n\tfile_name = __file__\n\n\tif len(sys.argv) > 1 :\n\t\tif sys.argv[1] == 'loadbalancer_setup':\n\t\t\tif len(sys.argv) == 2 :\n\t\t\t\toutput = file_get_contents(\"Daytrader_install.XML\")\n\t\t\t\toutputlist = re.split(\"\\n\", output)\n\t\t\t\tfor line in outputlist:\n\t\t\t\t\tif \"</back_endVM_ip>\" in line:\n\t\t\t\t\t\tmatchObj = re.match( r'<back_endVM_ip>(.*)</back_endVM_ip>', line, re.M|re.I)\n\t\t\t\t\t\tback_endVM_ip = matchObj.group(1)\n\t\t\t\t\t\tdaytrader_db_hostname = back_endVM_ip\n\t\t\t\t\telif \"</front_endVM_ips>\" in line:\n\t\t\t\t\t\tmatchObj = re.match( r'<front_endVM_ips>(.*)</front_endVM_ips>', line, re.M|re.I)\n\t\t\t\t\t\tfront_endVM_ips = str.split(matchObj.group(1))\n\t\t\t\t\t\tRunLog.info( \"frontend ips : \")\n\t\t\t\t\telif \"</front_endVM_username>\" in line:\n\t\t\t\t\t\tmatchObj = re.match( r'<front_endVM_username>(.*)</front_endVM_username>', line, re.M|re.I)\n\t\t\t\t\t\tfront_endVM_username = matchObj.group(1)\n\t\t\t\t\telif \"</front_endVM_password>\" in line:\n\t\t\t\t\t\tmatchObj = re.match( r'<front_endVM_password>(.*)</front_endVM_password>', line, re.M|re.I)\n\t\t\t\t\t\tfront_endVM_password = matchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tRunLog.info( \"\\nStarting loadbalancer_setup\")\n\t\t\t\tRunLog.info( \"Starting backend VM setup\")\n\t\t\t\tsetup_Daytrader_E2ELoadBalance_backend(front_endVM_ips)\n\t\t\t\t\n\t\t\t\tfrontend_count = 1\t\t\n\t\t\t\tfor ip in front_endVM_ips:\n\t\t\t\t\tRunLog.info(\"**********************************************************\")\n\t\t\t\t\tRunLog.info(\"\\nConfiguring frontend\"+str(frontend_count)+\" at \"+ip+\":\\n\")\n\t\t\t\t\tRunLog.info( \"Copying \"+__file__+\" to \"+ip)\n\t\t\t\t\tRunLog.info( put_file_sftp(front_endVM_username, front_endVM_password, ip, __file__))\n\t\t\t\t\tRunLog.info( \"Copying \"+\"azuremodules.py\"+\" to \"+ip)\n\t\t\t\t\tRunLog.info( put_file_sftp(front_endVM_username, front_endVM_password, ip, \"azuremodules.py\"))\n\t\t\t\t\tRunLog.info(\"Copying Daytrader_install.XML to \"+ ip)\n\t\t\t\t\tRunLog.info(put_file_sftp(front_endVM_username, front_endVM_password, ip, \"Daytrader_install.XML\"))\n\t\t\t\t\tRunLog.info( \"Copying \"+\"IBMWebSphere.tar.gz\"+\" to \"+ip)\n\t\t\t\t\tRunLog.info( put_file_sftp(front_endVM_username, front_endVM_password, ip, 
\"IBMWebSphere.tar.gz\"))\n\t\t\t\t\tif(current_distro == 'sles'):\n\t\t\t\t\t\tRunLog.info( \"Copying \"+\"Python pexpect rpm\"+\" to \"+ip)\n\t\t\t\t\t\tRunLog.info( put_file_sftp(front_endVM_username, front_endVM_password, ip, \"python-pexpect-3.1-1.1.noarch.rpm\"))\n\t\t\t\t\telse:\n\t\t\t\t\t\tRunLog.info( \"Python pexpect is available in repository \")\n\t\t\t\t\tRunLog.info( exec_cmd_remote_ssh(front_endVM_username, front_endVM_password, ip, \"mv IBMWebSphere.tar.gz /tmp/IBMWebSphere.tar.gz\"))\n\t\t\t\t\tRunLog.info( \"\\nStarting frontend VM setup on \"+ip)\n\t\t\t\t\tRunLog.info( exec_cmd_remote_ssh(front_endVM_username, front_endVM_password, ip, \"python \"+file_name+\" frontend_setup \"+ back_endVM_ip))\n\t\t\t\t\tfrontend_count = frontend_count+1\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tshow_usage()\n\t\telif sys.argv[1] == 'frontend_setup':\n\t\t\tif len(sys.argv) == 3:\n\t\t\t\tdaytrader_db_hostname = sys.argv[2]\n\t\t\t\tsetup_Daytrader_E2ELoadBalance_frontend()\n\t\t\t\t#Keeping the server ins the startup and rebooting the VM.\n\t\t\t\toutput = Run('cat '+startup_file+' | grep \"^exit\"')\t\t\t\t\n\t\t\t\tif \"exit\" in output:\t\t\t\t\t\n\t\t\t\t\toutput = exec_multi_cmds_local_sudo((\"sed -i 's_^exit 0_sh /opt/IBM/WebSphere/AppServerCommunityEdition/bin/startup.sh\\\\nexit 0_' \"+startup_file,\"\\n\"))\t\t\t\t\t\n\t\t\t\t\tRun(\"echo '\"+vm_password+\"' | sudo -S chmod 755 '\"+startup_file+\"'\")\n\t\t\t\telse:\n\t\t\t\t\tRunLog.info( \"exit not found\")\n\t\t\t\t\texec_multi_cmds_local_sudo(('echo \"sh /opt/IBM/WebSphere/AppServerCommunityEdition/bin/startup.sh\" >> '+startup_file,'\\n'))\n\t\t\t\t\tRun(\"echo '\"+vm_password+\"' | sudo -S chmod 755 '\"+startup_file+\"'\")\n\t\t\t\tRunLog.info( \"Rebooting the frontend....\\n\")\n\t\t\t\tRunLog.info( exec_multi_cmds_local_sudo([\"reboot\"]))\n\t\t\telif len(sys.argv) < 3:\n\t\t\t\tprint \"Back end IP missing\"\n\t\t\t\tshow_usage()\n\t\t\telse:\n\t\t\t\tshow_usage()\n\t\telif sys.argv[1] == \"singleVM_setup\":\n\t\t\tif len(sys.argv) == 2 :\n\t\t\t\tRunLog.info( \"\\nStarting single VM setup\")\n\t\t\t\tsetup_Daytrader_singleVM()\n\t\t\telse:\n\t\t\t\tshow_usage()\n\t\telse:\n\t\t\tshow_usage()\n\telse:\n\t\tshow_usage()\n\n# Code execution Start from here\nget_username_password_from_xml()\nset_variables_OS_dependent()\nupdate_repos()\ndisable_selinux()\ndisable_iptables()\n\n#check for availability of pexpect module\ntry:\n\timp.find_module('pexpect')\n\timport pexpect\nexcept ImportError:\n\tRunLog.error( \"Unable to found pexpect module\")\n\tRunLog.info( \"Trying to install\")\n\tRunLog.info( \"pexpect_pkg_name: \" + pexpect_pkg_name)\n\tif(not install_package(pexpect_pkg_name)):\n\t\tRunLog.info( \"pexpect module could not be installed\")\n\t\tpythonversion = Run (\"echo '\"+vm_password+\"' | sudo -S python --version 2>&1\")\n\t\tif(pythonversion.find('2.7.*')):\n\t\t\tif((current_distro == \"sles\") and (distro_version == \"12\")):\n\t\t\t\tRunLog.info( \"Trying to install pexpect module using rpm package\")\n\t\t\t\tout = Run(\"echo '\"+vm_password+\"' | sudo -S rpm -ivh python-pexpect-3.1-1.1.noarch.rpm\")\t\t\t\t\t\t\t\t\n\t\t\t\tif(out.find('done')!= -1):\n\t\t\t\t\tRunLog.info( \" pexpect module rpm installation done..\")\n\t\t\t\telse:\n\t\t\t\t\tRunLog.info( \" pexpect module rpm installation failed..\")\n\t\t\t\t\tRunLog.info( \"Installing pexpect from source..\")\n\t\t\t\t\tupdate_python_and_install_pexpect()\n\t\t\t\t\tRunLog.info( \"\\n\\nInvoking the script with new python:....\")\n\t\t\t\t\tRunLog.info( 
Run(\"python \"+__file__+\" \"+' '.join(sys.argv[1:])))\n\t\t\t\t\tend_the_script()\n\t\t\telif(current_distro == 'rhel'):\n\t\t\t\teasy_install( module_name)\n\t\t\telse:\n\t\t\t\tRunLog.info( \"Installing pexpect from source..\")\n\t\t\t\tupdate_python_and_install_pexpect()\n\t\t\t\tRunLog.info( \"\\n\\nInvoking the script with new python:....\")\n\t\t\t\tRunLog.info( Run(\"python \"+__file__+\" \"+' '.join(sys.argv[1:])))\n\t\t\t\tend_the_script()\n\t\telse:\n\t\t\tRunLog.info( \"Installing pexpect from source..\")\n\t\t\tupdate_python_and_install_pexpect()\n\t\t\tRunLog.info( \"\\n\\nInvoking the script with new python:....\")\n\t\t\tRunLog.info( Run(\"python \"+__file__+\" \"+' '.join(sys.argv[1:])))\n\t\t\tend_the_script()\n\nimport pexpect\nRunTest()\n\nresult = verify_daytrader_instllation()\nif (sys.argv[1] != 'frontend_setup'):\n\tcollect_logs()\nexit(result)\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.6019944548606873,
"avg_line_length": 19.055696487426758,
"blob_id": "0941d28e5606cc8340f19bf82fdcdabecaea6c3c",
"content_id": "46d42a637cdc33edee239602e2168a91c395dc3f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 7922,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 395,
"path": "/remote-scripts/ICA_VMSetup.sh",
"repo_name": "Azure/azure-linux-automation",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n#########################################################\n# ICA_VMSetup.sh\n#\n#Description : This script installs packages required for ICA and\n# \t\t\t Configures test VHD for ICA test run\n# Author : Amit Pawar [ [email protected] ]\n\n##############################################################################\n\n#DEBUG_LEVEL=3\n\necho \"***Logs of ICA_VMSetup.sh***\" \nwithErrors=0\ndbgprint()\n{\n #if [ $1 -le $DEBUG_LEVEL ]; then\n #echo \"$1\"\n \n\t\techo \"$1\" \n\t\t\n #fi\n}\n\n\n\n\ndbgprint \"Preparing the VHD for ICA...\"\n\n\n\n#installing gcc. dos2unix, make and python\n if [ -e /etc/debian_version ]; then\n\n dbgprint \"Installing gcc..\"\n\t\t\n\t\techo yes|apt-get install gcc \n\t\tif [ \"$?\" = \"0\" ]; then \n\t\t\tdbgprint \"Install gcc : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install gcc : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\n dbgprint \"Installing make..\"\n\t\t\n\t\techo yes|apt-get install make\n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install make : SUCCESS\"\n\t\telse\n\t\t\tdbgprint \"Install make : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python..\"\n\t\t\n\t\techo yes|apt-get install python \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install python : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python-pyasn1..\" \n\t\t\n\t\techo yes|apt-get install python-pyasn1 \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install python-pyasn1 : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python-pyasn1 : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Installing iperf..\"\n\t\t\n\t\techo yes|apt-get install iperf \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install iperf : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install iperf : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Installing bind9 dnsutils..\"\n\t\t\n\t\techo yes|apt-get install bind9 dnsutils \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install bind9 dnsutils : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install bind9 dnsutils : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Removing NetworkManager..\"\n\t\t \n\t\t echo yes | aptitude purge network-manager \n\t\t \n\t\t if [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Remove Network Manager : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Remove Network Manager : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Updating the packages..\"\n\t\t\n\t\techo yes|apt-get update \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Update Packages : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Update Packages : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Upgrading the kernel..\"\n\t\t\n\t\techo yes|apt-get upgrade \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Upgrade Kernel : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Upgrade Kernel : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n fi\n\n if [ -e /etc/redhat-release ]; then\n\n dbgprint \"Installing gcc..\" \n\t\t\n\t\techo yes|yum install gcc \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install gcc : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install gcc : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\n dbgprint \"Installing make..\"\n\t\t\n\t\techo yes|yum install make \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install make : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install make : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python..\"\n\t\t\n\t\techo yes|yum install python \n\t\tif [ \"$?\" = \"0\" ]; 
then\n\t\t\tdbgprint \"Install python : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python-pyasn1..\"\n\t\t\n\t\techo yes|yum install python-pyasn1 \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install python-pyasn1 : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python-pyasn1 : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Installing iperf..\"\n\t\t\n\t\techo yes|yum install iperf \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install iperf : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install iperf : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Removing NetworkManager..\"\n\t\t\n\t\techo yes|yum remove NetworkManager \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Remove Network Manager : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Remove Network Manager : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Updating the packages..\"\n\t\t\n\t\techo yes|yum update \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Update Packages : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Update Packages : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Upgrading the kernel..\"\n\t\t\n\t\techo yes|yum upgrade \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Upgrade kernel : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Upgrade kernel : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\n fi\n\n if [ -e /etc/SuSE-release ]; then\n\n dbgprint \"Installing gcc..\"\n\t\t\n\t\tzypper --non-interactive install gcc \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install gcc : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install gcc : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\n dbgprint \"Installing make..\"\n\t\t\n\t\tzypper --non-interactive install make \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install make : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install make : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python..\"\n\t\t\n\t\tzypper --non-interactive install python \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install python : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Installing python-pyasn1..\"\n\t\t\n\t\tzypper --non-interactive install python-pyasn1 \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install python-pyasn1 : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install python-pyasn1 : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\t\tdbgprint \"Installing iperf..\"\n\t\t\n\t\tzypper --non-interactive install iperf \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install iperf : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install iperf : FAILED\"\n\t\t\twithErrors=1\n\t\t\t\n\t\tfi\n\t\t\n\t\tdbgprint \"Installing bind-utils..\"\n\t\t\n\t\tzypper --non-interactive install bind-utils \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Install bind-utils : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Install bind-utils : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\t\t\n\n dbgprint \"Removing NetworkManager..\"\n\t\t\n\t\tzypper --non-interactive remove NetworkManager \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Remove Network Manager : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Remove Network Manager : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n dbgprint \"Updating the packages..\"\n\t\t\n\t\tzypper --non-interactive update \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Update Packages : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Update Packages : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\t\tdbgprint \"Upgrading the 
kernel..\"\n\t\t\n\t\tzypper --non-interactive up \n\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\tdbgprint \"Upgrade kernel : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"Upgrade kernel : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\n\n fi\n\n\n#installing icadaemon, git and lcov\n \n\ttar -xmf icatest-0.1.tar.gz\n\tchmod 755 ./icatest-0.1/setup.py\n\tdbgprint \"Installing Icadaemon..\"\n \tcd ./icatest-0.1\n\tpython setup.py install \n\tif [ \"$?\" = \"0\" ]; then \n\t\tdbgprint \"Icadaemon installed : SUCCESS\"\n\telse \n\t\tdbgprint \"Icadaemon installed : FAILED\"\n\t\twithErrors=1\n\tfi\n\tcd ~\n \n \n\n\tif [ -e /etc/debian_version ]; then\n\t\n\t\tdbgprint \"Installing git..\"\n\t\techo yes|apt-get install git \n\t\tif [ \"$?\" = \"0\" ]; then \n\t\t\tdbgprint \"git installed : SUCCESS\"\n\t\telse \n\t\t\tdbgprint \"git installed : FAILED\"\n\t\t\twithErrors=1\n\t\tfi\n\telse\n\t\n\t dbgprint \"Installing git..\"\n\t\ttar -xmf git-1.7.10.tar.gz\n\t\tcd git-1.7.10 \n\t\t./configure \n\t\tmake \n\t\tif [ \"$?\" -ne \"0\" ]; then\n\t\t\tdbgprint \"git installed : FAILED\"\n\t\t\twithErrors=1\n\t\telse\n\t\t\tmake install \n\t\t\tif [ \"$?\" = \"0\" ]; then\n\t\t\t\tdbgprint \"git installed : SUCCESS\"\n\t\t\telse \n\t\t\t\tdbgprint \"git installed : FAILED\"\n\t\t\t\twithErrors=1\n\t\t\tfi\n \t\t\t\n\t\tfi\n\t\tcd ~\n\tfi\n\n \n \n\n \n dbgprint \"Installing Lcov..\"\n tar -xmzf lcov-1.9.tar.gz\n\tcd lcov-1.9\n\tmake install \n\tif [ \"$?\" = \"0\" ]; then\n\t\tdbgprint \"Lcov installed : SUCCESS\"\n\telse \n\t\tdbgprint \"Lcov installed : FAILED\"\n\t\twithErrors=1\n\tfi;\n cd ~\n \n\n\n #Enabling essential services\n if [ -e /etc/redhat-release ]; then\n chkconfig rpcbind on && chkconfig sshd on && chkconfig nfs on \n fi\n\n if [ -e /etc/SuSE-release ]; then \n /sbin/SuSEfirewall2 off \n fi\n \n\tif [ $withErrors -ne 0 ]; then\n\t\tdbgprint \"Failed to install some packages!!!\"\n\t\tdbgprint \"Exiting with Errors\"\n\t\texit 10\n\tfi\n"
},
{
"alpha_fraction": 0.7123414874076843,
"alphanum_fraction": 0.7274630665779114,
"avg_line_length": 45.470272064208984,
"blob_id": "72838dbbd1dfd1db5fa08605aebf948e940c856f",
"content_id": "2fe7f87cf574b9b19f8842274a98ab66b3ecdb4e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8619,
"license_type": "permissive",
"max_line_length": 597,
"num_lines": 185,
"path": "/README.md",
"repo_name": "Azure/azure-linux-automation",
"src_encoding": "UTF-8",
"text": "This repository is deprecated, we're migrating to https://github.com/LIS/LISAv2.\n\n# azure-linux-automation\nAutomation tools for testing Linux images on Microsoft Azure\n## Overview\nAzure automation is the project for primarily running the Test Suite in the Windows Azure environment to test the Linux Agent for Windows Azure. Azure automation project is a collection of PowerShell, BASH and python scripts. The test ensures the functionality of Windows Azure Linux Agent and Windows Azure support for different Linux distributions. This test suite focuses on the Build Verification Tests (BVTs), Azure VNET Tests and Network tests. The test environment is composed of a Windows Machine (With Azure PowerShell SDK) and the Virtual Machines on Azure that perform the actual tests.\n## <a id=\"prepare\"></a>Prepare Your Machine for Automation Cycle\n### Prerequisite\n1. You must have a Windows Machine with PowerShell. Tested Platforms:\n\n a. Windows 7x64\n b. Windows 8x64\n c. Server 2008\n d. Server 2012\n e. Server 2012 R2\n \n2. You must be connected to Internet.\n3. You must have a valid Windows Azure Subscription.\n\n a. Subscription Name\n b. Subscription ID\n \n### Download Latest Automation Code\n1. Checkout from https://github.com/Azure/azure-linux-automation.git\n\n### Download Latest Azure PowerShell\n1.\tDownload Web Platform Installer from : http://go.microsoft.com/fwlink/p/?linkid=320376&clcid=0x409 \n2.\tStart Web Platform Installer and select Azure PowerShell and proceed for Azure PowerShell Installation.\n\n### Authenticate Your Machine with Your Azure Subscription\nThere are two ways to authenticate your machine with your subscription.\n\n1.\tAzure AD method\n\n This creates a 12 Hours temporary session in PowerShell, in that session, you are allowed to run Windows Azure Cmdlets to control / use your subscription. After 12 hours you will be asked to enter username and password of your subscription. This may create problems long running automations, hence we use certificate method.\n\n2.\tCertificate Method.\n\n To learn more about how to configure your PowerShell with your subscription, please visit [here](http://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/#Connect).\n\n### Download Public Utilities\nDownload Putty executables from http://www.putty.org and keep them in `.\\automation_root_folder\\tools`. You should have the following utilities:\n\n •\tplink.exe\n •\tpscp.exe\n •\tputty.exe\n •\tputtygen.exe\n\nDownload dos2unix executables from http://sourceforge.net/projects/dos2unix/ and keep them in `.\\automation_root_folder\\tools`. You should have the following utilities:\n\n •\tdos2unix.exe\n\t\t\nDownload 7-zip executable from http://www.7-zip.org/ ( Direct Download Link : http://www.7-zip.org/a/7za920.zip ) and keep them in `.\\automation_root_folder\\tools`. You should have the following utility:\n\n •\t7za.exe\n\t\t\n### Update Azure_ICA_all.xml file\n1.\tSetup Subscription details.\n\n Go to Config > Azure > General and update following fields :\n\n a.\tSubscriptionID\n b.\tSubscriptionName\n c.\tCertificateThumbprint (Make sure you have installed a management certificate and can access it via the Azure Management Portal (SETTINGS->MANAGEMENT CERTIFICATES). )\n d.\tStorageAccount\n e.\tLocation\n f.\tAffinityGroup (Make sure that you either use <Location> or <AffinityGroup>. 
That is, if you want to use Location, then AffinityGroup should be blank and vice versa. )\n\n Example :\n ```xml\n <General>\n <SubscriptionID>Your Subscription ID</SubscriptionID>\n <SubscriptionName>Your Subscription Name</SubscriptionName>\n <CertificateThumbprint>Certificate associated with your subscription</CertificateThumbprint>\n <ManagementEndpoint>https://management.core.windows.net</ManagementEndpoint>\n <StorageAccount>your current storage account</StorageAccount>\n <Location>Your preferred location</Location>\n <AffinityGroup></AffinityGroup>\n </General>\n ```\n \n2.\tAdd VHD details in the XML File.\n \n Go to Config > Azure > Deployment > Data. Make sure that your \"VHD under test\" is present in one of the <Distro>..</Distro> entries. If your VHD is not listed here, create a new Distro element and add your VHD details.\n\n Example:\n ```xml\n <Distro>\n <Name>Distro_Name</Name>\n <OsImage>Distro_OS_Image_Name_As_Appearing_under_Azure_OS_Images</OsImage>\n </Distro>\n ```\n \n3. Save the file.\n\n### Prepare VHD to work in Azure\n`Applicable if you are uploading your own VHD with Linux OS to Azure.`\n\nA VHD with a Linux OS must be made compatible to work in the Azure environment. This includes –\n\n 1.\tInstallation of Linux Integration Services on the Linux VM (if not already present)\n 2.\tInstallation of the Windows Azure Linux Agent on the Linux VM (if not already installed)\n 3.\tInstallation of the minimum required packages. (Applicable if you want to run Tests using the Automation code)\n\nPlease follow the steps mentioned at: \nhttp://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-create-upload-vhd/\n\n### Prepare VHD to work with Automation code.\n`Applicable if you are using an already uploaded VHD / Platform Image to run automation.`\n\nTo run the automation code successfully, you need to have the following packages installed in your Linux VHD.\n\n 1.\tiperf\n 2.\tmysql-server\n 3.\tmysql-client\n 4.\tgcc\n 5.\tgcc-c++\n 6.\tbind\n 7.\tbind-utils\n 8.\tbind9\n 9.\tpython\n 10.\tpython-pyasn1\n 11.\tpython-argparse\n 12.\tpython-crypto\n 13.\tpython-paramiko\n 14.\tlibstdc++6\n 15.\tpsmisc\n 16.\tnfs-utils\n 17.\tnfs-common\n 18.\ttcpdump\n\n### Create SSH Key Pair\n`PublicKey.cer – PrivateKey.ppk`\n\nA Linux Virtual machine login can be done with Password authentication or SSH key pair authentication. You must create a Public Key and Private Key to run the automation successfully. To learn more about how to create an SSH key pair, please visit [here](http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-use-ssh-key/).\n\nAfter creating the Public Key (.cer) and a putty compatible private key (.ppk), you must put them in your `automation_root_folder\\ssh\\` folder and mention their names in the Azure XML file.\n\n### VNET Preparation\n`Required for executing Virtual Network Tests`\n\n#### Create a Virtual Network in Azure\nA virtual network should be created and connected to the Customer Network before running VNET test cases. To learn about how to create a virtual network on Azure, please visit [here](https://azure.microsoft.com/documentation/articles/vpn-gateway-site-to-site-create/).\n\n#### Create A customer site using RRAS\nApart from the Virtual Network in Azure, you also need a network (composed of Subnets and a DNS server) to work as the Customer Network. If you don’t have a separate network to run VNET, you can create a virtual customer network using RRAS. 
To learn more, please visit [here](https://msdn.microsoft.com/en-us/library/dn636917.aspx).\n\n## How to Start Automation\nBefore starting Automation, make sure that you have completed the steps in the chapter [Prepare Your Machine for Automation Cycle](#prepare)\n\n 1.\tStart PowerShell with Administrator privileges\n 2.\tNavigate to the folder where the automation code exists\n 3.\tIssue the automation command\n\n#### Automation Cycles Available\n 1.\tBVT\n 2.\tNETWORK\n 3.\tVNET\n 4.\tE2E-1\n 5. E2E-DISK\n 6.\tE2E-TIMESYNC\n 7.\tE2E-TIMESYNC-KERNBANCH\n 8.\tWORDPRESS1VM\n 9.\tWORDPRESS4VM\n 10.\tDAYTRADER1VM\n 11.\tDAYTRADER4VM\n 12. NETPERF\n 13. IOPERF-RAID\n 14. IOPERF-LVM\n\n#### Supported Azure Modes\n 1. AzureServiceManagement, if the value is present in the SupportedExecutionModes tag of the case definition\n 2. AzureResourceManager, if the value is present in the SupportedExecutionModes tag of the case definition\n \n#### Command to Start any of the Automation Cycles\nRun the test in ASM mode\n\n .\\AzureAutomationManager.ps1 -xmlConfigFile .\\Azure_ICA_ALL.xml -runtests -email -Distro <DistroName> -cycleName <TestCycleToExecute> \n \nRun the test in ARM mode\n\n .\\AzureAutomationManager.ps1 -xmlConfigFile .\\Azure_ICA_ALL.xml -runtests -email -Distro <DistroName> -cycleName <TestCycleToExecute> -UseAzureResourceManager\n\n#### More Information\nFor more details, please refer to the documents [here](https://github.com/Azure/azure-linux-automation/tree/master/Documentation).\n"
},
{
"alpha_fraction": 0.6722102761268616,
"alphanum_fraction": 0.6805257797241211,
"avg_line_length": 34.846153259277344,
"blob_id": "e4bc08206e0be6bfb942f45e5ef9a923bb080989",
"content_id": "3315fccb55ad69fe47788ab91d33c4346bf27869",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3728,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 104,
"path": "/remote-scripts/mariadb_perf_server_conf.sh",
"repo_name": "Azure/azure-linux-automation",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# This script does the following:\n# 1. Prepares the RAID with all the data disks attached \n# 2. Places an entry for the created RAID in /etc/fstab.\n# 3. Configures mariadb server for sysbench performance test.\n# Usage:-\n# nohup bash mariadb_perf_server_conf.sh vm_loginuser &\n#\n# Author: Srikanth Myakam\n# Email\t: [email protected]\n#\n###########################################################################\n\nif [[ $# == 1 ]]\nthen\n\tusername=$1\nelse\n\techo \"Usage: bash $0 <username>\"\n\texit -1\nfi\ncode_path=\"/home/$username/code\"\n. $code_path/azuremodules.sh\n\nfunction install_mysql_server () \n{\n\tif [[ `detect_linux_ditribution` == \"ubuntu\" ]]\n\tthen\n\t\tmariadb_passwd=$1\n\t\texport DEBIAN_FRONTEND=noninteractive\n\t\techo mysql-server mysql-server/root_password select $mariadb_passwd | debconf-set-selections\n\t\techo mysql-server mysql-server/root_password_again select $mariadb_passwd| debconf-set-selections\n\t\tapt-get install -y --force-yes mysql-server\n\t\tcheck_exit_status \"Installation of mysql-server\" exit\n\telse\n\t\tinstall_package mariadb-server\n\t\tcheck_exit_status \"Installation of mariadb\" exit\n\tfi\n}\n\nformat=\"ext4\"\nmountdir=\"/dataIOtest\"\ndeviceName=\"/dev/md1\"\nLOGFILE=\"${code_path}/mariadb_perftest.log.txt\"\nmariadb_passwd=\"mariadb_passwd\"\nperf_db=\"iperf_db\"\nmysql_cnf_file=\"\"\n\n\ninstall_mysql_server $mariadb_passwd\n\nif [[ -f /etc/mysql/mariadb.conf.d/mysqld.cnf ]]; then\n\tmysql_cnf_file=/etc/mysql/mariadb.conf.d/mysqld.cnf\nelif [[ -f /etc/mysql/mysql.conf.d/mysqld.cnf ]]; then\n\tmysql_cnf_file=/etc/mysql/mysql.conf.d/mysqld.cnf\nelif [[ -f /etc/mysql/my.cnf ]]; then\n\tmysql_cnf_file=/etc/mysql/my.cnf\nelif [[ -f /etc/my.cnf ]]; then\n\tmysql_cnf_file=/etc/my.cnf \nelse\n\techo \"Cannnot find mariadb configuration file check the installation\"\n\texit -1\nfi\n\necho \"IO test setup started..\" > $LOGFILE\n\n# Verify if there are any unsed disks and creat raid using them and move the db folder to there\nlist=(`fdisk -l | grep 'Disk.*/dev/sd[a-z]' |awk '{print $2}' | sed s/://| sort| grep -v \"/dev/sd[ab]$\" `)\n\nif [[ ${#list[@]} -gt 0 ]]\nthen\n\tcreate_raid_and_mount $deviceName $mountdir $format >> $LOGFILE\n\tdf -hT >> $LOGFILE\n\techo \"## Configuring mariadb\" >> $LOGFILE\n\tmysql_datadir=\"$mountdir/mysql\"\n\tmkdir $mysql_datadir\n\tchmod 755 -R $mysql_datadir\n\tcp -rf /var/lib/mysql/* $mysql_datadir\n#\tsed -i \"s#datadir.*#datadir = $mysql_datadir#\" $mysql_cnf_file\n\tsed -i \"s/\\(.(*datadir.*\\)/#\\1/\" $mysql_cnf_file\n\techo \"datadir = $mysql_datadir\" >> $mysql_cnf_file\nfi\nsed -i \"s/\\(.*bind-address.*\\)/#\\1/\" $mysql_cnf_file\nsed -i \"s/\\(.*max_connections.*\\)/#\\1/\" $mysql_cnf_file\necho \"bind-address = 0.0.0.0\" >> $mysql_cnf_file\necho \"max_connections = 1024\" >> $mysql_cnf_file\n\nservice mysql restart\nservice mariadb restart\n\n#echo \"Mysql secure installation started\" >> $LOGFILE\n#echo -e \"\\ny\\ny\\n$mariadb_passwd\\n$mariadb_passwd\\ny\\ny\\ny\\ny\" |/usr/bin/mysql_secure_installation >> $LOGFILE\n#echo -e \"\\ny\\n$mariadb_passwd\\n$mariadb_passwd\\ny\\nn\\ny\\ny\" |/usr/bin/mysql_secure_installation >> $LOGFILE\n#check_exit_status \"/usr/bin/mysql_secure_installation\" >> $LOGFILE \nmysql -u root -p$mariadb_passwd -e \"GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$mariadb_passwd' WITH GRANT OPTION;\" >> $LOGFILE\ncheck_exit_status \"Enabling mysql remote access\" >> $LOGFILE\nmysql -u root -p$mariadb_passwd -e \"DROP DATABASE 
$perf_db;\" >> $LOGFILE\nmysql -u root -p$mariadb_passwd -e \"CREATE DATABASE $perf_db;\" >> $LOGFILE\nmysql -u root -p$mariadb_passwd -e \"SET GLOBAL max_connections = 5000;\" >> $LOGFILE\nmysql -u root -p$mariadb_passwd -e \"FLUSH PRIVILEGES;\" >> $LOGFILE\ncheck_exit_status \"Created database for performance\" >> $LOGFILE\nservice mysql restart\nservice mariadb restart\necho \"done\"\n"
}
] | 4 |
UMLLabs/DeepDrunk | https://github.com/UMLLabs/DeepDrunk | ebf436fceddf2db7891795c3604d2e53679c0575 | 15a608c60d45d96c2ce8aefe5b614f03d2eccf72 | 1b6e9b835f14f7e777b16c35bc54f3adaa02c169 | refs/heads/master | 2021-01-01T05:23:43.004405 | 2016-04-20T23:52:39 | 2016-04-20T23:52:39 | 56,030,071 | 0 | 2 | null | 2016-04-12T04:09:41 | 2016-04-14T00:50:36 | 2016-04-14T06:14:59 | Python | [
{
"alpha_fraction": 0.6371363401412964,
"alphanum_fraction": 0.6472703218460083,
"avg_line_length": 28.990196228027344,
"blob_id": "78ac9db40bb93f2b285448bb58e5db9a4a04980c",
"content_id": "5a02a35e747ecc9e427b24cc106b8850890576ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3059,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 102,
"path": "/tweepy1.py",
"repo_name": "UMLLabs/DeepDrunk",
"src_encoding": "UTF-8",
"text": "import tweepy\nfrom tweepy import OAuthHandler\nimport json\nimport re\nimport csv\nimport getopt\nimport sys\n\ndef main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], '', ['handle=', 'api_key_file='])\n except getopt.GetoptError as error:\n print error\n sys.exit(2)\n handle = None\n api_key_file = None\n\n for opt, arg in opts:\n if opt == '--handle':\n handle = arg\n elif opt == '--api_key_file':\n api_key_file = arg\n else:\n print \"Option {} is not valid!\".format(opt)\n\n api_file = open(api_key_file)\n\n lines = api_file.readlines()\n consumer_key = None\n consumer_secret = None\n access_token = None\n access_secret = None\n\n for line in lines:\n split_line = line.split('=')\n\n if split_line[0].strip() == 'consumer_key':\n consumer_key = split_line[1].strip()\n elif split_line[0].strip() == 'consumer_secret':\n consumer_secret = split_line[1].strip()\n elif split_line[0].strip() == 'access_token':\n access_token = split_line[1].strip()\n elif split_line[0].strip() == 'access_secret':\n access_secret = split_line[1].strip()\n\n get_tweets(handle, consumer_key, consumer_secret, access_token, access_secret)\ndef process(tweet):\n text = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", tweet)\n return text\n\ndef get_tweets(screen_name, consumer_key, consumer_secret, access_token, access_secret):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n\n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n\n #initialize a list to hold all the tweepy Tweets\n alltweets = []\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name,count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n print \"getting tweets before %s\" % (oldest)\n\n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n print \"...%s tweets downloaded so far\" % (len(alltweets))\n\n #transform the tweepy tweets into a 2D array that will populate the csv\n filtered = []\n for tweet in alltweets:\n if not tweet.text.encode(\"utf-8\").startswith(\"RT\"):\n filtered.append(tweet)\n\n outtweets = [process(tweet.text.encode(\"utf-8\")) for tweet in filtered]\n\n #write the csv\n save_tweets = open(screen_name+\"_tweets.txt\", 'w')\n for tweet in outtweets:\n save_tweets.write(tweet+\"\\n\")\n\n save_tweets.close()\n\nif __name__ == '__main__':\n main()\n"
},
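The manual `max_id` loop in `tweepy1.py` above is exactly the bookkeeping that tweepy's own pagination helper automates. A minimal sketch, assuming an authenticated `api` object built as in the script; `tweepy.Cursor` is the library's standard cursor API, and Twitter's ~3,200-tweet timeline cap still applies.

```python
import tweepy

def get_all_tweets(api, screen_name):
    # Cursor handles the max_id bookkeeping that the while-loop above does by hand
    tweets = []
    for status in tweepy.Cursor(api.user_timeline,
                                screen_name=screen_name, count=200).items():
        if not status.text.startswith("RT"):  # same retweet filter as the script
            tweets.append(status.text.encode("utf-8"))
    return tweets
```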
{
"alpha_fraction": 0.649350643157959,
"alphanum_fraction": 0.6883116960525513,
"avg_line_length": 18.25,
"blob_id": "95a104278460c79d15b37684f0f875a225081709",
"content_id": "2998593536896c8a20040691d2b9a55583c207b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 77,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 4,
"path": "/Model/RunExperiment.lua",
"repo_name": "UMLLabs/DeepDrunk",
"src_encoding": "UTF-8",
"text": "require 'torch'\n\ncmd = torch.CmdLine()\ncmd:option('word_vector_size', 200, )\n"
},
{
"alpha_fraction": 0.7853185534477234,
"alphanum_fraction": 0.7880886197090149,
"avg_line_length": 39.11111068725586,
"blob_id": "0f9b834f189fe23a8bf4a2fdd3ec52db0e69aa35",
"content_id": "b07f7ee793f6f45d33e7b247107fdf811a8d6f97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1444,
"license_type": "permissive",
"max_line_length": 254,
"num_lines": 36,
"path": "/README.md",
"repo_name": "UMLLabs/DeepDrunk",
"src_encoding": "UTF-8",
"text": "# Deep Drunk\n\nDeep Drunk is a project spearheaded by UML Labs at the University of Texas at Austin. Our goals is develop a machine learning application to determine whether or not a person is intoxicated by tweets, and eventually expand it to images and text messages.\n\n### To Do\n\n - Get data\n - Prepare data\n - Write code\n - Make it work\n - Solve chess\n - Solve Go\n\nLicense\n----\nThe MIT License\n\nCopyright (c) 2016 UML\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
},
{
"alpha_fraction": 0.6781609058380127,
"alphanum_fraction": 0.69786536693573,
"avg_line_length": 21.55555534362793,
"blob_id": "8d402bf1ab990e74c314a6c8ee83a339ac81a5fc",
"content_id": "c90a3e4037b69442a12c95ed98b27b868ca62c96",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1218,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 54,
"path": "/Model/Model.lua",
"repo_name": "UMLLabs/DeepDrunk",
"src_encoding": "UTF-8",
"text": "require 'nngraph'\nrequire 'nn'\nrequire 'table'\n\n-- Relevant papers\n-- Convolutional Neural Networks for Sentence Classification\n-- https://arxiv.org/pdf/1408.5882v2\n\n\nfunction test_model()\n model = nn.Sequential()\n\nend\n\n\n\nfunction build_model(word_vector_size, filter_sizes, nn_layer_sizes, dropout_rate)\n filter_sizes_length = table.getn(filter_sizes)\n nn_layer_sizes_length = table.getn(nn_layer_sizes)\n\n filter_table = nn.ParallelTable()\n\n\n for filter_size_i=1, filter_sizes_length do\n filter_size = filter_sizes[filter_size_i]\n\n filter = nn.Sequential()\n filter:add(nn.SpatialConvolution(1, 1, word_vector_size, filter_size))\n filter:add(nn.ReLU())\n filter:add(nn.SpatialMaxPooling(word_vector_size, filter_size))\n filter:add(nn.Reshape(1, filter_size * ))\n\n filter_table:add(filter)\n end\n\n filter_outputs = nn.JoinTable(filter_table)\n\n\n for nn_layer_size_i=1, nn_layer_sizes_length do\n layer_size = nn_layer_sizes[nn_layer_size_i]\n\n model:add(nn.Dropout(dropout_rate))\n model:add(nn.Linear(layer_size))\n model:add(nn.ReLU())\n end\n\n model:add(nn.Linear())\n model:add(nn.SoftMax())\n\n return model\nend\n\n--model = build_model(100, {2,3,4}, {100}, .5)\nmodel = test_model()\n"
}
] | 4 |
sridharRavi/CheebsRepo | https://github.com/sridharRavi/CheebsRepo | 74903ebceda29aabd75534487f710aa7d742526e | 3897efff98b50836ad5723f058761b1d96b72ac2 | dd51886f95489566d56c02f9335609c1b9db56c1 | refs/heads/master | 2020-06-10T04:19:43.051134 | 2018-01-13T05:48:20 | 2018-01-13T05:48:20 | 76,088,810 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7476939558982849,
"alphanum_fraction": 0.7590884566307068,
"avg_line_length": 61.620689392089844,
"blob_id": "f58b6fd11b9f8b7a0a05d448cfc10b2da00e4a8e",
"content_id": "f78a653363a0caf29a0ec49fa1332a447bdfb2f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1843,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 29,
"path": "/pyMailSender.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import smtplib;\r\n#Please turn on your internet Connection during this progam\r\nprint \"Your Email server must be in the form of smtp.email_service.com\";\r\nservices=['smtp.gmail.com','smtp.mail.yahoo.com','smtp.mail.outlook.com','smtp.mail.att.net','smtp.comcast.net','smtp.verizon.net'];\r\nprint \"various email services include\";\r\nprint services;\r\nmy_Email_Server=raw_input(\"enter your corresponding email server\");\r\nsmtp_values={'smtp.gmail.com':'587','smtp.mail.yahoo.com':'587','smtp.mail.outlook.com':'587','smtp.mail.att.net':'465','smtp.comcast.net':'587','smtp.verizon.net':'587'};\r\nsmtpObj=smtplib.SMTP(my_Email_Server,int(smtp_values[my_Email_Server]));\r\n#if a problem arises over the previous function,Then use SSL with port number 465\r\n#smtObj=smtplib.SMTP_SSL('server',port_num);\r\n#This oddly named function is used to initiate a 'conversation' with your smtp server;\r\nsmtpObj.ehlo();\r\n#The TLS encryption gets started!!\r\nsmtpObj.starttls();\r\nprint \"carefully Enter your email and password please\"\r\nemail_address=raw_input(\"enter your mail address\");\r\nmy_password=raw_input(\"enter your password\");\r\n#NEVER LEAVE YOUR PASSWORD IN AN UNENCRYPTED FILE LIKE THIS OR LEAVE IT IN A COMMENT!!\r\nsmtpObj.login(email_address,my_password);\r\n# if you are using a gmail account,an SMTP.AuthenticationError might occur.Log into your browser and change the settings to allow access to less secure apps\r\n#visit this page 'https://www.google.com/settings/security/lesssecureapps' to turn of your settings\r\n#other email services please visit the corresponding mail support page!!\r\nrecv_address=raw_input(\"enter the recipient's address\");\r\nprint \"The syntax should be 'subject:content' followed by '\\n'\";\r\ncontent=raw_input(\"enter the message you want to send!!\");\r\nsmtpObj.sendmail(email_address,recv_address,content);\r\nsmtpObj.quit();\r\n#Coded by Sridhar Cheebu"
},
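`pyMailSender.py` relies on the user typing raw SMTP header syntax ("subject:content") to get a Subject line. A sketch of the safer route using the standard library's `email.mime.text.MIMEText`; the function name is illustrative, and the variables follow the script above.

```python
from email.mime.text import MIMEText

def build_message(sender, recipient, subject, body):
    # MIMEText emits proper Subject/From/To headers, so the user
    # no longer needs to know the raw header format
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    return msg.as_string()

# with an already logged-in smtplib.SMTP object from the script:
# smtpObj.sendmail(email_address, recv_address,
#                  build_message(email_address, recv_address, "Hi", "Message body"))
```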
{
"alpha_fraction": 0.4591355621814728,
"alphanum_fraction": 0.478192538022995,
"avg_line_length": 21.674419403076172,
"blob_id": "1c7bbc98a0d523ff5fd860740b8f232be046ed1c",
"content_id": "a2e0444371d7b11233ec15eb9462738022cff351",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5090,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 215,
"path": "/C++ MiniApps/Stringent/stringManip.cpp",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#include<iostream>\r\n#include<cstdlib>\r\n#include<string>\r\n int countVowels(char* str)\r\n {\r\n int countVowels=0;\r\n for(int i=0;str[i]!='\\0';i++)\r\n {\r\n if(str[i]=='a' || str[i]=='e' || str[i]=='i'|| str[i]=='o'|| str[i]=='u'|| str[i]=='A'|| str[i]=='E'|| str[i]=='I'|| str[i]=='O'|| str[i]=='U')\r\n {\r\n countVowels+=1;\r\n }\r\n }\r\n return countVowels;\r\n\r\n }\r\nint getLength(char* str)\r\n {\r\n int length=0;\r\nfor(int i=0;str[i]!='\\0';i++)\r\n {\r\n length+=1;\r\n }\r\n return length;\r\n }\r\n bool StringCompare(char* string1,char* string2)\r\n {\r\n int len1=getLength(string1);\r\n int len2=getLength(string2);\r\n int count=0;\r\n if(len1==len2)\r\n {\r\n for(int i=0;i<len1;i++)\r\n {\r\n if(string1[i]==string2[i])\r\n {\r\n count+=1;\r\n }\r\n else\r\n {\r\n return false;\r\n }\r\n }\r\n if(count==len1)\r\n {\r\n return true;\r\n }\r\n }\r\n else\r\n {\r\n return false;\r\n }\r\n\r\n }\r\n char* makeUpperCase(char* str)\r\n {\r\n int len=getLength(str);\r\n for(int i=0;i<len;i++)\r\n {\r\n if(str[i]>=97 && str[i]<=122)\r\n\t {\r\n\t\tstr[i]=str[i]-32;\r\n\t }\r\n }\r\n return str;\r\n }\r\n char* makeLowerCase(char *str)\r\n {\r\n int len=getLength(str);\r\n for(int i=0;i<len;i++)\r\n {\r\n if(str[i]>=65 && str[i]<=90)\r\n\t {\r\n\t\tstr[i]=str[i]+32;\r\n\t }\r\n }\r\n\r\nreturn str;\r\n\r\n }\r\nint countConsonants(char* str)\r\n {\r\n int vcount=countVowels(str);\r\n return (getLength(str)-vcount);\r\n }\r\n\r\n\r\n char* reverseString(char* str)\r\n {\r\nint len=getLength(str);\r\nchar* revStr=new char[100];\r\nint len2=getLength(revStr);\r\nint countit=0;\r\n for(int i=len-1;i>=0;i--)\r\n {\r\n revStr[countit]=str[i];\r\n countit+=1;\r\n }\r\nreturn revStr;\r\n}\r\n\r\n bool checkPalindrome(char* str)\r\n {\r\nint len=getLength(str);\r\nint count=0,i,j;\r\nfor(i=0,j=len-1;i<=j;i++,j--)\r\n{\r\n if(str[i]==str[j])\r\n {\r\n count+=1;\r\n }\r\n}\r\nif(count==len)\r\n{\r\n return true;\r\n}\r\nelse\r\n{\r\n return false;\r\n}\r\n}\r\nchar* makeConcatenation(char* str,char* newStr)\r\n{\r\n char* finStr;\r\n int len1=getLength(str);\r\n int len2=getLength(newStr);\r\n int newLen=len1+len2;\r\n finStr=new char[newLen];\r\n int count=0;\r\n for(int i=0;i<len1;i++)\r\n {\r\n finStr[i]=str[i];\r\n }\r\n for(int i=len1;i<newLen;i++)\r\n {\r\n finStr[i]=newStr[count];\r\n count+=1;\r\n }\r\n return finStr;\r\n}\r\n\r\nint main()\r\n{\r\n char *revStr,*String,*newStr;\r\n int ch=0;\r\n int len=0;\r\n bool isString=false;\r\n String =new char[100];\r\n revStr=new char[100];\r\n std::cout<<\"Enter the string\";\r\n std::cin>>String;\r\n std::cout<<\"______________STRINGENT______________\"<<std::endl;\r\n std::cout<<\"--------A String Manipulation Library-----\"<<std::endl;\r\n std::cout<<\"1. Count the Vowels\"<<std::endl;\r\n std::cout<<\"2. Get String Length \"<<std::endl;\r\n std::cout<<\"3. Get the reverse of the String\"<<std::endl;\r\n std::cout<<\"4. Check Palindrome\"<<std::endl;\r\n std::cout<<\"5. Count the consonants\"<<std::endl;\r\n std::cout<<\"6. Convert to UpperCase\"<<std::endl;\r\n std::cout<<\"7. Concatenate String \"<<std::endl;\r\n std::cout<<\"8. Compare tow strings\"<<std::endl;\r\n std::cout<<\"9. Convert to LowerCase\"<<std::endl;\r\n std::cout<<\"10. 
Exit\"<<std::endl;\r\n std::cout<<\"_____________________________________\"<<std::endl;\r\n std::cout<<\"Enter your choice\"<<std::endl;\r\n std::cin>>ch;\r\n while(ch!=10)\r\n {\r\n switch(ch)\r\n {\r\n case 1:\r\n std::cout<<\"The No Of Vowels is: \"<<countVowels(String)<<std::endl;\r\n break;\r\n case 2:\r\n len=getLength(String);\r\n std::cout<<\"The length is \"<<len<<std::endl;\r\n break;\r\n case 3:\r\n std::cout<<reverseString(String)<<std::endl;\r\n break;\r\n case 4:\r\n isString=checkPalindrome(String);\r\n std::cout<<\"The String is a Palindrome? 1. True 0. False \"<<isString<<std::endl;\r\n break;\r\n case 5:\r\n std::cout<<\"The No Of Consonants is: \"<<countConsonants(String)<<std::endl;\r\n break;\r\n case 6:\r\n std::cout<<makeUpperCase(String);\r\n break;\r\n case 7:\r\n std::cout<<\"Enter the string to be concatenated\";\r\n std::cin>>newStr;\r\n String=makeConcatenation(String,newStr);\r\n std::cout<<\"The concatenated string is\"<<String<<std::endl;\r\n break;\r\n case 8:\r\n std::cout<<\"Enter Another string for comparison\";\r\n std::cin>>revStr;\r\n std::cout<<\"The String are the same? 1. True 0. False \"<<StringCompare(String,revStr)<<std::endl;\r\n break;\r\n case 9:\r\n std::cout<<makeLowerCase(String);\r\n break;\r\n case 10:\r\n exit(0);\r\n break;\r\n\r\n }\r\n std::cout<<\"Enter your choice\"<<std::endl;\r\n std::cin>>ch;\r\n }\r\n\r\n return 0;\r\n}\r\n"
},
{
"alpha_fraction": 0.6775777339935303,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 38.733333587646484,
"blob_id": "759a3c8f1edc5bf527f5fc60b2c553d672143fa5",
"content_id": "f210e56a2e0e708c4858ccaa79690f9b23ce0e85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 15,
"path": "/NaturalLangProc-python/WuandPalmeranalysis.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import nltk\r\nfrom nltk.corpus import wordnet\r\n#The Wu and Palmer method is mainly used to estimate the amount of semantic similarity between two words.\r\nw1=wordnet.synset(\"Cup.n.01\")\r\nw2=wordnet.synset(\"Saucer.n.01\");\r\nprint(w1.wup_similarity(w2));\r\nw1=wordnet.synset(\"Ship.n.01\")\r\nw2=wordnet.synset(\"Boat.n.01\");\r\nprint(w1.wup_similarity(w2));\r\nw1=wordnet.synset(\"Ship.n.01\")\r\nw2=wordnet.synset(\"Submarine.n.01\");\r\nprint(w1.wup_similarity(w2));\r\n#similarity between Cup and Saucer 0.142 14%similar\r\n#similarity between Ship and Boat 0.909 90% similar\r\n#similarity between Ship and Submarine 0.88 88% similar\r\n"
},
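Wu–Palmer is only one of WordNet's similarity measures. As a point of comparison, a short sketch using `path_similarity`, which scores pairs by the shortest is-a path rather than the depth of the least common subsumer; synset names are written in the canonical lowercase form.

```python
from nltk.corpus import wordnet

w1 = wordnet.synset('ship.n.01')
w2 = wordnet.synset('boat.n.01')

# path_similarity = 1 / (shortest hypernym-path length + 1), in (0, 1]
print(w1.path_similarity(w2))
# wup_similarity weighs the depth of the least common subsumer instead,
# so the two measures rank pairs similarly but on different scales
print(w1.wup_similarity(w2))
```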
{
"alpha_fraction": 0.7393209338188171,
"alphanum_fraction": 0.7393209338188171,
"avg_line_length": 27.516128540039062,
"blob_id": "dfb7be24ba6cdca5a22c27b2f63d3fea1cc15c29",
"content_id": "d7663119198c43c05a18ad642f5f7355b8da619d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 31,
"path": "/NaturalLangProc-python/stemmingwords.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#The Stem of any word can be described as the base of the word. The words of the same stem usually convey the same meaning!! So very important to analyse thse words in NLP\r\n#Ex. I must MEET him\r\n#I just MET him\r\n#He scheduled a MEETING\r\n#In this case all of these highlightd words have the exact same meaning although in a different tense and context\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import PorterStemmer\r\nst=PorterStemmer();\r\nmyList=[\"Pythonistas\",\"Pythonly\",\"Pythonic\",\"Pythonified\"]\r\n#Let us check the stem words for our List\r\nfor w in myList:\r\n\tprint st.stem(w);\r\n#The stem of every word of our list is!!\r\n#Pythonista\r\n#Pythonli\r\n#Python\r\n#Pythonifi\r\n#let us now try it for a completely different sentence!!\r\nmySent=\"Hello there! Do you want fries with that?\"\r\nwd=word_tokenize(mySent)\r\nfor w in wd:\r\n\tprint st.stem(w)\r\n#Hello\r\n#there\r\n#Do\r\n#you\r\n#want\r\n#fri\r\n#with\r\n#that"
},
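Porter is the mildest of nltk's stemmers. A quick sketch comparing it with the Snowball and Lancaster stemmers on the same kinds of tokens; all three classes live in `nltk.stem`.

```python
from nltk.stem import PorterStemmer, SnowballStemmer, LancasterStemmer

porter = PorterStemmer()
snowball = SnowballStemmer("english")  # Snowball takes a language argument
lancaster = LancasterStemmer()         # the most aggressive of the three

for word in ["meeting", "met", "Pythonly", "running"]:
    print("%s -> %s / %s / %s" % (word, porter.stem(word),
                                  snowball.stem(word), lancaster.stem(word)))
```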
{
"alpha_fraction": 0.7343453764915466,
"alphanum_fraction": 0.7362428903579712,
"avg_line_length": 30.9375,
"blob_id": "0165d5536fa26506b846966f74f08400913dc4ac",
"content_id": "7a7bc80e9e11ffb906cf20bcf6facf55e3dc5a29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 16,
"path": "/NaturalLangProc-python/synonymsandantonyms.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import nltk\r\nfrom nltk.corpus import wordnet\r\n#wordnet is a powerful package which provides synonyms,antonyms,lemmas for your required text(Corpus)\r\n#synsets will return the sets of all possible synonyms for the word\r\nval=wordnet.synsets(\"Nice\")\r\nprint (val[0].definition())#will provide the actual definition of thhe word\r\nprint(val);\r\nsynonyms=[];\r\nantonyms=[]\r\nfor i in wordnet.synsets(\"perfect\"):\r\n\tfor j in i.lemmas():\r\n\t\tsynonyms.append(j);\r\n\t\tif j.antonyms():\r\n\t\t\tantonyms.append(j);\r\nprint(synonyms)\r\nprint (antonyms)\r\n"
},
{
"alpha_fraction": 0.7579737305641174,
"alphanum_fraction": 0.7579737305641174,
"avg_line_length": 41.75,
"blob_id": "f1ed38abc4de1b00165b7f094b45f0ee679f3d85",
"content_id": "0fedadfc8e09e0ef538f955503bb5c4de1a8cb3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 12,
"path": "/NaturalLangProc-python/stopwordsremover.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#Stop words can be defined as any words which are in almost all cases UNNECESSARY for Natural Language Processing!\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nnList=[];#list which will store the most USEFUL WORDS FOR NLP\r\nexample_sentence=\"Hello Boys and Girls, today we present to you the legendary singer Bob Dylan!!\"\r\nstwords=set(stopwords.words(\"English\"))\r\nreqwords=word_tokenize(example_sentence);\r\nfor w in reqwords:\r\n\tif w not in stwords:\r\n\t\tnList.append(w);\r\nprint(nList)\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7206085920333862,
"alphanum_fraction": 0.7233748435974121,
"avg_line_length": 32.52381134033203,
"blob_id": "6cdb68bb5af9375918e71cb9ef77feacbe298cae",
"content_id": "8f0eed508b0b28c9e31166bbc22b51eb350ad43b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 21,
"path": "/imagescrp.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#simple python program to download images from a web page!!\r\nfrom bs4 import BeautifulSoup;\r\nimport requests;\r\nimport wget;\r\n#please provide a complete url if possible like 'www.leonardodavinci.net'\r\nmyUrl=raw_input(\"enter the required url\");\r\nmod_url=\"http://\"+myUrl\r\nreq=requests.get(mod_url);\r\nscrp=req.text;\r\nval=1;\r\nnewFileHand=open(\"imgtext.txt\",\"wb\");\r\nsoup=BeautifulSoup(scrp,\"html.parser\");\r\nfor i in soup.find_all('img'):\r\n newVal=mod_url+\"/\"+i.get('src');\r\n wget.download(newVal);\r\n newFileHand.write(\"\\n\".join(newVal));\r\nnewFileHand.close();\r\n\r\n#coded by Sridhar Cheebu\r\n#The resultant file will contain the separate image links\r\n#The download will occur in the working directory where you downloaded this file"
},
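`imagescrp.py` builds image URLs as `mod_url + "/" + src`, which breaks when a page uses absolute URLs or rooted paths in `src`. A sketch using the standard library's `urljoin`, which handles both forms; the function name is illustrative.

```python
from urlparse import urljoin  # Python 2; urllib.parse.urljoin in Python 3

def resolve_image_links(page_url, soup):
    links = []
    for img in soup.find_all('img'):
        src = img.get('src')
        if src:
            # urljoin leaves absolute URLs untouched and resolves relative ones
            links.append(urljoin(page_url, src))
    return links
```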
{
"alpha_fraction": 0.7401055693626404,
"alphanum_fraction": 0.7427440881729126,
"avg_line_length": 37.894737243652344,
"blob_id": "c98f5018ed1acc2fd72e3cf7173206b66e641772",
"content_id": "840971e40baaf4112bad04905abf8f7d00f9fe50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 758,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 19,
"path": "/getheadline.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#This particular program will grab the main headlines of the financial blog Zero Hedge\r\n#It will then print the file in the given file\r\n#WON'T work for other blogs or sites!!!\r\n#Zero Hedge is an English-language financial blog that aggregates news and presents editorial opinions from original and outside sources\r\nfrom bs4 import BeautifulSoup;\r\nimport requests;\r\nmyUrl=\"http://www.zerohedge.com/\";\r\nreq=requests.get(myUrl);\r\nmyFileHand=open(\"zerohedgenews.txt\",\"w\")\r\nscrp=req.text;\r\nsoup=BeautifulSoup(scrp,\"html.parser\");\r\nmySoup=soup.find_all('h2',{\"class\":\"title teaser-title\"});\r\nval=str(mySoup);\r\nnewS=BeautifulSoup(val);\r\nfor i in newS.find_all('a'):\r\n\tmyFileHand.write(str(i.text));\r\n\tmyFileHand.write(\"\\n\");\r\nmyFileHand.close();\r\n#Coded by Sridhar Cheebu\r\n"
},
{
"alpha_fraction": 0.762566864490509,
"alphanum_fraction": 0.762566864490509,
"avg_line_length": 49.94444274902344,
"blob_id": "fc3f2a1a588a4dfd4ef90bc850f68a873bddf8c8",
"content_id": "c5a47e377b4f5a50335b6b25c36953345d685018",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 935,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 18,
"path": "/NaturalLangProc-python/chunking.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#chunking allows us to group words togther by the tags!! Here we use regular expressions\r\nimport nltk\r\nfrom nltk.tokenize import PunktSentenceTokenizer\r\n#PunktSentenceTokenizer is a customizable tokenizer. It can scan a particular text body(corpus) and apply the same tokenizing pattern to another text!\r\nreq_text=\"Iam a man of very high honour. So don't you mess with me\"\r\nsamp_text=\"This road leads to one of the most dangerous places on the Earth\"\r\ncustom_token=PunktSentenceTokenizer(samp_text)\r\ntokens=custom_token.tokenize(req_text)\r\nfor i in tokens:\r\n\twords=nltk.word_tokenize(i)\r\n\ttagged=nltk.pos_tag(words)\r\n\tprint(tagged);\r\n\t#we will chunk together nouns,verbs,adverbs\r\n\tmyChunk=r\"\"\"Chunk: {<RB.?>*<VB.?>*<NNP><NN>?}\"\"\"\r\n\tchunkParse=nltk.RegexpParser(myChunk);\r\n\tchunked=chunkParse.parse(tagged);\r\n\tprint (chunked)\r\n\tchunked.draw();#matplotlib must be installed to get the chunk tree. It helps to visualize your chunk data!\r\n"
},
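The complement of chunking is chinking: first chunk broadly, then carve tokens back out with a `}{` pattern. A minimal sketch with the same `nltk.RegexpParser` used in `chunking.py` above.

```python
import nltk

sentence = "This road leads to one of the most dangerous places on the Earth"
tagged = nltk.pos_tag(nltk.word_tokenize(sentence))

# chunk everything, then chink (remove) verbs, prepositions and determiners
grammar = r"""Chunk: {<.*>+}
                     }<VB.?|IN|DT>+{"""
chunked = nltk.RegexpParser(grammar).parse(tagged)
print(chunked)
```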
{
"alpha_fraction": 0.8245614171028137,
"alphanum_fraction": 0.8245614171028137,
"avg_line_length": 27.5,
"blob_id": "37b9ee8c6c69744ebdbee0de699b7162b24ccf61",
"content_id": "8a0d2c23c36149fa1ced12e41af73fdec118ec92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 2,
"path": "/README.md",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "# CheebsRepo\nA repository for web development and Python\n"
},
{
"alpha_fraction": 0.7664576768875122,
"alphanum_fraction": 0.7766457796096802,
"avg_line_length": 41.931034088134766,
"blob_id": "35acae116598cea7bf8e89db8f7109d24f905a4e",
"content_id": "2212d642dfc510b7f61b0861e5e373f1990f4239",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1276,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 29,
"path": "/MachLearningandDataAnalysis/KNNeighboursAlgoandefficiency.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import pandas\r\nimport sklearn\r\nfrom pandas.tools.plotting import scatter_matrix\r\nimport matplotlib.pyplot as plt;\r\nfrom sklearn import model_selection\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nurl=\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\";\r\nnames=['sepal-length','sepal-width','petal-length','petal-width','class']\r\ndataset=pandas.read_csv(url,names=names);\r\narray = dataset.values\r\nX = array[:,0:4]\r\nY = array[:,4]\r\n#About 20% of iris dataset is used for validation and about 80% is used for training the KNN Algorithm\r\nvalidation_size = 0.20\r\nseed = 7\r\nX_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)\r\n# Make predictions on validation dataset\r\nknn = KNeighborsClassifier()\r\nknn.fit(X_train, Y_train)\r\npredictions = knn.predict(X_validation)\r\n#accuracy of KNN algorithm ~90% efficiency\r\nprint(accuracy_score(Y_validation, predictions))\r\n#Some error results\r\nprint(confusion_matrix(Y_validation, predictions))\r\n#Detailed analysis of the Algorithm\r\nprint(classification_report(Y_validation, predictions))\r\n\r\n"
},
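A single 80/20 split yields one accuracy number; k-fold cross-validation averages over several splits and also gives a spread. A sketch with `cross_val_score` on sklearn's bundled copy of the same iris data.

```python
from sklearn import model_selection
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()  # the same data as the UCI CSV used above
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=7)
scores = model_selection.cross_val_score(KNeighborsClassifier(),
                                         iris.data, iris.target, cv=kfold)
print("mean accuracy: %.3f (std %.3f)" % (scores.mean(), scores.std()))
```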
{
"alpha_fraction": 0.6871218681335449,
"alphanum_fraction": 0.6931719779968262,
"avg_line_length": 20.72549057006836,
"blob_id": "f2f5e4158ec365fca9841d2e2750c6fafa10d2ff",
"content_id": "84055b44d53041c311295e3579875ceec562f0f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 51,
"path": "/Web Dev/Video Player/vidjs.js",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "function doFirst()\r\n{\r\n\t barSize=600;\r\n\t myMovie=document.getElementById('myMovie');\r\n\t plButton=document.getElementById('play');\r\n\t bar=document.getElementById('defaultbar');\r\n\t progBar=document.getElementById('progBar');\r\n\tplButton.addEventListener('click',playOrPause,false);\r\n\tbar.addEventListener('click',clickedBar,false);\r\n}\r\nfunction playOrPause()\r\n{\r\n\tif(!myMovie.paused && !myMovie.ended)\r\n\t\t{\r\n\t\t\tmyMovie.pause();\r\n\t\t\tplButton.innerHTML='Play';\r\n\t\t\twindow.clearInterval(updateBar)\r\n\t\t}\r\n\telse\r\n\t{\r\n\t\tmyMovie.play();\r\n\t\tplButton.innerHTML='Pause';\r\n\t\tupdateBar=setInterval(update,500);\r\n\t}\r\n}\r\nfunction update()\r\n{\r\n\tif(!myMovie.ended)\r\n\t{\r\n\t\tvar size=parseInt(myMovie.currentTime*barSize/myMovie.duration);\r\n\t\tprogBar.style.width=size+'px';\r\n\t}\r\n\telse\r\n\t\t{\r\n\t\t\tprogBar.style.width='0px';\r\n\t\t\tplButton.innerHTML='Play';\r\n\t\t\twindow.clearInterval(updateBar);\r\n\t\t}\r\n}\r\nfunction clickedBar(e)\r\n{\r\n\tif(!myMovie.paused && !myMovie.ended)\r\n\t{\r\n\t\tvar mouseX=e.pageX-bar.offsetLeft;\r\n\t\tvar newtime=mouseX*myMovie.duration/barSize;\r\n\t\tmyMovie.currentTime=newtime;\r\n\t\tprogBar.style.width=mouseX+'px';\t\r\n\r\n\t}\r\n}\r\nwindow.addEventListener('load',doFirst,false);"
},
{
"alpha_fraction": 0.7467700242996216,
"alphanum_fraction": 0.7665805220603943,
"avg_line_length": 38.10344696044922,
"blob_id": "c2954588beccd2dcf2dd384a10a186a1d6c1574e",
"content_id": "6a47493f061916dfb9c62eac3ce5e8833cca8bf0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 29,
"path": "/NaturalLangProc-python/NaiveBayesAlgo.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import nltk\r\nfrom nltk.corpus import movie_reviews #ysing the movie review corpus in nltk package.\r\nimport random\r\ndocuments=[(list(movie_reviews.words(fileid)),category) for category in movie_reviews.categories()\r\n\t\t\tfor fileid in movie_reviews.fileids(category)]\r\nrandom.shuffle(documents);\r\nall_words=[];\r\nfor w in movie_reviews.words():\r\n\tall_words.append(w.lower())\r\nall_words=nltk.FreqDist(all_words);\r\n#we will print the most commonly used 100 words\r\n#print(all_words.most_common(100));\r\nword_features=list(all_words.keys())[:3000];\r\n\r\ndef find_features(document):\r\n\twords=set(document)\r\n\tfeatures=[]\r\n\tfor w in word_features:\r\n\t\tfeatures[w]=(w in words);\r\n\treturn features;\r\n\r\nfeaturesets=[(find_features(rev),category) for (rev,category) in documents]\r\ntraining_set=featuresets[:1000]\r\ntesting_set=featuresets[1000:]\r\nclassifier=nltk.NaiveBayesClassifier.train(training_set);\r\n#Will provide different accuracy for different datasets\r\nprint(\"NaiveBayesClassifier Algo Efficiency:\",(nltk.classify.accuracy(classifier,testing_set))*100)\r\n#will show the most informative features which help in classification\r\nclassifier.show_most_informative_features(15);"
},
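Training on the full review corpus is slow enough that persisting the trained model is worthwhile. A sketch with the standard `pickle` module; `classifier` is the object trained in `NaiveBayesAlgo.py` above.

```python
import pickle

# save the trained nltk classifier to disk ...
with open("naivebayes.pickle", "wb") as f:
    pickle.dump(classifier, f)

# ... and reload it later without retraining
with open("naivebayes.pickle", "rb") as f:
    classifier = pickle.load(f)
```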
{
"alpha_fraction": 0.7492212057113647,
"alphanum_fraction": 0.7647975087165833,
"avg_line_length": 43.85714340209961,
"blob_id": "85685415cd118b0a79d1b5109f54c36dd3e976b6",
"content_id": "b237700c152962fa4920251ac52c8c838ce605be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 14,
"path": "/NaturalLangProc-python/mostcommonwordsincorpus.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import nltk\r\nfrom nltk.corpus import movie_reviews #using the movie review corpus in nltk package.\r\nimport random\r\n\r\n#documents=[(list(movie_reviews.words(fileid)),category) for category in movie_reviews.categories()\r\n#\t\t\tfor fileid in movie_reviews.fileids(category)]\r\n#random.shuffle(documents);\r\nall_words=[];\r\nfor w in movie_reviews.words():\r\n\tall_words.append(w.lower())\r\n#all_words contain all the words within the movie_reviews folder (abt 2000 reviews) \r\nall_words=nltk.FreqDist(all_words);#uses frequency disribution to map commonly occuring words\r\n#we will print the most commonly used 100 words\r\nprint(all_words.most_common(100))\r\n"
},
{
"alpha_fraction": 0.7688679099082947,
"alphanum_fraction": 0.7712264060974121,
"avg_line_length": 34.956520080566406,
"blob_id": "1bcf1f89589e0afda18c27b431806dad874834f9",
"content_id": "8d3dc10544206c8b72d3d982b5db6a8917b26f8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 23,
"path": "/NaturalLangProc-python/Namedentitychunking.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#Named Entity Parsing allows you to parse the sentence on natural entities such as Organization,Country etc.\r\nimport nltk\r\nfrom nltk.tokenize import PunktSentenceTokenizer\r\nsamp_text=\"India is a country found on a really strong ethical reason. We were the original peacekeepers of South Asia\"\r\nreq_text=\"Michaelangelo lived most of his life in Italy, a few years in the Vatican employed by the church. He painted the Sistine Chapel at St.Peter's Basilica. He was a famed 16th century painter\" \r\ncustom_token=PunktSentenceTokenizer(samp_text)\r\ntokens=custom_token.tokenize(req_text)\r\nfor i in tokens:\r\n\twords=nltk.word_tokenize(i)\r\n\ttagged=nltk.pos_tag(words)\r\n\tprint(tagged);\r\n\tnet=nltk.ne_chunk(tagged);\r\n\tnet.draw();#MATPLOTLIB Required!!\r\n#ex..\r\n#PERSON\r\n#ORGANIZATION\r\n#GPE-like Italy,Spain\r\n#FACILITY\r\n#DATE\r\n#TIME\r\n#LOCATION\r\n#MONEY\r\n#PERCENT"
},
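`draw()` needs a display, so for batch work it is more useful to walk the `ne_chunk` output and collect (entity, label) pairs. A minimal sketch; `Tree` is nltk's tree class, and the sentence is illustrative.

```python
import nltk
from nltk.tree import Tree

def extract_entities(sentence):
    tree = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sentence)))
    entities = []
    for node in tree:
        # named entities come back as subtrees labelled PERSON, GPE, etc.
        if isinstance(node, Tree):
            name = " ".join(leaf[0] for leaf in node.leaves())
            entities.append((name, node.label()))
    return entities

print(extract_entities("Michelangelo painted the Sistine Chapel in Italy"))
```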
{
"alpha_fraction": 0.7408163547515869,
"alphanum_fraction": 0.7408163547515869,
"avg_line_length": 33.14285659790039,
"blob_id": "3f4880b682111a64700d3a0cac3ffb44cc341346",
"content_id": "84ffe5d8ce536da48d78e4fe2a70cf2ee6afd291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 14,
"path": "/NaturalLangProc-python/lemmatizing.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#Lemmatizing is mainly used to return a much better form of the root or stem of a word. The returned word may be the singular form or might even be a synonym for the required word\r\nfrom nltk.stem import WordNetLemmatizer\r\nwd=WordNetLemmatizer();\r\nprint (wd.lemmatize(\"Good\"))\r\nprint (wd.lemmatize(\"Better\",pos=\"a\"))#the default Part of Speech parameter is Noun\r\nprint (wd.lemmatize(\"Mice\"))\r\nprint (wd.lemmatize(\"Boys\"))\r\nprint(wd.lemmatize(\"Geese\"))\r\n\r\n#Good\r\n#Better\r\n#Mice\r\n#Boys\r\n#Goose"
},
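Because the lemmatizer defaults to nouns, verbs and adjectives pass through unchanged unless a POS is supplied. A sketch that maps Penn Treebank tags from `pos_tag` onto WordNet's POS codes; the mapping helper is illustrative.

```python
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet

def to_wordnet_pos(treebank_tag):
    # map Penn Treebank tag prefixes onto WordNet's four POS codes
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    if treebank_tag.startswith('V'):
        return wordnet.VERB
    if treebank_tag.startswith('R'):
        return wordnet.ADV
    return wordnet.NOUN  # the default that lemmatize() itself assumes

wd = WordNetLemmatizer()
for word, tag in nltk.pos_tag(nltk.word_tokenize("The geese were running home")):
    print(wd.lemmatize(word.lower(), pos=to_wordnet_pos(tag)))
```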
{
"alpha_fraction": 0.7582417726516724,
"alphanum_fraction": 0.7637362480163574,
"avg_line_length": 41.68000030517578,
"blob_id": "aa3e851cdfc7b45c01c34485edc2d93674180b42",
"content_id": "a5258115edef174c59f72e43a3bb379a24d84797",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 25,
"path": "/MachLearningandDataAnalysis/irisdataanalysis.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#This is a basic data analysis snippet describing the features of sklearn's iris dataset\r\nimport pandas\r\nimport sklearn\r\nfrom pandas.tools.plotting import scatter_matrix\r\nimport matplotlib.pyplot as plt;\r\nurl=\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\";\r\nnames=['sepal-length','sepal-width','petal-length','petal-width','class']\r\ndataset=pandas.read_csv(url,names=names);\r\n#getting the dimensions of the dataset\r\nprint(dataset.shape)\r\n#Peeking at the first 20 values of our dataset\r\nprint(dataset.head(20));\r\n#Let us describe some features of our data such as mean,min,max values etc\r\nprint(dataset.describe());\r\n#Let us groupby using classes. Tells the number of flowers belonging to a particular category\r\nprint(dataset.groupby('class').size())\r\n#Plotting a Box and Cluster graph using the dataset\r\ndataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)\r\nplt.show();\r\n#creating a histogram using our four classifiers\r\ndataset.hist();\r\nplt.show();\r\n#creating a scatter matrix using the dataset\r\nscatter_matrix(dataset)\r\nplt.show();\r\n"
},
{
"alpha_fraction": 0.7553699016571045,
"alphanum_fraction": 0.7565632462501526,
"avg_line_length": 36.1363639831543,
"blob_id": "53fd120bc6ca86a94618df2f67dfdd75fba5e945",
"content_id": "658c4c1100df1ad51c91a72302db94a4c94e5245",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1676,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 44,
"path": "/NaturalLangProc-python/taggingwords.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#In NLP, it is very important to know about the properties of the words. So it is important to tag them to understand their nature.\r\n#understanding the word's nature and tag will be useful during NLP\r\nimport nltk\r\nfrom nltk.tokenize import PunktSentenceTokenizer\r\n#PunktSentenceTokenizer is a customizable tokenizer. It can scan a particular text body(corpus) and apply the same tokenizing pattern to another text!\r\nreq_text=\"Iam a man of very high honour. So don't you mess with me\"\r\nsamp_text=\"This road leads to one of the most dangerous places on the Earth\"\r\ncustom_token=PunktSentenceTokenizer(samp_text)\r\ntokens=custom_token.tokenize(req_text)\r\nfor i in tokens:\r\n\twords=nltk.word_tokenize(i)\r\n\ttagged=nltk.pos_tag(words)\r\n\tprint(tagged);\r\n#The various tags assosciated with words in the English Language are...\r\n#CC: conjunction, coordinating\r\n#CD: numeral, cardinal\r\n#DT: determiner\r\n#IN: preposition or conjunction, subordinating\r\n#JJ: adjective or numeral, ordinal\r\n#JJR: adjective, comparative\r\n#JJS: adjective, superlative\r\n#LS: list item marker\r\n#MD: modal auxiliary\r\n#NN: noun, common, singular or mass\r\n#NNP: noun, proper, singular\r\n#NNS: noun, common, plural\r\n#PDT: pre-determiner\r\n#POS: genitive marker\r\n#PRP: pronoun, personal\r\n#PRP$: pronoun, possessive\r\n#RB: adverb\r\n#RBR: adverb, comparative\r\n#RBS: adverb, superlative\r\n#TO: \"to\" as preposition or infinitive marker\r\n#UH: interjection\r\n#VB: verb, base form\r\n#VBD: verb, past tense\r\n#VBG: verb, present participle or gerund\r\n#VBN: verb, past participle\r\n#VBP: verb, present tense, not 3rd person singular\r\n#VBZ: verb, present tense, 3rd person singular\r\n#WDT: WH-determiner\r\n#WP: WH-pronoun\r\n#WRB: Wh-adverb"
},
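For reference, the Punkt tokenizer in the record above only splits sentences; the tags themselves come from `nltk.pos_tag` over word tokens. A minimal sketch (assumes the `punkt` and `averaged_perceptron_tagger` NLTK data packages; the printed tags are illustrative):

```python
# Minimal POS-tagging sketch. PunktSentenceTokenizer is only a sentence
# splitter; the tags come from nltk.pos_tag on word tokens.
import nltk
from nltk.tokenize import word_tokenize

nltk.download('punkt')                       # tokenizer models
nltk.download('averaged_perceptron_tagger')  # default POS tagger

tokens = word_tokenize("This road leads to one of the most dangerous places")
print(nltk.pos_tag(tokens))
# e.g. [('This', 'DT'), ('road', 'NN'), ('leads', 'VBZ'), ...]
```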
{
"alpha_fraction": 0.7918781638145447,
"alphanum_fraction": 0.7918781638145447,
"avg_line_length": 41.77777862548828,
"blob_id": "9a960993270a80ea1d5dc6450f7ccce7938b85a7",
"content_id": "f38d2c1617451587fff7d6c8e33996edaf08cd7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 9,
"path": "/NaturalLangProc-python/wordandsenttokenizer.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "\r\n#Tokenizing an input string into a number of smaller components is very useful for Natural Language Processing\r\nimport nltk\r\nfrom nltk import word_tokenize \r\nfrom nltk import sent_tokenize\r\nexample_sentence=\"Hello,There how are you,Iam Sridhar from Chennai,India. Great to see you!!\"\r\nwords=word_tokenize(example_sentence)\r\nsents=sent_tokenize(example_sentence);\r\nprint (words)\r\nprint (sents)"
},
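A hedged sketch of what those two tokenizers return on the record's example sentence (exact splits vary with the NLTK version and its data files):

```python
# Sketch: sentence splitting vs. word splitting on the record's example.
from nltk import word_tokenize, sent_tokenize

s = "Hello,There how are you,Iam Sridhar from Chennai,India. Great to see you!!"
print(sent_tokenize(s))
# likely two sentences, split at ". Great"
print(word_tokenize(s))
# punctuation becomes its own tokens: ['Hello', ',', 'There', 'how', ...]
```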
{
"alpha_fraction": 0.6803278923034668,
"alphanum_fraction": 0.6967213153839111,
"avg_line_length": 37.28571319580078,
"blob_id": "d14afd36e2fa8ba292c70be8b75d09ce74356779",
"content_id": "1647ad023a04243e396897e17e79429ed08b7ca8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1098,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 28,
"path": "/MachLearningandDataAnalysis/isotonicregression.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#isotonic regression is a regression method for reducing mean squared error which is the\r\n#the difference between the estimator and what is to be estimated \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.collections import LineCollection\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.isotonic import IsotonicRegression\r\nfrom sklearn.utils import check_random_state\r\nn = 100\r\nx = np.arange(n)\r\nrs = check_random_state(0)\r\ny = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))\r\nir = IsotonicRegression()\r\ny_ = ir.fit_transform(x, y)\r\nlr = LinearRegression()\r\nlr.fit(x[:, np.newaxis], y) \r\nsegments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]\r\nlc = LineCollection(segments, zorder=0)\r\nlc.set_array(np.ones(len(y)))\r\nlc.set_linewidths(0.5 * np.ones(n))\r\nfig = plt.figure()\r\nplt.plot(x, y, 'r.', markersize=12)\r\nplt.plot(x, y_, 'g.-', markersize=12)\r\nplt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')\r\nplt.gca().add_collection(lc)\r\nplt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')\r\nplt.title('Isotonic regression')\r\nplt.show()"
},
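Isotonic regression fits the least-squares step function subject to a monotonicity constraint (non-decreasing by default), which is what the green fit in the record shows against the straight linear fit. A minimal sketch verifying that property on the same synthetic data:

```python
# Minimal sketch: the isotonic fit minimizes squared error subject to
# the constraint that predictions are non-decreasing in x.
import numpy as np
from sklearn.isotonic import IsotonicRegression

rng = np.random.RandomState(0)
x = np.arange(100)
y = rng.randint(-50, 50, size=100) + 50.0 * np.log(1 + x)

y_fit = IsotonicRegression().fit_transform(x, y)
assert np.all(np.diff(y_fit) >= 0)  # monotone by construction
print(np.mean((y - y_fit) ** 2))    # squared error of the monotone fit
```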
{
"alpha_fraction": 0.7224712371826172,
"alphanum_fraction": 0.724756121635437,
"avg_line_length": 91.25409698486328,
"blob_id": "cb66ce3ecc29d63a02cf01b1b5cf73c855040f6d",
"content_id": "16637208e8d1ca655194b849d7813b8551d1bec3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11379,
"license_type": "no_license",
"max_line_length": 440,
"num_lines": 122,
"path": "/NaturalLangProc-python/hedberganalysis.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import nltk;\r\nfrom textblob import TextBlob;\r\nfrom nltk import sent_tokenize,word_tokenize;\r\nfrom nltk.corpus import stopwords,state_union;\r\nfrom nltk.tokenize import PunktSentenceTokenizer;\r\nfrom textblob.sentiments import NaiveBayesAnalyzer,PatternAnalyzer\r\nnjokes=[];\r\njokes=[\"I used to do drugs. I still do, but I used to, too.\",\r\n\"I'm against picketing, but I don't know how to show it.\",\r\n\"You know, I'm sick of following my dreams, man. I'm just going to ask where they're going and hook up with 'em later.\",\r\n\"I bought a seven dollar pen because I always lose pens and I got sick of not caring.\",\r\n\"My fake plants died because I did not pretend to water them.\",\r\n\"My friend asked me if I wanted a frozen banana, I said 'no, but I want a regular banana later, so ... yeah'.\",\r\n\"Is a hippopotamus really a hippopotomus or just a really cool opotamus?\",\r\n\"I know a lot about cars, man. I can look at any car's headlights and tell you exactly which way it's coming.\",\r\n\"I don't have a girlfriend. But I do know a woman who'd be mad at me for saying that.\",\r\n\"If carrots got you drunk, rabbits would be fucked up.\"\r\n\"I love blackjack. But I'm not addicted to gambling. I'm addicted to sitting in a semicircle.\", \r\n\"I tried to have a cookie, and this girl said, 'I'm mailing those cookies to my friend.' So I couldn't have one. You shouldn't make cookies untouchable.\", \r\n\"I drank some boiling water because I wanted to whistle.\", \r\n\"A lollipop is a cross between hard candy and garbage.\",\r\n\"Has anyone seen me on Letterman? Two million people watch that show and I don't know where they are. You might have seen this next comedian on the Late Show, but I think more people have seen me at the store. That should be my introduction.'You might have seen this next comedian at the store,' and people would say 'Hell yes I have!'\", \r\n\"It's hard to fight when you're in a gazebo.\" ,\r\n\"I don't like grouper fish. Well, they're okay. They hang around star fish. Because they're grouper fish.\",\r\n\"I have some speakers up here, thank God, because last night I didn't have them and I was telling jokes and I had no idea which joke I was telling. So I told jokes twice. I even told that one twice.\",\r\n\"I've got a wallet, it's orange. In case I wanna buy a deer. That doesn't make any sense at all.\",\r\n\"I miss the $2 bill, 'cause I can break a two. $20, no. $10, no. $5, maybe, $2? Oh yeah. What do you need, a one and another one?\",\r\n\"I called the hotel operator and she said, How can I direct your call?' I said, 'Well, you could say 'Action!', and I'll begin to dial. And when I say 'Goodbye', then you can yell 'Cut!'\",\r\n\"A dog came to my door, so I gave him a bone, the dog took the bone into the back yard and buried it. I'm going to go plant a tree there, with bones on it, then the dog will come back and say, 'Shoot! It worked! I must distribute these bones equally for I have a green paw!'\",\r\n\"Cavities are made by sugar. So if you need to dig a hole, then lay down some candy bars!\", \r\n\"I like cottage cheese. That's why I want to try other dwelling cheeses, too. How about studio apartment cheese? Tent cheese? Mobile home cheese? Do not eat mobile home cheese in a tornado.\"\r\n\"I went to a heavy metal concert. The singer yelled out, 'How many of you people feel like human beings tonight?' And then he said, 'How many of you feel like animals?' 
The thing is, everyone cheered after the animals part, but I cheered after the human beings part because I did not know there was a second part to the question.\" \r\n\"I have no problem not listening to the Temptations.\", \r\n\"I like buying snacks from a vending machine because food is better when it falls. Sometimes at the grocery, I'll drop a candy bar so that it will achieve its maximum flavor potential.\", \r\n\"When you put Listerine in your mouth, it hurts. Germs do not go quietly.\", \r\n\"If you're watching a parade, don't follow it. It never changes. If the parade is boring, run in the opposite direction. You will fast-forward the parade.\", \r\n\"On a traffic light yellow means yield, and green means go. On a banana, it's just the opposite, yellow means go ahead, green means stop, and red means, where'd you get that banana?\", \r\n\"Xylophone is spelled with an X. That's wrong. It should be a Z up front. Next time you spell xylophone, use a Z. If someone says, 'That's wrong!', you say, 'No, it ain't.' If you think that's wrong, then you need to have your head Z-rayed.\", \r\n\"I would imagine the inside of a bottle of cleaning fluid is really clean. I would imagine a vodka bottle is really drunk.\", \r\n\"A fly was very close to being called a land, because that's what it does half the time.\", \r\n\"I want to rob a bank with a BB gun. 'Give me all your money or I will give you a dimple! I will be rich, you will be cute. We both win.'\", \r\n\"I had a chicken finger that was so big, it was a chicken hand.\", \r\n\"I got binoculars 'cause I don't want to go that close.\", \r\n\"I can read minds, but I'm illiterate.\", \r\n\"If Spiderman was real, and I was a criminal, and he shot me with his web, I would say, 'Dude, thanks for the hammock.'\", \r\n\"I got a belt on that's holding up my pants, and the pants have belt loops that hold up the belt. What's going on here? Who is the real hero?\", \r\n\"I had the cab driver drive me here backwards, and the dude owed me $27.50.\"\r\n\"I travel with a boom box. When I get on a plane, I stuff the power cord for the boom box into the battery compartment. From an outsider's point of view, it looks like I've got it all wrong\",\r\n\"Advil has a candy coating. It's delicious. And it says right on the bottle 'Do not have more than two.' Well then do not put a candy coating around it.\",\r\n\"I had a job interview at an insurance company once and the lady said 'Where do you see yourself in five years?' I said 'Celebrating the fifth year anniversary of you asking me this question.'\",\r\n\"I fuckin' hate arrows, man. They try to tell me which direction to go. It's like, 'Fuck you, I ain't going that way, line with two thirds of a triangle on the end!'\",\r\n\"cid was my favorite drug. Acid opened up my mind, it expanded my mind. Because of acid, I now know that butter is way better than margarine. I saw through the bullshit.\",\r\n\"I used to live here in Los Angeles, on Sierra Bonita, and I had an apartment, and I had a neighbor. And whenever he would knock on my wall, I knew he wanted me to turn my music down. And that made me angry, cause I like loud music. So when he knocked on the wall, I'd mess with his head. I'd say, 'Go around! I cannot open the wall. I don't know if you have a doorknob on the other side, but over here there's nothing... it's just flat!'\",\r\n\"Listerine hurts. Man, when I put Listerine in my mouth, I'm fuckin' angry. 
Germs do not go quietly.\",\r\n\"That would suck if you became a priest and the day came where you had to fight the devil, you'd be like 'Shit, I didn't think that was for real!'\",\r\n\"You know that show 'My Three Sons'? That'd be funny if it was called 'My One Dad'\",\r\n\"I ran some Evian water through a filter... the shit disappeared! It was so fuckin' pure.\",\r\n\"I told the crowd last night to fuck off, but then I felt bad, so I said 'All right, fuck back on.'\",\r\n\"Gel's funny. You wash your hair and then you put gel in it. It's like, it's clean now, let me fuck it back up.\",\r\n\"When I'm on my hotel elevator, I like to pretend that someone else's floor is wrong. Like, if someone gets on and presses 3, I'm like 'You're on three? Hahahaha. Dude, I don't think I can ride with you.'\",\r\n\" I was at a restaurant, I saw a guy wearing a leather jacket at the same time he was eating a hamburger and drinking a glass of milk. I said 'Dude, you are a cow. The metamorphosis is complete. Don't fall asleep, I will tip you over.'\",\r\n\". Seahorses are slow. If I was in the ocean, I would not be a gambler on the horse races ... because you would be there fuckin' days\",\r\n\" Man, remember that movie The Outsiders and one of the guys name was 'Soda Pop', and at the time it was cool?... It's not cool right now. Your nickname was 'Soda Pop'... you would be dead.\",\r\n\" I remixed the remix... it was back to normal.\",\r\n\"As an adult, I'm not supposed to go down slides. So if I end up at the top of a slide, I have to act like I got there accidentally. 'How'd I get up here, god damnit?! I guess I have to slide down.'\",\r\n\"My manager's cool, he gets concerned, he says, 'Mitch, don't use liquor as a crutch.' I can't use liquor as a crutch... because a crutch helps me walk\",\r\n\" When I play the South, they say 'y'all'in the South. They take out the 'O' and the 'U'. So when I am in the South, I try to talk like that, so people understand me. 'Hello, can I have a bowl of chicken noodle... sp.'\",\r\n\" I want to be a race car passenger. Just a guy who bugs the driver.\",\r\n\" I didn't go to college, but if I did, I would have taken all my tests at a restaurant, because the customer is always right.\",\r\n\"Alcoholism is a disease. But it's like the only disease you can get yelled at for having. \",\r\n\" I was gonna stay overnight at my friends place, he said 'You're gonna have to sleep on the floor.'.... Damn gravity. You got me again. You know how badly I want to sleep on the wall.\",\r\n\"Dogs are forever in the pushup position.\",\r\n\"I did comedy for a fundraiser once. We were trying to raise money to buy one of those machines that shows how much money has been raised\",\r\n\"I saw a commercial for an above ground pool, it was 30 seconds long. You know why? Because that's the maximum amount of time you can depict yourself having fun in an above ground pool.\",\r\n\"I got a fire alarm at home, but really it's more like a 9-volt battery slowly drainer. 'Do you want to slowly get rid of your 9-volt batteries? Then buy this circle.'\",\r\n\"You know when they show someone on TV washing their hair under a waterfall? That's fuckin' bullshit, man. Because that thing would knock you on your ass.\",\r\n\"I'd like to see a forklift lift a crate of forks. It'd be so damn literal! You are using that machine to it's exact purpose!\",\r\n\" Now if I was to give a duck bread, I'd give him Pepperidge Farm bread because that shit's fancy. It's wrapped twice. So you open it... 
and it still ain't opened. That's why I don't buy it. I don't need another step between me and toast\"];\r\njokes=list(set(jokes));\r\nstop_words=set(stopwords.words(\"english\"));\r\nfiltJokes={};\r\ntaggedJokes={}\r\nsList=[];\r\nfor i in jokes:\r\n\tj=word_tokenize(i);\r\n\tsList=[w for w in j if not w in stop_words]\r\n\tfiltJokes[i]=sList;\r\n#print filtJokes;\r\ntrain_text=state_union.raw('2005-GWBush.txt');\r\ncustom_tagger=PunktSentenceTokenizer(train_text);\r\ntest_text=' '.join(jokes);\r\n#print test_text;\r\njokeTags=custom_tagger.tokenize(test_text);\r\ncnt=1;\r\nfor i in jokeTags:\r\n\twds=word_tokenize(i);\r\n\ttags=nltk.pos_tag(wds);\r\n\ttaggedJokes[cnt]=tags;\r\n\tchunker=r\"\"\"Chunk: {<RB.?>*<VB.?>*<NNP>+<NN>?} \"\"\";\r\n\tchunkParse=nltk.RegexpParser(chunker);\r\n\tchunky=chunkParse.parse(tags);\r\n\t#print chunky.draw();\r\n\tcnt+=1;\r\n#print taggedJokes;\r\nall_words=[];\r\nfor w in test_text.split():\r\n\tall_words.append(w);\r\n#print all_words;\r\nall_words=nltk.FreqDist(all_words);\r\n#print all_words.most_common(30);\r\n'''\r\nhedBlob=TextBlob(test_text);\r\ncnt=0;\r\nfor sentence in hedBlob.sentences:\r\n\tprint sentence.sentiment.polarity\r\n\tcnt+=1;\r\nprint cnt;\r\n'''\r\nfor i in jokes:\r\n\tblob = TextBlob(i, analyzer=PatternAnalyzer())\r\n\tprint blob.sentiment\r\nprint len(jokes)\r\n\r\n"
},
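The script above ends by printing `blob.sentiment` with `PatternAnalyzer`, TextBlob's default. A minimal sketch of what that returns (the exact polarity depends on the Pattern lexicon, so the printed numbers are illustrative):

```python
# Minimal sketch: PatternAnalyzer (TextBlob's default) returns a
# Sentiment(polarity, subjectivity) namedtuple per blob.
from textblob import TextBlob

blob = TextBlob("My fake plants died because I did not pretend to water them.")
print(blob.sentiment)            # e.g. Sentiment(polarity=-0.5, subjectivity=1.0)
print(blob.sentiment.polarity)   # negative value -> leans "negative"
```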
{
"alpha_fraction": 0.49115192890167236,
"alphanum_fraction": 0.4958263635635376,
"avg_line_length": 23.168067932128906,
"blob_id": "1cc10a29a520664a6180363321ca2f16e981560e",
"content_id": "6ae40f79988022e82634d4202eda575e873a1f99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2995,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 119,
"path": "/C++ MiniApps/Phoney/contactManagement.cpp",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "#include<iostream>\r\n#include<vector>\r\n#include<string>\r\n#include<map>\r\n#include<cstdlib>\r\nclass ContactBook\r\n{\r\nprivate:\r\nstd::string m_name;\r\nstd::string m_number;\r\nstd::map<std::string,std::string> m_contact;\r\nint m_size;\r\npublic:\r\n ContactBook(int size):m_size(size)\r\n {\r\n }\r\n void EnterValue(std::string name,std::string number)\r\n {\r\n m_contact.insert(make_pair(name,number));\r\n }\r\n std::string PrintValue(std::string name)\r\n {\r\n return m_contact[name];\r\n }\r\n std::map<std::string,std::string> getContactBook()\r\n {\r\n return m_contact;\r\n }\r\n void editContact(std::string name)\r\n {\r\n std::cout<<\"Enter the new number\";\r\n std::cin>>m_number;\r\n m_contact[name]=m_number;\r\n\r\n }\r\n void deleteContact(std::string name)\r\n {\r\n\r\n m_contact.erase(name);\r\n std::cout<<\"This contact is deleted :\"<<name<<std::endl;\r\n }\r\n\r\n};\r\nvoid printContactBook(ContactBook contact)\r\n{\r\n std::map<std::string,std::string> cont;\r\n cont=contact.getContactBook();\r\n if(cont.empty())\r\n {\r\n std::cout<<\"-----NO CONTACTS!-----\"<<std::endl;\r\n\r\n }\r\n else\r\n {\r\n\r\n std::map<std::string,std::string>::const_iterator it;\r\n it=cont.begin();\r\n std::cout<<\"____________CONTACTS_______________\"<<std::endl;\r\n while(it!=cont.end())\r\n {\r\n std::cout<<it->first<<\" : \"<<it->second<<std::endl;\r\n ++it;\r\n std::cout<<\"- - - - - - - - - - - - - - - - -\"<<std::endl;\r\n }\r\n std::cout<<\"____________________________________\"<<std::endl;\r\n }\r\n}\r\nint main()\r\n{\r\n std::string name,number;\r\n int len=0;\r\n int ch;\r\n std::cout<<\"Enter the size of the Phone Book!\";\r\n std::cin>>len;\r\n ContactBook contact(len);\r\n std::cout<<\"_______________PHONEY____________\"<<std::endl;\r\n std::cout<<\" 1. Add Contact\"<<std::endl;\r\n std::cout<<\" 2. Print Phone Book\"<<std::endl;\r\n std::cout<< \" 3. Delete Contact\"<<std::endl;\r\n std::cout<<\" 4. Edit Contact\"<<std::endl;\r\n std::cout<<\" 5. Exit\"<<std::endl;\r\n std::cout<<\"__________________________________\"<<std::endl;\r\n std::cout<<\"Enter your choice\"<<std::endl;\r\n std::cin>>ch;\r\n while(ch!=5)\r\n {\r\n switch(ch)\r\n {\r\n case 1:\r\n std::cout<<\"Enter the name\";\r\n std::cin>>name;\r\n std::cout<<\"Enter the number\";\r\n std::cin>>number;\r\n contact.EnterValue(name,number);\r\n break;\r\n case 2:\r\n printContactBook(contact);\r\n break;\r\n case 3:\r\n std::cout<<\"Enter the contact to be deleted\";\r\n std::cin>>name;\r\n contact.deleteContact(name);\r\n break;\r\n case 4:\r\n std::cout<<\"Enter the contact to be edited\";\r\n std::cin>>name;\r\n contact.editContact(name);\r\n break;\r\n case 5:\r\n std::cout<<\"_____BYE______\"<<std::endl;\r\n exit(0);\r\n break;\r\n }\r\n std::cout<<\"Enter your choice\"<<std::endl;\r\n std::cin>>ch;\r\n }\r\n\r\nreturn 0;\r\n}\r\n"
},
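The C++ class above is essentially add/edit/delete/print over a `std::map<std::string, std::string>` (the stored `m_size` is never enforced). For comparison, a hedged sketch of the same operations over a plain Python dict (insertion-ordered rather than key-sorted like `std::map`):

```python
# Sketch: the same add / edit / delete / print operations the C++
# ContactBook exposes, expressed over a Python dict.
contacts = {}
contacts["Alice"] = "555-0100"         # EnterValue
contacts["Alice"] = "555-0199"         # editContact (overwrite)
contacts.pop("Alice", None)            # deleteContact (no error if absent)
for name, number in contacts.items():  # printContactBook
    print(f"{name} : {number}")
```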
{
"alpha_fraction": 0.6881720423698425,
"alphanum_fraction": 0.698924720287323,
"avg_line_length": 35.74074172973633,
"blob_id": "1e8b1c6b56ce0be53973c12cce71cb2d29507695",
"content_id": "23692c4214383d0e2ab7df060754fefdc45dd60a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1023,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 27,
"path": "/xkcdcomicdwnldr.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup;\r\nimport requests;\r\nimport urllib2;\r\nmain_url=\"http://www.xkcd.com\";\r\nprint \"xkcd, sometimes styled XKCD, is a webcomic created by Randall Munroe. The comic's tagline describes it as 'A webcomic of romance, sarcasm, math, and language'\"\r\nprint \"The issue number ranges from 1 to 1768.New comics are added every three days\";\r\nurlInput=raw_input(\"Enter the issue number of xkcd you want to download\");\r\nmod_url=main_url+\"/\"+urlInput+\"/\";\r\nreq=requests.get(mod_url);\r\nif(req.status_code==404):\r\n\tprint \"Oops ,try reloading the page!!\"\r\nscrp=req.text;\r\nsoup=BeautifulSoup(scrp,\"html.parser\");\r\nmySoup=soup.find_all('div',{\"id\":\"comic\"});\r\nval=str(mySoup);\r\nnewS=BeautifulSoup(val,\"html.parser\");\r\nprint \"comic description\"\r\nfor i in newS.find_all('img'):\r\n print i['title'];\r\n getVal=i['src'];\r\n#print getVal;\r\nreqObj=urllib2.urlopen(\"http:\"+getVal);\r\nmyFileHand=open(\"xkcd %s.png\" %(urlInput),\"wb\");\r\nmyFileHand.write(reqObj.read());\r\nmyFileHand.close();\r\n#print mySoup;\r\n#coded by Sridhar Cheebu ;-}\r\n\r\n\r\n"
},
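The downloader above is Python 2 (`print` statements, `raw_input`, `urllib2`) and scrapes the comic `<div>` out of the HTML. A hedged Python 3 sketch of the same task; it uses xkcd's documented JSON endpoint (`/{n}/info.0.json`) instead of scraping, which is a substitution, not part of the original record:

```python
# Python 3 sketch: fetch one xkcd comic via the site's JSON API
# (https://xkcd.com/json.html) instead of scraping the HTML.
import requests

issue = input("Enter the issue number of xkcd you want to download: ")
meta = requests.get(f"https://xkcd.com/{issue}/info.0.json")
meta.raise_for_status()                 # 404 -> no such issue
info = meta.json()

print(info["title"], "-", info["alt"])  # comic title and hover text
img = requests.get(info["img"])
with open(f"xkcd {issue}.png", "wb") as fh:
    fh.write(img.content)
```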
{
"alpha_fraction": 0.713798999786377,
"alphanum_fraction": 0.7223168611526489,
"avg_line_length": 37.13333511352539,
"blob_id": "fca5fc3b828377a4fc28aeaf918d21aaf701d48f",
"content_id": "0c91a0ad2c29becc6e14b4564688980e67fee348",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 587,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/MachLearningandDataAnalysis/DigitRecog/digitanalysis.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import pandas as pd;\r\nimport numpy as np;\r\nfrom sklearn.ensemble import RandomForestClassifier\r\ndigitDf=pd.read_csv('C:\\Kaggle\\\\DigitRecog\\\\train.csv');\r\nsubDf=pd.read_csv('C:\\Kaggle\\\\DigitRecog\\\\sample_submission.csv')\r\n#print digitDf.head();\r\nX_test=digitDf[[0]].values.ravel();\r\nX_train=digitDf.iloc[:,1:].values;\r\ntestDf=pd.read_csv('C:\\Kaggle\\\\DigitRecog\\\\test.csv');\r\nrf = RandomForestClassifier(n_estimators=100)\r\nrf.fit(X_train, X_test)\r\npred = rf.predict(testDf);\r\n#print pred;\r\nmySol=pd.DataFrame({'Label':np.array(pred)}, index=subDf.ImageId);\r\nmySol.to_csv('digitSol.csv');\r\n"
},
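In the record above, the variable named `X_test` actually holds the labels (column 0 of train.csv) and is passed as `y` to `rf.fit`, so the script works but the names are swapped. A hedged sketch with conventional naming (paths kept from the original):

```python
# Sketch: same RandomForest digit classifier with conventional X/y naming.
# In the original, "X_test" is really y_train (the label column).
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

train = pd.read_csv('C:\\Kaggle\\DigitRecog\\train.csv')
y_train = train.iloc[:, 0].values   # label column
X_train = train.iloc[:, 1:].values  # pixel columns
X_test = pd.read_csv('C:\\Kaggle\\DigitRecog\\test.csv')

rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
pred = rf.predict(X_test)
```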
{
"alpha_fraction": 0.6874194741249084,
"alphanum_fraction": 0.6904250979423523,
"avg_line_length": 36.21311569213867,
"blob_id": "69eef8fb9d1d4d0860d66e50ad970ae4476c091b",
"content_id": "b01007c430d7b36ce17ddc8ed3fd2c2ca054b3fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2329,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 61,
"path": "/MachLearningandDataAnalysis/Titanic/titanic.py",
"repo_name": "sridharRavi/CheebsRepo",
"src_encoding": "UTF-8",
"text": "import pandas as pd;\r\nimport matplotlib.pyplot as plt;\r\nimport seaborn as sns\r\nfrom sklearn.linear_model import LogisticRegression;\r\nimport numpy as np;\r\ntitantrain=pd.read_csv('C:\\Kaggle\\\\train.csv');\r\ntitantest=pd.read_csv('C:\\Kaggle\\\\test.csv');\r\nfinRes=pd.read_csv('C:\\Kaggle\\\\gender_submission.csv')\r\nnewdf=pd.DataFrame();\r\nClassdf=pd.DataFrame();\r\nAgedf=pd.DataFrame();\r\nFamdf=pd.DataFrame();\r\nfinTrain=pd.DataFrame();\r\nfinTest=pd.DataFrame();\r\nnewdf['sex']=titantrain['Sex'];\r\nfinTrain['survived']=titantrain['Survived'];\r\nfinTest['sex']=titantest['Sex'];\r\nfinTest['age']=titantest['Age'];\r\nfinTest['pclass']=titantest['Pclass'];\r\nfinTest['sibsp']=titantest['SibSp'];\r\nfinTest['parch']=titantest['Parch'];\r\nnewdf['sibsp']=titantest['SibSp'];\r\nnewdf['sex'].replace('female',0,inplace=True);\r\nnewdf['sex'].replace('male',1,inplace=True);\r\nfinTest['sex'].replace('female',0,inplace=True);\r\nfinTest['sex'].replace('male',1,inplace=True);\r\nnewdf['pclass']=titantrain['Pclass'];\r\nnewdf['sibsp']=titantrain['SibSp'];\r\nnewdf['age']=titantrain['Age'];\r\nnewdf['parch']=titantrain['Parch'];\r\nfinTest['age']=finTest['age'].fillna(finTest['age'].mean());\r\nnewdf['age']=newdf['age'].fillna(newdf['age'].mean());\r\nX_train=newdf[['age','sex','pclass','sibsp','parch']].values;\r\nY_train=finTrain.survived.values;\r\nX_test=finTest[['age','sex','pclass','sibsp','parch']].values;\r\nlg=LogisticRegression();\r\nlg.fit(X_train, Y_train);\r\nfinVal=lg.predict(X_test);\r\n#print finVal;\r\nmySol=pd.DataFrame({'Survived':np.array(finVal)}, index=titantest.PassengerId);\r\nmySol.to_csv('finalSol.csv');\r\n\r\n'''\r\n\r\nClassdf['pclass']=titantrain['Pclass'];\r\nClassdf['survived']=titantrain['Survived'];\r\nAgedf['survived']=titantrain['Survived'];\r\nAgedf['age']=titantrain['Age'];\r\nFamdf['survived']=titantrain['Survived'];\r\nFamdf['sibsp']=titantrain['SibSp'];\r\nAgedf['age']=Agedf['age'].fillna(Agedf['age'].mean());\r\nprint Agedf.describe();\r\nprint Famdf.describe();\r\nprint Classdf.describe();\r\nAgedf.plot(kind='scatter', x='age', y='survived', figsize=(12,8));\r\nsns.lmplot(x='age', y='survived', data=Agedf,title=\"age-survival\");\r\nsns.lmplot(x='pclass', y='survived', data=Classdf,title=\"Class-survival\");\r\nsns.lmplot(x='sex',y='survived',data=newdf,title=\"gender-survival\");\r\nsns.lmplot(x='sibsp',y='survived',data=Famdf,\"relations-survival\");\r\nplt.show();\r\n'''"
}
] | 25 |
NHLBI-BCB/cfCloud | https://github.com/NHLBI-BCB/cfCloud | bb0cff2d2f62cf806fd4b3a04b3caf84c70dad02 | 6b683daf43b2d51c173682d40d4b34f68b1419be | 169389d0ba20a7e428739f72a47c830f8cde0a66 | refs/heads/master | 2021-02-18T20:13:47.146428 | 2020-08-17T02:52:02 | 2020-08-17T02:52:02 | 245,231,835 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5462686419487,
"alphanum_fraction": 0.5641791224479675,
"avg_line_length": 18.235294342041016,
"blob_id": "fc7a7eb747750298a790d422b319a66cfd3e24ad",
"content_id": "a5841ed58c0ddcaea91e20996b90291da45816f5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 335,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 17,
"path": "/run_cluster.sh",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\nresPath=\"results\"\n\nsnakemake -pr -k \\\n -d $resPath \\\n -j 120 \\\n --latency-wait 120 \\\n --cluster-config cluster_config.yaml \\\n --cluster \"sbatch \\\n --cpus-per-task={cluster.cpus} \\\n --mem={cluster.mem} \\\n --time={cluster.time} \\\n -o {cluster.out} \\\n -e {cluster.out} \\\n -J {cluster.jobname}\"\n \n \n"
},
{
"alpha_fraction": 0.6868845820426941,
"alphanum_fraction": 0.7172114849090576,
"avg_line_length": 41.29999923706055,
"blob_id": "cff1a0c779dfaadfcd5d7317f3665f1ba6a55128",
"content_id": "7b5d6b3dd0c9a9fca9efe349af120cfb960aee1d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2541,
"license_type": "permissive",
"max_line_length": 241,
"num_lines": 60,
"path": "/docs/Amazon_Cloud.md",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "### Amazon Cloud\n\n<p align=\"center\">\n <img src=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/images/overview_getting_started.png\" width=\"409\" height=\"338\" title=\"AWS\">\n</p>\n \n\n\n#### 1. Tutorial: Getting started with Amazon EC2 Linux instances\n\n<b>Step-by-step:</b><br />\n [Step 1: Prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/get-set-up-for-amazon-ec2.html)<br />\n [Step 2: Launch an instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/launching-instance.html)<br />\n [Step 3: Connect to your instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstances.html)<br />\n [Step 4: Clean up your instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html#ec2-clean-up-your-instance)<br />\n\n<br />\n\n#### 2. Choose the cfCloud Amazon Machine Image (AMI)\n\nTutorail: <b>[AWS EC2: Create EC2 Instance (Linux)](https://medium.com/@GalarnykMichael/aws-ec2-part-1-creating-ec2-instance-9d7f8368f78a)</b><br />\n\n 2.1. On the <b>[Choose an Amazon Machine Image (AMI)](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:)</b> page, search for <b>cfCloud</b> AMI.<br />\n\n 2.2. Check the Root device type listed for each AMI. Notice which AMIs are the type that you need, <br /> either ebs (backed by Amazon EBS) or instance-store (backed by instance store). <br />\n\n 2.3. Check the Virtualization type listed for each AMI. Notice which AMIs are the type that you need,<br /> either hvm or paravirtual. <br />\n\n 2.4. Choose an AMI that meets your needs, and then choose Select.<br />\n\n<br />\n\n#### 3. Connect (login) to AMI and activate cfCloud environment\n\nTutorial: [AWS EC2: Connect to Linux Instance using SSH](https://medium.com/@GalarnykMichael/aws-ec2-part-2-ssh-into-ec2-instance-c7879d47b6b2)\n\n```\n# Example SSH:\nssh -i \"cf_Cloud.pem\" [email protected]\n```\n```\nconda info --envs\nconda activate cfcloud\n```\n\n<br />\n\n#### 4. Download resources\n\n- Bowtie2 Reference Genome <br />\nCan be downloaded from [Illumina iGenomes](https://support.illumina.com/sequencing/sequencing_software/igenome.html)\n\n```\n# e.g. hg19:\ncd ~/cfCloud/resources\nwget http://igenomes.illumina.com.s3-website-us-east-1.amazonaws.com/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz\ntar -xzf Homo_sapiens_UCSC_hg19.tar.gz\n```\n\n<br />\n\n"
},
{
"alpha_fraction": 0.6720154285430908,
"alphanum_fraction": 0.6925545334815979,
"avg_line_length": 24.064516067504883,
"blob_id": "5db85544b5ed5c902a952c3c2bb06d5c6c35ed23",
"content_id": "e9774030e9ad1e94903e125ecbb8b6e65957965f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1558,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 62,
"path": "/docs/Local.md",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "### Local Installation\n\n#### 1. Tool Prerequisites\n\n- [python=3.7](https://www.python.org/downloads/) \n- [samtools and bcftools](https://samtools.github.io/bcftools/howtos/install.html) \n```\n git clone --branch=develop git://github.com/samtools/htslib.git\n git clone --branch=develop git://github.com/samtools/bcftools.git\n git clone --branch=develop git://github.com/samtools/samtools.git\n cd bcftools; make\n cd ../samtools; make \n```\n\n- [PyVCF](https://pypi.org/project/PyVCF/)\n```\npip3 install PyVCF\n```\n- [pandas](https://pandas.pydata.org/)\n```\npip3 install pandas\n```\n- [snakemake](https://snakemake.readthedocs.io/en/stable/index.html)\n```\npip3 install snakemake\n```\n<br />\n\n#### 2. Clone cfCloud\n```\ngit clone https://github.com/NHLBI-BCB/cfCloud.git\ncd cfCloud\n```\n<br />\n\n#### 3. Install resources\n\n- Bowtie2 Reference Genome <br />\nCan be downloaded from [Illumina iGenomes](https://support.illumina.com/sequencing/sequencing_software/igenome.html)\n\n```\n# e.g. hg19:\ncd resources\nwget http://igenomes.illumina.com.s3-website-us-east-1.amazonaws.com/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz\ntar -xzf Homo_sapiens_UCSC_hg19.tar.gz\n```\n<br />\n\n- SNPs list <br />\nCan be downloaded from [Illumina Genotyping Kits](https://www.illumina.com/products/by-type/microarray-kits.html)\n```\n# e.g. Infinium Omni2.5:\ncd reources\nunzip InfiniumOmni25.hg19.snps.cleaned.zip\n```\n\n<br /><br />\n\n<hr size=5 style=\"display: block; height: 3px;\n border: 0; border-top: 1px solid #ccc;\n margin: 1em 0; padding: 0;\" />\n<br /><br />\n\n\n\n\n"
},
{
"alpha_fraction": 0.7638888955116272,
"alphanum_fraction": 0.7712418437004089,
"avg_line_length": 56.904762268066406,
"blob_id": "fbf1f2d158187b1f51eeaedd6ded6440cf981271",
"content_id": "27319d22f38d3196f1be1157ede01987e0f2f20b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1224,
"license_type": "permissive",
"max_line_length": 853,
"num_lines": 21,
"path": "/README.md",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "# cfCloud: A Cloud-based Workflow for Cell-Free DNA Data Analysis\n\nCell-free DNA (cfDNA) is double stranded, non-randomly fragmented short (<200bp) DNA molecules circulating in the blood stream as a result of apoptosis, necrosis or active secretion from cells. The amount of cfDNA in blood increases dramatically with cellular injury or necrosis and therefore, can be used as a biomarker as a non-invasive prenatal testing, tumor-derived DNA in plasma, or monitoring the graft health in an organ transplantation. The Genome Transplant Dynamics, a rigorous and highly reproducible universal NGS based method, has been commonly used to utilizes genotype information differences in recipient and donor to quantify donor derived cell-free DNA percent. Here we implement a fully automated Snakemake pipeline on-premise as well as a Cloud implementation to systematize the quantification of donor derived cell-free DNA amount.\n\n## cfCloud Workflow \n\n<p align=\"center\">\n <img src=\"figure/cfCloud.png\" width=\"831\" height=\"720\" title=\"cfCloud workflow\">\n</p>\n\n<br /><br />\n\n## Installation and Usage\n\n### [Local](docs/Local.md)\n\n### [Conda Environment](docs/Conda.md)\n\n### [Amazon Cloud](docs/Amazon_Cloud.md)\n\n<br /><br />\n\n\n\n\n\n\n \n"
},
{
"alpha_fraction": 0.6939393877983093,
"alphanum_fraction": 0.723737359046936,
"avg_line_length": 25.052631378173828,
"blob_id": "8ac441a5cf82e701549f686348e4f24c4f8148c8",
"content_id": "544c93abaeecb5f633e062bb4648b8ad40db0f1b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1980,
"license_type": "permissive",
"max_line_length": 187,
"num_lines": 76,
"path": "/docs/Conda.md",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "### Conda Environment\n\n#### 1. Download and install conda\n\nDownload an an installer for Python v3. (cfCloud requires python=3.7)\n\ndistribution | instructions\n---- | ----\n[Anaconda](https://www.anaconda.com/products/individual#download-section) | Current version \"Python 3.7 version\"\n[Miniconda](https://repo.anaconda.com/miniconda/) | Download the `Miniconda3-latest-*` installer based on your operating system\n\nRun the installer file. Depends on. your OS. It may be an executable installer or run from the command-line: `bash INSTALLER.sh` . Please see the instruction provided with the installer.\n```\n# Example:\nwget https://repo.anaconda.com/archive/Anaconda3-2020.02-Linux-x86_64.sh\nbash Anaconda3-2020.02-Linux-x86_64.sh\n```\n\n\n\n#### 2. Create a new environment \n```\nconda create --name cfcloud python=3.6\nconda activate cfcloud\nconda info --envs\n```\n\n\n#### 3. Tool Prerequisites\n```shell\nconda install -c anaconda pandas -y\nconda install -c bioconda snakemake -y\nconda install -c bioconda bcftools -y\nconda install -c bioconda samtools -y\nconda install -c bioconda pyvcf -y\nconda list\nsudo apt install unzip\n```\n\n#### 4. Clone cfCloud\n```\ngit clone https://github.com/NHLBI-BCB/cfCloud.git\ncd cfCloud\n```\n<br />\n\n#### 5. Install resources\n\n- Bowtie2 Reference Genome <br />\nCan be downloaded from [Illumina iGenomes](https://support.illumina.com/sequencing/sequencing_software/igenome.html)\n\n```\n# e.g. hg19:\ncd resources\nwget http://igenomes.illumina.com.s3-website-us-east-1.amazonaws.com/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz\ntar -xzf Homo_sapiens_UCSC_hg19.tar.gz\n```\n<br />\n\n- SNPs list <br />\nCan be downloaded from [Illumina Genotyping Kits](https://www.illumina.com/products/by-type/microarray-kits.html)\n```\n# e.g. Infinium Omni2.5:\ncd resurces\nunzip InfiniumOmni25.hg19.snps.cleaned.zip\n```\n\n<br /><br />\n\n\n\n\n<hr size=5 style=\"display: block; height: 3px;\n border: 0; border-top: 1px solid #ccc;\n margin: 1em 0; padding: 0;\" />\n<br /><br />\n"
},
{
"alpha_fraction": 0.6630434989929199,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 13.5,
"blob_id": "f99e572b83803165ecdc515c2c60427642987eb8",
"content_id": "46f9d221e2b7130193ebd3533606e09516f040a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 6,
"path": "/run_local.sh",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\nresPath=\"results\"\n\nsnakemake -pr -k -d $resPath --configfile config.yaml \n\n\n\n\n"
},
{
"alpha_fraction": 0.608849048614502,
"alphanum_fraction": 0.6205211877822876,
"avg_line_length": 24.22602653503418,
"blob_id": "32cd8be545e71ca051cba2f8e69c41ee274de51a",
"content_id": "3d8350475d6d364f3450686c742634707121a7ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3684,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 146,
"path": "/Snakefile",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom collections import defaultdict\nimport vcf\n\n\n\nSAMPLES = list(config['samples'].keys())\nos.makedirs(\"logs\",exist_ok=True)\n\n\nrule all:\n\tinput:\n\t\texpand(\"bam/{sample}.bam\",sample=SAMPLES),\n\t\texpand(\"snps/{sample}.snps\",sample=SAMPLES),\n\t\texpand(\"pileup/{sample}.pileup\",sample=SAMPLES),\n\t\texpand(\"calls/{sample}.calls\",sample=SAMPLES),\n\t\t\"final.tsv\"\n\n\t\t\n\n\t\t\nrule bowtie:\n\tinput:\n\t\tconfig['fastqPath']+\"/{sample}.fastq.gz\"\n\toutput:\n\t\t\"bam/{sample}.bam\"\n\tparams:\n\t\tthreads=4,\n\t\tindex=config['bowtie_index'],\n\t\tlog=\"logs/bowtie_{sample}.out\",\n\t\tmem=\"4G\"\n\tshell:\n\t\t\"bowtie2 -p {params.threads} -x {params.index} -U {input} \\\n\t\t| samtools sort -m {params.mem} -@ {params.threads} | samtools markdup - {output} \"\n\t\t\t\t\n\n\t\t\n\nrule grep_snps_from_vcf:\n\tinput:\n\t\tconfig['GenoVCF'],\n\toutput:\n\t\t\"snps/{sample}.vcf\",\n\tparams:\n\t\tlog=\"logs/grep_snps_{sample}.out\",\n\t\trec_id=lambda wildcards: config['samples'][wildcards.sample]['rec_id'],\n\t\tdonor_id=lambda wildcards: config['samples'][wildcards.sample]['donor_id'],\n\tshell:\n\t\t\"bcftools query -f '%CHROM,%POS,%ID,%REF,%ALT[,%GT]\\n' -s {params.rec_id},{params.donor_id} {input} > {output}\t\"\n\n\n\n\t\t\nrule make_snps:\n\tinput:\n\t\t\"snps/{sample}.vcf\",\n\toutput:\n\t\t\"snps/{sample}.snps\",\n\tparams:\n\t\tlog=\"logs/make_snps_{sample}.out\",\n\t\trec_id=lambda wildcards: config['samples'][wildcards.sample]['rec_id'],\n\t\tdonor_id=lambda wildcards: config['samples'][wildcards.sample]['donor_id'],\n\trun:\n\t\tdf=pd.read_csv(input[0],header=None)\n\n\t\tchr_list=['chr%s' %x for x in list(range(1,23))+['X']]\n\t\t\n\t\tdf.columns=['chr','pos','rsid','ref','alt','GT_rec','GT_donor']\n\t\tdf=df.loc[ (df['GT_rec']==\"0/0\") | (df['GT_rec']==\"1/1\") ,: ]\n\t\tdf=df.loc[ (df['GT_donor']==\"0/0\") | (df['GT_donor']==\"1/1\") ,: ]\n\n\t\tdf.loc[df['GT_rec']==\"0/0\",'rec_base']=df.loc[df['GT_rec']==\"0/0\",'ref']\n\t\tdf.loc[df['GT_rec']==\"1/1\",'rec_base']=df.loc[df['GT_rec']==\"1/1\",'alt']\n\n\t\tdf.loc[df['GT_donor']==\"0/0\",'donor_base']=df.loc[df['GT_donor']==\"0/0\",'ref']\n\t\tdf.loc[df['GT_donor']==\"1/1\",'donor_base']=df.loc[df['GT_donor']==\"1/1\",'alt']\n\n\t\tdf=df.loc[df['chr'].isin(chr_list),:]\n\t\tdf.to_csv(output[0],sep='\\t',index=False)\n\t\tshell(\"rm {input} \")\n\n\t\t\n\t\t\nrule mileup:\n\tinput:\n\t\tbam=\"bam/{sample}.bam\",\n\t\tsnps=\"snps/{sample}.snps\"\n\toutput:\n\t\t\"pileup/{sample}.pileup\",\n\tparams:\n\t\tlog=\"logs/mpileup_{sample}.out\",\n\t\tref=config['human_fasta'],\n\t\tsamtools018=\"/home/tunci/tools/samtools-0.1.18/samtools\"\n\tshell:\n\t\t\"awk 'NR >1 {{print $1,$2}}' {input.snps} > {output}.pos.tmp ;\"\n\t\t\"samtools mpileup -O -s -f {params.ref} -l {output}.pos.tmp {input.bam} > {output} ;\"\n\t\t\"rm {output}.pos.tmp \"\n\n\n\t\n\nrule call_cfdna:\n\tinput:\n\t\tpileup=\"pileup/{sample}.pileup\",\n\t\tsnps=\"snps/{sample}.snps\",\t\t\n\toutput:\n\t\t\"calls/{sample}.calls\",\n\tparams:\n\t\tlog=\"logs/make_snps_{sample}.out\",\n\t\trec_id=lambda wildcards: config['samples'][wildcards.sample]['rec_id'],\n\t\tdonor_id=lambda wildcards: config['samples'][wildcards.sample]['donor_id'],\n\t\tbq_filter=config['bq_filter'],\n\t\tmq_filter=config['mq_filter'],\n\t\tref_snps=config['ref_snp'],\n\t\tfilter_snps=config['filter_snps']\n\tshell:\n\t\t\"python {config[binPath]}/call_cfdna.py \"\n\t\t\"--snps {input.snps} \"\n\t\t\"--rec-id {params.rec_id} 
\"\n\t\t\"--sampleid {wildcards.sample} \"\t\t\n\t\t\"--donor-id {params.donor_id} \"\n\t\t\"--pileup {input.pileup} \"\n\t\t\"--bq {params.bq_filter} \"\n\t\t\"--mq {params.mq_filter} \"\n\t\t\"--ref_snps {params.ref_snps} \"\n\t\t\"--filter_snps {params.filter_snps} \"\t\t\n\t\t\"--out {output} \"\n\n\n\n\n\t\t\nrule final_report:\n\tinput:\n\t\texpand(\"calls/{sample}.calls\",sample=SAMPLES)\n\toutput:\n\t\t\"final.tsv\"\n\trun:\n\t\tdfm=None\n\t\tfor f in input:\n\t\t\tdf=pd.read_csv(f,sep='\\t')\n\t\t\tif dfm is None:\n\t\t\t\tdfm=df\n\t\t\t\tcontinue\n\t\t\tdfm=dfm.append(df)\n\t\tdfm.to_csv(output[0],sep='\\t')\n\n"
},
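The heart of `rule make_snps` above is a genotype filter: keep only SNPs where both recipient and donor are homozygous (0/0 or 1/1), then resolve each genotype to its REF or ALT base. A toy sketch of that filter on a two-row frame (column names from the rule; the data is invented for illustration):

```python
# Toy sketch of rule make_snps' genotype filter: keep homozygous calls
# (0/0 or 1/1) for both individuals, then resolve each to REF or ALT.
import pandas as pd

df = pd.DataFrame({
    'chr': ['chr1', 'chr1'], 'pos': [100, 200],
    'ref': ['A', 'C'], 'alt': ['G', 'T'],
    'GT_rec': ['0/0', '0/1'],    # second row is heterozygous -> dropped
    'GT_donor': ['1/1', '1/1'],
})
df = df[df['GT_rec'].isin(['0/0', '1/1']) & df['GT_donor'].isin(['0/0', '1/1'])]
df['rec_base'] = df['ref'].where(df['GT_rec'] == '0/0', df['alt'])
df['donor_base'] = df['ref'].where(df['GT_donor'] == '0/0', df['alt'])
print(df[['chr', 'pos', 'rec_base', 'donor_base']])
# only chr1:100 survives -> rec_base A (0/0), donor_base G (1/1)
```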
{
"alpha_fraction": 0.6359872817993164,
"alphanum_fraction": 0.6477707028388977,
"avg_line_length": 21.39285659790039,
"blob_id": "807418edf55a310b7b56ee6cc805d28bb8e9c205",
"content_id": "aec65c4c157518ef00ac083ce974574c2665e287",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3140,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 140,
"path": "/bin/call_cfdna.py",
"repo_name": "NHLBI-BCB/cfCloud",
"src_encoding": "UTF-8",
"text": "\n\n\nimport csv\nimport sys\nimport argparse\nfrom collections import defaultdict\nimport re\nimport pandas as pd\nimport vcf\n\n\ndef get_catg(obs_snp,rec,donor):\n\t\n\tif rec==donor:\n\t\tif obs_snp==rec:\n\t\t\treturn \"bg_rec\"\n\t\telse:\n\t\t\treturn \"bg_err\"\n\telse:\n\t\tif obs_snp==rec:\n\t\t\treturn \"homo_rec\"\n\t\telif obs_snp==donor:\n\t\t\treturn \"homo_donor\"\n\t\telse:\n\t\t\treturn \"homo_err\"\n\treturn None\n\n \t\nnt_list=['A','C','G','T']\n\n\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('--snps',dest='snps',help='SNP file')\nparser.add_argument('--donor-id',dest='donor_id',help='donor ID')\nparser.add_argument('--rec-id',dest='rec_id',help='recipient ID')\nparser.add_argument('--sampleid',dest='sampleid',help='sample ID')\nparser.add_argument('--pileup',dest='pileup_file',help='input pileup format file')\nparser.add_argument('--bq',dest='bq',type=int,default=26,help='base quality Phred [26] Ascii +33')\nparser.add_argument('--mq',dest='mapq',type=int,default=35,help='mapQ filter for alignment Phred [35]')\nparser.add_argument('--filter_snps',dest='filter_snps',type=int,help='filter SNPs based on reference SNPs')\nparser.add_argument('--ref_snps',dest='ref_snps',help='keep SNPs if they are in this file ')\nparser.add_argument('--out',dest='out',help='output')\n\n\nargs = parser.parse_args()\n\n\ndonor_id=args.donor_id\nrec_id=args.rec_id\n\npileup_file=args.pileup_file\n\nbq_filter=args.bq\nmq_filter=args.mapq\nout=args.out\n\n\n\nif args.filter_snps:\n\tdct_ref_snps=defaultdict(dict)\n\tfor r in csv.reader(open(args.ref_snps),delimiter='\\t'):\n\t\tk='%s-%s' %(r[0],r[2])\n\t\tdct_ref_snps[k]=1\n\n\ndct_geno=defaultdict(dict)\nfor r in csv.DictReader(open(args.snps),delimiter='\\t',quoting=csv.QUOTE_NONE):\n\tk='%s-%s' %(r['chr'],r['pos'])\n\tdct_geno[k]={'rec_base':r['rec_base'],'donor_base':r['donor_base']}\n\n\n\t\t\ndd=defaultdict(int)\nfor tag in ['homo_rec','homo_donor','homo_err','bg_rec','bg_err']:\n\tdd[tag]=0\n\n\n_re_del=re.compile('\\^.|\\$')\n\nfor r in csv.reader(open(pileup_file),delimiter='\\t',quoting=csv.QUOTE_NONE):\n\tif re.search('[*+-]',r[4]):\n\t\tcontinue\n\n\tk='%s-%s' %(r[0],r[1])\n\n\tif k not in dct_geno:\n\t\tcontinue\n\n\tif args.filter_snps:\n\t\tif k not in dct_ref_snps:\n\t\t\tcontinue\n\n\n\tr[4]=_re_del.sub('',r[4])\n\n\tfor obs_snp,bq,mq,seq_pos in zip(list(r[4]),list(r[5]),list(r[6]),r[7].split(',')):\n\n\t\tobs_snp=obs_snp.upper()\n\n\t\tif obs_snp==\"N\":\n\t\t\tcontinue\t\t\n\t\t#if obs_snp==\"$\":\n\t\t#\tcontinue\n\n\t\tif obs_snp==\".\" or obs_snp==\",\":\n\t\t\tobs_snp=r[2].upper()\n\n\t\tif int(ord(bq)-33) < bq_filter:\n\t\t\tcontinue\n\n\t\tif int(ord(mq)-33) < mq_filter:\n\t\t\tcontinue\n\n\t\tcatg=get_catg(obs_snp,dct_geno[k]['rec_base'],dct_geno[k]['donor_base'])\n\t\tdd[catg]=dd[catg]+1\n\t\t\n\n\n\t\t\ndd['total_counts']=dd['homo_donor']+dd['homo_rec']\n\ntry:\t\t\n\tdd['%cfdna']=dd['homo_donor']/(dd['homo_rec']+dd['homo_donor'])*100\nexcept:\n\tdd['%cfdna']=0\n\ntry:\n\tdd['%error']=dd['bg_err']/dd['bg_rec']*100\nexcept:\n\tdd['%error']=0\n\n\n\t\n\nwith open(out,'w') as fout:\n\tfout.write('sampleID\\tRecID\\tDonorID\\tTotalCounts\\tDonorCounts\\t%cfdna\\t%bgError\\n')\n\tfout.write('%s\\t%s\\t%s\\t' %(args.sampleid,rec_id,donor_id))\n\t\n\tfor tag in ['total_counts','homo_donor','%cfdna','%error']:\n\t\tfout.write('%s\\t' %dd[tag])\n\tfout.write('\\n')\n\t\n"
}
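The quantification at the end of the script is simple counting: at informative SNPs (recipient and donor homozygous for different alleles), each read is binned as `homo_rec`, `homo_donor`, or `homo_err`, and `%cfdna = homo_donor / (homo_rec + homo_donor) * 100`. A worked sketch of that arithmetic with invented counts:

```python
# Worked sketch of the donor-fraction arithmetic from call_cfdna.py.
# At an informative SNP (rec A/A, donor G/G), each read's base is binned.
reads = ['A'] * 970 + ['G'] * 25 + ['T'] * 5   # toy pileup at one SNP
rec_base, donor_base = 'A', 'G'

homo_rec = sum(b == rec_base for b in reads)      # 970
homo_donor = sum(b == donor_base for b in reads)  # 25

pct_cfdna = homo_donor / (homo_rec + homo_donor) * 100
print(f"%cfdna = {pct_cfdna:.2f}")  # 25 / 995 * 100, about 2.51
```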
] | 8 |
mdberkey/Discord_Convo_Bots | https://github.com/mdberkey/Discord_Convo_Bots | ae989fb582fc73808935fab640387393d24e393a | 221d1c22f8233ff289388256f3bbf8edf69579e1 | 55e63b688ed4eff83ee53d1ef1422096ddd51e35 | refs/heads/master | 2023-03-02T11:40:24.909356 | 2021-02-11T04:33:44 | 2021-02-11T04:33:44 | 336,653,540 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7576642632484436,
"alphanum_fraction": 0.7620437741279602,
"avg_line_length": 51.69230651855469,
"blob_id": "b1e605b97848d9113fa13cae9bb5f802f81d6ffb",
"content_id": "305160ad58b139c86bdc0a762b8c0b7d72a727a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1370,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 26,
"path": "/README.md",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "Discord Conversational Bots (2019)\n\nPersonal project of mine. These bots use the Discord.py API to have humerous conversations with each other.\n\nSETUP:\nThey take in data: SourceWords.txt, which can contain any block of text.\nProduce a markov chain structure of every word\n\nUSE:\nStarting up, one bot initiates the conversation with a randomly generated sentences/phrases from the markov chain.\nIt then converts this to an mp3 file using FFMPEG and TTS.\nNow using the discdord API, it sends the text in a desired text channel while at the same time playing the mp3 file in the voice chat.\nThe other bots detects this, generates its own setences/phrases (with bias towards the topic at hand) and the cycle repeats indefinitley.\n\nThe bots also occasionally:\n- send random links from their speech from google\n- react to each other's text in the text channel with emoji reactions (discord feature)\n- appear to be typing before text is sent and while they are \"speaking\" in the voice chat.\n\nThis little project was very fun and a good first personal python project for me. Depending on what text you feed it, it can produce VERY funny results and\n near endless entertainment. That being said, I also gained a few skills from the project like:\n - Learning/using an API\n - Using Python\n - Bug fixing\n - Increasing efficiency\n - etc.\n"
},
{
"alpha_fraction": 0.6230283975601196,
"alphanum_fraction": 0.6361724734306335,
"avg_line_length": 29.677419662475586,
"blob_id": "9a8f6068e5b04cbf89011ebb8040dbb86b7cdce9",
"content_id": "c093dcfa85eaed8c5cd0c4564bc3b716a5c45078",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1902,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 62,
"path": "/MarkovGeneration.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "import random\nimport ast\n\n# reads dictionary.txt\ndictionaryFile = open(\"dictionary.txt\", \"r\")\ncontents = dictionaryFile.read()\ndictionary = ast.literal_eval(contents)\ndictionaryFile.close()\n\n\n# generates a unique string using the markov chain values\nclass generation:\n def main(self):\n lastWord = '3u9fh27d31r' # random string unlikely to appear in dictionary\n finalAnswer = ''\n counter = 0\n randNumA = random.randint(1, 4) # set to about 1-4 sentences to output\n\n for i in range(1, 10000):\n newWord = getNextWord(lastWord, dictionary, self)\n if newWord.endswith('.') or newWord.endswith('!') or newWord.endswith('?'):\n counter += 1\n finalAnswer = finalAnswer + \" \" + newWord\n lastWord = newWord\n if counter >= randNumA:\n break\n return finalAnswer\n\n\ndef getNextWord(lastWord, dict, self):\n if lastWord not in dict:\n # selects a new starting word for a new sentence\n newWord = randomSelect(dict)\n return newWord\n else:\n # selects next word from the dictionary's list\n newWord = weightedSelect(lastWord, dict, self)\n return newWord\n\n\n# returns a random word from the dictionary\ndef randomSelect(dict):\n randInt = random.randint(0, len(dict) - 1)\n newWord = list(dict.keys())[randInt]\n return newWord\n\n\n# returns the next random word in the markov chain using weighted values\ndef weightedSelect(lastWord, dict, self):\n weightedList = []\n\n for word in dict[lastWord]:\n weightInt = dict[lastWord][word]\n\n # adds more weight to words that appear in self\n if word in self:\n weightInt += (weightInt * 10)\n\n for i in range(0, weightInt):\n weightedList.append(word)\n randInt = random.randint(0, len(weightedList) - 1)\n return weightedList[randInt]\n"
},
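`weightedSelect` above builds a list with each candidate repeated `weight` times and indexes it at random. Since Python 3.6, `random.choices` performs the same weighted draw without materializing that list; a hedged drop-in sketch (keeping the original's 10x extra weight for words already in `self`):

```python
# Sketch: weighted next-word pick via random.choices (Python 3.6+),
# equivalent to the repeat-and-index approach in weightedSelect.
import random

def weighted_select(last_word, dct, self_text):
    words = list(dct[last_word])
    # original does weightInt += weightInt * 10 for words found in self,
    # i.e. an 11x multiplier
    weights = [dct[last_word][w] * (11 if w in self_text else 1)
               for w in words]
    return random.choices(words, weights=weights, k=1)[0]
```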
{
"alpha_fraction": 0.6594982147216797,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 22.25,
"blob_id": "a758f88ebf51581ac27ea2558f228e1cd9066109",
"content_id": "75fac35513aa660864010fad7efbecd860bc00b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 12,
"path": "/TextToSpeech.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "from gtts import gTTS\n\n\n# returns tts file of text\ndef TTSus(text):\n tts = gTTS(text, lang='en-us') # female english American accent\n tts.save(\"audio.mp3\")\n\n\ndef TTSau(text):\n tts = gTTS(text, lang='en-au') # female english Australian accent\n tts.save(\"audio.mp3\")\n"
},
{
"alpha_fraction": 0.6512455344200134,
"alphanum_fraction": 0.6619216799736023,
"avg_line_length": 30.22222137451172,
"blob_id": "77f6d6a66f39ab2efa2098781498a780ebc55503",
"content_id": "571f3bad87ad8d86fe4c70a40bdfd78423db383d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 9,
"path": "/GoogleSearch.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "# returns the first link in a google search\ndef googleSearch(query):\n try:\n from googlesearch import search\n except ImportError:\n print(\"No module named 'google' found\")\n\n for result in search(query, tld=\"com\", num=1, stop=1, pause=2):\n return result\n"
},
{
"alpha_fraction": 0.668181836605072,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 24.882352828979492,
"blob_id": "89627e333ce5d311ccd50f1e108a16ce24be0f11",
"content_id": "582ad62442dd7fedaa8d39dc4c27408d050310dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 17,
"path": "/GetRandomEmoji.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "import ast\nimport random\n\n# reads emoticons.txt\nemoticonsFile = open(\"emoticons.txt\", \"r\")\ncontentsB = emoticonsFile.read()\nemoticons = ast.literal_eval(contentsB)\nemoticonsFile.close()\n\n\n# returns a random emoji from the string\ndef getRandomEmoji():\n self = 0\n randNum = random.randint(0, 279) # number of emojis in emoticons.txt\n for i in range(0, len(emoticons)):\n if randNum == i:\n return emoticons[i]\n"
},
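The loop above returns `None` whenever the hard-coded `randint(0, 279)` lands past the end of the list (and the unused `self = 0` does nothing). `random.choice` avoids both the magic number and the miss; a one-line sketch:

```python
# Sketch: uniform pick that can never fall off the end of the list.
import random

def get_random_emoji(emoticons):
    return random.choice(emoticons)  # IndexError only if the list is empty
```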
{
"alpha_fraction": 0.5976505279541016,
"alphanum_fraction": 0.6571218967437744,
"avg_line_length": 32.219512939453125,
"blob_id": "5e0552dfc8afa595a02f7f23c60e9fe4ff4c57da",
"content_id": "fcbc9b5f674bf83c254ee05b73c40b176c873028",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2724,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 82,
"path": "/DiscordConnection.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport random\n\nimport discord\nfrom discord.ext import commands\n\nfrom GetRandomEmoji import getRandomEmoji\nfrom GoogleSearch import googleSearch\nfrom MarkovGeneration import generation\nfrom TextToSpeech import TTSau\n\n# Gibs the kangaroo bot\nTOKEN = 'NzEzODYyODQxNjY4NzMwOTIw.XsmSlA.WMH33p-HEDf_SiR5ifW2krT1SjE'\n\nclient = commands.Bot(command_prefix='$')\nchannel = client.get_channel(339964184223809547) # text channel 716458739074465813\n\[email protected]\nasync def on_ready():\n await client.change_presence(status=discord.Status.online, activity=discord.Game(name='the didgeridoo'))\n print('client ready')\n\n\[email protected](pass_context=True)\nasync def start(ctx):\n vChannel = client.get_channel(339964184223809548) # voice channel 376441029752258562\n channel = client.get_channel(339964184223809547) # text channel 716458739074465813\n await vChannel.connect()\n await asyncio.sleep(3)\n await channel.send(\"Hello\")\n\n\[email protected](pass_context=True)\nasync def end(ctx):\n vGuild = ctx.guild\n voice_client = vGuild.voice_client\n await voice_client.disconnect()\n\n\[email protected]\nasync def on_message(message):\n\n if message.author == client.user: # ignores its own messages\n return\n elif message.content.startswith('$' or 'http'): # ignores commands and links\n await client.process_commands(message)\n return\n elif message.author.id == 716121158348439634: # user that bot responds to\n response = generation.main(message.content)\n channel = client.get_channel(716458739074465813) # 716458739074465813\n\n # random numbers for reactions and link probabilities\n randNumA = random.randint(1, 10)\n randNumB = random.randint(1, 10)\n\n # converts text response to voice that is played in the voice channel\n TTSau(response)\n vGuild = message.guild\n voice_client = vGuild.voice_client\n voice_client.play(discord.FFmpegPCMAudio('audio.mp3'), after=None)\n\n # adds typing appearance\n while voice_client.is_playing():\n async with channel.typing():\n await asyncio.sleep(1)\n await channel.send(response)\n\n # sends a related link about 20% of the time\n if randNumA > 7:\n link = googleSearch(response)\n if link:\n await channel.send(link)\n\n # reacts to the previous message with emoticons 40%\n if randNumB > 6:\n for i in range(random.randint(1, 5)):\n await message.add_reaction(getRandomEmoji())\n\n # allows other commands to be used\n await client.process_commands(message)\n\nclient.run(TOKEN)\n"
},
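The record above embeds the bot token as a string literal in the source. A common hardening, sketched here with a hypothetical variable name (`DISCORD_TOKEN` is not from the original), is to read it from the environment so it never lands in version control:

```python
# Sketch: load the Discord token from the environment instead of the source.
import os

TOKEN = os.environ["DISCORD_TOKEN"]  # fails fast (KeyError) if unset
```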
{
"alpha_fraction": 0.5711252689361572,
"alphanum_fraction": 0.5774946808815002,
"avg_line_length": 29.33333396911621,
"blob_id": "1eee6dc3ca8776564d515ac1f0d99a8183c3e0d5",
"content_id": "8058d521d2e4727285fa5167639744128c64dd42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 942,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 30,
"path": "/SetupFiles/LearningDictionary.py",
"repo_name": "mdberkey/Discord_Convo_Bots",
"src_encoding": "UTF-8",
"text": "\r\nfile = open(\"Source_Words.txt\")\r\nstring = file.read()\r\n\r\n\r\n# creates dictionary of words in Markov Chain fashion\r\ndef learn(dict, input):\r\n tokens = input.split(\" \")\r\n for i in range(0, len(tokens) - 1):\r\n currentWord = tokens[i]\r\n nextWord = tokens[i + 1]\r\n\r\n if currentWord not in dict:\r\n # adds new word to dictionary\r\n dict[currentWord] = {nextWord: 1}\r\n else:\r\n # word is already in dictionary\r\n allNextWords = dict[currentWord]\r\n\r\n if nextWord not in allNextWords:\r\n # adds new future state word to dictionary\r\n dict[currentWord][nextWord] = 1\r\n else:\r\n # increases frequency to state word already in dictionary\r\n dict[currentWord][nextWord] = dict[currentWord][nextWord] + 1\r\n return dict\r\n\r\n\r\ndictionary = {}\r\ndictionary = learn(dictionary, string)\r\nprint(dictionary)\r\n"
}
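A worked example of the transition counts `learn` builds: each word maps to a dict of next-words and their frequencies, which is exactly the structure `MarkovGeneration.py` consumes:

```python
# Worked example of learn(): "a b a c" yields two transitions out of "a".
dictionary = learn({}, "a b a c")
print(dictionary)  # {'a': {'b': 1, 'c': 1}, 'b': {'a': 1}}
```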
] | 7 |
delladelladella/freshstart | https://github.com/delladelladella/freshstart | acbd28754d79423e90750af61bc923fc9ff98547 | cdc1956d0745d42fb7a8e2506f1ba00cd4aa54b7 | 1ee8da7a40a8b49218f5cacb975772716dc3aa82 | refs/heads/main | 2023-02-10T20:04:35.612836 | 2021-01-05T15:37:43 | 2021-01-05T15:37:43 | 309,777,816 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6941896080970764,
"alphanum_fraction": 0.7003058195114136,
"avg_line_length": 53.5,
"blob_id": "94a1940fb4b1e79d5179ee04eac5181de2fcd93a",
"content_id": "05d863e972106f53bfe8577369a6044396b19f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 327,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 6,
"path": "/templates/thanks.html",
"repo_name": "delladelladella/freshstart",
"src_encoding": "UTF-8",
"text": "<!--page shown when a user sends their email and major that isn't available-->\n<title>Thank You</title>\n<link rel=\"stylesheet\" href=\"{{ url_for('static', filename = 'index.css') }}\">\n<div class=\"header\">\n <h3>Thanks for submiting your email. We will reach out to you when we have recommendations for your field.</h3>\n</div>\n"
},
{
"alpha_fraction": 0.6163690686225891,
"alphanum_fraction": 0.6211309432983398,
"avg_line_length": 35.13978576660156,
"blob_id": "4c7d57e4075d9984a0a867b8bc9545922ca1dbfd",
"content_id": "38ce20fe4e12568fd7de9064a38b60036520c77e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3360,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 93,
"path": "/app.py",
"repo_name": "delladelladella/freshstart",
"src_encoding": "UTF-8",
"text": "import flask\nimport os\nimport sqlite3\n\napp = flask.Flask(__name__)\n\n'''The database consist of 2 tables: LOCATIONS and EMAIL_LIST\nDATABASE STRUTURE\n EMAIL_LIST:\n EMAIL | MAJOR\n LOCATIONS: \n MAJOR | CITY | AVERAGE_SALARY | SALARY_RANGE | IN_THE_USA\n'''\n\n#sqlite3 database connection\ndatabase_path = './FS_Database.db'\ndef get_db():\n db = getattr(flask, '_database', None)\n if db is None:\n print(\"db call\")\n db = flask._database = sqlite3.connect(database_path, check_same_thread=False)\n return db\n\n#fetches data from the database based on the major the user selected and whether or not they are in the US\ndef query_db(major, in_USA):\n if in_USA.lower() == \"yes\":\n db = get_db().execute(\"SELECT * FROM LOCATION WHERE MAJOR='\"+major+\"' AND IN_THE_USA='\"+in_USA.lower()+\"';\")\n else:#if user is looking abroad\n db = get_db().execute(\"SELECT * FROM LOCATION WHERE MAJOR='\"+major+\"' AND IN_THE_USA='\"+in_USA.lower()+\"';\")\n db_data = db.fetchall()\n db.close()\n return db_data #returns a list that contains a list\n\n#insert an email into the database\ndef insert_db(email, major):\n db = get_db().execute(\"INSERT INTO EMAIL_LIST (EMAIL, MAJOR) VALUES ('\"+email+\"', '\"+major+\"');\") \n get_db().commit() #save changes\n db.close()\n\[email protected]('/', methods=['GET','POST'])\ndef index():\n if flask.request.method == 'GET':\n #print(query_db(\"Cyber Security\",\"yes\"))\n return flask.render_template(\"index.html\")\n elif flask.request.method == \"POST\": #user submits from the index page\n #parse the answers the user submited\n major = flask.request.json.get('Major')\n in_the_US = flask.request.json.get('Location')\n #use them to fetch data from the database\n database_data = query_db(major,in_the_US)\n #create the body for the response\n response_body = []\n for i in range(len(database_data)):\n json_data = {\n 'major' : database_data[i][0],\n 'city' : database_data[i][1],\n 'average_salary' : database_data[i][2],\n 'salary_range' : database_data[i][3]\n }\n #each database record we retrieve is added to the response body\n response_body.append(json_data)\n #flask will print an error if you jsonify your response\n response = flask.jsonify(response_body)\n return response\n else:\n return flask.render_template(\"Error.html\")\n\[email protected]('/NOTA', methods=['GET','POST'])\ndef NOTA():\n if flask.request.method == 'GET':\n return flask.render_template(\"NOTA-Email-Submit.html\")\n elif flask.request.method == \"POST\": #user submits to NOTA page\n #parses the user email and major\n email = flask.request.form['Email']\n major = flask.request.form['Major']\n #adds them to the database\n insert_db(email,major)\n #sends back the thanks.html\n return flask.render_template(\"thanks.html\")\n else:\n return flask.render_template(\"Error.html\")\n\napp.run(\n port = int(os.getenv('PORT',8080)),\n host = os.getenv('IP', '0.0.0.0'),\n debug = True)\n\n#closes the database once the application is terminated\[email protected]_appcontext\ndef close_connection(exception):\n db = getattr(flask,'_database', None)\n if db is not None:\n db.close()"
},
{
"alpha_fraction": 0.758459746837616,
"alphanum_fraction": 0.7642940282821655,
"avg_line_length": 39.80952453613281,
"blob_id": "fb7eea3ff2dd5aca00721a7ffa491d7d47f660c9",
"content_id": "1cb9d2743ee4f1c8aba9556f4559cf164380a6ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 857,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 21,
"path": "/README.md",
"repo_name": "delladelladella/freshstart",
"src_encoding": "UTF-8",
"text": "# A Fresh Start\n\nA Fresh Start is a web application that suggests, to college students, possible places to move to after graduation based on their major and whether or not they want to reside in the US. Salaries of jobs related to the college student's major are also provided with the locations.\n\nWho is A Fresh Start for?\nRecent college graduates. \n\nHow is A Fresh Start installed?\nNo installation is required, A Fresh Start is a web application. \n\nWhat language(s) is A Fresh Start written in?\nHTML, JavaScript, Python, and CSS. \n\nWho are the developers of A Fresh Start?\nRose Oliver, Jaiye Agbonavbare, & Della Mbaacha. \n\n\nTo Run the web app:\n- Using a terminal/command prompt, proceed to the directory in which the files of freshstart are located.\n- Enter `python3 app.py` into the terminal and press enter\n- In your browser, type in the url `localhost:8080` and press enter\n"
}
] | 3 |
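The index route in app.py above answers POST requests whose JSON body carries 'Major' and 'Location' keys and replies with a JSON list of location records. A minimal client sketch, assuming the server is running locally on port 8080 as described in the README and that the chosen major exists in the database (the requests library is a third-party dependency):

```python
import requests  # pip install requests

resp = requests.post(
    'http://localhost:8080/',
    json={'Major': 'Cyber Security', 'Location': 'yes'})
resp.raise_for_status()
for rec in resp.json():
    print(rec['city'], rec['average_salary'], rec['salary_range'])
```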
INLabASU/castex-v2.0 | https://github.com/INLabASU/castex-v2.0 | 76dea8af4311124653e7285a808ef0e7f79a146d | 0df764f6d8dd4fd00ca3130aafc06375fe9fa59d | e7a46730f672e65b0a161f90cbeaaa94b4692625 | refs/heads/master | 2021-04-30T06:23:29.801222 | 2018-05-02T21:56:34 | 2018-05-02T21:56:34 | 121,441,423 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5617120862007141,
"alphanum_fraction": 0.5759533047676086,
"avg_line_length": 39.92356872558594,
"blob_id": "9e05bd10a1d70a46dfa8f86400193301ff67ce05",
"content_id": "01f5e40e77dd28a305812fbeea02574ebdd2a17b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 12850,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 314,
"path": "/app/src/main/java/rte/packetization/RTEH264Packetizer.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte.packetization\n\nimport android.media.MediaCodec\nimport android.media.MediaCodecInfo\nimport android.media.MediaFormat\nimport android.os.Build\nimport android.util.Log\nimport android.view.Surface\nimport android.view.WindowManager\nimport rte.MediaCodecInputStream\nimport rte.RTEFrame\nimport rte.RTEPacket\nimport rte.RTEProtocol\nimport rte.session.RTESession\nimport java.io.IOException\nimport java.math.BigInteger\nimport java.net.DatagramPacket\n\n/**\n * Created by jk on 3/13/18.\n */\nclass RTEH264Packetizer(session:RTESession): RTEPacketizer(), Runnable {\n companion object {\n const val TAG = \"RTEH264Packetizer\"\n }\n\n private val mediaCodec: MediaCodec = MediaCodec.createEncoderByType(\"video/avc\")\n private val session:RTESession = session\n\n private var sendBuffer: ByteArray? = null\n private var naluLength = 0\n private var header = ByteArray(5)\n private var ts: Long = 0\n private var count = 0\n private var sps: ByteArray? = null\n private var pps:ByteArray? = null\n private var packetSize: Int = 0\n private var fid: Long = 0\n\n //A STAP-A NAL (NAL type 24) containing the sps and pps of the stream\n private var stapa: ByteArray? = null\n private lateinit var params: ByteArray\n\n\n init{\n\n// mediaCodec =\n val mediaFormat = MediaFormat.createVideoFormat(\"video/avc\", 360, 640)\n mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, session.bitrate!!)\n mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, session.framerate!!)\n mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface)\n mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1)\n mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 0)\n mediaFormat.setInteger(MediaFormat.KEY_REPEAT_PREVIOUS_FRAME_AFTER, 1000)\n mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)\n var inputSurface: Surface? = null\n if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {\n inputSurface = MediaCodec.createPersistentInputSurface()\n mediaCodec.setInputSurface(inputSurface)\n } else {\n mediaCodec.createInputSurface()\n// mediaCodec!!.setInputSurface(inputSurface)\n }\n mediaCodec.start()\n inputStream = MediaCodecInputStream(mediaCodec)\n\n val virtualDisplay = session.mediaProjection!!.createVirtualDisplay(\"test\", session!!.streamWidth!!, session!!.streamHeight!!,session!!.videoDensity!!,\n WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH or WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE,\n inputSurface, null, null)\n\n }\n\n override fun stop(){\n try {\n inputStream?.close()\n } catch (e: IOException) {}\n\n super.stop()\n }\n\n fun setStreamParameters(pps: ByteArray?, sps: ByteArray?) 
{\n this.pps = pps\n this.sps = sps\n\n // A STAP-A NAL (NAL type 24) containing the sps and pps of the stream\n if (pps != null && sps != null) {\n // STAP-A NAL header + NALU 1 (SPS) size + NALU 2 (PPS) size = 5 bytes\n stapa = ByteArray(sps.size + pps.size + 5)\n\n // STAP-A NAL header is 24\n stapa!![0] = 24\n\n // Write NALU 1 size into the array (NALU 1 is the SPS).\n stapa!![1] = (sps.size shr 8).toByte()\n stapa!![2] = (sps.size and 0xFF).toByte()\n\n // Write NALU 2 size into the array (NALU 2 is the PPS).\n stapa!![sps.size + 3] = (pps.size shr 8).toByte()\n stapa!![sps.size + 4] = (pps.size and 0xFF).toByte()\n\n // Write NALU 1 into the array, then write NALU 2 into the array.\n System.arraycopy(sps, 0, stapa, 3, sps.size)\n System.arraycopy(pps, 0, stapa, 5 + sps.size, pps.size)\n }\n }\n\n override fun run() {\n this.packetSize = RTEProtocol.RTE_STANDARD_PACKET_LENGTH\n while(runnerThread?.isInterrupted == false) {\n sendNalUnit()\n }\n }\n\n private fun sendNalUnit(){\n var type:Int = 0\n\n // NAL units are preceeded with 0x00000001\n fill(header, 0, 5)\n ts = (inputStream as MediaCodecInputStream).lastBufferInfo.presentationTimeUs * 1000L\n// frameLength = inputStream!!.available() + 1 // The length of the entire frame buffer. May contain multiple NAL units.\n naluLength = inputStream!!.available() + 1 // The length of the entire frame buffer. May contain multiple NAL units.\n if (!(header[0].toInt() == 0 && header[1].toInt() == 0 && header[2].toInt() == 0)) {\n // Turns out, the NAL units are not preceeded with 0x00000001\n Log.e(TAG, \"NAL units are not preceeded by 0x00000001\")\n return\n }\n\n // Parses the NAL unit type\n type = (header[4].toInt()) and 0x1F\n\n\n // The stream already contains NAL unit type 7 or 8, we don't need\n // to add them to the stream ourselves\n if (type == 7 || type == 8) {\n Log.v(TAG, \"SPS or PPS present in the stream.\")\n params = ByteArray(30)\n System.arraycopy(header, 0, params, 0, header.size)\n // Get both sps and pps NAL units.\n val len = fill(params, header.size, naluLength - 1)\n // Send parameters over socket.\n val dGramPackets = getPackets(params)\n // Send out frames on UDP socket.\n for (p in dGramPackets) {\n session.vSock!!.send(p)\n }\n fid++\n\n count++\n if (count > 4) {\n sps = null\n pps = null\n }\n }\n\n// // We send two packets containing NALU type 7 (SPS) and 8 (PPS)\n// // Those should allow the H264 stream to be decoded even if no SDP was sent to the decoder.\n// if (type == 5 && sps != null && pps != null) {\n// //TODO: fix this\n//// buffer = socket.requestBuffer()\n//// socket.markNextPacket()\n//// socket.updateTimestamp(ts)\n// sendBuffer = ByteArray(RTEProtocol.MTU)\n// System.arraycopy(stapa, 0, sendBuffer, 0, stapa!!.size)\n//// super.send(rtphl + stapa.size)\n// val dGramPackets = getPackets(sendBuffer!!)\n// // Send out frames on UDP socket.\n// for (p in dGramPackets) {\n// session.vSock!!.send(p)\n// }\n// fid++\n// }\n\n // Small NAL unit => Send a single NAL unit\n if (naluLength <= RTEProtocol.MAX_PACKET_SIZE - RTEProtocol.RTE_HEADER_LENGTH - 2) {\n// buffer = socket.requestBuffer()\n// buffer[rtphl] = header[4]\n sendBuffer = ByteArray(naluLength)\n System.arraycopy(header, 0, sendBuffer, 0, header.size)\n val len = fill(sendBuffer!!, header.size, naluLength - 1)\n// socket.updateTimestamp(ts)\n// socket.markNextPacket()\n// super.send(naluLength + rtphl)\n //Log.d(TAG,\"----- Single NAL unit - len:\"+len+\" delay: \"+delay);\n val dGramPackets = getPackets(sendBuffer!!)\n // Send out frames on 
UDP socket.\n for (p in dGramPackets) {\n session.vSock!!.send(p)\n }\n } else { // Large NAL unit => Split nal unit\n\n// // Set FU-A header\n// header[1] = (header[4].toInt() and 0x1F).toByte() // FU header type\n// header[1] += 0x80 // Start bit\n// // Set FU-A indicator\n// header[0] = (header[4] and 0x60 and 0xFF).toByte() // FU indicator NRI\n// header[0] += 28\n//\n var sum = 1\n while (sum < naluLength) {\n// buffer = socket.requestBuffer()\n val bufSize = (if (naluLength - sum > RTEProtocol.MAX_PACKET_SIZE) RTEProtocol.MAX_PACKET_SIZE else naluLength - sum)\n sendBuffer = ByteArray(bufSize)\n var len = 0\n if(sum == 1){\n System.arraycopy(header, 0, sendBuffer, 0, header.size)\n len = fill(sendBuffer!!, header.size, bufSize)\n }else{\n len = fill(sendBuffer!!, 0, bufSize)\n }\n// buffer[rtphl] = header[0]\n// buffer[rtphl + 1] = header[1]\n// socket.updateTimestamp(ts)\n if (len < 0) return\n sum += len\n// // Last packet before next NAL TODO: I think this is an RTP thing/ not necessary at this level.\n// if (sum >= naluLength) {\n// // End bit on\n// sendBuffer!![0] = (0x40 + sendBuffer!![0]).toByte()\n//// socket.markNextPacket()\n// }\n// super.send(len + rtphl + 2)\n// // Switch start bit\n// header[1] = (header[1] and 0x7F).toByte()\n// //Log.d(TAG,\"----- FU-A unit, sum:\"+sum);\n val dGramPackets = getPackets(sendBuffer!!)\n // Send out frames on UDP socket.\n for (p in dGramPackets) {\n session.vSock!!.send(p)\n }\n }\n }// Large NAL unit => Split nal unit\n fid++\n }\n\n private fun fill(buffer: ByteArray, offset: Int, length: Int): Int {\n var sum = 0\n var len: Int\n while (sum < length) {\n len = inputStream!!.read(buffer, offset + sum, length - sum)\n if (len < 0) {\n throw IOException(\"End of stream\")\n } else\n sum += len\n }\n return sum\n }\n\n private fun getPackets(buffer: ByteArray): ArrayList<DatagramPacket> {\n\n if(session == null){\n throw Exception(\"No session associated with H264 Packetizer\")\n } else {\n\n// val starttime = System.currentTimeMillis()\n val dGramPackets = arrayListOf<DatagramPacket>()\n// return DatagramPacket(outputData, outputData.size, group, CastexPreferences.PORT_OUT)\n\n var pid = 0 // Packet ID for this frame.\n var offset = 0 // Offset of the current packet within this frame.\n var frameSize = buffer.size\n var bytesRemaining = frameSize // The remaining number of bytes left to send\n var packetLength = if (bytesRemaining >= packetSize) packetSize else bytesRemaining\n\n while (offset < frameSize) {\n val packet = RTEPacket()\n\n packet.header.magic = RTEProtocol.PACKET_MAGIC\n packet.header.type = session.videoType!!\n\n packet.fid = this.fid\n packet.totalLength = frameSize.toLong()\n packet.pid = pid\n // Number of packets is equal to the ratio of frame size to packet size plus an\n // additional packet if there is a remainder.\n packet.totalPackets = ((frameSize / packetSize) + (if (frameSize % packetSize > 0) 1 else 0)).toLong()\n packet.offset = offset\n packet.length = packetLength\n packet.timestamp = BigInteger.valueOf(System.nanoTime() / 1000000) // TODO: See if setting this earlier improves performance\n\n val outData = ByteArray(packetLength)\n System.arraycopy(buffer, offset, outData, 0, packetLength)\n packet.data = outData\n// packet.data = buffer.sliceArray(offset..(offset + packetLength))\n packet.header.length = RTEProtocol.RTE_STANDARD_PACKET_LENGTH + packet.data.size /* size of header+packet w/o data + size of data */\n val serialized = packet.serialize()\n val dGramPacket = DatagramPacket(serialized, 
serialized!!.size, session.receiverAddress, session.receiverPort!!)\n dGramPackets.add(dGramPacket)\n\n pid++\n offset += packetLength\n bytesRemaining -= packetLength\n packetLength = if (bytesRemaining >= packetSize) packetSize else bytesRemaining\n }\n\n// Log.d(TAG, \"Packetization process took \" + (System.currentTimeMillis() - starttime).toString() + \"ms\")\n return dGramPackets\n }\n }\n\n /**\n * Packetizes the frame into a list of packets to be sent to the receiver.\n *\n * @param rteFrame The frame to be sent\n * @param group The IP Address (as a multicast group) to send to.\n * @param fid The frame ID of the current frame\n * @param packetSize The desired packet size. This is variable to allow tuning of packet\n * size for increased performance.\n */\n override fun packetize(rteFrame: RTEFrame, packetSize: Int): ArrayList<DatagramPacket> {\n\n sendNalUnit()\n return arrayListOf()\n }\n}\n"
},
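getPackets() in RTEH264Packetizer above splits each frame into fixed-size packets and computes totalPackets as the integer ratio of frame size to packet size, plus one extra packet when there is a remainder. Off-by-one mistakes are easy here, so the same bookkeeping is shown below as a standalone Python sketch (the function name and tuple layout are illustrative, not part of the protocol):

```python
def fragment(frame_size, packet_size):
    """Yield (pid, offset, length) for each packet of one frame."""
    total_packets = frame_size // packet_size + (1 if frame_size % packet_size else 0)
    pid, offset = 0, 0
    while offset < frame_size:
        length = min(packet_size, frame_size - offset)  # last packet may be short
        yield pid, offset, length
        pid += 1
        offset += length
    assert pid == total_packets  # the ratio-plus-remainder formula matches the loop

# a 2500-byte frame with 1024-byte packets -> 3 packets: 1024 + 1024 + 452
print(list(fragment(2500, 1024)))
```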
{
"alpha_fraction": 0.6323529481887817,
"alphanum_fraction": 0.638786792755127,
"avg_line_length": 32,
"blob_id": "7333c6a0850f274fdf0b996935d46a42f805b793",
"content_id": "b97cc81cc640efd0c663bdbc4f710682a5b6f5fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 1088,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 33,
"path": "/app/src/main/java/info/jkjensen/castexv2/CastexPreferences.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package info.jkjensen.castexv2\n\nimport rte.RTEProtocol\n\n/**\n * Created by jk on 10/31/17.\n */\nclass CastexPreferences{\n companion object {\n /** Enables multicast, allowing a one-to-many transmission.\n * NOTE: This complicates the host-device operation if the host device is acting as AP.\n * Also, most public and private AP's don't allow multicast without additional network\n * configuration.\n */\n public val KEY_MULTICAST = \"multicast\"\n public val MULTICAST = false\n\n /**\n * Enables the use of TCP sockets instead of UDP. This will likely improve stream quality\n * but severely increase network latency.\n * NOTE: This setting holds higher priority than the multicast setting, meaning that if both\n * are set the transmission will be made via TCP.\n */\n public val KEY_TCP = \"tcp\"\n public val TCP = false\n\n public val KEY_DEBUG = \"debug\"\n public val DEBUG = false\n\n val KEY_PORT_OUT = \"port out\"\n val PORT_OUT = RTEProtocol.DEFAULT_PORT\n }\n}"
},
{
"alpha_fraction": 0.5563636422157288,
"alphanum_fraction": 0.6230303049087524,
"avg_line_length": 24.030303955078125,
"blob_id": "dd20a7c2c65b3fd252f7d86e4170fcdfa0f5b819",
"content_id": "34cf2ffbf8562218a6071f38c6438d58f02797a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 825,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 33,
"path": "/app/src/main/java/rte/RTEProtocol.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\n/**\n * Created by jk on 3/2/18.\n * This class is an overarching carrier for all things RTE.\n */\nclass RTEProtocol {\n companion object {\n\n const val MTU = 1300\n // Maximum size of RTE packets\n const val MAX_PACKET_SIZE = MTU - 28\n\n const val RTE_STANDARD_PACKET_LENGTH = 1024\n const val DEFAULT_PACKET_SIZE = 1024\n const val RTE_HEADER_LENGTH = 48 // Bytes\n\n const val PACKET_MAGIC:Long = 0x87654321\n\n const val MEDIA_TYPE_JPEG = 0x01\n const val MEDIA_TYPE_H264 = 0x02\n\n const val MEDIA_TYPE_AAC = 0x02\n\n const val DEFAULT_PORT = 32000\n\n const val DEFAULT_VIDEO_BITRATE = 1000000\n const val DEFAULT_VIDEO_FRAME_RATE = 15\n\n val SENDER_SESSION_TYPE = \"sender\"\n val RECEIVER_SESSION_TYPE = \"receiver\"\n }\n}"
},
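Two details of RTEProtocol above are worth sanity-checking. First, MAX_PACKET_SIZE is MTU minus 28, which matches the usual IPv4 (20-byte) plus UDP (8-byte) header overhead, so an RTE packet and its transport headers fit in one 1300-byte datagram. Second, MEDIA_TYPE_H264 and MEDIA_TYPE_AAC share the value 0x02, presumably disambiguated by arriving on the separate video and audio sockets. A small Python check of the payload budget, restating the constants above:

```python
MTU = 1300
IP_UDP_OVERHEAD = 28                       # 20-byte IPv4 header + 8-byte UDP header
MAX_PACKET_SIZE = MTU - IP_UDP_OVERHEAD    # 1272, as in RTEProtocol
RTE_HEADER_LENGTH = 48
RTE_STANDARD_PACKET_LENGTH = 1024

payload_budget = MAX_PACKET_SIZE - RTE_HEADER_LENGTH   # 1224 bytes left for payload
assert RTE_STANDARD_PACKET_LENGTH <= payload_budget    # standard packets fit in one datagram
print(payload_budget)
```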
{
"alpha_fraction": 0.5584912896156311,
"alphanum_fraction": 0.565575361251831,
"avg_line_length": 36.04255294799805,
"blob_id": "c38962edebdc0daeec8c562d95309103115c69fc",
"content_id": "43ec238d4d31045664fb193a9c672038a36cccc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 5223,
"license_type": "no_license",
"max_line_length": 212,
"num_lines": 141,
"path": "/app/src/main/java/rte/MediaCodecInputStream.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\n/*\n * Copyright (C) 2011-2015 GUIGUI Simon, [email protected]\n *\n * This file is part of libstreaming (https://github.com/fyhertz/libstreaming)\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport java.io.IOException\nimport java.io.InputStream\nimport java.nio.ByteBuffer\nimport android.annotation .SuppressLint\nimport android.media.MediaCodec\nimport android.media.MediaCodec.BUFFER_FLAG_PARTIAL_FRAME\nimport android.media.MediaFormat\nimport android.util.Log\n\n/**\n * An InputStream that uses data from a MediaCodec.\n * The purpose of this class is to interface existing RTP packetizers of\n * libstreaming with the new MediaCodec API. This class is not thread safe !\n */\n@SuppressLint(\"NewApi\")\nclass MediaCodecInputStream(mediaCodec: MediaCodec) : InputStream() {\n\n val TAG = \"MediaCodecInputStream\"\n\n private var mMediaCodec: MediaCodec? = null\n val lastBufferInfo = MediaCodec.BufferInfo()\n// private var mBuffers: Array<ByteBuffer>? = null\n private var mBuffer: ByteBuffer? = null\n private var mIndex = MediaCodec.INFO_TRY_AGAIN_LATER\n private var mClosed = false\n\n// var mMediaFormat: MediaFormat\n\n init {\n mMediaCodec = mediaCodec\n// mBuffers = mMediaCodec!!.outputBuffers\n }\n\n override fun close() {\n mClosed = true\n }\n\n @Throws(IOException::class)\n override fun read(): Int {\n return 0\n }\n\n @Throws(IOException::class)\n override fun read(buffer: ByteArray, offset: Int, length: Int): Int {\n var min = 0\n\n try {\n if (mBuffer == null) {\n while (!Thread.interrupted() && !mClosed) {\n mIndex = mMediaCodec!!.dequeueOutputBuffer(lastBufferInfo, 500000)\n //\t\t\t\t\tLog.d(\"PreviewTest\", \"Index: \" + mIndex);\n if (mIndex >= 0) {\n //\t\t\t\t\t\tLog.d(TAG,\"Index: \"+mIndex+\" Time: \"+mBufferInfo.presentationTimeUs+\" size: \"+mBufferInfo.size);\n if(lastBufferInfo.flags and BUFFER_FLAG_PARTIAL_FRAME != 0){\n Log.d(TAG, \"Partial frame\")\n // TODO: Continue to aggregate frames into a full batch until a buffer without this flag. (This doesn't seem to be a problem on the Pixel as it always provides one NAL unit per frame.)\n } else{\n Log.d(TAG, \"not partial with size ${lastBufferInfo.size}\")\n }\n// Log.d(TAG, \"Getting output from buffer $mIndex\")\n mBuffer = mMediaCodec!!.getOutputBuffer(mIndex)\n mBuffer!!.position(0)\n countNALs(mBuffer!!)\n break\n } else if (mIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {\n// mBuffers = mMediaCodec!!.outputBuffers\n Log.i(TAG, \"Buffers changed\")\n } else if (mIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {\n// mMediaFormat = mMediaCodec!!.outputFormat\n Log.i(TAG, \"Format changed: \" + mMediaCodec!!.outputFormat.toString())\n } else if (mIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {\n Log.v(TAG, \"No buffer available...\")\n //return 0;\n } else {\n Log.e(TAG, \"Unknown error. 
Message: $mIndex\")\n //return 0;\n }\n }\n }\n\n if (mClosed) throw IOException(\"This InputStream was closed\")\n\n min = if (length < lastBufferInfo.size - mBuffer!!.position()) length else lastBufferInfo.size - mBuffer!!.position()\n mBuffer!!.get(buffer, offset, min)\n if (mBuffer!!.position() >= lastBufferInfo.size) {\n //\t\t\t\tLog.e(TAG, \"Releasing \" + mIndex);\n mMediaCodec!!.releaseOutputBuffer(mIndex, false)\n mBuffer = null\n }\n\n } catch (e: RuntimeException) {\n e.printStackTrace()\n }\n\n return min\n }\n\n override fun available(): Int {\n return if (mBuffer != null)\n lastBufferInfo.size - mBuffer!!.position()\n else 0\n }\n\n fun countNALs(buffer: ByteBuffer){\n var count = 0\n var ind = 0\n val size = buffer.remaining()\n\n while(ind < size - 3) {\n if (buffer[ind].toInt() == 0 && buffer[ind + 1].toInt() == 0 && buffer[ind + 2].toInt() == 0 && buffer[ind + 3].toInt() == 1) {\n count++\n }\n ind++\n }\n\n if(count > 1) {\n Log.d(TAG, \"Buffer contained $count NAL units\")\n }\n }\n\n}\n"
},
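countNALs() in MediaCodecInputStream above scans only for the 4-byte Annex-B start code 0x00000001, while H.264 streams may also separate NAL units with the 3-byte form 0x000001. A sketch of a scanner that accepts both forms, in Python for brevity (illustrative; the Kotlin loop would need the analogous extra branch):

```python
def count_nal_units(buf: bytes) -> int:
    """Count Annex-B start codes, accepting both 0x000001 and 0x00000001."""
    count, i = 0, 0
    while i + 3 <= len(buf):
        if buf[i] == 0 and buf[i + 1] == 0:
            if buf[i + 2] == 1:                      # 3-byte start code
                count += 1
                i += 3
                continue
            if i + 4 <= len(buf) and buf[i + 2] == 0 and buf[i + 3] == 1:  # 4-byte
                count += 1
                i += 4
                continue
        i += 1
    return count

# one NAL unit with each start-code length -> 2
print(count_nal_units(bytes([0, 0, 0, 1, 0x67, 0, 0, 1, 0x68])))
```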
{
"alpha_fraction": 0.7022116780281067,
"alphanum_fraction": 0.7061611413955688,
"avg_line_length": 30.674999237060547,
"blob_id": "9728267ad4f2c58edd7c9edf77318cdd4a712b84",
"content_id": "5a3a698cc3099f9544b8041a3eab2188fb13a73e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 1266,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 40,
"path": "/app/src/main/java/rte/ScreenRecordNotification.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport android.annotation.TargetApi\nimport android.app.NotificationChannel\nimport android.app.NotificationManager\nimport android.content.Context\nimport android.graphics.Color\nimport android.os.Build\n\n@TargetApi(Build.VERSION_CODES.O)\n/**\n * Created by jk on 1/9/18.\n */\nclass ScreenRecordNotification(context:Context) {\n\n companion object {\n val id = \"net.majorkernelpanic.streaming.ScreenRecordNotification\"\n val name = \"Screen Recording\"\n val description = \"Some description\"\n }\n\n val notificationManager: NotificationManager = context.getSystemService(Context.NOTIFICATION_SERVICE) as NotificationManager\n val importance = NotificationManager.IMPORTANCE_DEFAULT\n var channel: NotificationChannel? = null\n\n\n fun buildChannel(){\n if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {\n channel = NotificationChannel(id, name, importance)\n channel!!.description = description\n channel!!.enableLights(true)\n\n channel!!.lightColor = Color.RED\n channel!!.enableVibration(false)\n channel!!.setSound(null, null)\n// channel!!.vibrationPattern = LongArray(0)\n notificationManager.createNotificationChannel(channel)\n }\n }\n}"
},
{
"alpha_fraction": 0.6690391302108765,
"alphanum_fraction": 0.6725978851318359,
"avg_line_length": 22.41666603088379,
"blob_id": "1ef42c56c5fca13b4075ce7e1769f37ae5471311",
"content_id": "8e6af51d821e0fb9bd5604354ed94ad7ed4551f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 12,
"path": "/app/src/main/cpp/native-lib.cpp",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "#include <jni.h>\n#include <string>\n\nextern \"C\" JNIEXPORT jstring\n\nJNICALL\nJava_info_jkjensen_castexv2_MainActivity_stringFromJNI(\n JNIEnv *env,\n jobject /* this */) {\n std::string hello = \"Hello from the underworld\";\n return env->NewStringUTF(hello.c_str());\n}\n"
},
{
"alpha_fraction": 0.6968954205513,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 28.16666603088379,
"blob_id": "9fd5e5de94e479f66450446d5f94f856ed7d8d26",
"content_id": "f2b858463ad73c908f58ec47f653687215f0a77e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 42,
"path": "/app/src/main/java/info/jkjensen/castexv2/Utils.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package info.jkjensen.castexv2\n\nimport android.graphics.Bitmap\nimport android.os.Environment\nimport android.util.Log\nimport java.io.ByteArrayOutputStream\nimport java.io.File\nimport java.io.FileOutputStream\nimport java.math.BigInteger\nimport java.util.*\n\n/**\n * Created by jk on 2/16/18.\n * A set of utility functions to simplify application code.\n */\n\n/**\n * Saves the bitmap to a simple JPEG file with the date as the name.\n */\nfun Bitmap.saveToDateFile(){\n\n val now = Date()\n android.text.format.DateFormat.format(\"yyyy-MM-dd_hh:mm:ss\", now)\n val filepath = Environment.getExternalStorageDirectory().toString() + \"/\" + now + \".jpg\"\n val imageFile = File(filepath)\n val outputStream = FileOutputStream(imageFile)\n val quality = 100\n this.compress(Bitmap.CompressFormat.JPEG, quality, outputStream)\n outputStream.close()\n}\n\nfun ByteArrayOutputStream.printDump(){\n val bytesOut = this.toByteArray()\n val ss = StringBuilder()\n bytesOut\n .map { String.format(\"%02X\", it) + \" \" }\n .forEach { ss.append(it) }\n Log.d(\"OutputStream.dump\", \"Bytes: \" + ss.toString())\n}\n\nfun Long.toBigInteger() = BigInteger.valueOf(this)\nfun Int.toBigInteger() = BigInteger.valueOf(toLong())"
},
{
"alpha_fraction": 0.7084130048751831,
"alphanum_fraction": 0.7179732322692871,
"avg_line_length": 32.709678649902344,
"blob_id": "5d944e3720d023d730113b37054affd6d21a1f11",
"content_id": "d0e81b4d5cbc183383b6b8d7d394055433959cd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1046,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 31,
"path": "/plotTiming.py",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\n\ntimingFile = open('screenCaptureTiming.txt', 'r')\ntimes = timingFile.read().split('\\n')\ntimes = filter(lambda x: x is not '', times)\nprint('Number of timestamps recorded: ' + str(len(times)))\n\n# Convert to floats\ntimesflt = map(float, times) \n# Normalize the array so the first time is zero\ntimesflt = map(lambda x: x - timesflt[0], timesflt)\n\n# We want to plot the differences, not the actual time values.\ndiffs = []\nfor i in range(0, (len(timesflt) - 1)):\n diffs.append(timesflt[i+1] - timesflt[i])\n\n\nplt.plot(diffs, 'ro')\nplt.title('Timestamps of Screen Capture Over Time')\nplt.xlabel('timestamp number')\nplt.ylabel('time to next capture')\n\n# print(timesflt)\navgTimeBetweenTS = reduce((lambda x, y: x + y), diffs)\navgTimeBetweenTS = avgTimeBetweenTS/len(diffs)\nprint('Average time between timestamps: ' +str(avgTimeBetweenTS))\n\nplt.text(50, 56, 'Number of timestamps recorded: ' + str(len(times)) + '\\n' +'Average time between timestamps: ' +str(avgTimeBetweenTS), fontsize=15)\nplt.show()\n\n"
},
{
"alpha_fraction": 0.6577426195144653,
"alphanum_fraction": 0.6592637896537781,
"avg_line_length": 32.20201873779297,
"blob_id": "7d46d09fea05c74109418ee73e0aced475b2ec67",
"content_id": "061d554b2bee2f488e67dc01be3ea1cfd21e2be4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 3287,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 99,
"path": "/app/src/main/java/info/jkjensen/castexv2/ReceiverActivity.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package info.jkjensen.castexv2\n\nimport android.content.Context\nimport android.content.Intent\nimport android.os.AsyncTask\nimport android.support.v7.app.AppCompatActivity\nimport android.os.Bundle\nimport android.text.Editable\nimport android.text.TextWatcher\nimport android.util.Log\nimport kotlinx.android.synthetic.main.activity_receiver.*\nimport java.net.DatagramPacket\nimport java.net.DatagramSocket\nimport java.net.Socket\n\n/**\n * An activity used to stream content from other devices.\n */\nclass ReceiverActivity : AppCompatActivity() {\n /**\n * The task that receives incoming network packets.\n */\n lateinit var packetReceiverTask:PacketReceiverTask\n\n /**\n * The UDP socket used to send data to this device.\n */\n var clientSocket: DatagramSocket? = null\n\n /**\n * The TCP Socket used to send data to this device (unused for now).\n */\n var tcpSocket: Socket? = null\n\n /**\n * The IP Address of the sending device.\n */\n var senderIp:String = \"\"\n\n companion object {\n const val TAG = \"ReceiverActivity\"\n const val EXTRA_IP = \"ipAddress\"\n\n /**\n * Gets a new intent for this activity.\n */\n fun getIntent(context: Context, ip:String):Intent{\n return Intent(context, ReceiverActivity::class.java).apply {\n putExtra(EXTRA_IP, ip)\n }\n }\n }\n\n override fun onCreate(savedInstanceState: Bundle?) {\n super.onCreate(savedInstanceState)\n setContentView(R.layout.activity_receiver)\n\n // Get the IP Address if it was sent from the calling activity.\n senderIp = intent.getStringExtra(EXTRA_IP) ?: \"\"\n\n // Start the task to receive packets. When a packet is received, onData is called with the\n // received UDP packet.\n packetReceiverTask = PacketReceiverTask(clientSocket = clientSocket, onPacketReady = { p -> onDataReceived(p) }, tcpSock = tcpSocket)\n packetReceiverTask.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR)\n\n receiverEditText.addTextChangedListener(object:TextWatcher{\n\n override fun afterTextChanged(p0: Editable?) {\n senderIp = p0.toString()\n receiverText.text = if(senderIp == \"\") \"Enter an ip address to begin.\" else \"Ready to receive from $p0 now.\"\n }\n\n override fun beforeTextChanged(s: CharSequence?, start: Int, count: Int, after: Int) {\n }\n\n override fun onTextChanged(s: CharSequence?, start: Int, before: Int, count: Int) {\n }\n })\n\n receiverText.text = if(senderIp == \"\") \"Enter an ip address to begin.\" else \"Ready to receive from $senderIp now.\"\n }\n\n override fun onStop() {\n super.onStop()\n // Turn off receiving. Closes any associated sockets.\n packetReceiverTask.cancel(true)\n }\n\n\n /**\n * Callback function called when data is received via the packetReceiverTask.\n * This function does the bulk of the processing for incoming packets.\n */\n private fun onDataReceived(packet:DatagramPacket){\n Log.d(TAG, \"Called on data with packet!\")\n // TODO: Deserialize packet into RTEPacket\n // TODO: Add newly created RTEPacket to RTEFrameReceiveBuffer for the renderer to pick it up.\n }\n}\n"
},
{
"alpha_fraction": 0.5248791575431824,
"alphanum_fraction": 0.5479101538658142,
"avg_line_length": 37.653846740722656,
"blob_id": "274e12109296537264d572ac655bda3a12fe4d24",
"content_id": "c63a3a07912f62752a14235c8e99f21c49cd4801",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 7034,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 182,
"path": "/app/src/main/java/rte/RTEPacket.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport java.io.ByteArrayOutputStream\nimport java.math.BigInteger\nimport java.util.*\n\n/**\n * Created by jk on 3/2/18.\n * This class represents a packet according to the RTE Protocol.\n */\ndata class RTEPacket(var header: RTEPacketHeader = RTEPacketHeader(),\n var fid:Long = -1,\n var totalLength:Long = -1,\n var pid:Int = -1,\n var totalPackets:Long = -1,\n var offset:Int = -1,\n var length:Int = -1,\n var timestamp:BigInteger = BigInteger.valueOf(-1),\n var flag:BigInteger = BigInteger.valueOf(-1),\n var data: ByteArray = ByteArray(0)) {\n\n companion object {\n const val TAG = \"RTEPacket\"\n\n\n /**\n * Reconstructs a packet object from the given ByteArray.\n */\n fun deserialize(buffer:ByteArray):RTEPacket{\n val retPacket = RTEPacket()\n\n// retPacket.header.magic =\n\n return retPacket\n }\n }\n\n /**\n * Prepares the data in this packet to be sent over the network as a C-structured byte stream.\n */\n fun serialize(): ByteArray? {\n val starttime = System.currentTimeMillis()\n val outputStream = ByteArrayOutputStream()\n\n // Header magic, 32 bits\n outputStream.write(byteArrayOf(\n (this.header.magic and 0xFF).toByte(),\n ((this.header.magic shr 8) and 0xFF).toByte(),\n ((this.header.magic shr 16) and 0xFF).toByte(),\n ((this.header.magic shr 24) and 0xFF).toByte()\n ))\n\n // Stream type, 16 bits\n outputStream.write(byteArrayOf(\n (this.header.type and 0xFF).toByte(),\n (this.header.type shr 8 and 0xFF).toByte()\n ))\n\n // Packet length, including header, 16 bits\n outputStream.write(byteArrayOf(\n (this.header.length and 0xFF).toByte(),\n ((this.header.length shr 8) and 0xFF).toByte()\n ))\n\n // frame ID, 32 bits\n outputStream.write(byteArrayOf(\n (this.fid and 0xFF).toByte(),\n ((this.fid shr 8) and 0xFF).toByte(),\n ((this.fid shr 16) and 0xFF).toByte(),\n ((this.fid shr 24) and 0xFF).toByte()\n ))\n\n // total length of this packet frame, 32 bits\n outputStream.write(byteArrayOf(\n (this.totalLength and 0xFF).toByte(),\n ((this.totalLength shr 8) and 0xFF).toByte(),\n ((this.totalLength shr 16) and 0xFF).toByte(),\n ((this.totalLength shr 24) and 0xFF).toByte()\n ))\n\n // packet ID, 32 bits\n outputStream.write(byteArrayOf(\n (this.pid and 0xFF).toByte(),\n ((this.pid shr 8) and 0xFF).toByte(),\n ((this.pid shr 16) and 0xFF).toByte(),\n ((this.pid shr 24) and 0xFF).toByte()\n ))\n\n // total number of packets, 32 bits\n outputStream.write(byteArrayOf(\n (this.totalPackets and 0xFF).toByte(),\n ((this.totalPackets shr 8) and 0xFF).toByte(),\n ((this.totalPackets shr 16) and 0xFF).toByte(),\n ((this.totalPackets shr 24) and 0xFF).toByte()\n ))\n\n // Offset of this packet within the frame, 32 bits\n outputStream.write(byteArrayOf(\n (this.offset and 0xFF).toByte(),\n ((this.offset shr 8) and 0xFF).toByte(),\n ((this.offset shr 16) and 0xFF).toByte(),\n ((this.offset shr 24) and 0xFF).toByte()\n ))\n\n // Payload length of this packet\n outputStream.write(byteArrayOf(\n (this.length and 0xFF).toByte(),\n ((this.length shr 8) and 0xFF).toByte(),\n ((this.length shr 16) and 0xFF).toByte(),\n ((this.length shr 24) and 0xFF).toByte()\n ))\n\n // Timestamp of this frame. 
Could possibly fix this to be faster by separating the 64-bit\n // BigInteger into top half and bottom half instead of calling .toInt() every time.\n outputStream.write(byteArrayOf(\n (this.timestamp.toInt() and 0xFF).toByte(),\n ((this.timestamp shr 8).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 16).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 24).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 32).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 40).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 48).toInt() and 0xFF).toByte(),\n ((this.timestamp shr 56).toInt() and 0xFF).toByte()\n ))\n\n // Flag for this packet. Could possibly fix this to be faster by separating the 64-bit\n // BigInteger into top half and bottom half instead of calling .toInt() every time.\n outputStream.write(byteArrayOf(\n (this.flag.toInt() and 0xFF).toByte(),\n ((this.flag shr 8).toInt() and 0xFF).toByte(),\n ((this.flag shr 16).toInt() and 0xFF).toByte(),\n ((this.flag shr 24).toInt() and 0xFF).toByte(),\n ((this.flag shr 32).toInt() and 0xFF).toByte(),\n ((this.flag shr 40).toInt() and 0xFF).toByte(),\n ((this.flag shr 48).toInt() and 0xFF).toByte(),\n ((this.flag shr 56).toInt() and 0xFF).toByte()\n ))\n\n // Payload data\n outputStream.write(this.data)\n\n // Print the stream for debugging.\n// outputStream.printDump()\n\n// Log.d(TAG, \"Serialization process took \" + (System.currentTimeMillis() - starttime).toString() + \"ms\")\n return outputStream.toByteArray()\n }\n\n override fun equals(other: Any?): Boolean {\n if (this === other) return true\n if (javaClass != other?.javaClass) return false\n\n other as RTEPacket\n\n if (header != other.header) return false\n if (fid != other.fid) return false\n if (totalLength != other.totalLength) return false\n if (pid != other.pid) return false\n if (totalPackets != other.totalPackets) return false\n if (offset != other.offset) return false\n if (length != other.length) return false\n if (timestamp != other.timestamp) return false\n if (flag != other.flag) return false\n if (!Arrays.equals(data, other.data)) return false\n\n return true\n }\n\n override fun hashCode(): Int {\n var result = header.hashCode()\n result = (31 * result + fid).toInt()\n result = (31 * result + totalLength).toInt()\n result = 31 * result + pid\n result = (31 * result + totalPackets).toInt()\n result = 31 * result + offset\n result = 31 * result + length\n result = 31 * result + timestamp.hashCode()\n result = 31 * result + flag.hashCode()\n result = 31 * result + Arrays.hashCode(data)\n return result\n }\n}"
},
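serialize() in RTEPacket above writes every field least-significant byte first: a 32-bit magic, 16-bit type and length, six 32-bit fields (fid, totalLength, pid, totalPackets, offset, payload length), then 64-bit timestamp and flag, which adds up to exactly the 48-byte RTE_HEADER_LENGTH from RTEProtocol. Since deserialize() is still a stub, here is a minimal Python sketch of the matching parse, assuming that little-endian layout and that timestamp and flag fit in unsigned 64 bits:

```python
import struct

# little-endian: u32 magic, u16 type, u16 length, six u32 fields, two u64 fields
RTE_HEADER = struct.Struct('<IHHIIIIIIQQ')
assert RTE_HEADER.size == 48  # matches RTE_HEADER_LENGTH

def parse_rte_packet(buf: bytes) -> dict:
    (magic, mtype, pkt_len, fid, total_length, pid,
     total_packets, offset, payload_len, timestamp, flag) = RTE_HEADER.unpack_from(buf, 0)
    if magic != 0x87654321:  # PACKET_MAGIC
        raise ValueError('not an RTE packet')
    return {
        'type': mtype, 'fid': fid, 'pid': pid,
        'total_packets': total_packets, 'total_length': total_length,
        'offset': offset, 'timestamp': timestamp, 'flag': flag,
        'data': buf[RTE_HEADER.size:RTE_HEADER.size + payload_len],
    }
```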
{
"alpha_fraction": 0.5974819660186768,
"alphanum_fraction": 0.6298695206642151,
"avg_line_length": 41.98823547363281,
"blob_id": "24e26cc189df162b82827d416d6b9e5460ba8a20",
"content_id": "e905bc08d38493a6f26b32ccc95b16764708c048",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 10961,
"license_type": "no_license",
"max_line_length": 251,
"num_lines": 255,
"path": "/app/src/main/java/rte/ScreenCapturerService.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport android.annotation.TargetApi\nimport android.app.*\nimport android.content.Intent\nimport android.content.IntentFilter\nimport android.graphics.Bitmap\nimport android.graphics.PixelFormat\nimport android.media.Image\nimport android.media.ImageReader\nimport android.media.projection.MediaProjection\nimport android.os.Build\nimport android.os.Handler\nimport android.os.Looper\nimport android.support.v4.app.NotificationCompat\nimport android.util.Log\nimport android.view.WindowManager\nimport android.widget.Toast\nimport info.jkjensen.castexv2.MainActivity\nimport info.jkjensen.castexv2.R\nimport org.jetbrains.anko.mediaProjectionManager\nimport rte.packetization.RTEJpegPacketizer\nimport rte.session.RTESession\nimport java.lang.Thread.sleep\n\n\n/**\n * Created by jk on 1/3/18.\n */\nclass ScreenCapturerService: IntentService(\"ScreenCaptureService\") {\n\n companion object {\n val MEDIA_PROJECTION_RESULT_CODE = \"mediaprojectionresultcode\"\n val MEDIA_PROJECTION_RESULT_DATA = \"mediaprojectionresultdata\"\n const val SESSION_CODE = \"servicecode\"\n val STOP_ACTION = \"Castex.StopAction\"\n }\n\n private val TAG = \"ScreenCaptureService\"\n private val ONGOING_NOTIFICATION_IDENTIFIER = 1\n\n private val REQUEST_MEDIA_PROJECTION_CODE = 1\n private val REQUEST_CAMERA_CODE = 200\n\n private var resultCode: Int = 0\n private var resultData: Intent? = null\n private var mediaProjection: MediaProjection? = null\n private val broadcastReceiver = RTENotificationBroadcastReceiver()\n // Used to feed captured frames to our buffer\n private var imageReader:ImageReader? = null\n private var session:RTESession? = null\n\n private var handler: Handler? = null\n\n // ID of the current capturing frame.\n private var fid = 0\n private var captureThread:Thread? = null\n\n @TargetApi(Build.VERSION_CODES.O)\n override fun onCreate() {\n\n // Create a notification channel for the recording process\n ScreenRecordNotification(this).buildChannel()\n\n Log.d(\"ScreenCaptureService\", \"Service started.\")\n val filter = IntentFilter()\n filter.addAction(ScreenCapturerService.STOP_ACTION)\n registerReceiver(broadcastReceiver, filter)\n\n Toast.makeText(this, \"Sharing screen\", Toast.LENGTH_LONG).show()\n\n val notificationIntent = Intent(this, applicationContext.javaClass)\n val pendingIntent = PendingIntent.getActivity(this, 0, notificationIntent, 0)\n\n\n val stopAction = Intent()\n stopAction.action = STOP_ACTION\n val stopIntent = PendingIntent.getBroadcast(applicationContext, 12345, stopAction, PendingIntent.FLAG_UPDATE_CURRENT)\n val action = NotificationCompat.Action.Builder(R.drawable.notification_animated, \"Stop streaming\", stopIntent).build()\n\n val builder = NotificationCompat.Builder(this, ScreenRecordNotification.id)\n .setContentTitle(getText(R.string.notification_title))\n .setContentText(getText(R.string.notification_message))\n .setSmallIcon(R.drawable.notification_animated)\n .setContentIntent(pendingIntent)\n .setTicker(getText(R.string.notification_message))\n .addAction(action)\n\n startForeground(ONGOING_NOTIFICATION_IDENTIFIER, builder.build())\n super.onCreate()\n }\n\n override fun onDestroy() {\n Log.d(TAG, \"Destroying\")\n unregisterReceiver(broadcastReceiver)\n captureThread?.interrupt()\n super.onDestroy()\n }\n\n override fun onHandleIntent(intent: Intent?) 
{\n//\n// val editor = PreferenceManager.getDefaultSharedPreferences(this).edit()\n//// editor.putString(RtspServer.KEY_PORT, 1234.toString())\n// editor.commit()\n//\n// val wifiManager = applicationContext.getSystemService(Context.WIFI_SERVICE) as WifiManager\n// val multicastLock = wifiManager.createMulticastLock(\"multicastLock\")\n// multicastLock.setReferenceCounted(false)\n// multicastLock.acquire()\n//\n//\n// val layout:RelativeLayout = layoutInflater.inflate(R.layout.bg_surface_view, null) as RelativeLayout\n// val params = WindowManager.LayoutParams(1,1,\n// WindowManager.LayoutParams.TYPE_APPLICATION_OVERLAY,\n// FLAG_WATCH_OUTSIDE_TOUCH or FLAG_NOT_FOCUSABLE,\n// PixelFormat.TRANSPARENT)\n//\n// val wm = getSystemService(Context.WINDOW_SERVICE) as WindowManager\n// wm.addView(layout, params)\n//\n// val svf:SurfaceView = layout.findViewById(R.id.surface_view_fake)\n// val sh:SurfaceHolder = svf.holder\n// svf.setZOrderOnTop(true)\n// sh.setFormat(PixelFormat.TRANSPARENT)\n//\n// sessionBuilder = sessionBuilder\n// .setContext(applicationContext)\n//// .setSurfaceView(TransmitterActivity2.sv)\n// .setSurfaceView(svf)\n// .setCamera(1)\n// .setPreviewOrientation(90)\n// .setContext(applicationContext)\n// .setAudioEncoder(SessionBuilder.AUDIO_NONE)\n// //Supposedly supported resolutions: 1920x1080, 1600x1200, 1440x1080, 1280x960, 1280x768, 1280x720, 1024x768, 800x600, 800x480, 720x480, 640x480, 640x360, 480x640, 480x360, 480x320, 352x288, 320x240, 240x320, 176x144, 160x120, 144x176\n//\n//// .setVideoQuality(VideoQuality(320,240,30,2000000)) // Supported\n//// .setVideoQuality(VideoQuality(640,480,30,2000000)) // Supported\n//// .setVideoQuality(VideoQuality(720,480,30,2000000)) // Supported\n//// .setVideoQuality(VideoQuality(800,600,30,2000000)) // Supported\n// .setVideoQuality(VideoQuality(TransmitterActivity2.STREAM_WIDTH,\n// TransmitterActivity2.STREAM_HEIGHT,\n// TransmitterActivity2.STREAM_FRAMERATE,\n// TransmitterActivity2.STREAM_BITRATE)) // Supported\n//// .setVideoQuality(VideoQuality(1280,960,4,8000000)) // Supported\n//// .setVideoQuality(VideoQuality(1080,1920,30,8000000)) // Supported\n//// .setDestination(\"192.168.43.19\")// mbp\n//// .setDestination(\"192.168.43.20\")// iMac\n//// .setDestination(\"192.168.43.19\")// mbp\n//// .setDestination(\"192.168.43.110\")// Galaxy s7\n//// .setDestination(\"192.168.43.6\")// OnePlus 5\n//// .setDestination(\"232.0.1.2\") // multicast\n//// .setCallback(this)\n// sessionBuilder.videoEncoder = SessionBuilder.VIDEO_H264\n//\n// val resultCode = intent?.getIntExtra(MEDIA_PROJECTION_RESULT_CODE, 0)\n// val resultData:Intent? = intent?.getParcelableExtra<Intent>(MEDIA_PROJECTION_RESULT_DATA)\n// mediaProjection = mediaProjectionManager.getMediaProjection(resultCode!!, resultData)\n//\n// sessionBuilder.setMediaProjection(mediaProjection)\n//\n// val metrics: DisplayMetrics = applicationContext.resources.displayMetrics\n// sessionBuilder.setDisplayMetrics(metrics)\n//\n// session = sessionBuilder.build()\n// session!!.videoTrack.streamingMethod = MediaStream.MODE_MEDIACODEC_API\n// session!!.configure()\n// startService(Intent(applicationContext, RtspServer::class.java))\n// Log.d(\"ScreenCaptureService\", \"Starting session preview\")\n// session!!.startPreview()\n//\n// while(true){\n// Thread.sleep(1000000)\n// }\n\n\n\n val resultCode = intent?.getIntExtra(MEDIA_PROJECTION_RESULT_CODE, 0)\n val resultData:Intent? 
= intent?.getParcelableExtra(MEDIA_PROJECTION_RESULT_DATA)\n mediaProjection = mediaProjectionManager.getMediaProjection(resultCode!!, resultData)\n if(mediaProjection == null){\n throw Exception(\"Failed to get mediaprojection.\")\n }\n\n session = intent.getParcelableExtra(SESSION_CODE)\n session!!.context = applicationContext\n session!!.start(mediaProjection!!)\n\n\n // TODO: Separate this out into JPEG-specific stuff\n if(session!!.videoType == RTEProtocol.MEDIA_TYPE_JPEG) {\n // Create a new thread to run all capturing on.\n captureThread = Thread {\n Looper.prepare()\n handler = Handler()\n Looper.loop()\n }\n captureThread!!.start()\n\n imageReader = ImageReader.newInstance(session!!.streamWidth!!, session!!.streamHeight!!, PixelFormat.RGBA_8888, 5)\n val virtualDisplay = mediaProjection!!.createVirtualDisplay(\"test\", session!!.streamWidth!!, session!!.streamHeight!!, session!!.videoDensity!!,\n WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH or WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE,\n imageReader!!.surface, null, handler)\n var image: Image?\n var bitmap: Bitmap?\n Log.d(MainActivity.TAG, \"Writing timing log to \" + filesDir.absolutePath + \"/screenCaptureTiming.txt\")\n imageReader!!.setOnImageAvailableListener({\n image = null\n bitmap = null\n if (!captureThread!!.isInterrupted) {\n\n try {\n image = imageReader!!.acquireLatestImage() ?: throw Exception(\"Failed to get latest image\")\n val planes = image!!.planes\n val buffer = planes[0].buffer\n ?: throw Exception(\"Failed to get image buffer\")\n\n // For debugging, write timestamps to a text file for external timing analysis\n// fos?.write(((System.currentTimeMillis() - startTime).toString() + \"\\n\").toByteArray())\n\n buffer.rewind()\n val pixelStride = planes[0].pixelStride\n val rowStride = planes[0].rowStride\n val rowPadding = rowStride - pixelStride * image!!.width\n bitmap = Bitmap.createBitmap(image!!.width + rowPadding / pixelStride, image!!.height, Bitmap.Config.ARGB_8888)\n bitmap!!.copyPixelsFromBuffer(buffer)\n// Log.d(TAG, \"Adding image with fid: $fid\")\n val timestamp = System.nanoTime() / 1000\n// (session!!.packetizer as RTEJpegPacketizer).images.add(RTEFrame(bitmap!!, fid, timestamp))\n fid++\n\n\n } catch (e: Exception) {\n e.printStackTrace()\n } finally {\n\n if (image != null)\n image?.close()\n }\n }\n }, handler)\n\n// Thread(Runnable {\n// while (true) {\n// Thread.sleep(5)\n//// openScreenshot()\n//\n// }\n// }).start()\n }\n\n while(true){\n sleep(10)\n }\n }\n}"
},
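The JPEG capture path in ScreenCapturerService above handles a classic ImageReader gotcha: each row of the RGBA_8888 plane may carry padding bytes, so the code creates a Bitmap that is rowPadding / pixelStride pixels wider than the display and copies the buffer straight in. The same arithmetic as a standalone Python sketch (the example stride values are made up for illustration):

```python
def padded_bitmap_width(width_px, pixel_stride, row_stride):
    """Width in pixels needed to absorb per-row padding from an ImageReader plane."""
    row_padding = row_stride - pixel_stride * width_px  # extra bytes at the end of each row
    return width_px + row_padding // pixel_stride

# e.g. a 360-pixel-wide RGBA_8888 plane (4 bytes/pixel) whose rows are padded to 1536 bytes
print(padded_bitmap_width(360, 4, 1536))  # -> 384
```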
{
"alpha_fraction": 0.6467310786247253,
"alphanum_fraction": 0.6501981019973755,
"avg_line_length": 39.792930603027344,
"blob_id": "6f2165effdfe14b03f7a03b0470066c93a572422",
"content_id": "b13723a92b63f0a96cfb49d58659c88ca6c0918b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 8076,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 198,
"path": "/app/src/main/java/rte/session/RTESession.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte.session\n\nimport android.Manifest\nimport android.content.Context\nimport android.content.Intent\nimport android.content.pm.PackageManager\nimport android.media.projection.MediaProjection\nimport android.net.wifi.WifiManager\nimport android.os.Parcel\nimport android.os.Parcelable\nimport android.support.v4.content.ContextCompat.checkSelfPermission\nimport android.util.Log\nimport rte.RTEProtocol\nimport rte.RTEProtocol.Companion.RECEIVER_SESSION_TYPE\nimport rte.RTEProtocol.Companion.SENDER_SESSION_TYPE\nimport rte.ScreenCapturerService\nimport rte.packetization.RTEH264Packetizer\nimport rte.packetization.RTEJpegPacketizer\nimport rte.packetization.RTEPacketizer\nimport java.io.Serializable\nimport java.net.InetAddress\nimport java.net.MulticastSocket\n\n/**\n * Created by jk on 3/13/18.\n * A streaming session for the RTE Protocol and its associated data.\n */\nclass RTESession() :Parcelable{\n\n var sessionType: String? = null\n var context: Context? = null\n var multicastLockHeld: Boolean = false\n // Video socket\n var vSock: MulticastSocket? = null\n // Audio socket\n var aSock: MulticastSocket? = null\n var receiverAddressStr:String? = null\n var receiverAddress:InetAddress? = null\n var receiverPort:Int? = RTEProtocol.DEFAULT_PORT\n var packetizer: RTEPacketizer? = null\n var videoType: Int? = null\n var audioType: Int? = null\n var streamWidth: Int? = null\n var streamHeight: Int? = null\n var videoDensity: Int? = null\n var bitrate: Int? = RTEProtocol.DEFAULT_VIDEO_BITRATE\n var framerate: Int? = RTEProtocol.DEFAULT_VIDEO_FRAME_RATE\n\n var mediaProjectionResultCode: Int? = null\n var mediaProjectionResultData: Intent? = null\n\n // The members below are not serialized as part of the session, so they are not to be added\n // until after the session is part of the screencaptureservice.\n var mediaProjection:MediaProjection? = null\n\n private val TAG = \"RTESession\"\n\n var setupSuggestion:String? = null\n\n constructor(parcel: Parcel) : this() {\n sessionType = parcel.readString()\n multicastLockHeld = (parcel.readByte().toInt()) != 0\n receiverAddressStr = parcel.readString()\n receiverPort = parcel.readValue(Int::class.java.classLoader) as? Int\n videoType = parcel.readValue(Int::class.java.classLoader) as? Int\n audioType = parcel.readValue(Int::class.java.classLoader) as? Int\n streamWidth = parcel.readValue(Int::class.java.classLoader) as? Int\n streamHeight = parcel.readValue(Int::class.java.classLoader) as? Int\n videoDensity = parcel.readValue(Int::class.java.classLoader) as? Int\n bitrate = parcel.readValue(Int::class.java.classLoader) as? Int\n framerate = parcel.readValue(Int::class.java.classLoader) as? Int\n mediaProjectionResultCode = parcel.readValue(Int::class.java.classLoader) as? 
Int\n mediaProjectionResultData = parcel.readParcelable(Intent::class.java.classLoader)\n }\n\n /**\n * Initializes the session with the given parameters.\n */\n fun start(mediaProjection: MediaProjection){\n this.mediaProjection = mediaProjection\n\n when(videoType){\n RTEProtocol.MEDIA_TYPE_JPEG -> {\n this.packetizer = RTEJpegPacketizer(this)\n } RTEProtocol.MEDIA_TYPE_H264 -> {\n this.packetizer = RTEH264Packetizer(this)\n }\n else -> throw Exception(\"Invalid video type for rte.session.RTESessionBuilder.setMediaType()\")\n }\n\n if(this.videoType != null){\n this.vSock = MulticastSocket()\n this.vSock!!.reuseAddress = true\n }\n if(this.audioType != null){\n this.aSock = MulticastSocket()\n this.aSock!!.reuseAddress = true\n }\n\n if(this.receiverAddressStr != null){\n this.receiverAddress = InetAddress.getByName(receiverAddressStr)\n } else{\n // If the address for the receiver is not set, set it to a default multicast address.\n this.receiverAddress = InetAddress.getByName(\"224.0.0.1\")\n }\n\n packetizer!!.start()\n\n }\n\n /**\n * Verifies that the session is ready to be started. This means that all necessary permissions\n * have been obtained and all of the necessary fields are non-null.\n */\n fun isStartable(): Boolean {\n // Check fields and permissions required for both sender and receiver\n if(this.context == null){\n throw Exception(\"No local context set. Set context with SessionBuilder.setContext()\")\n } else if(videoType == null && audioType == null) {\n throw Exception(\"Audio and video types are both null. Either Session.videoType or \" +\n \"Session.audioType must be set via SessionBuilder.set{media}Type() for the \" +\n \"session to be startable.\")\n }\n\n try {\n when {\n checkSelfPermission(context!!, Manifest.permission.INTERNET)\n != PackageManager.PERMISSION_GRANTED ->\n throw Exception(\"Internet permission not granted.\")\n checkSelfPermission(context!!, Manifest.permission.ACCESS_WIFI_STATE)\n != PackageManager.PERMISSION_GRANTED ->\n throw Exception(\"Access Wifi State permission not granted.\")\n checkSelfPermission(context!!, Manifest.permission.CHANGE_WIFI_MULTICAST_STATE)\n != PackageManager.PERMISSION_GRANTED ->\n throw Exception(\"CHANGE_WIFI_MULTICAST_STATE permission not granted.\")\n checkSelfPermission(context!!, Manifest.permission.ACCESS_NETWORK_STATE) !=\n PackageManager.PERMISSION_GRANTED ->\n throw Exception(\"ACCESS_NETWORK_STATE permission not granted.\")\n }\n } catch (e:Exception){\n e.printStackTrace()\n return false\n }\n\n when(sessionType){\n SENDER_SESSION_TYPE ->{\n if(videoType != null && (streamHeight == null || streamWidth == null || videoDensity == null)){\n throw Exception(\"Session width, height, and density must be set for video streaming.\")\n } else if(mediaProjectionResultCode == null || mediaProjectionResultData == null){\n Log.e(TAG, \"Transmitter session must include media projection results.\")\n setupSuggestion = \"Please allow screen sharing permissions.\"\n return false\n }\n }\n RECEIVER_SESSION_TYPE ->{\n if(!multicastLockHeld){\n throw Exception(\"The MulticastLock associated with this session is not held.\" +\n \"MulticastLock.acquire() must be called for the session to be startable.\")\n }\n\n }\n else -> throw Exception(\"No session type set. 
Call SessionBuilder.setup(type) to set \" +\n \"the session type as sender or receiver.\")\n }\n return true\n }\n\n override fun writeToParcel(parcel: Parcel, flags: Int) {\n parcel.writeString(sessionType)\n parcel.writeByte(if(!multicastLockHeld) 0 else 1)\n parcel.writeString(receiverAddressStr)\n parcel.writeValue(receiverPort)\n parcel.writeValue(videoType)\n parcel.writeValue(audioType)\n parcel.writeValue(streamWidth)\n parcel.writeValue(streamHeight)\n parcel.writeValue(videoDensity)\n parcel.writeValue(bitrate)\n parcel.writeValue(framerate)\n parcel.writeValue(mediaProjectionResultCode)\n parcel.writeParcelable(mediaProjectionResultData, flags)\n }\n\n override fun describeContents(): Int {\n return 0\n }\n\n companion object CREATOR : Parcelable.Creator<RTESession> {\n override fun createFromParcel(parcel: Parcel): RTESession {\n return RTESession(parcel)\n }\n\n override fun newArray(size: Int): Array<RTESession?> {\n return arrayOfNulls(size)\n }\n }\n\n}"
},
{
"alpha_fraction": 0.6035704016685486,
"alphanum_fraction": 0.6052706241607666,
"avg_line_length": 29.170940399169922,
"blob_id": "cf73a265e1ff7e7f4c3a93afa94b3854363ed30a",
"content_id": "352cb248285779ed4a5dc88a9c664c10881d6bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 3529,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 117,
"path": "/app/src/main/java/rte/RTEFrameReceiveBuffer.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport java.math.BigInteger\nimport java.nio.ByteBuffer\n\n\n/**\n * A 2D frame buffer for storing frames that have been decoded. Supports partial frames and frame\n * timeout.\n *\n */\nclass RTEFrameReceiveBuffer(val slotCount:Int, val slotSize:Int, val frameTimeout:Int, val maxSize:Int = 5) {\n\n val map = mutableMapOf<Int, ByteArray>() // https://kotlinlang.org/api/latest/jvm/stdlib/kotlin.collections/-map/index.html\n val frameList:MutableList<RTEFrameBufferEntry> = mutableListOf()\n\n var initialized = false\n var firstTimestamp:Long = -1\n var renderTimestamp:Long = -1\n\n /**\n * Record the timestamp from the first packet and the current timestamp as the starting time\n * for buffering.\n */\n fun startEngine(timestamp:Long){\n firstTimestamp = timestamp\n renderTimestamp = System.currentTimeMillis()\n initialized = true\n this.empty()\n }\n\n /**\n * Add a packet to the buffer.\n */\n fun enqueue(packet:RTEPacket){\n if(frameList.any{ it.fid == packet.fid }){\n // If the frame already has an entry in the buffer then just add this packet to it.\n val frameEntry:RTEFrameBufferEntry = frameList.first{ it.fid == packet.fid }\n // Insert the packet data at the offset given.\n frameEntry.frameBuffer.put(packet.data, packet.offset, packet.length)\n } else {\n val frameEntry = RTEFrameBufferEntry(slotSize, packet.fid, packet.timestamp,\n packet.totalLength, packet.totalPackets)\n // Insert the packet data at the offset given.\n frameEntry.frameBuffer.put(packet.data, packet.offset, packet.length)\n\n frameList.add(frameEntry)\n }\n }\n\n /**\n * Remove the most recent ready frame.\n */\n fun dequeue():ByteBuffer?{\n // Synchronize the buffer so any expired frames are removed.\n syncUp()\n // If the frame is not yet ready, don't return it yet.\n if(!nextFrameReady()) return null\n\n val frameOut = frameList.removeAt(0)\n\n return frameOut.frameBuffer\n }\n\n /**\n * Synchronize the buffer so that frames that are old are removed from the buffer.\n */\n fun syncUp(){\n\n }\n\n /**\n * Determine whether the next frame in line is full and ready to be dequeued.\n */\n fun nextFrameReady():Boolean{\n return false\n }\n\n fun empty(){\n frameList.clear()\n }\n\n inner class RTEFrameBufferEntry(dataSize:Int,\n var fid:Long,\n var presentationTimestamp:BigInteger,\n var totalLength:Long,\n var totalNumberOfPackets:Long){\n /** the following are only modified at dequeue */\n // Flag denoting enqueue/dequeue operation\n var slotFlag:Long = -1\n\n /** The buffer holding frame data. */\n var frameBuffer:ByteBuffer = ByteBuffer.allocate(dataSize)\n\n fun addPacket(packet:RTEPacket){\n presentationTimestamp = packet.timestamp\n }\n\n fun isExpired():Boolean{\n if(!initialized) return false\n\n if(this.presentationTimestamp < BigInteger.ZERO) return false\n return false\n }\n\n fun isComplete():Boolean{\n if(!initialized) return false\n return false\n }\n\n fun isSynchronized():Boolean{\n if(!initialized) return false\n return false\n }\n\n }\n}"
},
{
"alpha_fraction": 0.6432900428771973,
"alphanum_fraction": 0.6476190686225891,
"avg_line_length": 25.88372039794922,
"blob_id": "fdec1bcf3379a02bc54caab0b0836251577d57c2",
"content_id": "e6be34db37102ae1ed6252a645037997914b72e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 1155,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 43,
"path": "/app/src/main/java/rte/packetization/RTEPacketizer.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte.packetization\n\nimport android.os.Parcelable\nimport rte.RTEFrame\nimport java.io.InputStream\nimport java.net.DatagramPacket\nimport java.net.InetAddress\n\n/**\n * Created by jk on 3/13/18.\n */\nabstract class RTEPacketizer:Runnable {\n internal var runnerThread:Thread? = null\n var inputStream: InputStream? = null\n\n /**\n * A helper function to set up and start the thread that will run this packetizer.\n */\n fun start(){\n if(runnerThread == null){\n runnerThread = Thread(this)\n runnerThread!!.start()\n }\n }\n\n /** Stops the packetizer. */\n open fun stop(){\n runnerThread?.interrupt()\n try{\n runnerThread?.join()\n } catch (e:InterruptedException){}\n runnerThread = null\n }\n\n /**\n * Packetizes the frame into a list of packets to be sent to the receiver.\n *\n * @param rteFrame The frame to be sent\n * @param packetSize The desired packet size. This is variable to allow tuning of packet\n * size for increased performance.\n */\n abstract fun packetize(rteFrame: RTEFrame, packetSize:Int): ArrayList<DatagramPacket>\n}"
},
{
"alpha_fraction": 0.6573556661605835,
"alphanum_fraction": 0.669858992099762,
"avg_line_length": 40.081966400146484,
"blob_id": "fdf94b3801ad08f5b65c73a1db4aa6b9babcb9a2",
"content_id": "843aa4adfdd89e2d780cc029622e3be8f5857203",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 7518,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 183,
"path": "/app/src/main/java/info/jkjensen/castexv2/MainActivity.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package info.jkjensen.castexv2\n\nimport android.Manifest\nimport android.content.Context\nimport android.content.Intent\nimport android.content.SharedPreferences\nimport android.net.Uri\nimport android.net.wifi.WifiManager\nimport android.os.Build\nimport android.support.v7.app.AppCompatActivity\nimport android.os.Bundle\nimport android.provider.Settings\nimport android.support.v4.app.ActivityCompat\nimport android.util.AttributeSet\nimport android.util.DisplayMetrics\nimport android.view.View\nimport android.widget.Toast\nimport kotlinx.android.synthetic.main.activity_main.*\nimport org.jetbrains.anko.mediaProjectionManager\nimport org.jetbrains.anko.startActivity\nimport rte.RTEProtocol\nimport rte.ScreenCapturerService\nimport rte.packetization.RTEPacketizer\nimport rte.session.RTESessionBuilder\nimport java.io.FileOutputStream\nimport java.net.InetAddress\n\nclass MainActivity : AppCompatActivity() {\n\n /**\n * An example native method that is implemented by the 'native-lib' native library,\n * which is packaged with this application.\n */\n external fun stringFromJNI(): String\n\n companion object {\n\n // Used to load the 'native-lib' library on application startup.\n init {\n System.loadLibrary(\"native-lib\")\n }\n\n const val TAG =\"info.jkjensen.castex\"\n\n private const val REQUEST_MEDIA_PROJECTION_CODE = 101\n private const val REQUEST_OVERLAY_CODE = 201\n private const val REQUEST_FILE_CODE = 301\n }\n\n /** Display metrics for screen attributes */\n private var metrics: DisplayMetrics? = null\n /** Used for writing stats to a file while debugging */\n private var fos: FileOutputStream? = null\n /** Used to track timestamps during execution */\n private var startTime = System.currentTimeMillis()\n /** Sender address TODO: Make this address dynamic. */\n private var group1: InetAddress? = null\n val sessionBuilder = RTESessionBuilder()\n var packetizer: RTEPacketizer? = null\n var multicastLock: WifiManager.MulticastLock? = null\n\n override fun onCreate(savedInstanceState: Bundle?) 
{\n super.onCreate(savedInstanceState)\n\n setContentView(R.layout.activity_main)\n\n // Example of a call to a native method\n sample_text.text = stringFromJNI()\n\n this.setupSharedPreferences()\n\n // Acquire a multicast lock (used so the device can receive packets not explicitly addressed\n // to it.\n val wifiManager = applicationContext.getSystemService(Context.WIFI_SERVICE) as WifiManager\n multicastLock = wifiManager.createMulticastLock(\"multicastLock\")\n multicastLock!!.setReferenceCounted(false)\n multicastLock!!.acquire()\n\n metrics = applicationContext.resources.displayMetrics\n\n// group1 = InetAddress.getByName(\"192.168.43.172\") // Duo\n// group1 = InetAddress.getByName(\"192.168.43.15\") // Linux Box\n// group1 = InetAddress.getByName(\"192.168.43.81\") // Anirban\n// group1 = InetAddress.getByName(\"10.26.152.237\") // Linux Box\n\n\n startStreamButton.setOnClickListener {\n\n // Android M+ require us to explicitly ask for overlay permissions.\n if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M && !Settings.canDrawOverlays(this)) {\n val overlayIntent = Intent(Settings.ACTION_MANAGE_OVERLAY_PERMISSION)\n overlayIntent.data = Uri.parse(\"package:\" + packageName)\n startActivityForResult(overlayIntent, REQUEST_OVERLAY_CODE)\n } else{\n startActivityForResult(\n mediaProjectionManager.createScreenCaptureIntent(),\n REQUEST_MEDIA_PROJECTION_CODE)\n }\n }\n\n closeStreamButton.setOnClickListener{\n fos?.close()\n// imageReader?.close()\n }\n\n receiverButton.setOnClickListener{\n val receiverIntent = ReceiverActivity.getIntent(this, group1?.hostAddress ?: \"\")\n startActivity(receiverIntent)\n }\n\n // Explicitly ask for permission to read/write files (only needed for debugging at this point).\n ActivityCompat.requestPermissions(this,\n arrayOf(Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE),\n REQUEST_FILE_CODE)\n }\n\n override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {\n // If result is from media projection, we can begin capturing.\n if (requestCode == REQUEST_MEDIA_PROJECTION_CODE) {\n super.onActivityResult(requestCode, resultCode, data)\n\n\n\n\n sessionBuilder\n .setContext(this)\n .setMulticastLock(multicastLock!!)\n .setReceiverAddress(\"192.168.43.15\") // Intel nuc\n// .setReceiverAddress(\"192.168.43.81\") // Anirban's\n// .setReceiverAddress(\"192.168.43.20\") // iMac\n .setVideoType(RTEProtocol.MEDIA_TYPE_H264)\n// .setAudioType(RTEProtocol.MEDIA_TYPE_AAC)\n .setStreamHeight(metrics!!.heightPixels/2)\n .setStreamWidth(metrics!!.widthPixels/2)\n .setStreamDensity(metrics!!.densityDpi)\n .setMediaProjectionResults(resultCode, data)\n .setup(RTEProtocol.SENDER_SESSION_TYPE)\n\n // Check if the setup was successful. If not, the sessionBuilder will provide a useful\n // message for the user in sessionBuilder.setupSuggestion.\n if(sessionBuilder.setupSuggestion != null){\n val t = Toast.makeText(this, \"Streaming is not allowed. 
${sessionBuilder.setupSuggestion}\", Toast.LENGTH_LONG)\n t.show()\n }else{\n // Start the screencapturerservice\n val serviceIntent = Intent(this, ScreenCapturerService::class.java)\n serviceIntent.putExtra(ScreenCapturerService.MEDIA_PROJECTION_RESULT_CODE, resultCode)\n serviceIntent.putExtra(ScreenCapturerService.MEDIA_PROJECTION_RESULT_DATA, data)\n serviceIntent.putExtra(ScreenCapturerService.SESSION_CODE, sessionBuilder.session)\n startService(serviceIntent)\n }\n\n\n } else if(requestCode == REQUEST_OVERLAY_CODE){\n /* If the result is from the overlay request, we must now request the media projection\n permissions */\n startActivityForResult(\n mediaProjectionManager.createScreenCaptureIntent(),\n REQUEST_MEDIA_PROJECTION_CODE)\n }\n }\n\n /**\n * Pops the oldest frame from the FIFO buffer and displays it on the imageview. Also packetizes\n * it and sends the packets over the network.\n */\n @Synchronized private fun openScreenshot() {\n\n }\n\n /**\n * Establish all app-specific parameters to be available system-wide.\n */\n private fun setupSharedPreferences(){\n val sharedPreferences: SharedPreferences = getSharedPreferences(\"appConfig\", Context.MODE_PRIVATE)\n val editor: SharedPreferences.Editor = sharedPreferences.edit()\n editor.putBoolean(CastexPreferences.KEY_DEBUG, CastexPreferences.DEBUG)\n editor.putBoolean(CastexPreferences.KEY_MULTICAST, CastexPreferences.MULTICAST)\n editor.putBoolean(CastexPreferences.KEY_TCP, CastexPreferences.TCP)\n editor.putInt(CastexPreferences.KEY_PORT_OUT, CastexPreferences.PORT_OUT)\n editor.apply()\n }\n}\n"
},
{
"alpha_fraction": 0.614375114440918,
"alphanum_fraction": 0.6172016859054565,
"avg_line_length": 38.00787353515625,
"blob_id": "ebf541e8ba65fbc4fbd7d47091aa0c3a418f710d",
"content_id": "c2093dcacef58f1bb564cc4643cd818e545b1002",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 4953,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 127,
"path": "/app/src/main/java/rte/packetization/RTEJpegPacketizer.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte.packetization\n\nimport android.graphics.Bitmap\nimport android.util.Log\nimport rte.RTEFrame\nimport rte.RTEPacket\nimport rte.RTEProtocol\nimport rte.session.RTESession\nimport java.io.ByteArrayOutputStream\nimport java.lang.Thread.sleep\nimport java.net.DatagramPacket\n\n/**\n * Created by jk on 2/26/18.\n * Packetizes frame data into UDP packets for transmission to receiving devices.\n */\nopen class RTEJpegPacketizer(session:RTESession): RTEPacketizer(), Runnable{\n\n companion object {\n private const val TAG = \"RTEJpegPacketizer\"\n }\n\n // Image FIFO buffer for capture -> stream\n private var images:ArrayList<RTEFrame> = arrayListOf()\n private var session:RTESession? = null\n /* Tracks the previous bitmap displayed so that it may be recycled immediately when it is no\n longer needed */\n private var prevImage: RTEFrame? = null\n\n /**\n * Constructor\n */\n init {\n this.session = session\n }\n\n /**\n * An ongoing Runnable used to continuously packetize JPEG data frames for transmission\n * over the network.\n */\n override fun run() {\n var currentImage: RTEFrame?\n while(runnerThread?.isInterrupted == false) {\n sleep(5)\n\n // Skip this run if there are no images in the queue.\n if (images.isEmpty()) {\n continue\n }\n\n currentImage = images.removeAt(0)\n if (prevImage != null) {\n prevImage!!.bitmap.recycle()\n }\n prevImage = currentImage\n\n // Prepare the frame as several UDP packets.\n val packets: ArrayList<DatagramPacket> = packetize(currentImage, RTEProtocol.RTE_STANDARD_PACKET_LENGTH)\n Log.d(TAG, \"Sending \" + packets.size + \" packets.\")\n\n // Send out frames on UDP socket.\n for (p in packets) {\n session!!.vSock?.send(p)\n }\n }\n }\n\n /**\n * Packetizes a single frame into a list of UDP packets to be sent to the receiver.\n *\n * @param rteFrame The frame to be sent\n * @param packetSize The desired packet size. 
This is variable to allow tuning of packet\n * size for increased performance.\n */\n override fun packetize(rteFrame: RTEFrame, packetSize: Int): ArrayList<DatagramPacket> {\n if(session == null){\n throw Exception(\"No session associated with JPEG Packetizer\")\n } else {\n\n// val starttime = System.currentTimeMillis()\n val baos = ByteArrayOutputStream()\n // Make sure that if the bitmap has already been recycled we don't try to use it.\n if(rteFrame.bitmap.isRecycled) return arrayListOf()\n rteFrame.bitmap.compress(Bitmap.CompressFormat.JPEG, 50, baos)\n val outputData = baos.toByteArray()\n val dGramPackets = arrayListOf<DatagramPacket>()\n// return DatagramPacket(outputData, outputData.size, group, CastexPreferences.PORT_OUT)\n\n var pid = 0 // Packet ID for this frame.\n var offset = 0 // Offset of the current packet within this frame.\n val frameSize = outputData.size\n var bytesRemaining = frameSize // The remaining number of bytes left to send\n var packetLength = if (bytesRemaining >= packetSize) packetSize else bytesRemaining\n\n while (offset < frameSize) {\n val packet = RTEPacket()\n\n packet.header.magic = RTEProtocol.PACKET_MAGIC\n packet.header.type = session!!.videoType!!\n\n packet.fid = rteFrame.fid\n packet.totalLength = frameSize.toLong()\n packet.pid = pid\n // Number of packets is equal to the ratio of frame size to packet size plus an\n // additional packet if there is a remainder.\n packet.totalPackets = ((frameSize / packetSize) + (if (frameSize % packetSize > 0) 1 else 0)).toLong()\n packet.offset = offset\n packet.length = packetLength\n packet.timestamp = rteFrame.timestamp\n\n packet.data = outputData.slice(offset..(offset + packetLength)).toByteArray()\n packet.header.length = RTEProtocol.RTE_STANDARD_PACKET_LENGTH + packet.data.size /* size of header+packet w/o data + size of data */\n val serialized = packet.serialize()\n val dGramPacket = DatagramPacket(serialized, serialized!!.size, session!!.receiverAddress, session!!.receiverPort!!)\n dGramPackets.add(dGramPacket)\n\n pid++\n offset += packetLength\n bytesRemaining -= packetLength\n packetLength = if (bytesRemaining >= packetSize) packetSize else bytesRemaining\n }\n\n// Log.d(TAG, \"Packetization process took \" + (System.currentTimeMillis() - starttime).toString() + \"ms\")\n return dGramPackets\n }\n }\n}"
},
{
"alpha_fraction": 0.5269709825515747,
"alphanum_fraction": 0.5518672466278076,
"avg_line_length": 23.200000762939453,
"blob_id": "51a697804fc0e7b1c6598128f96d13caacbfde2f",
"content_id": "725be3eba64fc146c77922947130b4b18fec1e1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 10,
"path": "/app/src/main/java/rte/RTEPacketHeader.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport java.io.Serializable\n\n/**\n * Created by jk on 3/2/18.\n */\ndata class RTEPacketHeader(var magic:Long = RTEProtocol.PACKET_MAGIC,\n var type:Int = -1,\n var length:Int = -1)"
},
{
"alpha_fraction": 0.6536856889724731,
"alphanum_fraction": 0.660639762878418,
"avg_line_length": 26.69230842590332,
"blob_id": "6ad133cb5452ff654d77299fef7c4a3781e081da",
"content_id": "a3222bdbd5b487747697a4eb1ab47178d756d8e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 26,
"path": "/app/src/main/java/rte/RTENotificationBroadcastReceiver.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport android.content.BroadcastReceiver\nimport android.content.Context\nimport android.content.Intent\nimport android.util.Log\n\n/**\n * Created by jk on 1/18/18.\n */\nclass RTENotificationBroadcastReceiver: BroadcastReceiver() {\n companion object {\n val TAG = \"CastexBroadcastReceiver\"\n }\n\n override fun onReceive(context: Context?, intent: Intent?) {\n when(intent?.action){\n ScreenCapturerService.STOP_ACTION ->{\n Log.d(TAG, \"Stopping ScreenCaptureService\")\n context?.stopService(Intent(context, ScreenCapturerService::class.java))\n// context?.stopService(Intent(context, RtspServer::class.java))\n }\n }\n }\n\n}"
},
{
"alpha_fraction": 0.7364621162414551,
"alphanum_fraction": 0.7545126080513,
"avg_line_length": 26.799999237060547,
"blob_id": "981cfed8128a1014fbe45283496778490f37a5a9",
"content_id": "ccbab18a8882c633a4384361659bd21dce4d6810",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 10,
"path": "/app/src/main/java/rte/RTEFrame.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte\n\nimport android.graphics.Bitmap\nimport java.math.BigInteger\n\n/**\n * Created by jk on 2/26/18.\n * Just a filler class for now, may be necessary for future use by adding timestamps.\n */\ndata class RTEFrame(var bitmap: Bitmap, var fid: Long, var timestamp: BigInteger)"
},
{
"alpha_fraction": 0.6382065415382385,
"alphanum_fraction": 0.6401302218437195,
"avg_line_length": 29.17410659790039,
"blob_id": "1c63d4be182ef580cf4d6bc406482ea835cfd24a",
"content_id": "a0d8ecd885275e40dd2d8b1249dfb3b1477bf4f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 6758,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 224,
"path": "/app/src/main/java/rte/session/RTESessionBuilder.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package rte.session\n\nimport android.content.Context\nimport android.content.Intent\nimport android.media.projection.MediaProjection\nimport android.net.wifi.WifiManager\nimport android.util.Log\nimport android.widget.Toast\nimport rte.RTEProtocol\nimport rte.RTEProtocol.Companion.RECEIVER_SESSION_TYPE\nimport rte.RTEProtocol.Companion.SENDER_SESSION_TYPE\nimport rte.packetization.RTEH264Packetizer\nimport rte.packetization.RTEJpegPacketizer\nimport java.net.InetAddress\nimport java.net.MulticastSocket\n\n/**\n * Created by jk on 3/13/18.\n *\n */\nclass RTESessionBuilder {\n companion object {\n const val TAG = \"RTESessionBuilder\"\n }\n\n val session = RTESession()\n val setupSuggestion:String? get() = this.session.setupSuggestion\n\n /**\n * Required for receiver.\n *\n * The Session requires a [multicast lock][https://developer.android.com/reference/android/net/wifi/WifiManager.MulticastLock.html]\n * if the device is a receiver.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setMulticastLock(lock: WifiManager.MulticastLock):RTESessionBuilder{\n this.session.multicastLockHeld = lock.isHeld\n return this\n }\n\n /**\n * Required only for transmitter.\n *\n * Set the address for the receiver.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setReceiverAddress(address:String): RTESessionBuilder{\n this.session.receiverAddressStr = address\n return this\n }\n\n /**\n * Required for transmitter.\n *\n * Set the media projection codes for this session. You must request a mediaprojection before\n * being able to start the session.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setMediaProjectionResults(resultCode:Int, resultData: Intent?): RTESessionBuilder {\n this.session.mediaProjectionResultCode = resultCode\n this.session.mediaProjectionResultData = resultData\n return this\n }\n\n /**\n * Required only for transmitter. Required only for streams including video.\n *\n * Set the stream width in pixels.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setStreamWidth(width:Int): RTESessionBuilder{\n if(width <= 0){\n throw Exception(\"Invalid stream width\")\n }\n this.session.streamWidth = width\n return this\n }\n\n /**\n * Required only for transmitter. Required only for streams including video.\n *\n * Set the stream height in pixels.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setStreamHeight(height:Int): RTESessionBuilder{\n if(height <= 0){\n throw Exception(\"Invalid stream height\")\n }\n this.session.streamHeight = height\n return this\n }\n\n /**\n * Required only for transmitter. Required only for streams including video.\n *\n * Set the stream density in dpi. This can be obtained from DisplayMetrics.densityDPI.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setStreamDensity(density:Int): RTESessionBuilder{\n if(density <= 0){\n throw Exception(\"Invalid stream density\")\n }\n this.session.videoDensity = density\n return this\n }\n\n /**\n * Required only for transmitter.\n *\n * Set the stream bitrate.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setStreamBitrate(bitrate:Int): RTESessionBuilder{\n if(bitrate <= 0){\n throw Exception(\"Invalid stream bitrate\")\n }\n this.session.bitrate = bitrate\n return this\n }\n\n\n /**\n * Required only for transmitter. 
Required only for streams including video.\n *\n * Set the stream frame rate.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setStreamFramerate(framerate:Int): RTESessionBuilder{\n if(framerate <= 0){\n throw Exception(\"Invalid stream frame rate.\")\n }\n this.session.framerate = framerate\n return this\n }\n\n /**\n * Required for both transmitter and receiver.\n *\n * Set the video type of this session.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setVideoType(videoType:Int): RTESessionBuilder{\n this.session.videoType = videoType\n return this\n }\n\n /**\n * Required for both transmitter and receiver.\n *\n * Set the audio type of this session.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setAudioType(audioType:Int): RTESessionBuilder{\n when(audioType){\n RTEProtocol.MEDIA_TYPE_AAC -> {\n// this.session.packetizer = RTEAACPacketizer(this.session)\n }\n else -> throw Exception(\"Invalid video type for rte.session.RTESessionBuilder.setMediaType()\")\n }\n this.session.audioType = audioType\n return this\n }\n\n /**\n * Required for both transmitter and receiver.\n *\n * Set the context of this session.\n *\n * @return this RTESessionBuilder for function chaining.\n */\n fun setContext(context: Context): RTESessionBuilder{\n this.session.context = context\n return this\n }\n\n fun setup(sessionType:String): RTESessionBuilder {\n this.session.sessionType = sessionType\n // Check if all necessary fields and permissions are set before setting up this session.\n if(this.session.isStartable()) {\n\n when (sessionType) {\n SENDER_SESSION_TYPE -> {\n setupSender()\n }\n RECEIVER_SESSION_TYPE -> {\n setupReceiver()\n }\n else -> throw Exception(\"Invalid type parameter to rte.session.RTESessionBuilder.setup()\")\n }\n this.session.sessionType = sessionType\n }\n return this\n }\n\n private fun setupSender(): RTESessionBuilder{\n Log.d(TAG, \"RTE Session set up for sending to \" + this.session.receiverAddressStr +\n \" on port \" + this.session.receiverPort)\n return this\n }\n\n private fun setupReceiver(): RTESessionBuilder{\n // The receiver does not require a packetizer.\n this.session.packetizer = null\n // Check if the user has a multicast lock\n if(!session.multicastLockHeld){\n throw Exception(\"User must acquire MulticastLock and give it to the session via RTESessionBuilder.setMulticastLock() before calling setup()\")\n }\n return this\n }\n\n fun start(mediaProjection: MediaProjection){\n this.session.start(mediaProjection)\n }\n}"
},
{
"alpha_fraction": 0.5369458198547363,
"alphanum_fraction": 0.545859694480896,
"avg_line_length": 37.07143020629883,
"blob_id": "8f93d3e77aa77919651d4ac96e958c67af847317",
"content_id": "c04030f22c87622b92e70239ba865f822130e261",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Kotlin",
"length_bytes": 4263,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 112,
"path": "/app/src/main/java/info/jkjensen/castexv2/PacketReceiverTask.kt",
"repo_name": "INLabASU/castex-v2.0",
"src_encoding": "UTF-8",
"text": "package info.jkjensen.castexv2\n\nimport android.os.AsyncTask\nimport android.util.Log\nimport java.io.IOException\nimport java.net.DatagramPacket\nimport java.net.DatagramSocket\nimport java.nio.ByteBuffer\nimport java.util.ArrayList\nimport java.net.Socket\n\n/**\n * This task is used solely for receiving incoming packets on a socket. This task does not do any\n * processing. Instead it calls @param onPacketReady, a callback function provided by its\n * instantiator.\n */\nclass PacketReceiverTask constructor(private var clientSocket: DatagramSocket? = null,\n private var tcpEnabled:Boolean = false,\n private var tcpSock:Socket? = null,\n private var multicastEnabled:Boolean = false,\n val onPacketReady: (DatagramPacket)->Unit) : AsyncTask<String, String, String>() {\n\n private var dPacket: DatagramPacket? = null\n private var discrepancy: Long = 0\n private var prevDiscrepancy: Long = 0\n private val framesMissed = ArrayList<Int>()\n\n private val pFrameTotal = 0\n private var pFrameCount = 0\n private var pFrameAverage = 0\n private var expectedFrameNumber: Long = 0\n\n companion object {\n const val TAG = \"PacketReceiverTask\"\n }\n\n override fun doInBackground(vararg strings: String): String {\n // Continuously receive packets and put them on a priorityqueue\n while (!this.isCancelled) {\n try {\n val buff = ByteArray(100535)\n dPacket = DatagramPacket(buff, 100535)\n // Some Android devices require you to manually reset data every time or the\n // previous data size will be used.\n dPacket!!.data = buff\n if (tcpEnabled && tcpSock != null) {\n val buf = ByteBuffer.wrap(buff)\n val input = tcpSock!!.channel\n var n = 0\n while (n >= 0) {\n n = input.read(buf)\n }\n dPacket!!.data = buf.array()\n } else if (multicastEnabled) {\n clientSocket?.receive(dPacket!!)\n } else {\n clientSocket?.receive(dPacket!!)\n }\n val buf = ByteBuffer.wrap(dPacket!!.data)\n// discrepancyTest(buf)\n buf.compact()\n dPacket!!.setData(buf.array(), buf.arrayOffset(), buf.limit())\n// addToQueue(ByteBuffer.wrap(dPacket!!.data, dPacket!!.offset, dPacket!!.length).duplicate())\n //\n onPacketReady(dPacket!!)\n } catch (e: IOException) {\n if (isCancelled) return \"\"\n e.printStackTrace()\n }\n\n }\n return \"\"\n }\n\n override fun onCancelled(result: String?) {\n Log.d(TAG, \"Task cancelled\")\n clientSocket?.close()\n tcpSock?.close()\n }\n\n /**\n * Used for debugging incoming frames.\n */\n fun discrepancyTest(buf:ByteBuffer){\n\n val frameNumber = buf.int\n if (frameNumber == 1) {\n expectedFrameNumber = 0\n pFrameCount = 0\n pFrameAverage = 0\n }\n expectedFrameNumber++\n val type = buf.get(8).toInt() and 0x1f\n Log.d(\"FrameSizeTest\", \"Size:\" + dPacket!!.length + \"\\nType: \" + String.format(\"0x%02X\", type))\n if (type == 0x01) {\n // pFrameCount++;\n // pFrameTotal+= dPacket.getLength();\n // pFrameAverage = pFrameTotal / pFrameCount;\n // Log.d(\"FrameSizeTest\", \"Average P Frame Size: \" + pFrameAverage);\n } else if (type == 0x05) {\n Log.d(\"iframesizetest\", \"Length: \" + dPacket!!.length)\n }\n\n discrepancy = frameNumber - expectedFrameNumber\n if (prevDiscrepancy != discrepancy) {\n Log.d(\"FrameCountTest\", \"Discrepancy:\" + (frameNumber - expectedFrameNumber))\n framesMissed.add(frameNumber - 1)\n Log.d(\"FrameCountTest\", framesMissed.toString())\n prevDiscrepancy = discrepancy\n }\n }\n}"
}
] | 21 |
hybby/sreport | https://github.com/hybby/sreport | 2705c23d94323dbdf5dba1300919e3323d785321 | 5ec35de946a3dbd8a250084fe132b5c6bf6668be | cc59268c8299c3ec464c10f8ecb6da3dc439b9f4 | refs/heads/main | 2022-12-29T21:04:40.863262 | 2020-10-05T23:28:23 | 2020-10-05T23:28:23 | 300,889,761 | 0 | 0 | null | 2020-10-03T13:43:23 | 2020-10-04T21:31:20 | 2020-10-04T21:33:28 | Python | [
{
"alpha_fraction": 0.584625780582428,
"alphanum_fraction": 0.6082265973091125,
"avg_line_length": 23.71666717529297,
"blob_id": "07f0599eb1ec551ba83c3f315092ed255434c1d8",
"content_id": "b175b02616481e2521f9c2b792bd64d202d9990a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4449,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 180,
"path": "/tests/test_urls.py",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUnit tests for the sreport.py utility relating to URL processing\n\"\"\"\nimport pytest\nimport requests\nfrom sreport import validate_url, process_url\n\n\ndef test_valid_url():\n \"\"\"\n Tests whether the url validator correctly identifies valid URLs\n \"\"\"\n url = \"https://www.google.com\"\n assert validate_url(url) is True\n\n\ndef test_invalid_url():\n \"\"\"\n Tests whether the url validator correctly identifies invalid URLs\n \"\"\"\n url = \"bad://address\"\n assert validate_url(url) is False\n\n\ndef test_missing_url():\n \"\"\"\n Tests whether the url validator throws an exception when no URL is provided\n \"\"\"\n with pytest.raises(ValueError, match=\"No url provided\"):\n validate_url(\"\")\n\n\ndef test_invalid_url_output():\n \"\"\"\n Tests whether we output the correct format of message for an invalid URL\n \"\"\"\n url = \"bad://address\"\n expected_output = {\n \"Url\": url,\n \"Error\": \"invalid url\",\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_ssl_error_output(requests_mock):\n \"\"\"\n Tests whether we output the correct format of message for a URL which\n returns an SSL error. For example, if the site has a bad certificate\n \"\"\"\n url = \"https://badcert.com\"\n requests_mock.get(\n url,\n exc=requests.exceptions.SSLError\n )\n\n expected_output = {\n \"Url\": url,\n \"Error\": \"ssl error\",\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_connection_error_output(requests_mock):\n \"\"\"\n Tests whether we output the correct format of message for a URL which\n refuses our connection. For example, if the DNS lookup fails.\n \"\"\"\n url = \"http://not.exists.bbc.co.uk\"\n requests_mock.get(\n url,\n exc=requests.exceptions.ConnectionError\n )\n\n expected_output = {\n \"Url\": url,\n \"Error\": \"connection error\",\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_connection_timeout_output(requests_mock):\n \"\"\"\n Tests whether we output the correct format of message for a URL which\n takes longer than our timeout value to return a response.\n \"\"\"\n url = \"http://slowsite.com\"\n requests_mock.get(\n url,\n exc=requests.exceptions.Timeout\n )\n\n expected_output = {\n \"Url\": url,\n \"Error\": \"timed out\",\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_too_many_redirects_output(requests_mock):\n \"\"\"\n Tests whether we output the correct format of message for a URL which\n refuses our connection. 
For example, if the DNS lookup fails.\n \"\"\"\n url = \"http://here.there.everywhere.com\"\n requests_mock.get(\n url,\n exc=requests.exceptions.TooManyRedirects\n )\n\n expected_output = {\n \"Url\": url,\n \"Error\": \"too many redirects\",\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_40x_50x_output(requests_mock):\n \"\"\"\n Tests whether we output correctly for common HTTP 40x and 50x responses\n We expect the URL, status code and datetime of the response to be returned\n \"\"\"\n url = \"http://not.exists.bbc.co.uk/\"\n codes = [\n 400, # bad request\n 401, # unauthorized\n 403, # forbidden\n 404, # not found\n 500, # internal server error\n 502, # bad gateway\n 503, # service unavailable\n 504 # gateway timeout\n ]\n\n for code in codes:\n requests_mock.get(\n url,\n status_code=code,\n headers={'Date': 'Sat, 03 Oct 2020 17:32:59 GMT'}\n )\n\n expected_output = {\n \"Url\": url,\n \"Status_code\": code,\n \"Date\": \"Sat, 03 Oct 2020 17:32:59 GMT\"\n }\n\n assert process_url(url) == expected_output\n\n\ndef test_200_output(requests_mock):\n \"\"\"\n Tests whether we output the correct message for HTTP 200 responses\n We expect the URL, status code, datetime of the response and the content\n length of the response to be returned.\n\n As we always follow redirects, we won't test HTTP 301 responses.\n \"\"\"\n url = \"http://www.example.com\"\n requests_mock.get(\n url,\n status_code=200,\n headers={\n 'Date': 'Sat, 03 Oct 2020 17:32:59 GMT',\n 'Content-Length': '12345'\n }\n )\n\n expected_output = {\n \"Url\": url,\n \"Status_code\": 200,\n \"Content_length\": '12345',\n \"Date\": \"Sat, 03 Oct 2020 17:32:59 GMT\"\n }\n\n assert process_url(url) == expected_output\n"
},
{
"alpha_fraction": 0.6384372115135193,
"alphanum_fraction": 0.6402438879013062,
"avg_line_length": 28.91891860961914,
"blob_id": "04a9f27ef28c067bb6e179705c6664f8b264b5db",
"content_id": "433130934f0406ed52f3611467f8afa3f9429162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4428,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 148,
"path": "/sreport.py",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\nA utility to make HTTP(S) requests to specified URLs and report on the results\n\"\"\"\nimport sys\nimport json\nimport requests\nfrom validator_collection import checkers\nUSAGE = \"Usage: ./sreport.py < urls.txt\"\n\n\ndef parse_input(input_):\n \"\"\"\n Given an input string, return a list of strings split by newline character\n \"\"\"\n if len(input_) <= 0:\n raise ValueError(\"No input provided\")\n\n return input_.splitlines()\n\n\ndef validate_url(url):\n \"\"\"\n Determine if a given URL is valid. Return True if so, False if not\n \"\"\"\n if not url:\n raise ValueError(\"No url provided\")\n\n return checkers.is_url(url)\n\n\ndef process_url(url, timeout_secs=10):\n \"\"\"\n Given a URL, attempt to make a request to it and return information that\n we're interested in, such as date/time of response, status code and length\n \"\"\"\n output = {}\n output['Url'] = url\n\n if not validate_url(url):\n output['Error'] = 'invalid url'\n return output\n\n # attempt a request and deal with common exceptions we may encounter and\n # wish to report upon. erroring out on other exceptions seems reasonable\n # https://requests.readthedocs.io/en/master/_modules/requests/exceptions\n try:\n response = requests.get( # pylint: disable=unused-variable\n url,\n allow_redirects=True,\n timeout=timeout_secs\n )\n except requests.exceptions.SSLError:\n output['Error'] = \"ssl error\"\n return output\n except requests.exceptions.TooManyRedirects:\n output['Error'] = \"too many redirects\"\n return output\n except requests.exceptions.ConnectionError:\n # catches dns failures and refused connections\n output['Error'] = \"connection error\"\n return output\n except requests.exceptions.Timeout:\n # catches connection timeouts and read timeouts\n output['Error'] = \"timed out\"\n return output\n\n # build our output message, adding attributes if they're available\n if response.status_code:\n output['Status_code'] = response.status_code\n\n if 'Content-Length' in response.headers:\n output['Content_length'] = response.headers['Content-Length']\n\n if 'Date' in response.headers:\n output['Date'] = response.headers['Date']\n\n return output\n\n\ndef generate_summary(summary):\n \"\"\"\n Given a dictionary of status codes and occurrances, generate a report\n object (array of objects) that summarises a count of overall responses\n along with a breakdown of counts of different response codes.\n \"\"\"\n if not isinstance(summary, dict):\n raise TypeError(\"input must be dict\")\n\n overall_responses = 0\n output = []\n\n for status_code, quantity in summary.items():\n if not isinstance(status_code, int):\n raise ValueError(\"bad input; response codes must be integers\")\n\n if not isinstance(quantity, int):\n raise ValueError(\"bad input; response counts must be integers\")\n\n overall_responses = overall_responses + quantity\n output.append({\n 'Status_code': status_code,\n 'Number_of_responses': quantity\n })\n\n output.append({\n 'Number_of_responses': overall_responses\n })\n\n return output\n\n\ndef output_json(output):\n \"\"\"\n Given a dict or a list, output it to stdout as a JSON document\n \"\"\"\n if not isinstance(output, (dict, list)):\n raise TypeError(\"input must be dict or list\")\n\n print(json.dumps(output, indent=4))\n\n\nif __name__ == \"__main__\":\n # requirement: program is run from command line and takes input from stdin\n if sys.stdin.isatty():\n raise ValueError(\n \"This program only accepts input via stdin\\n{}\".format(USAGE)\n )\n\n with 
sys.stdin as stdin:\n lines = parse_input(stdin.read())\n\n stats = {}\n for line in lines:\n result = process_url(line)\n output_json(result)\n\n # if we recieved a successful response, increment our stats counter\n # presence of a 'Status_code' attribute means a valid response\n if 'Status_code' in result:\n if result['Status_code'] in stats:\n stats[result['Status_code']] = stats[result['Status_code']] + 1\n else:\n stats[result['Status_code']] = 1\n\n # build our summary document\n report = generate_summary(stats)\n output_json(report)\n"
},
{
"alpha_fraction": 0.6899159550666809,
"alphanum_fraction": 0.702521026134491,
"avg_line_length": 21.66666603088379,
"blob_id": "a2e2202b9b1ca8e9a2a5123aa48d08e6b2f415fa",
"content_id": "4f0211d104095832a4c53d34be2c624330adc003",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2380,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 105,
"path": "/README.md",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.org/hybby/sreport)\n\n# sreport\n## Overview\nA Python program to make HTTP(S) requests and report on the results.\n\nProvide a newline serperated list of URLs as `stdin` and each will be tested\nin turn. A summary report will be output after all URLs have been processed\n\n```\n$ ./sreport.py < test.txt\n{\n \"Url\": \"https://www.google.com\",\n \"Status_code\": 200,\n \"Date\": \"Sun, 04 Oct 2020 20:43:44 GMT\"\n}\n[\n {\n \"Status_code\": 200,\n \"Number_of_responses\": 1\n },\n {\n \"Number_of_responses\": 1\n }\n]\n```\n\nRequests will time out after 10 seconds and redirects will be followed.\n\n\n## Getting Started\n### Help?\nEmbedded help is built into the provided Makefile. Just run:\n\n```\nmake\n```\n\n### Installation\nThis script requires Python 3. It is recommended that you perform the\ninstallation of the script's requirements into a Virtual Environment (`venv`)\nto avoid the possibility of requirements clashing with your system Python.\n\nInstall the script's dependencies:\n\n```\nvirtualenv --python=$(which python3) venv\nsource venv/bin/activate\nmake requirements\n```\n\n### Running\nIf you've installed the script's requirements in a Virtual Environment, ensure\nthat it is enabled before running the script:\n\n```\nsource venv/bin/activate\n```\n\n#### One URL\n```\necho \"https://www.google.com\" | ./sreport.py\n```\n\n#### A file of multiple URLs\nRun for a list of sites, contained within a file:\n\n```\necho \"https://www.google.com\" > urls.txt\necho \"https://www.gmail.com\" >> urls.txt\n\n./sreport.py < urls.txt\n```\n\n### Testing (Local)\nThe following tests are provided:\n\n * `pytest` - Unit tests\n * `pycodestyle` - ([PEP8](http://www.python.org/dev/peps/pep-0008/)) code style checks\n * `pylint` - Code linting checks\n\nThese can be run locally using the Makefile, assuming the installation steps\nhave been performed\n\n```\nmake test\n```\n\n### Testing (Docker)\nThe tests can be run inside a `python:3` Docker container.\n\nBuild a container and run the tests by running:\n\n```\nmake dockertest\n```\n\n### Testing (Travis CI)\nThe tests are also run nightly via Travis CI.\n\nFor the build to pass, it is expected that all stages of the `make test` target\nwill succeed.\n\nClicking the badge at the top of this readme can be used to determine which\nversions of Python the script is tested against.\n"
},
{
"alpha_fraction": 0.6399064660072327,
"alphanum_fraction": 0.6399064660072327,
"avg_line_length": 23.673076629638672,
"blob_id": "0f6d06b0c35be8aeb6701197b0387f19a03b393f",
"content_id": "44d3806cc4a0d84e58533b140a6ffd25f9802a83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 52,
"path": "/tests/test_io.py",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUnit tests for the sreport.py utility relating to input/output operations\n\"\"\"\nimport pytest\nfrom sreport import parse_input, output_json\n\n\ndef test_split_newlines_input():\n \"\"\"\n Tests that newline separated input is split into a list of strings\n \"\"\"\n sample_input = \"foo\\nbar\"\n assert parse_input(sample_input) == ['foo', 'bar']\n\n\ndef test_no_newlines_input():\n \"\"\"\n Tests that input with no newlines becomes a one item list\n \"\"\"\n sample_input = \"foobar\"\n assert parse_input(sample_input) == ['foobar']\n\n\ndef test_no_input():\n \"\"\"\n Tests that an exception is thrown when no input is provided to script\n \"\"\"\n with pytest.raises(ValueError, match=\"No input provided\"):\n parse_input(\"\")\n\n\ndef test_json_output(capsys):\n \"\"\"\n Tests that outputting an object in JSON is done in the manner we expect\n \"\"\"\n sample_object = {\n \"foo\": \"bar\"\n }\n\n output_json(sample_object)\n captured = capsys.readouterr()\n assert captured.out == '{\\n \"foo\": \"bar\"\\n}\\n'\n\n\ndef test_invalid_json_output():\n \"\"\"\n Tests that we raise TypeError if an object doesn't seem 'json-able'\n \"\"\"\n sample_object = \"\"\n\n with pytest.raises(TypeError, match=\"input must be dict or list\"):\n output_json(sample_object)\n"
},
{
"alpha_fraction": 0.8857142925262451,
"alphanum_fraction": 0.8857142925262451,
"avg_line_length": 10.666666984558105,
"blob_id": "abee0e0bfa00d87a72cfa3e57ebd2312c3fe78bf",
"content_id": "cebf4e0890aee1414eb12340450ecdfadcccc0e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "pytest\npycodestyle\npylint\nrequests\nrequests_mock\nvalidator_collection\n"
},
{
"alpha_fraction": 0.6766917109489441,
"alphanum_fraction": 0.7067669034004211,
"avg_line_length": 12.300000190734863,
"blob_id": "cc776c24c67c37eecbe099187ddcda0a0eff2de7",
"content_id": "7d397233d5beb2ecb9b6dc21fce346f0507f9c3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/Dockerfile",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "FROM python:3\nENV TERM=xterm-256color\nENV PYTHONPATH=.\n\nWORKDIR /usr/src/app\n\nCOPY . .\nRUN make requirements\n\nCMD [ \"make\", \"test\" ]\n"
},
{
"alpha_fraction": 0.6286836862564087,
"alphanum_fraction": 0.6385068893432617,
"avg_line_length": 36.703704833984375,
"blob_id": "60f3fea588c0922b9c2ad4f7b959d6c8484137c3",
"content_id": "f84922b7a9541e22c903ad73c71e6e630f9507f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1018,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 27,
"path": "/Makefile",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "PROJECT = sreport\nDESCRIPTION = A Python program to make HTTP(S) requests and report on the results\nBOLD := $(shell tput bold)\nRESET := $(shell tput sgr0)\n\nhelp: ## This 'help' documentation\n\t@printf \"$(BOLD)$(PROJECT): $(RESET)$(DESCRIPTION)\\n\\n\"\n\t@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"$(BOLD)%-24s\\033[0m $(RESET)%s\\n\", $$1, $$2}' | sort\n\nrequirements: ## Install dependencies required to run and test this script\n\tpip3 install -r requirements.txt\n\ntest: ## Run unit tests and code style checks\n\t@printf \"\\n$(BOLD)Running unit tests (py.test)$(RESET)\\n\"\n\tpy.test tests\n\n\t@printf \"$(BOLD)Running pycodestyle...$(RESET)\\n\"\n\tfind . -name '*.py' ! -path './venv/*' -exec pycodestyle {} +;\n\n\t@printf \"\\n$(BOLD)Running pylint...$(RESET)\\n\"\n\tfind . -name '*.py' ! -path './venv/*' -exec pylint {} +;\n\ndockertest: ## Build and run tests inside a Docker container\n\tdocker build . --tag $(PROJECT):latest\n\tdocker run -t --rm $(PROJECT):latest\n\n.PHONY: help init test\n"
},
{
"alpha_fraction": 0.555858314037323,
"alphanum_fraction": 0.5798364877700806,
"avg_line_length": 22.227848052978516,
"blob_id": "ee9bc95ce58eeccf05be3475c104ecf61d9f3def",
"content_id": "ec0998a359380cf0fd8b0b9faa856e4621b0fe77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1835,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 79,
"path": "/tests/test_summary.py",
"repo_name": "hybby/sreport",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUnit tests for the sreport.py utility relating to summary report generation\n\"\"\"\nimport pytest\nfrom sreport import generate_summary\n\n\ndef test_summary_report_output():\n \"\"\"\n Tests that given a summary dict of response codes and response counts,\n we produce a report in the format that we expect\n \"\"\"\n sample_summary = {\n 200: 50,\n 403: 2,\n 404: 10,\n 500: 1\n }\n\n expected_output = [\n {\n \"Status_code\": 200,\n \"Number_of_responses\": 50\n },\n {\n \"Status_code\": 403,\n \"Number_of_responses\": 2\n },\n\n {\n \"Status_code\": 404,\n \"Number_of_responses\": 10\n },\n {\n \"Status_code\": 500,\n \"Number_of_responses\": 1\n },\n {\n \"Number_of_responses\": 63\n }\n ]\n\n assert generate_summary(sample_summary) == expected_output\n\n\ndef test_summary_bad_input_error():\n \"\"\"\n Tests that we raise TypeError if a dictionary hasn't been provided\n \"\"\"\n sample_object = \"\"\n\n with pytest.raises(TypeError, match=\"input must be dict\"):\n generate_summary(sample_object)\n\n\ndef test_summary_bad_count_error():\n \"\"\"\n Tests that we throw a ValueError if a non-integer response count provided\n \"\"\"\n sample_object = {\n 404: \"sixty-one\"\n }\n\n error = \"bad input; response counts must be integers\"\n with pytest.raises(ValueError, match=error):\n generate_summary(sample_object)\n\n\ndef test_summary_bad_code_error():\n \"\"\"\n Tests that we throw a ValueError if a non-integer response code provided\n \"\"\"\n sample_object = {\n \"four-oh-four\": 100\n }\n\n error = \"bad input; response codes must be integers\"\n with pytest.raises(ValueError, match=error):\n generate_summary(sample_object)\n"
}
] | 8 |
infoknight/Machine-Learning | https://github.com/infoknight/Machine-Learning | 5bae456a8ea08a309a5d80b175bc9912501b5e19 | d00c741d101cb199b0b623d2466330fc3898dd8a | dbd17a985b56049928d564db6d550e367cab4754 | refs/heads/master | 2021-04-22T14:43:18.364894 | 2020-04-09T15:16:38 | 2020-04-09T15:16:38 | 249,853,438 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7201540470123291,
"alphanum_fraction": 0.7265725135803223,
"avg_line_length": 32.84782791137695,
"blob_id": "e5ebd0d1266d656cab1348c8e8657a5c330e4bc1",
"content_id": "223fe5779979012c7a188d0632f981d355a6642a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 46,
"path": "/Regression/6_decision_tree_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/6_Position_Salaries.csv\")\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\n#Not Applicable as Level indicates Positions in the dataset\n\n#Splitting dataset into Training & Testing set\n#Not Applicable as dataset is too small. Splitting would result in accuracy issues\n\n#Feature Scaling to normalise data\n#Not Required \n\n#Fitting the DTR to the dataset\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(criterion = \"mse\", random_state = 0)\nregressor.fit(X, y)\n\n#Visualising the DecisionTreeRegressor Result with Higher Resoultuion\n'''\nX_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape(len(X_grid),1)\nplt.scatter(X, y, color = \"red\")\nplt.plot(X_grid, regressor.predict(X_grid), color = \"blue\") \nplt.title(\"Decision Tree Regression\")\nplt.xlabel(\"Positon\")\nplt.ylabel(\"Salary\")\nplt.show()\n'''\n\n#Predicting the Result for real world application\nuserInput = float(input(\"Enter the Position Level : \")) #Get the user input as float\nuserInput = np.asmatrix(userInput) #Convert the user input to matrix value\ny_pred = regressor.predict(userInput) #Prediction Model\nprint(\"Predicted Salary : %f\\n\" %y_pred) #Display the result as float\n\n"
},
{
"alpha_fraction": 0.6978609561920166,
"alphanum_fraction": 0.7165775299072266,
"avg_line_length": 31.05714225769043,
"blob_id": "2188ea9611cbe465d9229edd66847377d843e5f3",
"content_id": "379b63e37ff7c828f9145b269e74f266360873e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 35,
"path": "/ReinforcementLearning/random_selection.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Random Selection Algorithm for Reinforcement Learning\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/18_Ads_CTR_Optimisation.csv\")\n#print(dataset)\n\n#Implementing Random Selection Algorithm\nimport random\nnum_rounds = 10000\nnum_ads = 10\nads_selected = []\ntotal_reward = 0\n\nfor rnd in range(0, num_rounds): #Simulating 10000 rounds\n ad = random.randrange(num_ads) #Simulating ads clicked (out of 10 ads) in each round\n ads_selected.append(ad) \n reward = dataset.values[rnd, ad] #Compare simulated-ads-clicke with the dataset. If matches reward = 1 else 0\n total_reward = total_reward + reward #Sum up the rewards\n\n#print(ads_selected) #List of ads selected in each round\n#print(reward)\n#print(total_reward)\n\n#Visualising the Random Selection Algorithm Result : Histogram\nplt.hist(ads_selected)\nplt.title(\"Reinforcement Learning : Random Selection Algorithm\")\nplt.xlabel(\"Ads\")\nplt.ylabel(\"Number of times ad selected\")\nplt.show()\n"
},
{
"alpha_fraction": 0.6464981436729431,
"alphanum_fraction": 0.6568586826324463,
"avg_line_length": 36.6875,
"blob_id": "c46caa3bb6f3838b79d03ad8b1daaae85758dfa8",
"content_id": "93c65baafc03f9950f5de4f0ad436c30a1827fb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2413,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 64,
"path": "/ModelSelection/XGBoost.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#XGBoost for Faster Performance\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#--------------------------------------DATA PREPROCESSING---------------------------------------------------------------------------#\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/28_Churn_Modelling.csv\")\nX = dataset.iloc[:, 3:13].values #Matrix of Independent Variables\ny = dataset.iloc[:, -1].values #Vector of Dependent Variables\n#print(dataset)\n#print(X)\n#print(y)\n\n#Taking Care of Missing Columns\n#Not Applicable\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_Geography = LabelEncoder()\nX[:, 1] = labelencoder_Geography.fit_transform(X[:, 1])\nlabelencoder_Gender = LabelEncoder()\nX[:, 2] = labelencoder_Gender.fit_transform(X[:, 2])\ncolumntransformer = ColumnTransformer([(\"ohe\", OneHotEncoder(), [1])], remainder = \"passthrough\")\nX = np.array(columntransformer.fit_transform(X))\n#print(X)\n\n#Avoiding Dummy Variable Trap\nX = X[:, 1:]\n#print(X)\n\n#Splitting Dataset into Training & Test Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n#print(X_train)\n#print(X_test)\n#print(y_train)\n#print(y_test)\n\n#Fitting XGBoost to the Training Set\nfrom xgboost import XGBClassifier\nclassifier = XGBClassifier(max_depth = 3, learning_rate = 0.1, n_estimators = 100, gamma = 0)\nclassifier.fit(X_train, y_train)\n\n\n#--------------------------------------Predicting the Result & Determining Accuracy-------------------------------------------------#\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test) #y_pred is a floating point number \n\n#Confirming the Accuracy of Prediction Using Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\n\n#--------------------------------------Applying K-Fold Cross Validation-------------------------------------------------------------#\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\nprint(accuracies)\nprint(\"Avg Accuracy : %f\" %accuracies.mean())\nprint(\"Standard Deviation : %f\" %accuracies.std())\n\n"
},
{
"alpha_fraction": 0.6547788977622986,
"alphanum_fraction": 0.6754636168479919,
"avg_line_length": 37.94444274902344,
"blob_id": "8eb4dac5a30acbd9618a879804b910a47e18d13e",
"content_id": "5aa7e3497270efd592729a2530c46ba577f72321",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 36,
"path": "/AssociationRuleLearning/apriori.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Assciation Rule Learning : Apriori Model\n#This Model requires \"apyori_library.py\" to be available in the same folder.\n#Input to apriory method is a List of Lists\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/17_Market_Basket_Optimisation.csv\", header = None)\n#print(dataset)\n\n#Preparing the List of Lists for all Transactions\ntransactions = [] #Initialise of List of Transactions\n\nrows = dataset.shape[0] #Number of Rows = Number of Transactions\ncolumns = dataset.shape[1] #Number of Columns = Number of Items\n#print(numTransactions)\n#print(numItems)\n\nfor i in range(0, rows):\n transactions.append([str(dataset.values[i, j]) for j in range(0, columns)])\n#print(transactions)\n\n#Training apyori Library on the Dataset\nfrom apyori_library import apriori\nrules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)\n #min_support: Let us consider an item that is bought atleat thrice a day for a week over the period of total transa ctions (ie., 7501) ==> 3 * 7 / 7500 = 0.003\n\nrules = list(rules) #Create a list of rules\n\n#Beautifully Print the Rules using a for Loop\nfor rule in rules:\n print(rule)\n"
},
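A sketch of the two preparation steps above on a made-up three-basket table (an assumption, not 17_Market_Basket_Optimisation.csv): the list-of-lists conversion and the min_support arithmetic:

import pandas as pd

dataset = pd.DataFrame([["milk", "bread"], ["milk", "eggs"], ["bread", "eggs"]])
transactions = [[str(dataset.values[i, j]) for j in range(dataset.shape[1])]
                for i in range(dataset.shape[0])]
print(transactions)   #[['milk', 'bread'], ['milk', 'eggs'], ['bread', 'eggs']]

#An item bought ~3 times a day for 7 days out of ~7500 total transactions:
min_support = 3 * 7 / 7500
print("min_support = %.4f" % min_support)   #0.0028, rounded to the 0.003 passed to apriori()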
{
"alpha_fraction": 0.7168653011322021,
"alphanum_fraction": 0.7255327105522156,
"avg_line_length": 34.0379753112793,
"blob_id": "c4daa3eca08851d5924ffed67b38d8ad37cc8927",
"content_id": "1ec5ae4af2ad0234bd3cee4db124bd930c695afa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2769,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 79,
"path": "/coronatracker.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Data and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"Data/corona_newcases.csv\")\nX = dataset.iloc[:, 0:1].values #Serial Number\ny_Italy = dataset.iloc[:, 2:3].values #Italy\ny_India = dataset.iloc[:, -1].values #India\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n'''\n#Encoding Categorical Data (Only LabelEncoding is adequate)\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_Date = LabelEncoder()\nlabelencoder_Date = labelencoder_Date.fit(X[:, 0])\nX[:, 0] = labelencoder_Date.transform(X[:, 0])\n'''\n\n#X_poly = poly_reg.fit_transform(X) #Creating polynomial expression with required degrees\n#Splitting Dataset into Training & Testing data\n#Not Required as dataaset is small\n\n#Feature Scaling to normalise Data\nfrom sklearn.preprocessing import StandardScaler\nss_y = StandardScaler()\n#print(y_Italy)\n#print(y_India)\n#y_Italy = ss_y.fit_transform(y_Italy)\n#y_India = ss_y.fit_transform(y_India)\n\n#Fitting the Polynomial Linear Regression to the dataset\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 28)\nX_poly = poly_reg.fit_transform(X) #Creating polynomial expression with required degrees\n#X_poly_Italy = poly_reg.fit_transform(y_Italy) #Creating polynomial expression with required degrees\n#X_poly_India = poly_reg.fit_transform(y_India) #Creating polynomial expression with required degrees\n\n#Italy\nlin_reg_Italy = LinearRegression()\nlin_reg_Italy.fit(X_poly, y_Italy) #Italy\n#India\nlin_reg_India = LinearRegression()\nlin_reg_India.fit(X_poly, y_India) #India\n\n#Predicting the Result\npred_Italy = lin_reg_Italy.predict(X_poly)\npred_India = lin_reg_India.predict(X_poly)\n\n#Visualising the PLR Result\nfig, (ax1, ax2) = plt.subplots(2)\n#Italy\nax1.scatter(X, y_Italy, color = \"red\")\nax1.plot(X, pred_Italy, color = \"blue\")\nax1.set_title(\"CoronaVirus : New Cases in Italy\")\n#ax1.set_xlabel(\"Days Passed\")\nax1.set_ylabel(\"New Cases Detected\")\n#India\nax2.scatter(X, y_India, color = \"red\")\nax2.plot(X, pred_India, color = \"green\")\nax2.set_title(\"CoronaVirus : New Cases in India\")\nax2.set_xlabel(\"Days Passed\")\nax2.set_ylabel(\"New Cases Detected\")\nplt.show()\n\n'''\n#Predicting National Performance\nuserInput = float(input(\"Enter the new cases identified : \")) #Get the user input\nuserInput = np.array([[userInput]]) #Convert the user input into np array\nuI_pred = lin_reg.predict(userInput)\nprint(\"Prediction Result : %f\" %uI_pred)\n'''\n\n"
},
{
"alpha_fraction": 0.6264960765838623,
"alphanum_fraction": 0.6494015455245972,
"avg_line_length": 39.72269058227539,
"blob_id": "536a1d25568e0b005f4017922262491f59cb0937",
"content_id": "e073dfc9945f30854b4c248c781bf8d04459cd77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4846,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 119,
"path": "/ModelSelection/grid_search.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Grid Search for Model Selection and Hyper-parameter Tuning\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/26_Social_Network_Ads.csv\")\nX = dataset.iloc[:, [2, 3]].values\ny = dataset.iloc[:, 4].values\n#print(dataset)\n#print(X)\n#print(y)\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\n#Not Applicable\n\n#Spliting Dataset into Training & Testing Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n#print(X_train)\n#print(X_test)\n#print(y_train)\n#print(y_test)\n\n#Feature Scaling to Normalise Data\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n#print(X_train)\n#print(X_test)\n\n#Fitting the Kernel SVM to Training Set\nfrom sklearn.svm import SVC\nclassifier = SVC(kernel = \"rbf\", random_state = 0)\nclassifier.fit(X_train, y_train)\n\n#Predicting the KernelSVM Result\ny_pred = classifier.predict(X_test)\n#print(y_pred)\n\n#Confirm the Prediction Accuracy using Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n#print(cm)\n\n#Applying K-Fold Cross Validation for Evaluating the Model\n#Accuracy predicted in confusion_matrix is only for a single Training set and Test set and may not be holding good for different test#sets.\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)\n #cv = cross_validator (k)\n #n_jobs = -1 ==> include all CPUs\na_mean = accuracies.mean() #Find the average accuracy of the model\nvariance = accuracies.std() #Find the Variance (Standard Deviation) of the Model\nprint(accuracies)\nprint(\"Avg Accuracy : %f\" %a_mean)\nprint(\"Standard Deviation : %f\" %variance)\n\n#Applying Grid Search to Find the Best Model and Best Hyper-Parameters\nfrom sklearn.model_selection import GridSearchCV\nparameters = [{'C' : [1, 10, 100, 1000], 'kernel' : ['linear']},\n {'C' : [1, 10, 100, 1000], 'kernel' : ['rbf'], 'gamma' : [0.5, 0.1, 0.01, 0.001]}\n ] #Select the parameters according to SVC (classifier used in this case) documenataion\ngrid_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = 'accuracy',\n n_jobs = -1, #Include all CPUs for faster computation\n cv = 10) #cv = k; 10-Fold Cross Validation\ngrid_search.fit(X_train, y_train)\nbest_accuracy = grid_search.best_score_\nbest_parameters = grid_search.best_params_\nprint(\"Best Accuracy : %f\" %best_accuracy)\nprint(\"Best Parameters : \")\nprint(best_parameters) #Select the best model & tune your parameters accordingly\nprint(\"Tune your Parameters accordingly and execute the program for better performance :)\")\n\n'''\n#Visualising the Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), 
X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\"))(i), label = j)\nplt.title(\"Kernel SVM : Training Set\")\nplt.xlabel(\"Age\")\nplt.ylabel(\"Salary\")\nplt.legend()\nplt.show()\n\n#Visualising the Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\"))(i), label = j)\nplt.title(\"Kernel SVM : Test Set\")\nplt.xlabel(\"Age\")\nplt.ylabel(\"Salary\")\nplt.legend()\nplt.show()\n'''\n"
},
{
"alpha_fraction": 0.746268630027771,
"alphanum_fraction": 0.7543532252311707,
"avg_line_length": 32.45833206176758,
"blob_id": "368f2c047c362f51d8a84b7d43da82aaf517aebb",
"content_id": "68a5118257c9fa604700bb151368deb398d6d084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1608,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 48,
"path": "/Regression/3_multiple_linear_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/3_50Startups.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_State = LabelEncoder()\nlabelencoder_State = labelencoder_State.fit(X[:, 3])\nX[:, 3] = labelencoder_State.transform(X[:, 3])\ncolumn_transformer = ColumnTransformer([(\"ohe\", OneHotEncoder(), [3])], remainder = \"passthrough\")\nX = np.array(column_transformer.fit_transform(X), dtype = np.int)\n\n#Avoiding Dummy Variable Trap\nX = X[:, 1:] #Need not specifically code as MLR algorithm automatically takes care of it\n\n#Splitting Dataset into Training & Testing Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n#Fitting the Training Set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Modelling Prediction Using Training Set\ntrain_pred = regressor.predict(X_train)\n\n#Validating Prediction Using Testing Set\ntest_pred = regressor.predict(X_test)\n\n#Comparing the Actual Values with the Predicted Values\nprint(\"Actual Result for X_test : \\n\")\nprint(y_test)\nprint(\"\\n\")\nprint(\"Predicted Values for X_test : \\n\")\nprint(test_pred)\n\n\n"
},
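The manual X = X[:, 1:] above drops one dummy column. A sketch showing why, and OneHotEncoder's drop="first" option that does the same thing (the state names are illustrative, not the 50-Startups data):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

states = np.array([["New York"], ["California"], ["Florida"], ["New York"]])
full = OneHotEncoder().fit_transform(states).toarray()
dropped = OneHotEncoder(drop="first").fit_transform(states).toarray()
print(full)      #3 columns: each row sums to 1, so the columns are linearly dependent (the trap)
print(dropped)   #2 columns: the redundant dummy is removed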
{
"alpha_fraction": 0.6294000744819641,
"alphanum_fraction": 0.6484878659248352,
"avg_line_length": 37.056602478027344,
"blob_id": "219c8c72f8b942177f2227d081b66c0b91281475",
"content_id": "6347c5c75db6bd39d6c4980c89791244063e5324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4034,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 106,
"path": "/DimensionalityReduction/kernel_pca.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Dimensionality Reduction : Kernel-PCA Technique\n#Though Logistic Regression is a Linear Regression, Kernel-PCA helps it to classify non-linear dataset\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#--------------------------------------Data Preprocessing---------------------------------------------------------------------------#\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/25_Social_Network_Ads.csv\")\nX = dataset.iloc[:, 2:4].values #Includes only the age & salary column\n#X = dataset.iloc[:, [2, 3]].values #Includes only the age & salary column\ny = dataset.iloc[:, 4].values\n#print(\"dataset\")\nprint(dataset)\n#print(\"X\")\n#print(X)\n#print(\"y\")\n#print(y)\n#Taking Care of Columns with Missing Data\n#Not Applicable for this dataset\n\n#Encoding Categorical Data\n\n#Splitting Dataset into Training & Testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .25, random_state = 0)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n#print(\"y_train\")\n#print(y_train)\n#print(\"y_test\")\n#print(y_test)\n\n#Feature Scaling to Normalise Data\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n\n#--------------------------------------Dimensionality Reduction---------------------------------------------------------------------#\n#Applying Kernel-PCA\nfrom sklearn.decomposition import KernelPCA\nkpca = KernelPCA(n_components = 2, kernel = \"rbf\") #rbf ==> Gaussian Function\nX_train = kpca.fit_transform(X_train)\nX_test = kpca.transform(X_test)\n\n#Fitting Logistic Regression Classifier to the Training Set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test)\n#print(\"y_pred\")\n#print(y_pred)\n\n#Making Confusion Matrix to Confirm Accuracy of Prediction\nfrom sklearn.metrics import confusion_matrix #confusion_matrix is a function and not a Class\ncm = confusion_matrix(y_test, y_pred)\n#print(\"Confusion Matrix\")\n#print(cm)\n\n#Visualising the Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using Kernel PCA : Training Set\")\nplt.xlabel(\"PC-1\")\nplt.ylabel(\"PC-2\")\nplt.legend()\nplt.show()\n\n#Visualising the Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = 
X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using Kernel PCA : Test Set\")\nplt.xlabel(\"PC-1\")\nplt.ylabel(\"PC-2\")\nplt.legend()\nplt.show()\n"
},
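A sketch of why the RBF kernel matters above: on data that no straight line separates, kernel PCA produces components that a linear classifier can use. make_moons is a stand-in for the social-ads dataset, and gamma=15 is an assumed kernel width:

from sklearn.datasets import make_moons
from sklearn.decomposition import KernelPCA
from sklearn.linear_model import LogisticRegression

X, y = make_moons(n_samples=300, noise=0.05, random_state=0)
X_kpca = KernelPCA(n_components=2, kernel="rbf", gamma=15).fit_transform(X)
print("Linear fit on raw data      :", LogisticRegression().fit(X, y).score(X, y))
print("Linear fit after kernel PCA :", LogisticRegression().fit(X_kpca, y).score(X_kpca, y))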
{
"alpha_fraction": 0.7332935333251953,
"alphanum_fraction": 0.7452267408370972,
"avg_line_length": 28.875,
"blob_id": "9bb90ba124dcce11d978e565936617c6342285dd",
"content_id": "7bc927ed1d1a0b7d1be0880df6772fba20f8f984",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1676,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 56,
"path": "/Regression/2_simple_linear_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 19:29:47 2020\n\n@author: root\n\"\"\"\n#Import Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Import Dataset & Segregate Independent and Dependent Variables\ndataset = pd.read_csv(\"../Data/2_Salary_Data.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n#Taking Care of Columns with Missing Data\n##Not Applicable here as there are no missing data##\n\n#Encoding Categorical Data\n##Not Applicable##\n\n#Splitting Dataset into Training & Testing Dataset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\n\n#Feature Scaling to Normalise Dataset\n##Not Applicable as Simple Linear Regression Algorithm Automatically Takes Care of it\n\n#Fitting Simple Linear Regression to Training Set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Modelling the Prediction Using Training Set\ntrain_pred = regressor.predict(X_train)\n\n#Validating Model on Testing Set\ntest_pred = regressor.predict(X_test)\n\n#Visualising the Model based on Training Set\nplt.scatter(X_train, y_train, color = \"red\")\nplt.plot(X_train, train_pred, color = \"blue\")\nplt.title(\"Experience vs Salary (Training Set)\")\nplt.xlabel(\"Years of Experience\")\nplt.ylabel(\"Salary in $\")\nplt.show()\n\n#Visualising the Model based on Testing Set\nplt.scatter(X_test, y_test, color = \"red\")\nplt.plot(X_train, train_pred, color = \"blue\")\nplt.title(\"Experience vs Salary (Testing Set)\")\nplt.xlabel(\"Years of Experience\")\nplt.ylabel(\"Salary in $\")\nplt.show()\n\n\n\n"
},
{
"alpha_fraction": 0.6889200806617737,
"alphanum_fraction": 0.7068723440170288,
"avg_line_length": 38.17582321166992,
"blob_id": "57e067434cc613de06f9373cc8c32b805dc61764",
"content_id": "174ef05ef72dc33d3b7b5e9aed5114941b4c6b88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3565,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 91,
"path": "/Regression/3.1_mlr_backward_elimination.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/3_50Startups.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_State = LabelEncoder()\nlabelencoder_State = labelencoder_State.fit(X[:, 3])\nX[:, 3] = labelencoder_State.transform(X[:, 3])\ncolumn_transformer = ColumnTransformer([(\"ohe\", OneHotEncoder(), [3])], remainder = \"passthrough\")\nX = np.array(column_transformer.fit_transform(X), dtype = np.int)\n\n#Avoiding Dummy Variable Trap\nX = X[:, 1:] #Need not specifically code as MLR algorithm automatically takes care of it\n\n#Splitting Dataset into Training & Testing Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n#Fitting the Training Set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Modelling Prediction Using Training Set\ntrain_pred = regressor.predict(X_train)\n\n#Validating Prediction Using Testing Set\ntest_pred = regressor.predict(X_test)\n\n'''\n#Comparing the Actual Values with the Predicted Values\nprint(\"Actual Result for X_test : \\n\")\nprint(y_test)\nprint(\"\\n\")\nprint(\"Predicted Values for X_test : \\n\")\nprint(test_pred)\n'''\n\n#Building Optimal Model Using Backward Elimination\nimport statsmodels.api as sm\n##MLR ==> y = b0 + b1.X1 + b2.X2 + .... + bn.Xn\n### ==> y = b0.X0 + b1.X1 + b2.X2 + .... + bn.Xn where X0 = 1\n##Insert X0 (a column of 1s in the beginning of X\n##This manipulation is automatically taken care of by LinearRegression(). 
However, statsmodel.formula.api needs this to be done manually.\nX = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1) #Adding intercept i.e column of ones \n\n##Manually finding the most significant Independent Variables with P < 0.05\n'''\n##Fitting the model with all possible predictors i.e., columns 0, 1, 2, 3 ...\n##X : Original Matrix of Features\n##X_optimal : Matrix of Optimal Features after eliminating insignifcant columns using Backward Elimination\nX_optimal = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_optimal).fit()\nregressor_OLS.summary() #Repeat the last three steps to remove the columns with P > 0.05 till identifying only the most significant I.Vs\n'''\n\n#Automatically finding the most significant Independent Variables with P < 0.05\ndef backwardElimination(sl, x_opt):\n numColumns = len(x_opt[0]) #Number of Columns\n for i in range(0, numColumns):\n regressor_OLS = sm.OLS(endog = y, exog = x_opt).fit()\n maxPvalue = max(regressor_OLS.pvalues).astype(float)\n\n if maxPvalue > sl:\n for j in range(0, numColumns - i):\n if(regressor_OLS.pvalues[j].astype(float) == maxPvalue):\n x_opt = np.delete(x_opt, j, 1)\n print(regressor_OLS.summary())\n return x_opt\n\nsig_level = 0.05\nX_optimal = X[:, [0, 1, 2, 3, 4, 5]]\nX_modelled = backwardElimination(sig_level, X_optimal)\n\n\n#Build automatic Backward Elimination Model using P-values and Adjusted R squares values\nprint(\"\\n\\n\")\nprint(\"***Build Automatic Backward Elimination Model Using P-values and Adjusted-R-Squares values***\")\n"
},
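A sketch of the p-value test that drives backwardElimination(), on synthetic data where y depends on x1 only; the irrelevant x2 is expected to show a p-value well above 0.05 and would be eliminated first (the data and coefficients are assumptions for illustration):

import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
x1 = rng.rand(100)
x2 = rng.rand(100)                       #Irrelevant predictor
y = 3 + 2 * x1 + 0.1 * rng.randn(100)    #y depends on x1 only, plus noise
X_opt = sm.add_constant(np.column_stack((x1, x2)))   #Prepend the column of 1s (the intercept)
model = sm.OLS(endog=y, exog=X_opt).fit()
print(model.pvalues)   #Compare each p-value against SL = 0.05; drop the largest offender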
{
"alpha_fraction": 0.5867850184440613,
"alphanum_fraction": 0.6278764009475708,
"avg_line_length": 41.845069885253906,
"blob_id": "911f2df6668105cccc6be08b3bb8402de1d9c58b",
"content_id": "5211f82f1a01f123c548e80176ccec767a387a3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3042,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 71,
"path": "/Clustering/15_K-Means.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Problem Statement : Find the Clusters based on Annual Income and Spending Score\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/15_Mall_Customers.csv\")\nX = dataset.iloc[:, [3, 4]].values #Select Annual Income & Spending Score\n#y #y is not required as we need to find only the clusters\n#print(dataset)\n#print(X)\n\n#Visualising the dataset\n#print(X[:, 0])\n#print(X[:, 1])\nplt.scatter(X[:, 0], X[:, 1])\nplt.title(\"Mall Clients\")\nplt.xlabel(\"Annual Income (K$)\")\nplt.ylabel(\"Spending Score\")\nplt.show()\n\n#Find K : Using the Elbow Method to Find the Optimal Number of Clusters (K)\nfrom sklearn.cluster import KMeans\nwcss = [] #Within Cluster Sum of Squares \nfor i in range(1, 11): #K = 1 to 10\n kmeans = KMeans(n_clusters = i, init = \"k-means++\", n_init = 10, max_iter = 300, random_state = 0)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\n'''\nplt.plot(range(1,11), wcss)\nplt.title(\"The Elbow Method to Find K\")\nplt.xlabel(\"Number of Clusters 'K'\")\nplt.ylabel(\"WCSS\")\nplt.show()\n'''\n#K = 5, as seen from the plot\n\n#Predict the Results\nkmeans = KMeans(n_clusters = 5, init = \"k-means++\", n_init = 10, max_iter = 300, random_state = 0)\ny_kmeans = kmeans.fit_predict(X)\n\n#Visualising the K-Means Clusters\n'''\nplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = \"red\", label = \"Cluster-0\")\nplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = \"blue\", label = \"Cluster-1\")\nplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = \"green\", label = \"Cluster-2\")\nplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = \"cyan\", label = \"Cluster-3\")\nplt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = \"magenta\", label = \"Cluster-4\")\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = \"yellow\", label = \"Centroids\")\nplt.title(\"K-Means Clustering : Cluster of Mall Clients\")\nplt.xlabel(\"Annual Income (k$)\")\nplt.ylabel(\"Spending Score (1 - 100)\")\nplt.legend()\nplt.show()\n'''\n\n#Visualising After Renaming Clusters wrt Business Needs\nplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = \"red\", label = \"Comfort Seekers\")\nplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = \"blue\", label = \"Spendthrift\")\nplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = \"green\", label = \"Luxury\")\nplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = \"cyan\", label = \"Sensible\")\nplt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = \"magenta\", label = \"Targets\")\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = \"yellow\", label = \"Centroids\")\nplt.title(\"K-Means Clustering : Cluster of Mall Clients\")\nplt.xlabel(\"Annual Income (k$)\")\nplt.ylabel(\"Spending Score (1 - 100)\")\nplt.legend()\nplt.show()\n"
},
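A sketch of the elbow heuristic used above, on synthetic blobs with a known cluster count of 4: WCSS (kmeans.inertia_) drops sharply until K reaches the true number of clusters, then flattens:

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
for k in range(1, 8):
    km = KMeans(n_clusters=k, init="k-means++", n_init=10, random_state=0).fit(X)
    print("K = %d  WCSS = %.1f" % (k, km.inertia_))   #The "elbow" appears at K = 4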
{
"alpha_fraction": 0.6620575189590454,
"alphanum_fraction": 0.6825221180915833,
"avg_line_length": 31.576576232910156,
"blob_id": "cd41c0825e7199daf3b35c266fafe678b6bf8694",
"content_id": "af67218c9317428b6dfea21d7209b66342ea8795",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3616,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 111,
"path": "/Classification/8.1_logistic_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#This script is the amended version of 8_logistic_regression.py to include the columns Gender & Salary\n#Gender column is encoded using LabelEncoder\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/8_Social_Network_Ads.csv\")\nX = dataset.iloc[:, [1,3]].values #Includes only the Gender & salary column\ny = dataset.iloc[:, 4].values\n'''\nprint(\"dataset\")\nprint(dataset)\nprint(\"X\")\nprint(X)\nprint(\"y\")\nprint(y)\n'''\n\n#Taking Care of Columns with Missing Data\n#Not Applicable for this dataset\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_Gender = LabelEncoder()\nX[:, 0] = labelencoder_Gender.fit_transform(X[:, 0].reshape(X.shape[0], 1))\n\n#Splitting Dataset into Training & Testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .25, random_state = 0)\n'''\nprint(\"X_train\")\nprint(X_train)\nprint(\"X_test\")\nprint(X_test)\nprint(\"y_train\")\nprint(y_train)\nprint(\"y_test\")\nprint(y_test)\n'''\n\n#Feature Scaling to Normalise Data\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n'''\nprint(\"X_train\")\nprint(X_train)\nprint(\"X_test\")\nprint(X_test)\n'''\n\n#Fitting Logistic Regression Classifier to the Training Set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test)\n'''\nprint(\"y_pred\")\nprint(y_pred)\n'''\n\n#Making Confusion Matrix to Confirm Accuracy of Prediction\nfrom sklearn.metrics import confusion_matrix #confusion_matrix is a function and not a Class\ncm = confusion_matrix(y_test, y_pred)\n'''\nprint(\"Confusion Matrix\")\nprint(cm)\n'''\n\n#Visualising the Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier : Training Set\")\nplt.xlabel(\"Gender\")\nplt.ylabel(\"Salary\")\nplt.legend()\nplt.show()\n\n#Visualising the Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", 
\"green\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier : Test Set\")\nplt.xlabel(\"Gender\")\nplt.ylabel(\"Salary\")\nplt.legend()\nplt.show()\n"
},
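A sketch of the LabelEncoder call fixed above: it expects a 1-D array of labels, so the Gender column is passed as X[:, 0] without reshaping to a column vector:

import numpy as np
from sklearn.preprocessing import LabelEncoder

gender = np.array(["Male", "Female", "Female", "Male"])
print(LabelEncoder().fit_transform(gender))   #[1 0 0 1]: classes are sorted, Female -> 0, Male -> 1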
{
"alpha_fraction": 0.7508849501609802,
"alphanum_fraction": 0.7584070563316345,
"avg_line_length": 36,
"blob_id": "4f7d878f8d737aef515f04738e29ec9b9aca789c",
"content_id": "f6460d9e84ad91e54d583b9a2ceda2bfd5864457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2260,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 61,
"path": "/Regression/4_polynomial_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent and Dependent Variables\ndataset = pd.read_csv(\"../Data/4_Position_Salaries.csv\")\nX = dataset.iloc[:, 1:2].values # columns[1:2] --> column[1]; However, the result is a Matrix not a Vector\ny = dataset.iloc[:, 2].values\nprint(\"Printing X :\")\nprint(X)\nprint(\"\\n\")\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\n#Not Applicable as Position is already encoded with Level\n\n#Splitting Dataset into Testing & Training Set\n#Not needed as the dataset is very small. Also the random selection of training data might might affect the accuracy of the test data if the values are close by\n\n#Feature Scaling to Normalise Data\n#Not required as the result would be a polynomial expression with exponential outcome and not linear outcome\n\n#Fitting the Regression Model to the complete Dataset as it was not split into Training & Test set\n##We will display the graph for both Simple Linear Regression and Polynomial Linear Regression for comparision\n###Fitting the SLR to the Dataset\nfrom sklearn.linear_model import LinearRegression\nregressor_lin = LinearRegression()\nregressor_lin.fit(X, y)\n\n###Fitting the PLR to the Dataset\n###Observe how the model changes by varying \"degree\". Can it be automated to predict the optimum value?\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_X = PolynomialFeatures(degree = 3) #Convert Simple Expression (ie., y = b0 + b1.X1) to Polynomial Exp (ie., y = b0 + b1.X1 + (b2.X1square))\nX_withPoly = poly_X.fit_transform(X)\nprint(\"\\nPrinting X Cube\")\nprint(X_withPoly)\nprint(\"\\n\")\n\nregressor_poly = LinearRegression()\nregressor_poly.fit(X_withPoly, y)\n\n#Visualising Linear Regression\nplt.scatter(X, y, color = \"red\")\nplt.plot(X, regressor_lin.predict(X), color = \"blue\")\nplt.title(\"Plotting Simple Linear Regression Model\")\nplt.xlabel(\"Position\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n#Visualising Polynomial Regression\nplt.scatter(X, y, color = \"red\")\nplt.plot(X, regressor_poly.predict(X_withPoly), color = \"blue\")\nplt.title(\"Plotting Polynomial Regression Model\")\nplt.xlabel(\"Position\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n\n\n"
},
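A sketch of what poly_X.fit_transform produces for degree = 3: each value x expands to the row [1, x, x^2, x^3], which LinearRegression then fits with ordinary linear coefficients:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[1], [2], [3]])
print(PolynomialFeatures(degree=3).fit_transform(X))
#[[ 1.  1.  1.  1.]
# [ 1.  2.  4.  8.]
# [ 1.  3.  9. 27.]]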
{
"alpha_fraction": 0.6371000409126282,
"alphanum_fraction": 0.6549209952354431,
"avg_line_length": 37.5625,
"blob_id": "3604c6ba16e9a0bdef0cd01f1413e74b9027c01c",
"content_id": "51e2d9281554f013ad628020952416ad7c8ce21c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2469,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 64,
"path": "/ReinforcementLearning/upper_confidence_bound.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Reinforcement Learning : Upper Confidence Bound Algorithm\n#Additional Reading : https://www.udemy.com/course/machinelearning/learn/#questions/4816936\n\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/18_Ads_CTR_Optimisation.csv\")\n#print(dataset)\n\n#Implementing UCB Algorithm from scratch\n#Step 1:\n#At each round, consider two numbers for each ad.\n#Number of times the ad was selected : number_ad_selected\n#Sum of rewards for the ad : total_reward\nnum_rounds = 10000\nnum_ads = 10\nads_selected = []\nnumber_ad_selected = [0] * num_ads #Initialise an ad_selected counter for each ad\nsums_of_reward = [0] * num_ads #Initialise a net reward counter for each ad\ntotal_reward = 0\n\n#Step 2:\n#Compute Average Reward for each ad : avg_reward = total_reward / number_ad_selected\n#Compute Upper Confidence Bound : avg_reward + Delta\n#Compute Delta for Confidence Interval for each ad : \nimport math \nfor rnd in range(0, num_rounds): #For each round\n ad = 0\n max_upper_bound = 0\n for i in range(0, num_ads): #For each ad \n if (number_ad_selected[i] > 0):\n avg_reward = sums_of_reward[i] / number_ad_selected[i]\n delta_i = math.sqrt(3/2 * math.log(rnd + 1)/number_ad_selected[i]) # rnd + 1 as rnd starts from 0\n upper_bound = avg_reward + delta_i #Upper Bound for Confidence level\n else:\n upper_bound = 1e400 #10**400\n #Step 3:\n #Select the ad that has maximum Upper Bound : max(avg_reward + delta)\n if upper_bound > max_upper_bound:\n max_upper_bound = upper_bound\n ad = i\n ads_selected.append(ad)\n number_ad_selected[ad] = number_ad_selected[ad] + 1\n reward = dataset.values[rnd, ad]\t\t\t#Data from simulated dataset; real-world appln get from online clicks\n sums_of_reward[ad] = sums_of_reward[ad] + reward\n total_reward = total_reward + reward\n\n\n#print(\"Total Reward : %d\" %total_reward)\n#print(\"Ad Selected : \")\n#print(ads_selected)\n\n#Visualising the Result & Selecting the Best Ad\n#plt.hist(ads_selected)\nplt.bar(range(0, num_ads), number_ad_selected)\nplt.title(\"Reinforcement Learning : Upper Confidence Bound Algorithm\")\nplt.xlabel(\"Ads\")\nplt.ylabel(\"Number of times each Ad was selected\")\nplt.show()\n\n"
},
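A worked example of the bound computed above, avg_reward + sqrt(3/2 * ln(n) / N_i), for two hypothetical ads at round n = 100; the rarely selected ad receives the larger exploration term, which is what keeps UCB exploring:

import math

n = 100                                   #Current round (1-based in the formula)
for sums, picks in [(9, 30), (2, 5)]:     #(sum of rewards, times selected) - hypothetical counts
    avg = sums / picks
    delta = math.sqrt(3 / 2 * math.log(n) / picks)
    print("avg = %.2f  delta = %.2f  upper bound = %.2f" % (avg, delta, avg + delta))
#The ad picked only 5 times gets the larger delta, so its upper bound wins despite fewer data.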
{
"alpha_fraction": 0.7098159790039062,
"alphanum_fraction": 0.7177914381027222,
"avg_line_length": 35.20000076293945,
"blob_id": "581f4f63d9f1c99d844ac7b9be062a1a158e8ab3",
"content_id": "43ad9d736ce50fb29e22d4ce8e9afaba853c9817",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1630,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 45,
"path": "/Regression/7_random_forest_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/7_Position_Salaries.csv\")\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n#Taking Care of Columns with Missing Data\n#Not Applicable\n\n#Encoding Categorical Data\n#Not Applicable as Position is already encoded as Level\n\n#Splitting dataset into Training & Testing set\n#Not Required as the dataset is already very small\n\n#Feature Scaling to Normalise data\n#Depends on the ML algorithm if it incudes this by default\n\n#Fitting the Random Forest Regression to dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators = 300, random_state = 0)\nregressor.fit(X, y)\n\n#Visualising the Random Forest Regression Result with High Resolution\nX_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape(len(X_grid), 1)\nplt.scatter(X, y, color = \"red\")\nplt.plot(X_grid, regressor.predict(X_grid), color = \"blue\")\nplt.title(\"Random Forest Regression\")\nplt.xlabel(\"Position\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n#Prediction the Result for real world applciation\nuserInput = float(input(\"Enter the Position : \")) #Get the user input as float\nuserInput = np.asmatrix(userInput) #Convert the user input to matrix\ny_pred = regressor.predict(userInput) #Predict the Salary \n# #Feature scaling if applicable \nprint(\"Predicted Salary : %f\\n\" %y_pred) #Print the prediction\n\n"
},
{
"alpha_fraction": 0.6042869091033936,
"alphanum_fraction": 0.6422094106674194,
"avg_line_length": 38.75409698486328,
"blob_id": "cbe5facfc7d99b40dae3dc697488ee2e96e03245",
"content_id": "9e72237981bd3e5e01df6db6c824442a6dd9e025",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2426,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 61,
"path": "/Clustering/16_hierarchical_clustering.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/16_Mall_Customers.csv\")\nX = dataset.iloc[:, [3, 4]].values #Select Annual Income and Spending Score columns\n#print(dataset)\n#print(X)\n\n#Visualising the Dataset for Appreciation\nplt.scatter(X[:, 0], X[:, 1])\nplt.title(\"Mall Dataset\")\nplt.xlabel(\"Annual Income\")\nplt.ylabel(\"Spending Score\")\nplt.show()\n\n#Find K : Number of Clusters Using Dendrogram\nimport scipy.cluster.hierarchy as sch # <<== Using Scipy \ndendrogram = sch.dendrogram(sch.linkage(X, method = \"ward\"))\nplt.title(\"Finding K using Dendrogram\")\nplt.xlabel(\"Customers\") #Not Annual Income \nplt.ylabel(\"Euclidean Distance\")\nplt.show()\n\n#K = 5 #as observed from the Dendrogram\n\n#Fitting Hierarchical Clustering to the Dataset & Predicting Clusters\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters = 5, affinity = \"euclidean\", linkage = \"ward\")\ny_pred = hc.fit_predict(X) #Predicting the Clusters\n#print(y_pred)\n\n'''\n#Visualising Clusters after Renaming Labels According to Business Needs\nplt.scatter(X[y_pred == 0, 0], X[y_pred == 0, 1], s = 100, c = \"red\", label = \"Cluster-0\")\nplt.scatter(X[y_pred == 1, 0], X[y_pred == 1, 1], s = 100, c = \"blue\", label = \"Cluster-1\")\nplt.scatter(X[y_pred == 2, 0], X[y_pred == 2, 1], s = 100, c = \"green\", label = \"Cluster-2\")\nplt.scatter(X[y_pred == 3, 0], X[y_pred == 3, 1], s = 100, c = \"yellow\", label = \"Cluster-3\")\nplt.scatter(X[y_pred == 4, 0], X[y_pred == 4, 1], s = 100, c = \"brown\", label = \"Cluster-4\")\nplt.title(\"Clusters of Customers\")\nplt.legend()\nplt.xlabel(\"Annual Income (K$)\")\nplt.ylabel(\"Spending Score (1 - 100)\")\nplt.show()\n'''\n\n#Visualising Clusters\nplt.scatter(X[y_pred == 0, 0], X[y_pred == 0, 1], s = 100, c = \"red\", label = \"Target\")\nplt.scatter(X[y_pred == 1, 0], X[y_pred == 1, 1], s = 100, c = \"blue\", label = \"Standard\")\nplt.scatter(X[y_pred == 2, 0], X[y_pred == 2, 1], s = 100, c = \"green\", label = \"Luxury\")\nplt.scatter(X[y_pred == 3, 0], X[y_pred == 3, 1], s = 100, c = \"yellow\", label = \"Spendthrift\")\nplt.scatter(X[y_pred == 4, 0], X[y_pred == 4, 1], s = 100, c = \"brown\", label = \"Sensible\")\nplt.title(\"Clusters of Customers\")\nplt.legend()\nplt.xlabel(\"Annual Income (K$)\")\nplt.ylabel(\"Spending Score (1 - 100)\")\nplt.show()\n\n"
},
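A sketch of the Ward linkage matrix behind the dendrogram above, on four toy points; each row of Z records one merge as (cluster_i, cluster_j, distance, new cluster size), and the big jump in the distance column marks the natural cut:

import numpy as np
import scipy.cluster.hierarchy as sch

X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
Z = sch.linkage(X, method="ward")   #Same call that feeds sch.dendrogram above
print(Z)   #Two cheap merges, then one expensive one: the dendrogram suggests K = 2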
{
"alpha_fraction": 0.62056565284729,
"alphanum_fraction": 0.6397411227226257,
"avg_line_length": 37.99065399169922,
"blob_id": "1add2482f83b2b638d8df119a335412dc74dbcc6",
"content_id": "52868cdada8df0b5973467d3641244ea1a62fd1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4172,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 107,
"path": "/DimensionalityReduction/lda.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Dimensionality Reduction : Linear Discriminant Analysis (LDA)\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#--------------------------------------Data Preprocessing---------------------------------------------------------------------------#\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/23_Wine.csv\")\nX = dataset.iloc[:, 0:13].values #Includes only the age & salary column\ny = dataset.iloc[:, 13].values\n#print(\"dataset\")\n#print(dataset)\n#print(\"X\")\n#print(X)\n#print(\"y\")\n#print(y)\n\n#Taking Care of Columns with Missing Data\n#Not Applicable for this dataset\n\n#Encoding Categorical Data\n\n#Splitting Dataset into Training & Testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n#print(\"y_train\")\n#print(y_train)\n#print(\"y_test\")\n#print(y_test)\n\n#Feature Scaling to Normalise Data #Feature Scaling is a must in Dimensionality Reduction Techniques\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n\n#--------------------------------------Dimensioneality Reduction--------------------------------------------------------------------#\n#Applying LDA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nlda = LDA(n_components = 2) #Apply 2 to Num of Most Seperated Components for easy visualisation\nX_train = lda.fit_transform(X_train, y_train)\nX_test = lda.transform(X_test)\n#print(X_train) #Notice only 2 columns corresponding to Most Seperated Components\n#print(X_test) \n\n#Fitting Logistic Regression Classifier to the Training Set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test)\n#print(\"y_pred\")\nprint(y_pred)\n\n#Making Confusion Matrix to Confirm Accuracy of Prediction\nfrom sklearn.metrics import confusion_matrix #confusion_matrix is a function and not a Class\ncm = confusion_matrix(y_test, y_pred)\n#print(\"Confusion Matrix\")\nprint(cm)\n\n#Visualising the Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\", \"blue\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\", \"blue\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using LDA : Training Set\")\nplt.xlabel(\"LD-1\")\nplt.ylabel(\"LD-2\")\nplt.legend()\nplt.show()\n\n#Visualising the Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n 
np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\", \"blue\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\", \"blue\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using LDA : Test Set\")\nplt.xlabel(\"LD-1\")\nplt.ylabel(\"LD-2\")\nplt.legend()\nplt.show()\n"
},
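A sketch of the key contrast with PCA noted above: LDA is supervised, so the class labels go into fit_transform, and it yields at most (number of classes - 1) discriminants. sklearn's bundled wine data stands in for 23_Wine.csv:

from sklearn.datasets import load_wine
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

X, y = load_wine(return_X_y=True)     #13 features, 3 classes, like the course dataset
X_lda = LDA(n_components=2).fit_transform(X, y)   #Labels are required: LDA is supervised
print(X_lda.shape)                    #(178, 2): two linear discriminants, the max for 3 classes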
{
"alpha_fraction": 0.622890293598175,
"alphanum_fraction": 0.632647693157196,
"avg_line_length": 39.308509826660156,
"blob_id": "b891d55cb809b8e751929871495163ce67977d18",
"content_id": "d31649201e7d5561835a06666b9f712cfb348412",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3792,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 94,
"path": "/DL/ann.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Deep Learning : Artificial Neural Network\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#--------------------------------------DATA PREPROCESSING---------------------------------------------------------------------------#\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/21_Churn_Modelling.csv\")\nX = dataset.iloc[:, 3:13].values #Matrix of Independent Variables\ny = dataset.iloc[:, -1].values #Vector of Dependent Variables\n#print(dataset)\n#print(X)\n#print(y)\n\n#Taking Care of Missing Columns\n#Not Applicable\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_Geography = LabelEncoder()\nX[:, 1] = labelencoder_Geography.fit_transform(X[:, 1])\nlabelencoder_Gender = LabelEncoder()\nX[:, 2] = labelencoder_Gender.fit_transform(X[:, 2])\ncolumntransformer = ColumnTransformer([(\"ohe\", OneHotEncoder(), [1])], remainder = \"passthrough\")\nX = np.array(columntransformer.fit_transform(X))\n#print(X)\n\n#Avoiding Dummy Variable Trap\nX = X[:, 1:]\n#print(X)\n\n#Splitting Dataset into Training & Test Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n#print(X_train)\n#print(X_test)\n#print(y_train)\n#print(y_test)\n\n#Feature Scaling to Normalise Data\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n#print(X_train)\n#print(X_test)\n\n#--------------------------------------ARTIFICIAL NERUAL NETWORK--------------------------------------------------------------------#\n\n#Importing Libraries\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#Initialising the ANN\nclassifier = Sequential()\n\n#Adding the Input Layer and the First Hidden Layer\nclassifier.add(Dense(input_dim = 11, units = 6, kernel_initializer = \"uniform\", activation = \"relu\"))\n #input_dim : Num of Independent Variables as in X_train i.e., 11\n #Number of Neurons in Output layer =1 (for binary classification problems)\n #units : Num of Neurons in Hidden_Layer = (Neruons in input_dim + Neurons in output layer) / 2\n #activation : Activation Function for Hidden Layer = ReLU (Rectifier Function)\n # Output Layer = Sigmoid Function\n #kernel_initializer : Adjust weights close to 0\n \n#Adding the Second Hidden Layer\nclassifier.add(Dense(units = 6, kernel_initializer = \"uniform\", activation = \"relu\"))\n\n#Adding the Ouput Layer\nclassifier.add(Dense(units = 1, kernel_initializer = \"uniform\", activation = \"sigmoid\"))\n\n#Compiling the ANN by Applying Stochastic Gradient Descent\nclassifier.compile(optimizer = \"adam\", loss = \"binary_crossentropy\", metrics = [\"accuracy\"])\n #adam : Stochastic Gradient Descent Optimizer\n\n#Fitting the ANN to the Training Set\nclassifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100)\n\n\n#--------------------------------------Predicting the Result & Determining Accuracy-------------------------------------------------#\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test) #y_pred is a floating point number \ny_pred = (y_pred > 0.5) #y_pred == True if y_pred > 0.5\n # False if y_pred < 0.5\n\n#Confirming the Accuracy of Prediction Using Confusion Matrix\nfrom sklearn.metrics import 
confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\n\n\n\n"
},
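A sketch of the same 11-6-6-1 topology with current Keras conventions (an explicit Input layer and epochs= instead of the deprecated nb_epoch=), trained briefly on random placeholder data; it assumes keras/tensorflow is installed:

import numpy as np
import keras

X = np.random.rand(100, 11)                 #Placeholder for the scaled churn features
y = np.random.randint(0, 2, size=(100,))    #Placeholder binary target
model = keras.Sequential([
    keras.Input(shape=(11,)),
    keras.layers.Dense(6, activation="relu"),
    keras.layers.Dense(6, activation="relu"),
    keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(X, y, batch_size=10, epochs=2, verbose=0)
print((model.predict(X[:3], verbose=0) > 0.5).ravel())   #Threshold the sigmoid outputs at 0.5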
{
"alpha_fraction": 0.7223313450813293,
"alphanum_fraction": 0.7282252907752991,
"avg_line_length": 27.79245376586914,
"blob_id": "baa6e1e2a0c0343626cd2b6c6c32bf6acb261854",
"content_id": "12825c532e114b493333d237bb40496c2bbdf1f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 53,
"path": "/Regression/5_support_vector_regression.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/5_Position_Salaries.csv\")\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n#Taking care of columns with missing data\n#NA\n\n#Encoding Categorical Data\n#NA\n\n#Splitting dataset into Training & Testing set\n#NA\n\n#Feature Scaling to normalise data\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nss_y = StandardScaler()\nX_scaled = ss_X.fit_transform(X)\ny_scaled = ss_y.fit_transform(y.reshape(-1, 1))\n\n#Fitting the SVR Model to the dataset\nfrom sklearn.svm import SVR\nregressor = SVR(kernel = \"rbf\", epsilon = 0.1)\n\n#Predicting the Result & Unscaling\ny_pred = regressor.fit(X_scaled, y_scaled)\nans = ss_y.inverse_transform(regressor.predict(X_scaled))\n\n#Visualising the SVR result\n'''\nplt.scatter(X, y, color = \"red\")\nplt.plot(X, ans, color = \"blue\")\nplt.title(\"Prediction using SVR\")\nplt.xlabel(\"Position\")\nplt.ylabel(\"Salary\")\nplt.show()\n'''\n\n#Predicting the real world user queries\nuserInput = input(\"Enter the Value : \") #Get the user input\nuserInput = np.array([[userInput]]) #Convert the user input into np array\nuI_scaled = ss_X.transform(userInput) #Apply featue scaling to user input\nuI_pred = regressor.predict(uI_scaled) #Predict the result\nml_pred = ss_y.inverse_transform(uI_pred) #Unscale the result\nprint(\"Resut : %s\\n\" %float(ml_pred))\n\n"
},
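A sketch of the scale / predict / inverse-transform round trip used above, on toy data; the reshape(-1, 1) calls keep the y scaler's 2-D interface happy in recent scikit-learn releases:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

X = np.arange(1, 11, dtype=float).reshape(-1, 1)
y = X.ravel() ** 2                                   #Toy nonlinear target
ss_X, ss_y = StandardScaler(), StandardScaler()
X_s = ss_X.fit_transform(X)
y_s = ss_y.fit_transform(y.reshape(-1, 1)).ravel()   #Scale y, then flatten for SVR
reg = SVR(kernel="rbf").fit(X_s, y_s)
query = ss_X.transform(np.array([[6.5]]))            #Scale the query like the training data
pred = ss_y.inverse_transform(reg.predict(query).reshape(-1, 1))   #Map back to original units
print("Prediction for 6.5 :", float(pred[0, 0]))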
{
"alpha_fraction": 0.6698853373527527,
"alphanum_fraction": 0.6928183436393738,
"avg_line_length": 30.264150619506836,
"blob_id": "10f960f4b092b1ca55d82a926681202777b3e67c",
"content_id": "b9f27062d0f39fde21b34df007f19c0af14a4175",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1657,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 53,
"path": "/ReinforcementLearning/thompson_sampling.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Reinforcement Learning : Thompson Sampling Algorithm\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/19_Ads_CTR_Optimisation.csv\")\n#print(dataset)\n\n#Implementing Thompson Sampling Algorithm from scratch\n#At each round, we consider two numbers for each ad\n#Number of times the ad got reward 1 upto round n: numbers_ad_rewarded_1\n#Number of times the ad got reward 0 upto round n: numbers_ad_rewarded_0\nimport random\n\nnum_rounds = 10000\nnum_ads = 10\nads_selected = []\ntotal_reward = 0\n\n#Step 1:\nnumbers_ad_rewarded_1 = [0] * num_ads \nnumbers_ad_rewarded_0 = [0] * num_ads\n\n#Step 2:\nfor rnd in range(0, num_rounds):\n ad = 0\n max_random = 0\n for i in range(0, num_ads):\n random_beta = random.betavariate(numbers_ad_rewarded_1[i] + 1, numbers_ad_rewarded_0[i] + 1)\n if random_beta > max_random:\n max_random = random_beta\n ad = i\n ads_selected.append(ad)\n reward = dataset.values[rnd, ad] #Data from simulated dataset; real-world appln get from online clicks\n if reward == 1:\n numbers_ad_rewarded_1[ad] = numbers_ad_rewarded_1[ad] + 1\n else:\n numbers_ad_rewarded_0[ad] = numbers_ad_rewarded_0[ad] + 1\n total_reward = total_reward + reward\n\nprint(\"Total Reward : %d\" %total_reward)\n\n#Visualising the Result and Selecting the Best Ad\nplt.hist(ads_selected)\n#plt.bar(range(0, num_ads), numbers_ad_rewarded_1)\nplt.title(\"Reinforcement Learning : Thompson Sampling Algorithm\")\nplt.xlabel(\"Ads\")\nplt.ylabel(\"Number of times each Ad was selected\")\nplt.show()\n"
},
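A sketch of the Beta-sampling step above: each ad's posterior is Beta(successes + 1, failures + 1), and the ad with the highest draw wins the round. The two posterior counts here are hypothetical:

import random

random.seed(0)
posteriors = {"ad_0": (20, 80), "ad_1": (40, 60)}    #(reward-1 count, reward-0 count), assumed
draws = {ad: random.betavariate(s + 1, f + 1) for ad, (s, f) in posteriors.items()}
print(draws)                                 #One random draw from each ad's Beta posterior
print("selected:", max(draws, key=draws.get))   #ad_1 usually wins, but not always: that is the exploration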
{
"alpha_fraction": 0.5968788266181946,
"alphanum_fraction": 0.6132954955101013,
"avg_line_length": 42.28070068359375,
"blob_id": "0eeff7b8235c7ce2eefa915cea1bd672970ed2a6",
"content_id": "0be93356b3780bb6749b87cb5ec9da52339175a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4934,
"license_type": "no_license",
"max_line_length": 265,
"num_lines": 114,
"path": "/DimensionalityReduction/pca.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Dimensionality Reduction : Principal Component Analysis (PCA)\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#--------------------------------------Data Preprocessing---------------------------------------------------------------------------#\n#Importing Dataset and Segregating Independent & Dependent Variables\ndataset = pd.read_csv(\"../Data/23_Wine.csv\")\nX = dataset.iloc[:, 0:13].values #Includes only the age & salary column\ny = dataset.iloc[:, 13].values\n#print(\"dataset\")\n#print(dataset)\n#print(\"X\")\n#print(X)\n#print(\"y\")\n#print(y)\n\n#Taking Care of Columns with Missing Data\n#Not Applicable for this dataset\n\n#Encoding Categorical Data\n\n#Splitting Dataset into Training & Testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n#print(\"y_train\")\n#print(y_train)\n#print(\"y_test\")\n#print(y_test)\n\n#Feature Scaling to Normalise Data #Feature Scaling is a must in Dimensionality Reduction Techniques\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n#print(\"X_train\")\n#print(X_train)\n#print(\"X_test\")\n#print(X_test)\n\n#--------------------------------------Dimensioneality Reduction--------------------------------------------------------------------#\n#Applying PCA\nfrom sklearn.decomposition import PCA\n#pca = PCA(n_components = None)\n#X_train = pca.fit_transform(X_train)\n#X_test = pca.transform(X_test)\n#explained_variance = pca.explained_variance_ratio_ #Explains the variance among the Features. Based on the results, it \n#print(explained_variance) #appears that the first two features accounts for 57% of variance (the co #lumns are sorted now according to variance and not the original columns\n #as per the dataset. 
Now, apply these two columns for PCS in the followin #g section.\n\npca = PCA(n_components = 2) #Apply 2 to Num of Pricipal Components for easy visualisation\nX_train = pca.fit_transform(X_train) #y_train is not included as PCA is an ***Unsupervised Model*** \nX_test = pca.transform(X_test)\nexplained_variance = pca.explained_variance_ratio_ \n#print(explained_variance) \n\n#Fitting Logistic Regression Classifier to the Training Set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)\n\n#Predicting the Test Set Results\ny_pred = classifier.predict(X_test)\n#print(\"y_pred\")\n#print(y_pred)\n\n#Making Confusion Matrix to Confirm Accuracy of Prediction\nfrom sklearn.metrics import confusion_matrix #confusion_matrix is a function and not a Class\ncm = confusion_matrix(y_test, y_pred)\n#print(\"Confusion Matrix\")\n#print(cm)\n\n#Visualising the Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\", \"blue\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\", \"blue\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using PCA : Training Set\")\nplt.xlabel(\"PC-1\")\nplt.ylabel(\"PC-2\")\nplt.legend()\nplt.show()\n\n#Visualising the Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() -1, stop = X_set[:, 0].max() +1, step = 0.01), \\\n np.arange(start = X_set[:, 1].min() -1, stop = X_set[:, 1].max() +1, step = 0.01))\nplt.contour(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, \n cmap = ListedColormap((\"red\", \"green\", \"blue\")))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\n\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap((\"red\", \"green\", \"blue\"))(i), label = j)\nplt.title(\"Logistic Regression Classifier Using PCA : Test Set\")\nplt.xlabel(\"PC-1\")\nplt.ylabel(\"PC-2\")\nplt.legend()\nplt.show()\n"
},
{
"alpha_fraction": 0.7336956262588501,
"alphanum_fraction": 0.7433574795722961,
"avg_line_length": 30.169811248779297,
"blob_id": "8b26f967c2e58fbe5e7a1e8f72d68294c4d08afa",
"content_id": "17295953756b5916390e8b5ee2b223d08095c351",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1656,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 53,
"path": "/Regression/1_data_preprocessing.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#Import Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Import Dataset & Segregate Independent and Dependent Variables\ndataset = pd.read_csv(\"../Data/1_Data.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n#Taking Care of Columns with Missing Values\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = \"mean\")\nimputer = imputer.fit(X[:, 1:3])\nX[:, 1:3] = imputer.transform(X[:, 1:3])\n\n#Encoding Categorical Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nlabelencoder_Country = LabelEncoder()\nlabelencoder_Country = labelencoder_Country.fit(X[:, 0])\nX[:, 0] = labelencoder_Country.transform(X[:, 0])\ncolumn_transformer = ColumnTransformer([(\"ohe\",OneHotEncoder(), [0])], remainder = \"passthrough\")\nX = np.array(column_transformer.fit_transform(X),dtype = np.float)\n\n'''\nlabelencoder_Purchase = LabelEncoder()\nlabelencoder_Purchase = labelencoder_Purchase.fit(y)\ny = labelencoder_Purchase.transform(y)\n'''\n\n#Splitting the Dataset into Training & Testing Dataset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n#Feature Scaling to Normalise Dataset\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.fit_transform(X_test)\n\nprint(X_train)\nprint(X_test)\n\n'''\nss_y = StandardScaler()\ny_train = ss_y.fit_transform(y_train)\ny_test = ss_y.fit_transform(y_test)\nprint(y_train)\nprint(y_test)\n'''\n\n\n\n\n"
},
{
"alpha_fraction": 0.6585793495178223,
"alphanum_fraction": 0.6708564758300781,
"avg_line_length": 44.61333465576172,
"blob_id": "70035488c2953d5356b7378f4f78c7854eca46f1",
"content_id": "97410b6c0c077379f292baea821586cbd15e6074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3421,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 75,
"path": "/NLP/natural_language_processing.py",
"repo_name": "infoknight/Machine-Learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Natural Language Processing\n\n#Importing Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing Dataset\ndataset = pd.read_csv(\"../Data/20_Restaurant_Reviews.tsv\", delimiter = \"\\t\", quoting = 3) #quoting = 3 --> Ignore double quotes\n#print(dataset)\n\n#Cleaning the Text\nimport re\nimport nltk #NLP Toolkit\nnltk.download(\"stopwords\") #stopwords package that contains articles, prepositions etc that are irrelevant\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer #Packaget to stem the words : i.e., loved -> love; likes -> like \n\ncorpus = [] #dataset of all cleaned reviews\nfor i in range(0, dataset.shape[0]): \n review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i]) #Token Pattern: Remove non-alphabets (punctuations) and insert space (' ' )\n review = review.lower() #Convert to lower case\n review = review.split() #Create a list of words by splitting sentences\n #review = [word for word in review if not word in set(stopwords.words(\"english\"))] #Remove the irrelevant words viz., this, is,.\n ps = PorterStemmer()\n review = [ps.stem(word) for word in review if not word in set(stopwords.words(\"english\"))] #Stemming the words to their root words\n review = ' '.join(review) #Join the words in the List to form a sentence\n #print(review)\n corpus.append(review) #Add the cleaned review to the corpus \n#print(corpus)\n\n#Creating the Bag of Words Model : (Sparse Matrix)\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features = 1500) #Use max_features to limit the column values\nX = cv.fit_transform(corpus).toarray() #X = Matrix of Independent Variables\n #convert corpus to matrix using .toarray()\n#print(X.shape) #Sparse Matrix ; Use max_features = 1500 to limit the column values\ny = dataset.iloc[:, 1].values #Dependent Variable\n\n\n##Applying Classification Algorithm\n#Splitting Dataset into Training & Testing Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n#print(X_train)\n#print(X_test)\n#print(y_train)\n#print(y_test)\n\n#Fitting Naive Bayes Classifier to Training Set\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\n#print(classifier)\n\n#Predicting Naive Bayes Result\ny_pred = classifier.predict(X_test)\n\n#Confirming the Prediction Accuracy using confusion_matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n#print(cm)\ncorrect_0 = cm[0][0]\ncorrect_1 = cm[1][1]\nincorrect_0 = cm[0][1]\nincorrect_1 = cm[1][0]\naccuracy_rate = (correct_0 + correct_1) / X_test.shape[0]\nerror_rate = (incorrect_0 + incorrect_1) / X_test.shape[0]\nprint(\"Correct Predictions of Negative Review : %d\" %correct_0)\nprint(\"Correct Predictions of Positive Review : %d\" %correct_1)\nprint(\"Incorrect Predictions of Negative Review : %d\" %incorrect_0)\nprint(\"Incorrect Predictions of Positive Review : %d\" %incorrect_1)\nprint(\"Accuracy Rate : %f\" %accuracy_rate)\nprint(\"Error Rate : %f\" %error_rate)\n"
}
] | 23 |
stefb965/gratipay.com | https://github.com/stefb965/gratipay.com | 7aa8175c75e220afa9524f339146087905a9933b | 5f3b5922d6b3a7ff64f51574a1087bab2378cbd8 | 3e3a39296a0c11ca0f5ab787a096b5a4d22369ee | refs/heads/master | 2021-01-12T09:06:05.346326 | 2016-12-17T12:26:17 | 2016-12-17T12:26:17 | 76,761,240 | 0 | 0 | NOASSERTION | 2016-12-18T03:49:13 | 2016-12-18T03:49:18 | 2019-05-11T23:47:23 | Python | [
{
"alpha_fraction": 0.6453781723976135,
"alphanum_fraction": 0.6571428775787354,
"avg_line_length": 16.5,
"blob_id": "c9bdec8cf774a9c27bbe73bb648e74b6735cf60f",
"content_id": "1ecbb7c407005a5add3a81b3d768ae8210af6e50",
"detected_licenses": [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 34,
"path": "/bin/resend-emails.py",
"repo_name": "stefb965/gratipay.com",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"This is a one-off script to resend emails for #3355.\"\"\"\n\nimport sys\n\nfrom gratipay import wireup\n\nenv = wireup.env()\ndb = wireup.db(env)\n\n# Temporary, will fill with actual values when running script\nemail_txt = \"\"\"\n [email protected]\n [email protected]\n\"\"\"\n\nemails = [email.strip() for email in email_txt.split()]\n\nassert len(emails) == 176\n\nparticipants = []\n\nparticipants = db.all(\"\"\"\n SELECT p.*::participants\n FROM participants p\n WHERE email_address IN %s\n\"\"\", (tuple(emails), ))\n\nfor p in participants:\n p.queue_email('double_emails')\n\nprint(\"Done\")\nsys.exit()\n"
}
] | 1 |
failedxyz/xinircd | https://github.com/failedxyz/xinircd | c3376691cf0c0db56c82a957c939efbcdedd816e | 21af6c057ca5cba775d52379454f7df4053a095f | 33b59061e4d77fe9efdc9a3a63a14c7c84d3ad0d | refs/heads/master | 2017-12-17T04:21:51.267646 | 2017-03-01T18:35:43 | 2017-03-01T18:35:43 | 77,584,393 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6703125238418579,
"alphanum_fraction": 0.6703125238418579,
"avg_line_length": 21.068965911865234,
"blob_id": "777055905e13c6dd911c205218214851302b7425",
"content_id": "3e14a2de605e947e04482c160a0d7f1310513649",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 640,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 29,
"path": "/ircd/__main__.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "from daemon.runner import DaemonRunner\n\nimport ircd\nfrom ircd.constants import *\n\n\ndef run_command(self):\n server = ircd.XinIRCd()\n server.run()\n\n\ndef configure_command(self):\n ircd.config.configure()\n\n\nif __name__ == \"__main__\":\n print(YELLOW + \"XinIRCd\" + NORMAL + \", by Michael Zhang\")\n ircd.logger.basicConfig(level=ircd.logger.DEBUG)\n\n ircd.logger.debug(\"Starting XinIRCd...\")\n ircd.util.check_config()\n\n app = ircd.XinIRCd()\n runner = DaemonRunner(app)\n runner.register_action_func(configure_command, name=\"configure\")\n runner.register_action_func(run_command, name=\"run\")\n runner.do_action()\n\n exit()\n"
},
{
"alpha_fraction": 0.6013221144676208,
"alphanum_fraction": 0.6446318626403809,
"avg_line_length": 26.94267463684082,
"blob_id": "2ca2024ba655d6a18118927ef19094be410e8159",
"content_id": "4f6daa220ded4d8653f0938b360451059a7a4206",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4387,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 157,
"path": "/ircd/logger.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# encoding: utf-8\n# from\n# http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output/1336640#1336640\n\nimport logging\nimport platform\n\nFOREGROUND_BLUE = 0x0001 # text color contains blue.\nFOREGROUND_GREEN = 0x0002 # text color contains green.\nFOREGROUND_RED = 0x0004 # text color contains red.\nFOREGROUND_INTENSITY = 0x0008 # text color is intensified.\nFOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED\n# winbase.h\nSTD_INPUT_HANDLE = -10\nSTD_OUTPUT_HANDLE = -11\nSTD_ERROR_HANDLE = -12\n\n# wincon.h\nFOREGROUND_BLACK = 0x0000\nFOREGROUND_BLUE = 0x0001\nFOREGROUND_GREEN = 0x0002\nFOREGROUND_CYAN = 0x0003\nFOREGROUND_RED = 0x0004\nFOREGROUND_MAGENTA = 0x0005\nFOREGROUND_YELLOW = 0x0006\nFOREGROUND_GREY = 0x0007\nFOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.\n\nBACKGROUND_BLACK = 0x0000\nBACKGROUND_BLUE = 0x0010\nBACKGROUND_GREEN = 0x0020\nBACKGROUND_CYAN = 0x0030\nBACKGROUND_RED = 0x0040\nBACKGROUND_MAGENTA = 0x0050\nBACKGROUND_YELLOW = 0x0060\nBACKGROUND_GREY = 0x0070\nBACKGROUND_INTENSITY = 0x0080 # background color is intensified.\n\n\n# now we patch Python code to add color support to logging.StreamHandler\ndef add_coloring_to_emit_windows(fn):\n # add methods we need to the class\n def _out_handle(self):\n import ctypes\n return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)\n\n out_handle = property(_out_handle)\n\n def set_color(self, code):\n import ctypes\n # Constants from the Windows API\n self.STD_OUTPUT_HANDLE = -11\n hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)\n ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)\n\n setattr(logging.StreamHandler, '_set_color', set_color)\n\n def new(*args):\n levelno = args[1].levelno\n if (levelno >= 50):\n color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY\n elif (levelno >= 40):\n color = FOREGROUND_RED | FOREGROUND_INTENSITY\n elif (levelno >= 30):\n color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY\n elif (levelno >= 20):\n color = FOREGROUND_GREEN\n elif (levelno >= 10):\n color = FOREGROUND_MAGENTA\n else:\n color = FOREGROUND_WHITE\n args[0]._set_color(color)\n\n ret = fn(*args)\n args[0]._set_color(FOREGROUND_WHITE)\n # print \"after\"\n return ret\n\n return new\n\n\ndef add_coloring_to_emit_ansi(fn):\n # add methods we need to the class\n def new(*args):\n levelno = args[1].levelno\n if (levelno >= 50):\n color = '\\x1b[31m' # red\n elif (levelno >= 40):\n color = '\\x1b[31m' # red\n elif (levelno >= 30):\n color = '\\x1b[33m' # yellow\n elif (levelno >= 20):\n color = '\\x1b[32m' # green\n elif (levelno >= 10):\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n args[1].msg = color + args[1].msg + '\\x1b[0m' # normal\n # print \"after\"\n return fn(*args)\n\n return new\n\n\ndef init():\n if platform.system() == 'Windows':\n # Windows does not support ANSI escapes and we are using API calls to\n # set the console color\n logging.StreamHandler.emit = add_coloring_to_emit_windows(\n logging.StreamHandler.emit)\n else:\n # all non-Windows platforms are supporting ANSI escapes so we use them\n logging.StreamHandler.emit = add_coloring_to_emit_ansi(\n logging.StreamHandler.emit)\n # log = logging.getLogger()\n # log.addFilter(log_filter())\n # //hdlr = logging.StreamHandler()\n # //hdlr.setFormatter(formatter())\n\n\ninit()\n\nCRITICAL = logging.CRITICAL\nERROR = logging.ERROR\nWARNING = logging.WARNING\nINFO = logging.INFO\nDEBUG = 
logging.DEBUG\nNOTSET = logging.NOTSET\n\n\ndef basicConfig(*args, **kwargs):\n logging.basicConfig(*args, **kwargs)\n\n\ndef critical(*args, **kwargs):\n logging.critical(*args, **kwargs)\n\n\ndef error(*args, **kwargs):\n logging.error(*args, **kwargs)\n\n\ndef warning(*args, **kwargs):\n logging.warning(*args, **kwargs)\n\n\ndef info(*args, **kwargs):\n logging.info(*args, **kwargs)\n\n\ndef debug(*args, **kwargs):\n logging.debug(*args, **kwargs)\n\n\ndef exception(*args, **kwargs):\n logging.exception(*args, **kwargs)\n"
},
{
"alpha_fraction": 0.6002570986747742,
"alphanum_fraction": 0.6053984761238098,
"avg_line_length": 21.882352828979492,
"blob_id": "43a3a5101daac5130e87bae0f2f850a409ed3dcb",
"content_id": "a601f52b3e852785c4646b61eb1bc354752af193",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 778,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 34,
"path": "/ircd/util.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "\"\"\"\n ircd.util\n ~~~~~~~~~\n\n A set of utility functions that are called from other parts of the IRCd.\n\"\"\"\n\nimport ircd\nfrom ircd.constants import *\n\n\ndef check_config():\n if not os.path.exists(CONFIG_FILE):\n ircd.logger.error(\"Cannot read initialization file: %s\" % CONFIG_FILE)\n shutdown(1)\n\n\ndef shutdown(status=0):\n if status > 0:\n ircd.logger.error(\"Server shutdown.\")\n else:\n ircd.logger.debug(\"Server shutdown.\")\n # pylint: disable=protected-access\n os._exit(status)\n\n\ndef isnick(nickname):\n if len(nickname) > ircd.config.getopt(\"NICK_LENGTH\"):\n return False\n if any(c in nickname for c in \"<>,./?:;@'~#=+()*&%$ \\\"!\"):\n return False\n if nickname[0].isdigit():\n return False\n return True\n"
},
{
"alpha_fraction": 0.50819993019104,
"alphanum_fraction": 0.5170382261276245,
"avg_line_length": 31.22468376159668,
"blob_id": "807042d69fdb54828e12362da0060878196e46b7",
"content_id": "9b3219b9c754c973b6e1f6951f31846c7544f96b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10183,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 316,
"path": "/ircd/models.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "import time\nimport traceback\nfrom uuid import uuid4\n\nimport ircd\nfrom ircd.constants import *\n\n\nclass Channel(object):\n channels = {}\n\n @classmethod\n def get(cls, name, user=None):\n return cls.channels.get(name, None)\n\n def __init__(self, name, creator):\n self.name = name\n self._modes = \"\"\n self.topic = \"\"\n\n self.creator = creator.id\n self.created = time.time()\n self.topicset = 0\n self.setby = creator.nickname\n self.key = \"\"\n\n self.banlist = []\n self.exemptlist = []\n self.invitelist = []\n\n def __contains__(self, item):\n if isinstance(item, User):\n for record in item.chans:\n if record.channel.name == self.name:\n return True\n return False\n return False\n\n @property\n def moderated(self):\n return \"m\" in self._modes\n\n @property\n def modes(self):\n # TODO actually implement this\n return\n\n def mode(self, user):\n record = user.rec(self)\n if (record.uc_modes & 1) > 0:\n return \"@\"\n if (record.uc_modes & 2) > 0:\n return \"+\"\n if (record.uc_modes & 4) > 0:\n return \"%\"\n return \"\"\n\n def status(self, user):\n record = user.rec(self)\n if (record.uc_modes & 1) > 0:\n return 4\n if (record.uc_modes & 2) > 0:\n return 1\n if (record.uc_modes & 4) > 0:\n return 2\n return 0\n\n def remove(self, user, reason=None):\n created = 0\n record = user.rec(self)\n if reason:\n user.send_channel(self, \"PART %s :%s\" % (self.name, reason))\n else:\n user.send_channel(self, \"PART :%s\" % self.name)\n user.chans.remove(record)\n\n if not len(self.users()):\n del Channel.channels[self.name]\n\n def users(self):\n inchannel = []\n for user in User.users.values():\n if user in self:\n inchannel.append(user)\n return inchannel\n\n\nclass UserRecord(object):\n def __init__(self, channel, created=False):\n # TODO actually implement this\n self.channel = channel\n self.uc_modes = 1 if created else 0\n\n\nclass User(object):\n users = {}\n nicktable = {}\n\n @classmethod\n def add_client(cls, socket, addr):\n id = uuid4()\n while id in cls.users:\n id = uuid4()\n user = User(id, socket, addr)\n\n @classmethod\n def get_by_id(cls, id):\n return cls.users.get(id)\n\n @classmethod\n def get_by_nick(cls, nick):\n ids = cls.nicktable.get(nick.lower())\n if not ids:\n return set()\n return map(lambda id: cls.users.get(id), ids)\n\n @classmethod\n def same_nick(cls, a, b):\n return a.lower() == b.lower()\n\n @classmethod\n def send_opers(cls, message):\n for user in cls.users.values():\n if \"o\" in user.modes and \"s\" in user.modes:\n user.send_serv(\"NOTICE %s :%s\" % (user.nickname, message))\n\n def __init__(self, id, socket, addr):\n self.id = id\n self._nickname = None\n self.ident = None\n self.registered = 0\n\n self.host, self.port = addr\n self.lastactive = time.time()\n self.lastping = 1\n self.modes = \"\"\n\n self.chans = []\n\n self.socket = socket\n self.__invites = []\n self.quit = False\n User.users[id] = self\n\n while not self.quit:\n try:\n buffer = \"\"\n while len(buffer) < BUFFER_SIZE:\n char = self.socket.recv(1).decode(\"utf-8\")\n if char == \"\\n\":\n break\n buffer += char\n # if not buffer:\n # del self\n # break\n ircd.logger.info(\"<< %s: %s\" % (self.id, buffer))\n ircd.commands.Command.process_command(self, buffer)\n except IOError as e:\n if e.errno == 32:\n ircd.logger.error(\"Pipe was broken.\")\n self.socket.close()\n except Exception as e:\n ircd.logger.error(\"Something bad happened:\\n%s\" %\n traceback.format_exc())\n\n def auth(self):\n # TODO actually implement authentication\n return True\n\n def 
connect(self):\n \"\"\"\n Actually connect the user to the network, displays the MOTD, etc.\n\n :return: Nothing\n \"\"\"\n\n self.registered = 7\n self.lastactive = time.time()\n\n if not self.auth():\n self.send(\"ERROR :Closing link: Invalid password.\")\n self.send_serv(\"NOTICE Auth :Welcome to \\002XinIRC Network\\002!\")\n self.send_serv(\"001 %s :Welcome to the XinIRC Network %s!%s@%s.\" % (\n self.nickname, self.nickname, self.ident, self.host))\n self.send_serv(\"002 %s :Your host is %s, running version %s.\" %\n (self.nickname, ircd.config.getopt(\"SERVER_NAME\"),\n VERSION))\n self.send_serv(\"003 %s :This server was created at %s.\" %\n (self.nickname, ircd.config.getopt(\"CREATION\")))\n self.motd()\n\n User.send_opers(\"*** Client connecting on port %d: %s!%s@%s\" %\n (self.port, self.nickname, self.ident, self.host))\n\n def join(self, name, key=None):\n created = 0\n channel = Channel.get(name)\n ircd.logger.debug(\n \"Joining channel %s (with key %s)\" % (name, repr(key)))\n if channel:\n if self in channel:\n return\n created = 1\n else:\n channel = Channel(name, self)\n Channel.channels[name] = channel\n created = 2\n\n if len(self.chans) == ircd.config.getopt(\"MAX_CHANNEL\"):\n ircd.logger.debug(\"User channel maximum exceeded: %s %s\" %\n (self.nickname, channel.name))\n self.send_serv(\"405 %s %s :You are on too many channels.\" % (\n self.nickname, channel.name))\n return\n\n rec = UserRecord(channel, created=(created == 2))\n self.chans.append(rec)\n self.send_channel(channel, \"JOIN :%s\" % channel.name)\n if channel.topicset:\n self.send_serv(\"332 %s %s :%s\" %\n (self.nickname, channel.name, channel.topic))\n self.send_serv(\"333 %s %s %s %d\" % (\n self.nickname, channel.name, channel.setby, channel.topicset))\n self.userlist(channel)\n self.send_serv(\"366 %s %s :End of /NAMES list.\" %\n (self.nickname, channel.name))\n self.send_serv(\"324 %s %s +%s\" %\n (self.nickname, channel.name, channel.modes))\n self.send_serv(\"329 %s %s %d\" %\n (self.nickname, channel.name, channel.created))\n\n def motd(self):\n if not ircd.config.getopt(\"MOTD_FILE\"):\n self.send_serv(\n \"422 %s :Message of the day file is missing.\" % self.nickname)\n return\n motd = open(ircd.config.getopt(\"MOTD_FILE\"), \"r\")\n self.send_serv(\"375 %s :- %s message of the day\" %\n (self.nickname, ircd.config.getopt(\"SERVER_NAME\")))\n for line in motd:\n line = line.strip(\"\\n\")\n self.send_serv((\"372 %s :- %s\") % (self.nickname, line))\n self.send_serv(\"376 %s :End of %s message of the day.\" %\n (self.nickname, ircd.config.getopt(\"SERVER_NAME\")))\n\n def rec(self, channel):\n for record in self.chans:\n if record.channel.name == channel.name:\n return record\n\n def send(self, line):\n ircd.logger.info(\">> %s\" % line)\n self.socket.sendall(bytes(\"%s\\n\" % line, encoding=\"utf-8\"))\n\n def send_channel(self, channel, line, exclude=False):\n for user in channel.users():\n if exclude and user.id == self.id:\n continue\n self.send_to(user, line)\n\n def send_common(self, line, exclude=False):\n my_channels = map(lambda rec: rec.channel.name, self.chans)\n if not exclude:\n self.send_from(self, line)\n for user in User.users.values():\n for record in user.chans:\n if record.channel.name in my_channels:\n user.send_from(self, line)\n break\n\n def send_from(self, user, line):\n self.send(\":%s!%s@%s %s\" %\n (user.nickname, user.ident, user.host, line))\n\n def send_to(self, dest, line):\n dest.send_from(self, line)\n\n def send_serv(self, line):\n self.send(\":%s %s\" % 
(ircd.config.getopt(\"SERVER_NAME\"), line))\n\n def userlist(self, channel):\n users = \"353 %s = %s :\" % (self.nickname, channel.name)\n for user in User.users.values():\n if user in channel:\n # TODO +i user doesn't show up in list\n users += channel.mode(user)\n users += user.nickname\n users += \" \"\n if len(users) > 480 - ircd.config.getopt(\"NICK_LENGTH\"):\n self.send_serv(users)\n users = \"353 %s = %s :\" % (self.nickname, channel.name)\n if users[-1] != \":\":\n self.send_serv(users)\n\n @property\n def nickname(self):\n return self._nickname\n\n @nickname.setter\n def nickname(self, value):\n nickname = self._nickname.lower() if self._nickname else None\n if nickname in User.nicktable and self.id in User.nicktable[nickname]:\n User.nicktable[nickname].remove(self.id)\n if not User.nicktable[nickname]:\n del User.nicktable[nickname]\n if value:\n if value not in User.nicktable:\n User.nicktable[value.lower()] = set()\n User.nicktable[value.lower()].add(self.id)\n self._nickname = value\n\n def __del__(self):\n self.nickname = None\n ircd.logger.info(\n \"Connection closed with %s:%s\" % (self.host, self.port))\n self.socket.close()\n"
},
{
"alpha_fraction": 0.4972342252731323,
"alphanum_fraction": 0.5125213861465454,
"avg_line_length": 32.36577224731445,
"blob_id": "0055cebf9f9e37ef408df5d6ca1f216ae62c5f5f",
"content_id": "b296047ad933892e37c8a5779845cc4c79439b86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9943,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 298,
"path": "/ircd/commands.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "import time\n\nimport ircd\n\nCOMMAND_LIST = []\n\n\nclass Command:\n @staticmethod\n def register(name, handler, flags, min_params):\n cmd = Command(name, handler, flags, min_params)\n COMMAND_LIST.append(cmd)\n\n def __init__(self, name, handler, flags_needed, min_params):\n self.name = name\n self.handler = handler\n self.flags_needed = flags_needed\n self.min_params = min_params\n\n self.use_count = 0\n self.total_bytes = 0\n\n def process_parameters(self, parameters):\n paramlist = []\n parameters = parameters.strip()\n if parameters[0] == \":\":\n return 1, parameters[1:]\n if parameters:\n if not parameters.count(\" \") or parameters[0] == \":\":\n paramlist = [parameters]\n if not parameters.count(\" \") and parameters[0] == \":\":\n paramlist[0] = paramlist[0][1:]\n return 1, paramlist\n c = 0\n for i, v in enumerate(parameters):\n if v == \" \":\n parameter = parameters[c:i + 1].strip()\n if parameter[0] == \":\":\n c += 1\n break\n paramlist.append(parameter)\n c = i + 1\n if parameters[c] == \":\":\n c += 1\n paramlist.append(parameters[c:].strip())\n return len(paramlist), paramlist\n\n @staticmethod\n def process_command(user, line):\n global COMMAND_LIST\n line = line.strip()\n if not line:\n return\n\n parameters = \"\"\n paramlist = []\n if not line.count(\" \"):\n items = 0\n cmd = line.upper()\n paramlist = None\n else:\n ind = line.find(\" \")\n cmd = line[:ind].upper()\n parameters = line[ind + 1:]\n\n for command in COMMAND_LIST:\n if command.name == cmd:\n if parameters:\n items, paramlist = command.process_parameters(parameters)\n else:\n items = 0\n paramlist = None\n\n user.lastactive = time.time()\n user.nping = user.lastactive + 120\n\n if items < command.min_params:\n ircd.logger.debug(\n \"process_command: Not enough parameters: %s\"\n % command.name)\n user.send_serv(\"461 %s %s :Not enough parameters\" %\n (user.nickname, command.name))\n return\n # TODO flags_needed\n allowed = [\"USER\", \"NICK\", \"PASS\"]\n if command.name not in allowed and not user.nickname:\n ircd.logger.debug(\n \"process_command: You have not registered: %s\"\n % command.name)\n user.send_serv(\n \"451 %s :You have not registered\" % command.name)\n return\n if command.handler:\n command.use_count += 1\n # TODO total_bytes\n command.handler(paramlist, items, user)\n return\n\n ircd.logger.debug(\"process_command: Not in table: %s\" % cmd)\n user.send_serv(\"421 %s %s :Unknown command\" % (user.nickname, cmd))\n\n\ndef setup_command_table():\n \"\"\"\n Populates the command table with commands using the `Command.register`\n function.\n \"\"\"\n\n Command.register(\"JOIN\", handle_join, 0, 1)\n Command.register(\"MOTD\", handle_motd, 0, 0)\n Command.register(\"MODE\", handle_mode, 0, 1)\n Command.register(\"NAMES\", handle_names, 0, 1)\n Command.register(\"NICK\", handle_nick, 0, 1)\n Command.register(\"PART\", handle_part, 0, 1)\n Command.register(\"PASS\", handle_pass, 0, 1)\n Command.register(\"PING\", handle_ping, 0, 1)\n Command.register(\"PONG\", handle_pong, 0, 1)\n Command.register(\"QUIT\", handle_quit, 0, 1)\n Command.register(\"PRIVMSG\", handle_privmsg, 0, 2)\n Command.register(\"TOPIC\", handle_topic, 0, 1)\n Command.register(\"USER\", handle_user, 0, 4)\n Command.register(\"WHO\", handle_who, 0, 1)\n # TODO finish all commands\n\n\ndef handle_join(args, pcnt, user):\n channels = args[0].split(\",\")\n keys = [\"\"] * len(channels)\n if pcnt > 1:\n keys = args[1].split(\",\")\n if len(keys) < len(channels):\n keys += [\"\"] + (len(channels) - len(keys))\n 
channels = zip(channels, keys)\n\n for channel in channels:\n user.join(*channel)\n\n\ndef handle_mode(args, pcnt, user):\n dests = ircd.models.User.get_by_nick(args[0])\n outpars, direction = \"\", 1\n if dests and pcnt == 1:\n user.send_serv(\"221 %s :+%s\" % (user.nickname, user.modes))\n return\n if dests and pcnt > 1:\n for dest in dests:\n can_change = 0\n if ircd.models.User.same_nick(user.nickname,\n dest.nickname) or \"o\" in user.modes:\n can_change = 1\n if not can_change:\n user.send_serv(\"482 %s :Can't change mode for other users.\"\n % user.nickname)\n return\n\n\ndef handle_motd(args, pcnt, user):\n user.motd()\n\n\ndef handle_names(args, pcnt, user):\n channel = ircd.models.Channel.get(args[0])\n if channel:\n user.userlist(channel)\n user.send_serv(\"366 %s %s :End of /NAMES list.\"\n % (user.nickname, channel.name))\n else:\n user.send_serv(\"401 %s %s :No suck nick/channel.\"\n % (user.nickname, args[0]))\n\n\ndef handle_nick(args, pcnt, user):\n if pcnt < 1:\n ircd.logger.debug(\"Not enough params for handle_nick.\")\n return\n if not args[0]:\n ircd.logger.debug(\"Invalid nick passed to handle_nick.\")\n return\n if not user:\n ircd.logger.debug(\"Invalid user passed to handle_nick.\")\n return\n if user.nickname and user.nickname.lower() == args[0].lower():\n ircd.logger.debug(\"Nickname is the same, skipping.\")\n return\n if not ircd.util.isnick(args[0]):\n ircd.logger.debug(\"Invalid nickname.\")\n return\n if user.registered == 7:\n user.send_common(\"NICK %s\" % args[0])\n user.nickname = args[0]\n if user.registered < 3:\n user.registered |= 2\n elif user.registered == 3:\n user.connect()\n\n\ndef handle_part(args, pcnt, user):\n if pcnt == 1:\n args.append(None)\n channel = ircd.models.Channel.get(args[0])\n channel.remove(user, args[1])\n\n\ndef handle_pass(args, pcnt, user):\n user.password = args[0]\n\n\ndef handle_ping(args, pcnt, user):\n user.send_serv(\n \"PONG %s :%s\" % (ircd.config.getopt(\"SERVER_NAME\"), args[0]))\n\n\ndef handle_pong(args, pcnt, user):\n user.lastping = 1\n\n\ndef handle_privmsg(args, pcnt, user):\n if args[0][0] == \"#\":\n channel = ircd.models.Channel.get(args[0])\n if channel:\n if user not in channel:\n user.send_serv(\n \"404 %s %s :Cannot send to channel (no external messages).\" % (\n user.nickname, channel.name))\n if channel.moderated and channel.status(user) < 1:\n user.send_serv(\"404 %s %s :Cannot send to channel (+m).\" %\n (user.nickname, channel.name))\n user.send_channel(channel, \"PRIVMSG %s :%s\" %\n (channel.name, args[1]), exclude=True)\n else:\n user.send_serv(\"401 %s %s :No such nick/channel.\" %\n (user.nickname, channel.name))\n else:\n dests = ircd.models.User.get_by_nick(args[0])\n if not dests:\n user.send_serv(\"401 %s %s :No such nick/channel.\" %\n (user.nickname, args[0]))\n return\n for dest in ircd.models.User.get_by_nick(args[0]):\n if dest:\n # TODO autorespond with away message\n user.send_to(dest, \"PRIVMSG %s :%s\" % (dest.nickname, args[1]))\n\n\ndef handle_quit(args, pcnt, user):\n pass\n\n\ndef handle_topic(args, pcnt, user):\n channel = ircd.models.Channel.get(args[0])\n if pcnt == 1:\n if channel:\n if channel.topicset:\n user.send_serv(\"332 %s %s :%s\" %\n (user.nickname, channel.name, channel.topic))\n user.send_serv(\"333 %s %s %s %d\" % (\n user.nickname, channel.name, channel.setby,\n channel.topicset))\n else:\n user.send_serv(\"331 %s %s :No topic is set.\" %\n (user.nickname, channel.name))\n else:\n user.send_serv(\"331 %s %s :No topic is set.\" %\n (user.nickname, 
channel.name))\n else:\n if channel:\n if channel.status(user) < 2:\n user.send_serv(\n \"482 %s %s :You must be at least a half-operator.\" % (\n user.nickname, channel.name))\n return\n channel.topic = args[1]\n channel.setby = user.nickname\n channel.topicset = time.time()\n user.send_channel(channel, \"TOPIC %s :%s\" %\n (channel.name, channel.topic))\n else:\n user.send_serv(\"401 %s %s :No such nick/channel.\" %\n (user.nickname, args[0]))\n\n\ndef handle_user(args, pcnt, user):\n if user.registered < 3:\n user.send_serv(\n \"NOTICE Auth :No ident response, ident prefixed with ~.\")\n user.ident = args[0]\n user.fullname = args[3]\n user.registered |= 1\n else:\n user.send_serv(\"462 %s :You may not reregister.\" % user.nickname)\n\n if user.registered == 3:\n user.connect()\n\n\ndef handle_who(args, pcnt, user):\n # TODO actually implement this\n pass\n"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 14,
"blob_id": "4aeec113a8a580e1e95e4f147531c3cf4ffbc056",
"content_id": "9851e0e9206ec48fe5cec6a31a35547fadb4cdc9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 44,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 3,
"path": "/xinircd",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n/usr/bin/env python3 -m ircd $1"
},
{
"alpha_fraction": 0.6095238327980042,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 51.5,
"blob_id": "5cae59b44c60bf2073a0ae697960dc512381a6ad",
"content_id": "584e538f7aad4e0dc5258c2e2c01a3e6445e5e5a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "pyyaml==3.12\ngit+https://github.com/failedxyz/python-daemon.git@b04c5b85043947d0e8146e5a759ecdf3bc3fda7e\n"
},
{
"alpha_fraction": 0.49132445454597473,
"alphanum_fraction": 0.49624061584472656,
"avg_line_length": 31.018518447875977,
"blob_id": "d7074bbc9bd5dadc5a67102c6784646a63e28ddb",
"content_id": "3016a565eabec4f11932bf484d976919a80ab99a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3458,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 108,
"path": "/ircd/__init__.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\n ircd\n ~~~~\n\n An extensible IRC server built with love.\n\n :copyright: Michael Zhang\n :license: MIT, see LICENSE.md for details.\n\"\"\"\n\nimport os\nimport select\nimport socket\nimport time\nfrom threading import Thread\n\nimport ircd.commands\nimport ircd.config\nimport ircd.logger\nimport ircd.models\nimport ircd.util\nimport ssl\n\n\nclass XinIRCd():\n output = os.path.abspath(\"output.log\")\n pidfile = os.path.abspath(\"lock.pid\")\n\n def __init__(self):\n if not os.path.exists(XinIRCd.output):\n with open(XinIRCd.output, \"a\"):\n os.utime(XinIRCd.output, None)\n\n self.stdin_path = XinIRCd.output\n self.stdout_path = XinIRCd.output\n self.stderr_path = XinIRCd.output\n\n self.pidfile_path = XinIRCd.pidfile\n self.pidfile_timeout = 5\n\n @staticmethod\n def run():\n if os.getpid() == 0 or os.getgid() == 0:\n ircd.logger.error(\"You shouldn't be running this as root!\")\n ircd.util.shutdown(1)\n\n ircd.commands.setup_command_table()\n ircd.logger.debug(\"Set up commands.\")\n\n ircd.config.read_config()\n ircd.logger.debug(\"Read the config.\")\n\n ports = ircd.config.getopt(\"PORTS\", [])\n if len(ports) == 0:\n ircd.logger.error(\"No ports to bind to!\")\n ircd.util.shutdown(1)\n\n ssl_sockets = []\n sockets = []\n\n for port in ports:\n ssl_on = False\n if port[-1] == \"+\":\n ssl_on = True\n port = port[:-1]\n host = \"0.0.0.0\"\n port = int(port)\n ircd.logger.debug(\"Attempting to bind to %s:%s\" % (host, port))\n try:\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((host, port))\n server.listen(5)\n sockets.append(server)\n if ssl_on:\n ssl_sockets.append(server)\n except socket.error as exception:\n if server:\n server.close()\n ircd.logger.error(\"Could not bind to %s:%s: %s\" %\n (host, port, exception))\n ircd.logger.debug(\"Bound to %s/%s ports.\" % (len(sockets), len(ports)))\n ircd.config.setopt(\"CREATION\", time.time())\n\n while True:\n try:\n reads = select.select(sockets, [], [])[0]\n for read in reads:\n for server in sockets:\n if read != server:\n continue\n conn, addr = server.accept()\n if server in ssl_sockets:\n conn = ssl.wrap_socket(\n conn,\n server_side=True,\n certfile=ircd.config.getopt(\"CERTFILE\"),\n keyfile=ircd.config.getopt(\"KEYFILE\"))\n thread = Thread(\n target=ircd.models.User.add_client,\n args=(conn, addr))\n thread.start()\n ircd.logger.debug(\"Client connected from %s:%s\" % addr)\n except KeyboardInterrupt:\n ircd.logger.info(\"Application stopped by user.\")\n ircd.util.shutdown(0)\n"
},
{
"alpha_fraction": 0.6774193644523621,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 14.5,
"blob_id": "982e9a29476301e091fada5948cb0f7ba8f322be",
"content_id": "1a567e331fc6444350846e05255b8960d7ec6481",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 2,
"path": "/ircd/modules/__init__.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "class Module(object):\n pass\n"
},
{
"alpha_fraction": 0.5369847416877747,
"alphanum_fraction": 0.5417842864990234,
"avg_line_length": 28.764705657958984,
"blob_id": "486c98e15d2c21904a75937cb66ef1121fb0458e",
"content_id": "e817c8172ea8730b1588ce5517a2b9b095f74c84",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3542,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 119,
"path": "/ircd/config.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\n ircd.configure\n ~~~~~~~~~~~~~~\n\n Command-line configuration script for XinIRCd.\n\"\"\"\n\nimport yaml\n\nfrom ircd.constants import *\n\nOPTIONS = {}\n\nDEFAULTS = {\n \"MODULE_DIR\": os.path.abspath(\n os.path.dirname(__file__) + os.sep + \"modules\"),\n \"MAX_CLIENT\": 1024,\n \"NICK_LENGTH\": 32,\n \"CHAN_LENGTH\": 64,\n \"MAX_CHANNEL\": 20,\n \"MOTD_FILE\": os.path.abspath(\"motd.txt\"),\n \"PORTS\": [\"6667\"]\n}\n\n\ndef getopt(key, default=None):\n return OPTIONS.get(key, default)\n\n\ndef setopt(key, value, persist=False):\n OPTIONS[key] = value\n if persist:\n # TODO persist\n pass\n\n\ndef read_config():\n global OPTIONS\n OPTIONS = yaml.load(open(CONFIG_FILE, \"r\"))\n # TODO config validation\n # TODO make sure bind is a list of host/port pairs\n\n\ndef configure():\n conf = dict(DEFAULTS.items())\n\n print(YELLOW + \"Welcome to the XinIRCd Configuration!\" + NORMAL)\n print()\n\n print(\"In what directory should the modules loaded from?\")\n conf[\"MODULE_DIR\"] = input(\"~ [\" + GREEN + DEFAULTS[\"MODULE_DIR\"] + NORMAL\n + \"] \") or DEFAULTS[\"MODULE_DIR\"]\n print()\n\n if not os.path.exists(conf[\"MODULE_DIR\"]):\n print(\"The module directory doesn't exist. Create it? (y/n)\")\n if input(\"~ [\" + GREEN + \"y\" + NORMAL + \"] \").lower() != \"n\":\n os.makedirs(conf[\"MODULE_DIR\"])\n print()\n\n print(\"Server name? (required for SSL, put FQDN)\")\n conf[\"SERVER_NAME\"] = input(\"~ \")\n print()\n\n while not conf[\"SERVER_NAME\"]:\n print(RED + \"Please enter a valid server name.\" + NORMAL)\n conf[\"SERVER_NAME\"] = input(\"~ \")\n print()\n\n print(\"Maximum number of clients at any time? (\" + GREEN +\n \"1-\" + str(DEFAULTS[\"MAX_CLIENT\"]) + NORMAL + \")\")\n conf[\"MAX_CLIENT\"] = input(\n \"~ [\" + GREEN + str(DEFAULTS[\"MAX_CLIENT\"]) + NORMAL + \"] \") or \\\n str(DEFAULTS[\"MAX_CLIENT\"])\n print()\n\n while not conf[\"MAX_CLIENT\"].isdigit() or int(conf[\"MAX_CLIENT\"]) > \\\n DEFAULTS[\"MAX_CLIENT\"]:\n print(RED + \"Please enter a valid number between \" +\n GREEN + \"1-\" + str(DEFAULTS[\"MAX_CLIENT\"]) + NORMAL)\n conf[\"MAX_CLIENT\"] = input(\n \"~ [\" + GREEN + str(DEFAULTS[\"MAX_CLIENT\"]) + NORMAL + \"] \") or \\\n str(DEFAULTS[\"MAX_CLIENT\"])\n print()\n conf[\"MAX_CLIENT\"] = int(conf[\"MAX_CLIENT\"])\n\n print(\"Maximum length of nicknames?\")\n conf[\"NICK_LENGTH\"] = input(\n \"~ [\" + GREEN + str(DEFAULTS[\"NICK_LENGTH\"]) + NORMAL + \"] \") or \\\n str(DEFAULTS[\"NICK_LENGTH\"])\n conf[\"NICK_LENGTH\"] = int(conf[\"NICK_LENGTH\"])\n print()\n\n print(\"Maximum length of channel names?\")\n conf[\"CHAN_LENGTH\"] = input(\n \"~ [\" + GREEN + str(DEFAULTS[\"CHAN_LENGTH\"]) + NORMAL + \"] \") or \\\n str(DEFAULTS[\"CHAN_LENGTH\"])\n conf[\"CHAN_LENGTH\"] = int(conf[\"CHAN_LENGTH\"])\n print()\n\n print(\"Maximum number of channels a user can join?\")\n conf[\"MAX_CHANNEL\"] = input(\n \"~ [\" + GREEN + str(DEFAULTS[\"MAX_CHANNEL\"]) + NORMAL + \"] \") or \\\n str(DEFAULTS[\"MAX_CHANNEL\"])\n conf[\"MAX_CHANNEL\"] = int(conf[\"MAX_CHANNEL\"])\n print()\n\n with open(CONFIG_FILE, \"w\") as out:\n yaml.dump(conf, out, default_flow_style=False)\n print(\"DONE!\")\n print(\"*** \" + CYAN + \"Remember to edit your configuration files!\" +\n NORMAL + \" ***\")\n print()\n\n\nif __name__ == \"__main__\":\n configure()\n"
},
{
"alpha_fraction": 0.44312795996665955,
"alphanum_fraction": 0.5947867035865784,
"avg_line_length": 16.58333396911621,
"blob_id": "f1ec250ec947696f2e8df09bbd036c46c334ee0b",
"content_id": "32f9441fc49e6838f3030ac7737902ee73532be5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 24,
"path": "/ircd/constants.py",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "\"\"\"\n ircd.constants\n ~~~~~~~~~~~~~~\n\n A list of constants that may be imported into various source files.\n\"\"\"\n\nimport os\n\nVERSION = \"0.2-alpha\"\n\nNORMAL = \"\\033[0m\"\nBLACK = \"\\033[0;30m\"\nRED = \"\\033[0;31m\"\nGREEN = \"\\033[0;32m\"\nYELLOW = \"\\033[0;33m\"\nBLUE = \"\\033[0;34m\"\nPURPLE = \"\\033[0;35m\"\nCYAN = \"\\033[0;36m\"\nLIGHTGRAY = \"\\033[0;37m\"\nWHITE = \"\\033[1;37m\"\n\nCONFIG_FILE = os.path.abspath(\".xin\")\nBUFFER_SIZE = 2048\n"
},
{
"alpha_fraction": 0.7385129332542419,
"alphanum_fraction": 0.7406014800071716,
"avg_line_length": 38.26229476928711,
"blob_id": "11ee9b9ae2be3ab545b778f7e85618ae38865048",
"content_id": "7c134109ef94ebe739c53ea552119a8d9dc28e3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2394,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 61,
"path": "/README.md",
"repo_name": "failedxyz/xinircd",
"src_encoding": "UTF-8",
"text": "XinIRCd\n-------\n\nXinIRCd is a new, open-source IRC server, written in Python 2.7. Here's its progress on its journey to becoming a full-fledged IRCd:\n\n- [x] Functional chat rooms.\n- [ ] Comprehensive user privileges.\n- [ ] Module system.\n- [x] SSL support.\n- [ ] Some kind of persistent memory.\n- [ ] Configuration file hot-swapping.\n- [ ] Full compliance with RFCs or whatever.\n- [ ] Actual documentation.\n- [ ] Makes me tea in the morning.\n\nGetting Started\n---------------\n\nRunning XinIRCd is extremely simple (if you are on Linux. This software is still untested on Windows and I don't plan on testing\nany time soon); just clone this repository, and then follow the steps below. Make sure that the version of Python you are using is\nsome flavor of Python 3.\n\n### Step 1: Configuration\n\nRun `configure.py` for an interactive (not graphical! sorry) configuration wizard. Upon completion of this wizard, a `.xin`\nconfiguration file will be generated and placed into the root directory of the repository. Please review the configuration file\nafter it has been generated. Since it starts with a `.`, you may have to perform `ls -a` or enable hidden files to see it.\n\n### Step 2: Deployment\n\nThere are two methods of running XinIRCd:\n\n- **Execute `./xinircd start`.** This script runs the XinIRCd application as a daemon (which means it runs in the background). A PID\n file is temporarily saved to keep track of the process to stop it easily later. If you are simply using XinIRCd to run a server\n and do not wish to contribute to development, please use this approach.\n- **Execute `./xinircd run`.** This script runs the XinIRCd application directly. All output will appear in the same terminal where\n you ran it, and you will be able to connect immediately. This is useful for debugging purposes, but should *never* be used in production.\n For production environments, please refer to the above approach.\n\nThat's all! If you encounter any issues, please submit it to the Github issue tracker.\n\nDocumentation\n-------------\n\nWill be offered later.\n\nContributing\n------------\n\nIf you make any of those things on the list above happen (or help the project get closer to achieving one of those things),\nthen you can get:\n\n- A spot on the contributors list (doesn't exist yet).\n- That warm and fuzzy feeling of contributing to an open-source project.\n\nContact\n-------\n\nAuthor: Michael Zhang\n\nLicense: MIT"
}
] | 12 |
nathanielmit/reminders | https://github.com/nathanielmit/reminders | 6b8e55549be8550bd08a764b0ceda827001ea0e8 | f36f90565d4145ec7d0e3b9240bd268cf3ff843c | 7c55e0a17ab0777026f8c769a88dd73c833c7853 | refs/heads/master | 2020-06-27T13:58:14.827707 | 2019-08-01T03:48:54 | 2019-08-01T03:48:54 | 199,971,167 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6746780872344971,
"alphanum_fraction": 0.694420576095581,
"avg_line_length": 22.795917510986328,
"blob_id": "6b2082da2fd681a5b20bc08dc760aa2a1e4aa42f",
"content_id": "82478040b95439e5d7e0ef93f2a55fac402b421a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1165,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 49,
"path": "/reminders.py",
"repo_name": "nathanielmit/reminders",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nimport tkinter.simpledialog as simpledialog\nimport tkinter.messagebox as messagebox\nimport datetime as DT\nimport time\nimport os\nimport sys\n\nclass popupManager(simpledialog.Dialog):\n\n\tdef body(self, master):\n\n\t\tLabel(master, text=\"Enter Description:\").grid(row=0)\n\t\tLabel(master, text=\"Enter Time (h:mm AM/PM:\").grid(row=1)\n\n\t\tself.e1 = Entry(master)\n\t\tself.e2 = Entry(master)\n\t\tprint(self.e1.get())\n\n\t\tself.e1.grid(row=0, column=1)\n\t\tself.e2.grid(row=1, column=1)\n\t\t\n\t\teventDescription = self.e1.get()\n\t\teventTime = self.e2.get()\n\t\t\n\t\treturn self.e1 # initial focus\n\n\tdef apply(self):\n\t\teventDescription = str(self.e1.get())\n\t\teventTime = str(self.e2.get())\n\n\t\t#Calculate time\n\t\thours, minutes = eventTime.split(\":\")\n\t\tminutes, shift = minutes.split(\" \")\n\t\tif (shift == \"PM\"):\n\t\t\thours = int(hours,10) + 12\n\t\tnow = DT.datetime.now()\n\t\ttarget = DT.datetime.combine(DT.date.today(), DT.time(hour=hours, minute=int(minutes,10)))\n\t\tif target < now:\n\t\t\ttarget += DT.timedelta(days=1)\n\n\t\tseconds = (target-now).total_seconds()\n\t\ttime.sleep(seconds)\n\n\t\tmessagebox.showinfo(\"Reminder\", eventDescription)\n\nroot = Tk()\nroot.withdraw()\npopupManager(root)"
},
{
"alpha_fraction": 0.786223292350769,
"alphanum_fraction": 0.786223292350769,
"avg_line_length": 104.25,
"blob_id": "14a5e8991a199a22c9f6163fd6c298b015a87b1e",
"content_id": "7dba9e1be112de8e49a5ae797dbc901b74a346c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 354,
"num_lines": 4,
"path": "/README.md",
"repo_name": "nathanielmit/reminders",
"src_encoding": "UTF-8",
"text": "# reminders\nSmall python program using tkinter to set reminders.\n\nI use this at work by assigning the script as an autohotkey so that it can be run quickly when I need to remember to do something at a specific time during a shift. When I have something to set, I run the script with a key-combination, type the reminder description and time (in the format h:mm AM/PM). Windows then creates a popup when the time arrives.\n"
}
] | 2 |
G-Levine/puppersim | https://github.com/G-Levine/puppersim | de3e0afc2a38122b58eac381ca423d6e28fba47f | 8433efd897bdbd6ff9efc87be01cea4f8c90b03a | 66af5cd59fd71d442ab80b63b2d01628364b11ce | refs/heads/main | 2023-08-02T12:12:32.334689 | 2021-10-10T17:16:00 | 2021-10-10T17:16:00 | 388,628,648 | 2 | 1 | Apache-2.0 | 2021-07-22T23:58:24 | 2021-06-12T16:10:44 | 2021-06-12T01:09:44 | null | [
{
"alpha_fraction": 0.7285023927688599,
"alphanum_fraction": 0.7362318634986877,
"avg_line_length": 34.084747314453125,
"blob_id": "001048be5a85155fcaeb51002668a5e2c5a52bdf",
"content_id": "28c224dc83c950b23349a6c313d188d1411cad42",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2070,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 59,
"path": "/puppersim/pupper_v2.py",
"repo_name": "G-Levine/puppersim",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Add the new Pupper robot.\"\"\"\nimport gin\nimport gym\nimport numpy as np\nfrom puppersim import pupper_constants\nfrom pybullet_envs.minitaur.robots import quadruped_base\nfrom pybullet_envs.minitaur.robots import robot_urdf_loader\nfrom pybullet_envs.minitaur.robots import robot_config\n\[email protected]\nclass Pupper(quadruped_base.QuadrupedBase):\n \"\"\"The Pupper class that simulates the quadruped from Unitree.\"\"\"\n\n def _pre_load(self):\n \"\"\"Import the Pupper specific constants.\n \"\"\"\n self._urdf_loader = robot_urdf_loader.RobotUrdfLoader(\n pybullet_client=self._pybullet_client,\n urdf_path=pupper_constants.URDF_PATH,\n enable_self_collision=False ,\n init_base_position=pupper_constants.INIT_POSITION,\n init_base_orientation_quaternion=pupper_constants.INIT_ORIENTATION,\n init_joint_angles=pupper_constants.INIT_JOINT_ANGLES,\n joint_offsets=pupper_constants.JOINT_OFFSETS,\n joint_directions=pupper_constants.JOINT_DIRECTIONS,\n motor_names=pupper_constants.MOTOR_NAMES,\n end_effector_names=pupper_constants.END_EFFECTOR_NAMES,\n user_group=pupper_constants.MOTOR_GROUP,\n )\n\n\n def get_neutral_motor_angles():\n ABDUCTION_ANGLE=0\n HIP_ANGLE=0.6\n KNEE_ANGLE=-1.2\n initial_joint_poses = [ABDUCTION_ANGLE,HIP_ANGLE,KNEE_ANGLE]*4\n return initial_joint_poses\n \n\n @classmethod\n def get_constants(cls):\n del cls\n return pupper_constants\n"
},
{
"alpha_fraction": 0.6954568028450012,
"alphanum_fraction": 0.7159261107444763,
"avg_line_length": 26.067567825317383,
"blob_id": "ddf55bfbc39ab802cfee762610266d6af902f0ed",
"content_id": "8d994be98a70cd5031032eb26100b43a456d9c71",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2003,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 74,
"path": "/puppersim/pupper_example.py",
"repo_name": "G-Levine/puppersim",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"An example that the Pupper stands.\n\n\"\"\"\nfrom absl import app\nfrom absl import flags\nimport os\nimport time\nimport gin\nfrom pybullet_envs.minitaur.agents.baseline_controller import static_gait_controller\nfrom pybullet_envs.minitaur.envs_v2 import env_loader\nimport pybullet as p\nimport puppersim\n\nimport os\n\n\nflags.DEFINE_bool(\"render\", True, \"Whether to render the example.\")\n\nFLAGS = flags.FLAGS\nCONFIG_DIR = puppersim.getPupperSimPath()+\"/\"\n_CONFIG_FILE = os.path.join(CONFIG_DIR, \"pupper_with_imu.gin\")\n_NUM_STEPS = 10000\n_ENV_RANDOM_SEED = 13\n\n\ndef _load_config(render=False):\n gin.parse_config_file(_CONFIG_FILE)\n gin.bind_parameter(\"SimulationParameters.enable_rendering\", render)\n\n\ndef run_example(num_max_steps=_NUM_STEPS):\n \"\"\"Runs the example.\n\n Args:\n num_max_steps: Maximum number of steps this example should run for.\n \"\"\"\n env = env_loader.load()\n env.seed(_ENV_RANDOM_SEED)\n print(\"env.action_space=\",env.action_space)\n observation = env.reset()\n policy = static_gait_controller.StaticGaitController(env.robot)\n \n for _ in range(num_max_steps):\n #action = policy.act(observation)\n action = [0, 0.6,-1.2,0, 0.6,-1.2,0, 0.6,-1.2,0, 0.6,-1.2]\n obs, reward, done, _ = env.step(action)\n time.sleep(0.01)\n if done:\n break\n\n\ndef main(_):\n _load_config(FLAGS.render)\n run_example()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
},
{
"alpha_fraction": 0.7530612349510193,
"alphanum_fraction": 0.7612245082855225,
"avg_line_length": 27.823530197143555,
"blob_id": "81826a4f41b56cacbdd14f0fc630105d2092f10c",
"content_id": "15010639f9c4fd41902ce651aea12ee79069fbe8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 980,
"license_type": "permissive",
"max_line_length": 214,
"num_lines": 34,
"path": "/README.md",
"repo_name": "G-Levine/puppersim",
"src_encoding": "UTF-8",
"text": "# puppersim\nSimulation for DJI Pupper v2 robot\n\n## Usage:\n\npython setup.py develop\n\nThen run puppersim/pupper_server.py\n\nIn a separate terminal, run the StanfordQuadruped run_djipupper_sim from this [fork](https://github.com/erwincoumans/StanfordQuadruped).\n\nKeyboard controls:\n* wasd: left joystick\n* arrow keys: right joystick\n* q: L1\n* e: R1\n* ijkl: d-pad\n* x: X\n* square: u\n* triangle: t\n* circle: c\n\n## Training a Gym environment\n\nYou can train the pupper using pybullet [envs_v2](https://github.com/bulletphysics/bullet3/tree/master/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2) and this [ARS fork](https://github.com/erwincoumans/ars).\n\n```\npip install pybullet arspb ray puppersim\nray start --head\npython puppersim/pupper_ars_train.py --policy_type=linear\npython puppersim/pupper_ars_run_policy.py --expert_policy_file=data/lin_policy_plus_best_xxx.npz --json_file=data/params.json\n```\n\nSee a video of a trained policy: https://www.youtube.com/watch?v=JzNsax4M8eg\n"
}
] | 3 |
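The stand example above drives the environment through the usual gym-style `reset`/`step` contract. A minimal self-contained sketch of that loop, using a stub environment instead of the real pybullet one (the `StandStillEnv` class and its 12-value observation are purely illustrative, not from the repo):

```python
class StandStillEnv:
    """A stub with the reset/step contract the example assumes."""
    def reset(self):
        return [0.0] * 12                      # one joint angle per motor

    def step(self, action):
        obs, reward, done, info = [0.0] * 12, 0.0, False, {}
        return obs, reward, done, info

env = StandStillEnv()
observation = env.reset()
for _ in range(3):
    # hold a fixed stand pose: (hip, upper, lower) repeated for four legs
    observation, reward, done, _ = env.step([0, 0.6, -1.2] * 4)
    if done:
        break
```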
DKHEllO/pydatature | https://github.com/DKHEllO/pydatature | 1881034f4f5eb18c8b6dc833a70871e0f831bea3 | d72a8dac652e9d0ad0438697cf265d3016dfa9f2 | 171b786f91c85d5469eaf578a84b3a526bccf1c5 | refs/heads/master | 2020-03-07T18:17:48.902416 | 2018-04-10T16:00:53 | 2018-04-10T16:00:53 | 127,633,736 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6258602142333984,
"alphanum_fraction": 0.6375066041946411,
"avg_line_length": 41.43258285522461,
"blob_id": "eb54d4c1124e787dede3cb84d3fea4cbf8cac406",
"content_id": "6b98d0aaf1f499cbb5a47d18a8bc77c604a702e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7562,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 178,
"path": "/algorithm/recursive.py",
"repo_name": "DKHEllO/pydatature",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Recursion is a method of solving the problem. It breaks the problem down into smaller sub-problems until a sufficiently\n# small problem can be solved simply. Usually recursively involves the function call itself. Recursion allows us to write\n# elegant solutions that solve problems that may be difficult to program.\n\n# simple example\n# Calculate integer sums\n\n# normal\ndef listsum_normal(numList):\n \"\"\"\n :param numList: \n :return: \n \"\"\"\n theSum = 0\n for i in numList:\n theSum = theSum + i\n return theSum\n\n# recursion\ndef listsum_recursion(numList):\n \"\"\"\n :param numList: \n :return: \n \"\"\"\n if len(numList) == 1:\n return numList[0]\n else:\n return numList[0] + listsum_recursion(numList[1:])\n\n# Each time we make a recursive call, we will solve a smaller problem until the problem cannot be reduced. When we arrive\n# at the point of simple questions, we begin to piece together the answers to each small question until the initial problem\n# is solved.\n\n# Like Asimov robots, all recursive algorithms must obey three important laws:\n# Recursive algorithms must have basic conditions.\n# The recursive algorithm must change its state and approach the basic situation.\n# The recursive algorithm must call itself recursively.\n\n# Integer converted to an arbitrary string\n\n\ndef int_2_str(n, base):\n \"\"\"\n transfer int to arbitrary str\n :param n: the tranfered number\n :param base: system\n :return: \n \"\"\"\n base_str = \"0123456789ABCDEF\"\n if n < base:\n return base_str[n]\n else:\n return int_2_str(n//base, base) + base_str[n % base]\n\n# How Python implements a recursive function call\n# When calling a function in Python, a stack is allocated to handle the function's local variables. When the function\n# returns, the return value is left at the top of the stack for access by the calling function\n# The stack frame also provides a scope for the variables used by the function. Even if we repeatedly call the same\n# function, each call creates a new scope for the function's local variables.\n\n# Sherbinski triangle(twice)\n# /\\\n# / \\\n# /____\\\n# /\\ /\\\n# / \\ / \\\n# /____\\/____\\\n# /\\ /\\ /\\\n# / \\ / \\ / \\\n# /____\\/____\\/____\\\n# /\\ /\\ /\\ /\\\n# / \\ / \\ / \\ / \\\n# /____\\/____\\/____\\/____\\\n\n\ndef sherb_triangle(points, triangle_dic, n):\n \"\"\"\n :param points: triangle's points list(top, left, right) \n :param triangle_dic: store triangles that each split \n :param n: split times\n :return: triangle_dic\n \"\"\"\n if n < 1:\n return\n else:\n triangle_dic.setdefault(n, [])\n triangle_dic[n].append(points)\n mid_top_left = [(points[0][0] + points[1][0]) / 2, (points[0][1] + points[1][1]) / 2]\n mid_top_right = [(points[0][0] + points[2][0]) / 2, (points[0][1] + points[2][1]) / 2]\n mid_left_right = [(points[1][0] + points[2][0]) / 2, (points[1][1] + points[2][1]) / 2]\n sherb_triangle([mid_top_left, points[1], mid_left_right], triangle_dic, n-1)\n sherb_triangle([points[0], mid_top_left, mid_top_right], triangle_dic, n-1)\n sherb_triangle([mid_left_right, mid_left_right, points[2]], triangle_dic, n-1)\n return triangle_dic\n\n\n# **********************************************************************************************************************\n# Hanoi Tower Games\n# Tower of Hanoi was invented by the French mathematician Edward Lucas in 1883. His inspiration comes from a legend\n# that has a Hindu temple that gives the young pastor a puzzle. 
At the beginning, the priests were given three poles and\n# a pile of 64 gold discs, each one smaller than the one below it. Their task is to transfer all 64 dishes from one of\n# three bars to another. There are two important constraints, they can only move one plate at a time, and they cannot\n# place larger plates on top of smaller plates. The priest keeps moving a plate every second day and night. When they\n# completed their work, it was said that the temples would become dust and the world would disappear.\n\n# Here's how to use the middle lever to move the tower from the start rod to the target rod:\n# 1、Use the target rod to move the height-1 tower to the middle rod.\n# 2、Move start rod's tower to target rod.\n# 3、Use the start rod to move the height-1 tower to the target rod.\n\n\ndef move_tower(height, from_pole, to_pole, with_pole):\n \"\"\"\n :param height: the height of Hanoi tower \n :param from_pole: start pole position\n :param to_pole: end pole position\n :param with_pole: middle pole position\n :return: the method\n \"\"\"\n if height >= 1:\n if height > 2:\n print \"-\"*(height-1) + \"move %s from %s to %s with %s\" % (height-1, from_pole, with_pole, to_pole)\n move_tower(height-1, from_pole, with_pole, to_pole)\n if height == 2:\n print \"moving disk from\", from_pole, \"to\", to_pole\n else:\n print\"-\"*(height-1) + \"moving disk from\", from_pole, \"to\", to_pole\n if height > 2:\n print \"-\"*(height-1) + \"move %s from %s to %s with %s\" % (height-1, with_pole, to_pole, from_pole)\n move_tower(height-1, with_pole, to_pole, from_pole)\n\n# move_tower(5, 1, 3, 2)\n\n# **********************************************************************************************************************\n\n\n# Dynamic planning\n# Many programs in computer science are written to optimize some values; for example, finding the shortest path between\n# two points, finding the line that best fits a set of points, or finding the smallest set of objects that meet certain\n# criteria. Computer scientists use many strategies to solve these problems. Dynamic planning is a strategy for these\n# types of optimization problems\n\n# coin change\n# A truly dynamic programming algorithm will take a more systematic approach to solving this problem. Our dynamic\n# programming solution will start with finding a penny and systematically find the amount of change we need. This\n# ensures that at every step of the algorithm we already know the minimum number of coins needed to make a change\n# for any smaller quantity.\n# this method create a change table, each item corresponds the least amount of change, every change only to use all\n# possible coins to change the specified amount. 
find the number of coins remaining in the change table, and then\n# compare all possible cases to find the smallest\n\ndef coin_change(coin_list, change):\n \"\"\"\n :param coin_list: coins that can be used \n :param change: \n :return: min_coin_list(the minimum number of coins used in all case), min_change_list(coins that be used in all case)\n \"\"\"\n min_coin_list = [0]\n min_change_list = [[]]\n for i in range(1, change+1):\n coin_num = i\n for j in [c for c in coin_list if c <= i]:\n change_list = []\n if min_coin_list[i - j] + 1 < coin_num:\n coin_num = min_coin_list[i-j] + 1\n change_list = [j] + min_change_list[i-j]\n else:\n change_list += [j] + min_change_list[i-j]\n min_coin_list.append(coin_num)\n min_change_list.append(change_list)\n return min_coin_list, min_change_list\n\n# summary\n# Recursive algorithms usually map naturally to the expressions of the problem you are trying to solve\n# Recursion is not always the answer. At times, recursive solutions may be computationally more expensive than iterative\n# algorithms\n\n\n\n"
},
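The dynamic-programming discussion in recursive.py above boils down to a small bottom-up table build. A minimal Python 3 sketch of the same coin-change idea (independent of the repo's code; the function name is illustrative, and the coin list is assumed to let every amount be made):

```python
def min_coins(coins, change):
    # table[a] holds the fewest coins needed to make amount a
    table = [0] + [None] * change
    for amount in range(1, change + 1):
        best = None
        for c in (c for c in coins if c <= amount):
            if table[amount - c] is not None:
                candidate = table[amount - c] + 1
                if best is None or candidate < best:
                    best = candidate
        table[amount] = best
    return table[change]

assert min_coins([1, 5, 10, 25], 63) == 6   # 25 + 25 + 10 + 1 + 1 + 1
```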
{
"alpha_fraction": 0.5382184386253357,
"alphanum_fraction": 0.5420750975608826,
"avg_line_length": 27.48244285583496,
"blob_id": "a00ad72ca80cb4e1742c746e914ff997561a41ed",
"content_id": "78a3cbac88dc3df6691e54615b10dd58c5055b7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18669,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 655,
"path": "/data_structure.py",
"repo_name": "DKHEllO/pydatature",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This file define some basic data structures and does a simple implementation\n# such as Queue,Stack,Deque,Linked list,Tree,Graph,Hash table\n\n\nclass Stack(object):\n \"\"\"\n Create a empty stack\n items: storage elements in the stack\n \"\"\"\n def __init__(self):\n self.items = []\n\n def push(self, item):\n \"\"\"\n Add a new item to the top of the stack\n :param item: The elements to add to the stack \n :return: \n \"\"\"\n self.items.append(item)\n\n def pop(self):\n \"\"\"\n Delete a new item to the top of the stack\n :return: \n \"\"\"\n return self.items.pop(-1)\n\n def is_empty(self):\n \"\"\"\n Test stack is empty\n :return: True or False\n \"\"\"\n return self.items == []\n\n def size(self):\n \"\"\"\n Returns the number of items in the stack\n :return: int\n \"\"\"\n return len(self.items)\n\n def peek(self):\n \"\"\"\n Return to top item from stack\n :return: item(Undefined type, depending on the type of stack)\n \"\"\"\n return self.items[-1]\n\n\nclass Queue(object):\n \"\"\"\n Create a empty queue\n items: storage elements in the queue\n \"\"\"\n def __init__(self):\n self.items = []\n\n def enqueue(self, item):\n \"\"\"\n Add new items to the end of the queue\n :param item: The elements to add to the queue \n :return: \n \"\"\"\n self.items.append(item)\n\n def dequeue(self):\n \"\"\"\n Remove item from the header of queue and return \n :return: item(Undefined type, depending on the type of queue)\n \"\"\"\n item = self.items.pop(0)\n return item\n\n def is_empty(self):\n \"\"\"\n Test stack is empty\n :return: True or Flase\n \"\"\"\n return self.items == []\n\n def size(self):\n \"\"\"\n Returns the number of items in the queue\n :return: \n \"\"\"\n return len(self.items)\n\n\nclass Deque(object):\n \"\"\"\n Deque is an ordered collection of items that are similar to queues. It has two ends, a header and a tail, and the \n items remain unchanged in the set. The difference with deque is that adding and deleting items is non-limiting. \n You can add new items before or after. Similarly, existing items can be removed from either end. 
In a sense, this \n hybrid linear structure provides all the capabilities of stacks and queues in a single data structure.\n items: storage items in the deque\n \"\"\"\n def __init__(self):\n self.items = []\n\n def add_front(self, item):\n \"\"\"\n Add a new item to the deque's header\n :param item: The elements to add to the deque \n :return: \n \"\"\"\n self.items.append(item)\n\n def add_rear(self, item):\n \"\"\"\n Add a new item to the tail of deque\n :param item: The elements to add to the deque \n :return: \n \"\"\"\n self.items.insert(0, item)\n\n def remove_front(self):\n \"\"\"\n Delete a new item to the header of deque\n :return: \n \"\"\"\n self.items.pop(-1)\n\n def remove_rear(self):\n \"\"\"\n Delete a new item to the tail of deque\n :return: \n \"\"\"\n self.items.pop(0)\n\n def size(self):\n \"\"\"\n Returns the number of items in the deque\n :return: int\n \"\"\"\n return len(self.items)\n\n def is_empty(self):\n \"\"\"\n Test stack is empty\n :return: True or Flase\n \"\"\"\n return self.items == []\n\n\nclass Node(object):\n \"\"\"\n The basic structure of the linked list, each node object holds at least two information\n data: The data field of the node, the information of the node itself\n next: The reference to the next node\n \"\"\"\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def get_data(self):\n \"\"\"\n Return the data of the node itself\n :return: Uncertain type\n \"\"\"\n return self.data\n\n def get_next(self):\n \"\"\"\n Return the reference to the next node\n :return: Node\n \"\"\"\n return self.next\n\n def set_data(self, data):\n \"\"\"\n Modify the data of the node itself\n :param data: Modified data\n :return: \n \"\"\"\n self.data = data\n\n def set_next(self, node):\n \"\"\"\n Modify the reference to the next node\n :param node: The next node to reference\n :return: \n \"\"\"\n self.next = node\n\n\nclass LinkedList(object):\n \"\"\"\n Create a empty linked list. Linked list is built from a set of nodes, each of which is linked to the next node by an explicit reference. 
\n As long as we know where to find the first node (including the first item), each subsequent item can be found by \n following the next link in succession.\n head: Linked list of head references \n \"\"\"\n def __init__(self):\n self.head = None\n\n def is_empty(self):\n \"\"\"\n Test stack is empty\n :return: False or True\n \"\"\"\n return self.head == None\n\n def add(self, item):\n \"\"\"\n Add a node to the linked list\n :param item: The elements to add to the linked list \n :return: \n \"\"\"\n temp = Node(item)\n temp.set_next(self.head)\n self.head = temp\n\n def size(self):\n \"\"\"\n Returns the number of nodes in the linked list\n :return: int\n \"\"\"\n current = self.head\n count = 0\n while current != None:\n count += 1\n current = current.get_next()\n return count\n\n def search(self, item):\n \"\"\"\n Finding the specified node in the linked list\n :param item: The node to find\n :return: Node\n \"\"\"\n current = self.head\n while current!=None:\n if current.data == item:\n break\n else:\n current = current.get_next()\n return current\n\n def remove(self, item):\n \"\"\"\n Removing the specified node in the linked list\n :param item: The node to remove\n :return: True or False\n \"\"\"\n current = self.head\n previous = None\n found = False\n while current!=None:\n if current.data == item:\n found = True\n if previous == None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n self.head = previous\n break\n else:\n current = current.get_next()\n previous = current\n return found\n\n\nclass OrderedList(object):\n \"\"\"\n Create a ordered linked list\n \"\"\"\n def __init__(self):\n pass\n\n\nclass BinaryTree(object):\n \"\"\"\n Create a binary tree object with root node and left and right subtrees \n root: The root object of the tree, can be a reference to any object\n left_child: The reference to the tree's left subtree\n right_child: The reference to the tree's right subtree\n \"\"\"\n def __init__(self, root):\n self.root = root\n self.left_child = None\n self.right_child = None\n\n def insert_left_child(self, node):\n \"\"\"\n Insert a left subtree to the tree\n :param node: The reference to the tree\n :return: \n \"\"\"\n if self.left_child == None:\n self.left_child = BinaryTree(node)\n else:\n temp = BinaryTree(node)\n temp.left_child = self.left_child\n self.left_child = temp\n\n def insert_right_child(self, node):\n \"\"\"\n Insert a right subtree to the tree\n :param node: The reference to the tree\n :return: \n \"\"\"\n if self.right_child == None:\n self.right_child = BinaryTree(node)\n else:\n temp = BinaryTree(node)\n temp.right_child = self.right_child\n self.right_child = temp\n\n def get_right_child(self):\n \"\"\"\n Return the reference of right subtree\n :return: any object\n \"\"\"\n return self.right_child\n\n def get_left_child(self):\n \"\"\"\n Return the reference of left subtree\n :return: Object\n \"\"\"\n return self.left_child\n\n def get_root_val(self):\n \"\"\"\n Return the root object\n :return: Object\n \"\"\"\n return self.root\n\n def set_root_val(self, root_obj):\n \"\"\"\n Modify the root object\n :param root_obj: Modified object\n :return: \n \"\"\"\n self.root = root_obj\n\n\nclass Vertex(object):\n \"\"\"\n Create a vertex of graph, each vertex has an id and a dictionary that tracks the vertices it connects to and the \n weight of each edge\n id: The vertex's id\n connected_to: Trace the dictionary of connections and weights\n \"\"\"\n def __str__(self):\n return str(self.id) + ' connectedTo: ' + 
str([x.id for x in self.connected_to])\n\n def __init__(self, id):\n self.id = id\n self.connected_to = {}\n\n def add_nbr(self, nbr, weight=0):\n \"\"\"\n Add a connection to another vertex\n :param nbr: The vertex to connect\n :param weight: The weight of edge\n :return: \n \"\"\"\n self.connected_to[nbr] = weight\n\n def get_connections(self):\n \"\"\"\n Return all vertices in the adjacency table\n :return: list\n \"\"\"\n return self.connected_to.keys()\n\n def get_id(self):\n \"\"\"\n Return the vertex's id\n :return: Uncertain type\n \"\"\"\n return self.id\n\n def get_weight(self, nbr):\n \"\"\"\n Return the weight of the edge to another vertex\n :param nbr: The specified vertex\n :return: int\n \"\"\"\n return self.connected_to[nbr]\n\n\nclass Graph(object):\n \"\"\"\n The list to save vertices\n vtx_list: The list to storage all vertexs\n num_vtx: The number of vertexs in the list\n \"\"\"\n def __init__(self):\n self.vtx_list = {}\n self.num_vtx = 0\n\n def __iter__(self):\n return iter(self.vtx_list.values())\n\n def __contains__(self, n):\n return n in self.vtx_list\n\n def add_vtx(self, key):\n \"\"\"\n Add a vertex to the graph\n :param key: The added vertex id\n :return: \n \"\"\"\n self.num_vtx += 1\n new_vtx = Vertex(key)\n self.vtx_list[key] = new_vtx\n\n def get_vtx(self, key):\n \"\"\"\n Return the specified vertex reference\n :param key: The specified vertex id\n :return: Vertex or None\n \"\"\"\n if key in self.vtx_list:\n return self.vtx_list[key]\n else:\n return None\n\n def add_edge(self, s, e, cost=0):\n \"\"\"\n Add an edge to the graph\n :param s: The start vertex of edge\n :param e: The end vertex of edge\n :param cost: The edge's weight\n :return: \n \"\"\"\n if s not in self.vtx_list:\n self.add_vtx(s)\n if e not in self.vtx_list:\n self.add_vtx(e)\n self.vtx_list[s].add_nbr(self.vtx_list[e], cost)\n\n def get_vtxs(self):\n \"\"\"\n Return all vertexs from the graph\n :return: list of Vertex\n \"\"\"\n return self.vtx_list.keys()\n\n\n# Binary Heap\n\"\"\"\n The heap looks like a tree, but when we implement it, we use only a single list as an internal representation. The \n binary heap has two common variants: the smallest heap (where the smallest key is always in front) and the largest \n heap (where the largest key is always in front)\n \n In order for our heap to work effectively, we will use the logarithmic nature of the binary tree to represent our \n heap. In order to ensure logarithmic performance, we must keep the balance of the tree. The balanced binary tree \n has roughly the same number of nodes in the left and right subtrees of the root. In our heap implementation, we \n maintain the balance of the tree by creating a complete binary tree. A complete binary tree is a tree, where each \n layer has all its nodes, except for the lowest level of the tree, filled from left to right.Another interesting \n property of a complete binary tree is that we can use a single list to represent it. We don't need to use nodes \n and references, or even lists of lists. Because the tree is complete, the parent's left child (at position p) is \n the node found in position 2p in the list. 
Similarly, the position of the right child of the parent node in the \n list 2p + 1\n \n The heap's sorting properties are as follows: In the heap, for each node x with a parent p, the key in p is less \n than or equal to the key in x\n\"\"\"\n# Smallest Heap\n\n\nclass SmallestHeap(object):\n \"\"\" \n Smallest Heap: where the smallest key is always in front\n heap_list: The entire binary heap is represented by a single list. An empty binary heap has a single zero. This zero \n is used for simple integer division later.\n size: Binary heap size\n \"\"\"\n def __init__(self):\n self.heap_list = [0]\n self.size = 0\n\n def insert(self, k):\n \"\"\"\n insert a value to binary heap\n :param k: the specified value\n :return: \n \"\"\"\n self.size += 1\n self.heap_list.append(k)\n i = self.size\n while i//2:\n if self.heap_list[i] < self.heap_list[i//2]:\n temp = self.heap_list[i//2]\n self.heap_list[i//2] = self.heap_list[i]\n self.heap_list[i//2] = temp\n i = i//2\n\n def find_min(self, i):\n \"\"\"\n Find the smallest subtree position of specified position \n :param i: specified position\n :return: int(smallest subtree position)\n \"\"\"\n if i * 2 + 1 > self.size:\n return i * 2 + 1\n else:\n if self.heap_list[i * 2] > self.heap_list[i * 2 + 1]:\n return i * 2 + 1\n else:\n return i * 2\n\n def del_min(self):\n \"\"\"\n Del the smallest value in the binary heap, fill the last value to the root after del the smallest value to keep \n our heap structure. Then restore heap order by pushing the new root node down the tree to its correct position. \n :return: None\n \"\"\"\n tail = self.heap_list.pop()\n self.size -= 1\n self.heap_list[1] = tail\n i = 1\n while i*2 <= self.size:\n min_pos = self.find_min(i)\n if self.heap_list[min_pos] < self.heap_list[i]:\n temp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[min_pos]\n self.heap_list[min_pos] = temp\n i = min_pos\n\n def is_empty(self):\n \"\"\"\n Return the heap is empty\n :return: False or True \n \"\"\"\n return self.heap_list == [0]\n\n def size(self):\n \"\"\"\n Return size of heap\n :return: int\n \"\"\"\n return self.size\n\n def build_list(self, new_list):\n \"\"\"\n Build a smallest heap from specified list\n :param new_list: specified list\n :return: \n \"\"\"\n list_len = len(new_list)\n self.size = list_len\n i = list_len // 2\n self.heap_list = [0] + new_list\n while i > 0:\n min_pos = self.find_min(i)\n if self.heap_list[min_pos] < self.heap_list[i]:\n temp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[min_pos]\n self.heap_list[min_pos] = temp\n i = i - 1\n\n\n# Largest Heap\n\n\nclass LargestHeap(object):\n \"\"\" \n Largest Heap: where the largest key is always in front\n heap_list: The entire binary heap is represented by a single list. An empty binary heap has a single zero. 
This zero \n is used for simple integer division later.\n size: Binary heap size\n \"\"\"\n def __init__(self):\n self.heap_list = []\n self.size = 0\n\n def insert(self, k):\n \"\"\"\n insert a value to binary heap\n :param k: the specified value\n :return: \n \"\"\"\n self.size += 1\n self.heap_list.append(k)\n i = self.size\n if i == 1:\n pass\n else:\n while i//2:\n if self.heap_list[i] > self.heap_list[i//2]:\n temp = self.heap_list[i//2]\n self.heap_list[i//2] = self.heap_list[i]\n self.heap_list[i//2] = temp\n i = i//2\n\n def find_max(self, i):\n \"\"\"\n Find the largest subtree position of specified position \n :param i: specified position\n :return: int(smallest subtree position)\n \"\"\"\n if i * 2 + 1 > self.size:\n return i * 2 + 1\n else:\n if self.heap_list[i * 2] < self.heap_list[i * 2 + 1]:\n return i * 2 + 1\n else:\n return i * 2\n\n def del_max(self):\n \"\"\"\n Del the largest value in the binary heap, fill the last value to the root after del the largest value to keep \n our heap structure. Then restore heap order by pushing the new root node down the tree to its correct position. \n :return: None\n \"\"\"\n tail = self.heap_list.pop()\n self.size -= 1\n self.heap_list[1] = tail\n i = 1\n while i*2 <= self.size:\n max_pos = self.find_max(i)\n if self.heap_list[max_pos] > self.heap_list[i]:\n temp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_pos]\n self.heap_list[max_pos] = temp\n i = max_pos\n\n def is_empty(self):\n \"\"\"\n Return the heap is empty\n :return: False or True \n \"\"\"\n return self.heap_list == [0]\n\n def size(self):\n \"\"\"\n Return size of heap\n :return: int\n \"\"\"\n return self.size\n\n def build_list(self, new_list):\n \"\"\"\n Build a largest heap from specified list\n :param new_list: specified list\n :return: \n \"\"\"\n list_len = len(new_list)\n self.size = list_len\n i = list_len // 2\n self.heap_list = [0] + new_list\n while i > 0:\n max_pos = self.find_max(i)\n if self.heap_list[max_pos] > self.heap_list[i]:\n temp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_pos]\n self.heap_list[max_pos] = temp\n i = i - 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
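The hand-rolled binary heaps above follow the same ordering property as Python's standard library heap, which makes a handy cross-check. A short self-contained sketch using `heapq` (a min-heap over a plain list, matching the SmallestHeap semantics):

```python
import heapq

nums = [9, 5, 6, 2, 3]
heap = list(nums)
heapq.heapify(heap)                    # O(n) build; the smallest item ends up at heap[0]
assert heap[0] == min(nums)

drained = [heapq.heappop(heap) for _ in range(len(nums))]
assert drained == sorted(nums)         # popping repeatedly yields ascending order
```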
{
"alpha_fraction": 0.617361843585968,
"alphanum_fraction": 0.6297464370727539,
"avg_line_length": 32.42578125,
"blob_id": "6552e9905ef303c20cebb272d9eba8232c6eb8dd",
"content_id": "c1bfd9cc24803b1ac52841b68a1c564875a7c26b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8693,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 256,
"path": "/algorithm/sorting.py",
"repo_name": "DKHEllO/pydatature",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This file is about sorting and searching algorithms. The content of this document is mainly to explain and implement\n# sequential search and binary search, selection sorting, bubble sorting, merge sorting, quick sorting, insert sorting,\n# and shell sorting.And explain the idea of hashing as a search technology. Using hash to implement Maps abstract data\n# type\n\n# binary search\ndef bin_search(find_list, num):\n \"\"\"\n find a number in sorted list\n :param find_list: the found list\n :param num: the found number\n :return: bool(is it found),int(position)\n \"\"\"\n start = 0\n end = len(find_list) - 1\n position = 0\n found = False\n while not found and start <= end:\n mid = (end + start) // 2\n if num < find_list[mid]:\n end = mid - 1\n elif num > find_list[mid]:\n start = mid + 1\n else:\n position = mid\n found = True\n return found, position\n\n# Hash searching\n\n# Hash table\n# Hash tables are collections of items that are stored in a way that makes it easy to find them. Each position of the\n# hash table, usually called a slot, can hold an item and is named by an integer value starting at 0.The mapping between\n# the item and the slot to which the item belongs in the hash table is called the hash function. The hash function will\n# receive any item in the collection and return an integer within the slot name range (between 0 and m-1).\n\n# Hash function\n# Given a set of items, the hash function that maps each item to a unique slot is called a perfect hash function.\n\"\"\"\nGroup summation: Divide the items into equal-sized blocks (the last block may not be equal in size).Then add these blocks \ntogether to find the hash value. \nFor example:\n if our item is a telephone number 436-555-4601, we will take the numbers and divide them into 2 digits (43, 65, 55, 46, 01). \n 43 + 65 + 55 + 46 + 01, we get 210. We assume that the hash table has 11 slots, then we need to divide by 11. In this case, \n 210% 11 is 1, so the phone number 436-555-4601 is hashed to slot 1.\n\"\"\"\n\n\"\"\"\nSquare to take the middle number(平方取中法):We first squared the item and then extracted a part of the digital result\nFor example:\n If the item is 44, we will first calculate 44^2 = 1,936. By extracting the middle two numbers 93, we get 5 (93% 11)\n\"\"\"\n\n# Conflict resolution\n# We are now back to the problem of collision. When two items are hashed to the same slot, we must have a systematic way\n# to put the second item in the hash table. This process is called conflict resolution.\n\n# rehash:Finding another slot after a conflict\n\"\"\"\nOpen addressing:\nStart with the original hash position and then move the slots sequentially until you encounter the first empty slot. \nNote that we may need to go back to the first slot (loop) to find the entire hash table. This conflict resolution \nprocess is called open addressing,By systematically visiting each slot at a time, we perform an open addressing \ntechnique called linear probing.\n\nThe disadvantage of linear detection is the tendency of aggregation, items are gathered in the table. This means that if\nmany collisions occur at the same hash value, multiple perimeter slots will be filled by linear probing. 
This will affect \nother items being inserted\n\nextended linear detection\nInstead of sequentially looking for the next open slot, skip the slots to more evenly distribute the items that have \ncaused the conflict\n\"\"\"\n\n\"\"\"\nZipper method:\nAn alternative to dealing with conflicting issues is to allow each slot to hold a reference to a collection (or chain) \nof items. Links allow many items to exist in the same place in the hash table. When a conflict occurs, the item remains \nin the correct slot in the hash table. As more items are hashed into the same location, the difficulty of searching \nitems in the collection increases\n\"\"\"\n\n\nclass Map(object):\n def __init__(self, size):\n self.size = size\n self.solt = [None] * self.size\n self.data = [None] * self.size\n\n def put(self, key, value):\n hash_value = self.hash_function(key)\n\n if self.solt[hash_value] == None:\n self.solt[hash_value] = key\n self.data[hash_value] = value\n else:\n if self.solt[hash_value] == key:\n self.data[hash_value] = value\n else:\n # rehash\n next_hash = self.rehash(hash_value)\n # rehash until find a None solt or equal key solt\n while self.solt[next_hash] != None and self.solt[next_hash] != key:\n next_hash = self.rehash(next_hash)\n # rpalce if key equal to key\n if self.solt[next_hash] == None:\n self.solt[next_hash] = key\n self.data[next_hash] = value\n else:\n self.data[next_hash] = value\n\n def get(self, key):\n hash_value = self.hash_function(key)\n found = False\n stop = False\n data = None\n if self.solt[hash_value] == key:\n data = self.data[hash_value]\n else:\n next_hash = self.rehash(hash_value)\n while not found and self.solt[next_hash] != None and not stop:\n if self.solt[next_hash] == key:\n found = True\n data = self.data[next_hash]\n else:\n next_hash = self.rehash(next_hash)\n if next_hash == hash_value:\n stop = True\n return data\n\n def __getitem__(self, item):\n return self.get(item)\n\n def __setitem__(self, key, value):\n self.put(key, value)\n\n def hash_function(self, key):\n return key % self.size\n\n def rehash(self, old_hash):\n return (old_hash + 1) % self.size\n\n# Loading factor\n# a = Number of items/Number of solts\n# The average number of times to find success\n# aver = (summary of all items number of times to find success)/number of items\n# The average number of times to find unsuccess\n# 计算查找不成功的次数就直接找关键字到第一个地址上关键字为空的距离即可,然后计算所有查找的和再处以项数(即所有可能的情况)\n\n# sorting\n\n\n# Bubble Sort\ndef bubble_sort(num_list):\n l_len = len(num_list) - 1\n for i in range(l_len):\n for j in range(l_len-i):\n if num_list[j] > num_list[j+1]:\n temp = num_list[j+1]\n num_list[j+1] = num_list[j]\n num_list[j] = temp\n return num_list\n\n\ndef short_bubble_sort(num_list):\n l_len = len(num_list) - 1\n for i in range(l_len):\n exchange = True\n for j in range(l_len-i):\n if num_list[j] > num_list[j+1]:\n exchange = False\n temp = num_list[j+1]\n num_list[j+1] = num_list[j]\n num_list[j] = temp\n if exchange:\n break\n return num_list\n\n# select sort\n\n\ndef select_sort(num_list):\n l_len = len(num_list)-1\n for i in range(l_len+1):\n max_pos = 0\n for j in range(l_len-i+1):\n if num_list[j] > num_list[max_pos]:\n max_pos = j\n temp = num_list[l_len-i]\n num_list[l_len-i] = num_list[max_pos]\n num_list[max_pos] = temp\n return num_list\n\n\n\n# insert sort\n\n\ndef insert_sort(num_list):\n for index in range(1, len(num_list)):\n current_value = num_list[index]\n position = index\n\n while position > 0 and num_list[position - 1] > current_value:\n num_list[position] = 
num_list[position - 1]\n position = position - 1\n\n num_list[position] = current_value\n\n\n\n# quick sort\n\n\ndef quickSort(alist):\n quickSortHelper(alist,0,len(alist)-1)\n\ndef quickSortHelper(alist,first,last):\n if first<last:\n\n splitpoint = partition(alist,first,last)\n\n quickSortHelper(alist,first,splitpoint-1)\n quickSortHelper(alist,splitpoint+1,last)\n\n\ndef partition(alist,first,last):\n pivotvalue = alist[first]\n\n leftmark = first+1\n rightmark = last\n\n done = False\n while not done:\n\n while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\n leftmark = leftmark + 1\n\n while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\n rightmark = rightmark -1\n\n if rightmark < leftmark:\n done = True\n else:\n temp = alist[leftmark]\n alist[leftmark] = alist[rightmark]\n alist[rightmark] = temp\n\n temp = alist[first]\n alist[first] = alist[rightmark]\n alist[rightmark] = temp\n\n\n return rightmark\n\n\n"
},
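The load-factor notes in sorting.py above define the average probe count for an unsuccessful search under linear probing. A small self-contained sketch of that calculation (the function name and the `None`-means-empty convention are illustrative; the table must contain at least one empty slot or the probe loop would never stop):

```python
def avg_unsuccessful_probes(slots):
    """Average probes to reach an empty slot, starting from every slot in turn."""
    n = len(slots)
    total = 0
    for start in range(n):
        probes = 1
        j = start
        while slots[j] is not None:    # keep probing (with wrap-around) until an empty slot
            probes += 1
            j = (j + 1) % n
        total += probes
    return total / n

# 11-slot table, key % 11 hashing, two slots left empty
table = [None, 1, 23, 12, None, 16, 5, 6, 7, 8, 9]
print(avg_unsuccessful_probes(table))
```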
{
"alpha_fraction": 0.8292682766914368,
"alphanum_fraction": 0.8292682766914368,
"avg_line_length": 19.5,
"blob_id": "054bae8f3cfb67c26ee89ca376cdf43473c10f54",
"content_id": "ed1a9ff91b52197760085d7b473063c93acb3c5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/README.md",
"repo_name": "DKHEllO/pydatature",
"src_encoding": "UTF-8",
"text": "# pydatature\nPython basic data structure\n"
},
{
"alpha_fraction": 0.5705925226211548,
"alphanum_fraction": 0.5713240504264832,
"avg_line_length": 22.586206436157227,
"blob_id": "91ff1e73a4f90e75272df768207cdd9149eabae9",
"content_id": "5905d9a12310ba1f2f00a4dab93db23cd02a6d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 58,
"path": "/algorithm/tree.py",
"repo_name": "DKHEllO/pydatature",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Analysis Tree\n\nfrom data_structure import Stack, BinaryTree\n\n\ndef build_prase_tree(expression):\n ex_list = list(expression)\n stack = Stack()\n bin_tree = BinaryTree('root')\n for i in ex_list:\n if i == '(':\n stack.push(bin_tree)\n bin_tree.insert_left_child('-')\n bin_tree = bin_tree.left_child\n elif i in '+-*/':\n bin_tree = stack.pop()\n bin_tree.set_root_val(i)\n bin_tree.insert_right_child('')\n stack.push(bin_tree)\n bin_tree = bin_tree.right_child\n elif i == ')':\n bin_tree = stack.pop()\n elif i not in '+-*/':\n bin_tree.set_root_val(i)\n else:\n raise ValueError\n return bin_tree\n\n\n# Preorder traversal\n# root left right\n\ndef preorder_trav(tree):\n if tree:\n print tree.get_root_val()\n preorder_trav(tree.left_child)\n preorder_trav(tree.right_child)\n\n\n# In Order traversal\n# left root right\ndef order_trav(tree):\n if tree:\n order_trav(tree.left_child)\n print tree.get_root_val\n order_trav(tree.right_child)\n\n\n# Postorder traversal\n# left right root\ndef postorder_trav(tree):\n if tree:\n postorder_trav(tree.left_child)\n postorder_trav(tree.right_child)\n print tree.get_root_val"
}
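A parse tree like the one built above is usually consumed with a postorder-style evaluation: evaluate both subtrees first, then apply the operator at the root. A self-contained Python 3 sketch (its tiny `Node` class stands in for the repo's BinaryTree; names are illustrative):

```python
import operator

OPS = {'+': operator.add, '-': operator.sub,
       '*': operator.mul, '/': operator.truediv}

class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def evaluate(node):
    if node.left is None and node.right is None:
        return float(node.val)            # leaves hold operands
    # children first, then the operator at the root (postorder)
    return OPS[node.val](evaluate(node.left), evaluate(node.right))

tree = Node('*', Node('+', Node('1'), Node('2')), Node('3'))   # ((1+2)*3)
assert evaluate(tree) == 9.0
```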
] | 5 |
guillaume-philippon/raspi-test | https://github.com/guillaume-philippon/raspi-test | 02351d2b986f216d0b43615729bbc78137711a61 | bb560941ea9de4f31f5c0a684c9e88a90df3b519 | 3583d27833b85aba0f308ac35285f9162c41d2f0 | refs/heads/master | 2020-04-13T23:16:04.044337 | 2019-01-01T15:49:08 | 2019-01-01T15:49:08 | 163,501,898 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7621951103210449,
"alphanum_fraction": 0.7621951103210449,
"avg_line_length": 26.33333396911621,
"blob_id": "db857f374b39b950706d51c4f4a1d3d41e263b52",
"content_id": "2089f75c9b724a084e2ad38a5fb03c854577dff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 6,
"path": "/examples/gpio_monitor/cli/__init__.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis sub-module will manage Command Line Interface (CLI) for gpio_monitor.\n- launch arguments\n- output display\n\"\"\"\nfrom gpio_monitor.cli import config, display\n"
},
{
"alpha_fraction": 0.6267123222351074,
"alphanum_fraction": 0.6267123222351074,
"avg_line_length": 26.375,
"blob_id": "f157b52594a00febf3c2c0b70a705b8b329bdbe2",
"content_id": "bbf3a257f57dfee298a8fb6a85dc8d794c03322b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 876,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 32,
"path": "/examples/press_to_enlight/gpio.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will manage gpio.\n - listening for press button\n - enlight or switch off the LED\n\nWe will use gpiozero module (https://gpiozero.readthedocs.io/en/stable/) to manage it\n\"\"\"\nfrom signal import pause\nfrom gpiozero import LED, Button # pylint: disable=import-error\n\n\nclass GPIO(): # pylint: disable=too-few-public-methods\n \"\"\"\n This is a short module to monitor press button and swith on/off LED\n \"\"\"\n def __init__(self, config):\n \"\"\"\n Initialisation module\n :param config: Configuration file\n :return:\n \"\"\"\n self.led = LED(config['led'])\n self.button = Button(config['button'])\n\n def start(self):\n \"\"\"\n This method will just assign action to when_pressed and when_released Button class\n :return:\n \"\"\"\n self.button.when_pressed = self.led.toggle\n\n pause()\n"
},
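The class above wraps the standard gpiozero pattern of wiring a Button callback to an LED. Stripped to its essentials it looks like the sketch below (this only runs on a Raspberry Pi with gpiozero installed; the pin numbers are examples, not the project's defaults):

```python
from signal import pause
from gpiozero import LED, Button

led = LED(17)        # BCM pin numbering
button = Button(2)

button.when_pressed = led.toggle   # flip the LED on every press

pause()              # keep the process alive, waiting for events
```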
{
"alpha_fraction": 0.5886402726173401,
"alphanum_fraction": 0.5895008444786072,
"avg_line_length": 31.27777862548828,
"blob_id": "6e670e8d3d76d08119e347e004b073d702ba19c4",
"content_id": "6880d06161bc34c0a08283fd50707575220a228d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 36,
"path": "/examples/press_to_enlight/cli/config.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will manage Command Line Interface (CLI) for gpio-monitor.\nIt will parse argument and build a configuration reference for gpio-monitor.\n\nFor more information about argparse, see https://docs.python.org/3/library/argparse.html\n\"\"\"\nimport argparse\n\n\nclass Config: # pylint: disable=too-few-public-methods\n \"\"\"\n Config class will be use to store configuration give by user\n \"\"\"\n def __init__(self):\n \"\"\"\n initialise class\n \"\"\"\n parser = argparse.ArgumentParser(description='monitor some GPIO from Rasberry Pi')\n parser.add_argument('--led', metavar='led', type=int,\n help='led pin number')\n parser.add_argument('--button', metavar='button', type=int,\n help='button pin number')\n options = parser.parse_args()\n\n self.config = {\n 'led': options.led,\n 'button': options.button\n }\n\n def display(self):\n \"\"\"\n display current configuration\n \"\"\"\n print('Configuration items')\n for item in self.config:\n print('{}: {}'.format(item, self.config[item]))\n"
},
{
"alpha_fraction": 0.6083333492279053,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 17,
"blob_id": "f81c301d86c8300ac308dd6e6f5bf25744fa9fad",
"content_id": "f361dc343e53ccd065f5e5a09178d7ee160f7e11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 20,
"path": "/examples/press-to-enlight",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\n\"\"\"\nfrom press_to_enlight.cli import config as cfg\nfrom press_to_enlight.gpio import GPIO\n\n\ndef run():\n \"\"\"\n Define run function that will be started\n \"\"\"\n config = cfg.Config()\n print('Configuration {}'.format(config.display()))\n\n gpio = GPIO(config.config)\n gpio.start()\n\n\nif __name__ == '__main__':\n run()\n"
},
{
"alpha_fraction": 0.7114803791046143,
"alphanum_fraction": 0.7175226807594299,
"avg_line_length": 27.826086044311523,
"blob_id": "2cd90d3fdcf3da5fc41c5ebbcc83ec2a2bc08a62",
"content_id": "e471e680873d3ddc58d7d46c966f8439afdc96d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 23,
"path": "/README.md",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "# raspi-test\n\nSome few test example for rasberry pi model 3 B+\n\n## gpio-monitor\n\ngpio-monitor will monitor GPIO state. If you want to monitor your pin,\nyou need to start gpio-monitor before the real code as gpio-monitor need\nthe pin to be in GPIO.IN mode which can crash your code.\n\nTo install gpio_monitor\n```\npi@raspi:~ $ mkdir -p ~/.venv/gpio-monitor\npi@raspi:~ $ python3 -m venv ~/.venv/gpio-monitor\npi@raspi:~ $ source ~/.venv/gpio-monitor/bin/activate\n(gpio-monitor) pi@raspi:~ $ pip install -r requirements.txt\n```\n\nTo launch gpio-monitor\n```\n(gpio-monitor) pi@raspi:~ $./gpio-monitor channel1 channel2 ...\n(gpio-monitor) pi@raspi:~ $./gpio-monitor -h\n```"
},
{
"alpha_fraction": 0.7472527623176575,
"alphanum_fraction": 0.7472527623176575,
"avg_line_length": 21.75,
"blob_id": "77949a787bbf52ab51b3761180e7761e4712a322",
"content_id": "51d8b92ed44e7df4e4a30dc5ffa5217682b79f67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 4,
"path": "/examples/press_to_enlight/cli/__init__.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will manage argument @startup.\n\"\"\"\nfrom press_to_enlight.cli import config\n"
},
{
"alpha_fraction": 0.7289719581604004,
"alphanum_fraction": 0.7289719581604004,
"avg_line_length": 25.75,
"blob_id": "5b7acca33da2aa7ab657fa1b6098f4de4f652603",
"content_id": "1fed711622276204ebf16d7faae5cb1b5095a330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/examples/gpio_monitor/__init__.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module is a proof of concept to use GPIO from Rasberry Pi.\n\"\"\"\nfrom gpio_monitor import gpio, cli\n"
},
{
"alpha_fraction": 0.5893324613571167,
"alphanum_fraction": 0.5958769917488098,
"avg_line_length": 36.7283935546875,
"blob_id": "43fe90138ed9f4b541174e93573c57f947f64764",
"content_id": "5b1b1c06f3f655e141ba774d589e890c1f1df44e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3056,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 81,
"path": "/examples/gpio_monitor/cli/display.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will define Display class that will be used to\ndisplay output on screen.\n\nTo follow monitoring, we will use matplotlib w/ animation (see\nhttps://matplotlib.org/gallery/index.html)\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom gpio_monitor.gpio import GPIOS_LOCK, GPIOS_CURRENT_STATE, GPIOS_HISTORY\n\n\nclass Display():\n \"\"\"\n This class will create a matplotlib graph animated\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize display class. We will create\n - figure\n - axe through -0.02 and 1.02. In fact we want a axe from 0 to 1 but we add\n some padding. Perhaps there are some other way\n - a dict of line that will store plot for each channel\n \"\"\"\n figure, axe = plt.subplots()\n axe.set_ylim(-0.02, 1.02)\n self.figure = figure\n self.axe = axe\n self.line = dict()\n plt.legend()\n\n def draw(self):\n \"\"\"\n Will start animation and render the plot\n We create a plot dict from GPIOS_HISTORY *and* GPIOS_CURRENT_STATE.\n\n For those not very confident w/ python\n - self.line[channel], = means self.line[channel] will contains the value of\n the *one element tuple* return by self.axe.plot.\n\n Per example:\n >>foo = (1) # foo is a tuple w/ one element\n >>bar, = foo\n >>print(bar)\n 1\n\n For those not very confident w/ matplotlib\n animation.FuncAnimation must be store into a variable name, if not the figure\n will not be displayed.\n - First argument will be the figure itself\n - Second argument will be the callback function. callback function will be\n call w/ a argument which is the frame current number (see doc to add argument\n to callback function)\n - interval is the time (ms) between to redraw\n - blit tell animation to redraw only the graph, not the background\n \"\"\"\n with GPIOS_LOCK:\n for channel in GPIOS_CURRENT_STATE:\n self.line[channel], = self.axe.plot(GPIOS_HISTORY[channel], label=channel)\n self.axe.legend()\n image = animation.FuncAnimation(self.figure, # pylint: disable=unused-variable\n self.redraw,\n interval=1000,\n blit=True)\n plt.show()\n\n def redraw(self, frame): # pylint: disable=unused-argument\n \"\"\"\n Even if frame argument is not use, we must add it as it's automaticaly added\n by FuncAnimation\n :param frame: frame current value\n :return:\n \"\"\"\n output = ()\n with GPIOS_LOCK:\n for channel in GPIOS_CURRENT_STATE:\n GPIOS_HISTORY[channel].appendleft(GPIOS_CURRENT_STATE[channel])\n GPIOS_HISTORY[channel].pop()\n self.line[channel].set_ydata(GPIOS_HISTORY[channel])\n output = output + (self.line[channel], )\n return output\n"
},
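The Display class above leans on matplotlib's FuncAnimation: a callback mutates the line artists and returns the ones that changed, so blitting can redraw only them. The same idea in isolation (a generic animated sine wave, not the GPIO data):

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
line, = ax.plot(x, np.sin(x))       # one-element tuple unpacking, as in Display

def redraw(frame):
    line.set_ydata(np.sin(x + frame / 10))   # shift the wave a little each frame
    return (line,)                            # the artists to re-blit

anim = animation.FuncAnimation(fig, redraw, interval=50, blit=True)
plt.show()   # keep a reference to anim, or it is garbage-collected
```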
{
"alpha_fraction": 0.6988266110420227,
"alphanum_fraction": 0.7014341354370117,
"avg_line_length": 27.407407760620117,
"blob_id": "4570636039d4122db78b9eab5230c91e148393d6",
"content_id": "6b3699897f414c81c8014f6303ebb23dcefdc24c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 27,
"path": "/examples/gpio-monitor",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\ngpio-monitor is a program that will wait and display GPIO status when it change. The goal is\nlearning how to manipulate gpio through python3.\n\nWe will use some basic python module to have more versatility (as argparse) but we will embeded\nit on sub-module so it will not interfer with basic GPIO command.\n\"\"\"\nfrom gpio_monitor.cli import config as cfg, display as dsp\nfrom gpio_monitor.gpio import GPIOSMonitoring\n\n\ndef run():\n \"\"\"\n Define run function that will be started\n \"\"\"\n print('run gpio-monitor')\n config = cfg.Config()\n print('Configuration {}'.format(config.display()))\n\n channels = GPIOSMonitoring(config.config['gpio'])\n display = dsp.Display()\n display.draw()\n\n\nif __name__ == '__main__':\n run()\n"
},
{
"alpha_fraction": 0.5939204692840576,
"alphanum_fraction": 0.5954793691635132,
"avg_line_length": 31.897436141967773,
"blob_id": "2a363c00680f64812fcf46e9d965fabde573a6ab",
"content_id": "0d3c534dd94011fd3d1c23a007ba5eb228279aa5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 39,
"path": "/examples/gpio_monitor/cli/config.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will manage Command Line Interface (CLI) for gpio-monitor.\nIt will parse argument and build a configuration reference for gpio-monitor.\n\nFor more information about argparse, see https://docs.python.org/3/library/argparse.html\n\"\"\"\nimport argparse\n\n\nclass Config: # pylint: disable=too-few-public-methods\n \"\"\"\n Config class will be use to store configuration give by user\n \"\"\"\n def __init__(self):\n \"\"\"\n initialise class\n \"\"\"\n # We build argument parser\n # Usage: $0 GPIO ...\n parser = argparse.ArgumentParser(description='monitor some GPIO from Rasberry Pi')\n parser.add_argument('gpio', metavar='GPIO', type=int, nargs='+',\n help='list of GPIO we want monitor')\n options = parser.parse_args()\n\n # We build a dict of options. By default, there are only gpio but we can add some\n # option later\n self.config = {\n 'gpio': list()\n }\n for gpio in options.gpio:\n self.config['gpio'].append(gpio)\n\n def display(self):\n \"\"\"\n display current configuration\n \"\"\"\n for item in self.config:\n print('Configuration items')\n print('{}: {}'.format(item, self.config[item]))\n"
},
{
"alpha_fraction": 0.6341527700424194,
"alphanum_fraction": 0.6370605230331421,
"avg_line_length": 36.4554443359375,
"blob_id": "d986675822a24379d48180b7d7e11d1f0da7f12b",
"content_id": "f9c2e00b9b65cd29187a1907fcaa385f5f04391c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3783,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 101,
"path": "/examples/gpio_monitor/gpio.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module manage GPIO interaction.\n\nWe use RPi.GPIO (https://sourceforge.net/p/raspberry-gpio-python/wiki/Home/) instead\nof gpiozero (https://gpiozero.readthedocs.io/en/stable/) as we have low level access\n\"\"\"\nfrom collections import deque\nfrom threading import RLock\nimport RPi.GPIO as GPIO # pylint: disable=import-error\n\n\n# We define some global variable to share information\n# - GPIOS_LOCK: a thread locker\n# - GPIO_CURRENT_STATE: to store current state of GPIO\n# - GPIO_HISTORY: last \"WINDOW_SIZE\" GPIO state\n# - WINDOW_SIZE: number of event we will store\nGPIOS_LOCK = RLock()\nGPIOS_CURRENT_STATE = dict()\nGPIOS_HISTORY = dict()\nGPIOS_WINDOW_SIZE = 1000\n\n\nclass GPIOMonitoring():\n \"\"\"\n Gpio class will manage interaction between GPIO and the rest of the code\n \"\"\"\n def __init__(self, channel):\n \"\"\"\n To monitor GPIO, we need to setup GPIO in ingress mode (GPIO.IN), after\n that, it can be changed to egress (GPIO.OUT) but it can't be initialize\n at GPIO.OUT.\n\n We also use BCM notation (see: https://fr.pinout.xyz/pinout/pin29_gpio5)\n instead of physical notation as BCM is used by sysfs and it will be easiest\n to test code with commands:\n pi@raspi:~ $ cd /sys/class/gpio\n pi@raspi:/sys/class/gpio $ echo out > gpio*id*/direction\n pi@raspi:/sys/class/gpio $ echo 1 > gpio*id*/value\n pi@raspi:/sys/class/gpio $ sleep 20\n pi@raspi:/sys/class/gpio $ echo 0 > gpio*id*/value\n\n where *id* is the channe we monitor.\n \"\"\"\n self.channel = channel\n self.name = 'gpio{}'.format(self.channel)\n print('Initialize gpio {}'.format(self.channel))\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.channel, GPIO.IN)\n with GPIOS_LOCK:\n GPIOS_CURRENT_STATE[self.name] = self.state()\n GPIOS_HISTORY[self.name] = deque([GPIOS_CURRENT_STATE[self.name]] * GPIOS_WINDOW_SIZE)\n print('GPIOS_CURRENT_STATE: {}'.format(GPIOS_CURRENT_STATE))\n print('GPIOS_HISTORY: {}'.format(GPIOS_HISTORY))\n self.monitor()\n\n def __del__(self):\n \"\"\"\n Clear all GPIO monitoring\n \"\"\"\n print('Destroy gpio {}'.format(self.channel))\n GPIO.remove_event_detect(self.channel)\n GPIO.cleanup(self.channel)\n\n def monitor(self):\n \"\"\"\n Monitor will return the current value of channel when it will change.\n\n add_event_detect will create a thread to follow GPIO status modification,\n GPIO_BOTH means that RISING *and* FAILING will trig thread\n add_event_callback is (or are) function(s) called when a event is detected,\n the callback will be call with *channel* as argument\n \"\"\"\n print('Add event detection for gpio {}'.format(self.channel))\n GPIO.add_event_detect(self.channel, GPIO.BOTH)\n GPIO.add_event_callback(self.channel, self.state)\n\n def state(self, channel=None):\n \"\"\"\n We will modify GPIOS_CURRENT_STATE. As this variable will also be modify by\n callback thread we need to lock access before writing in it.\n :return: GPIO status\n \"\"\"\n with GPIOS_LOCK:\n GPIOS_CURRENT_STATE[self.name] = GPIO.input(self.channel)\n print('gpio {} state is {}'.format(self.channel, GPIO.input(self.channel)))\n return GPIO.input(self.channel)\n\n\nclass GPIOSMonitoring(): # pylint: disable=too-few-public-methods\n \"\"\"\n We store information about all GPIOS we want to monitor\n \"\"\"\n def __init__(self, channels):\n \"\"\"\n Initialize GPIOS\n \"\"\"\n print('Initialize GPIOS')\n self.channels = list()\n for channel in channels:\n gpio = GPIOMonitoring(channel)\n self.channels.append(gpio)\n"
},
{
"alpha_fraction": 0.7469879388809204,
"alphanum_fraction": 0.7469879388809204,
"avg_line_length": 19.75,
"blob_id": "aa546319e36bede66ccbf95ee47a2ab09428d852",
"content_id": "ac506c7d711e267671cfa7d8587b7de90b823c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/examples/press_to_enlight/__init__.py",
"repo_name": "guillaume-philippon/raspi-test",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis module will be use to test gpiozero with a simple electronic\nassembly\n\"\"\"\n"
}
] | 12 |
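For reference, the RPi.GPIO event machinery that gpio.py above relies on reduces to a few calls: configure the pin as input, register edge detection, then attach a callback that receives the channel number. A minimal sketch using only calls the embedded file itself uses (Raspberry Pi hardware required; pin 17 is just an example):

```python
import RPi.GPIO as GPIO
from signal import pause

GPIO.setmode(GPIO.BCM)          # BCM numbering, matching the sysfs gpio names
GPIO.setup(17, GPIO.IN)

def on_change(channel):
    print('gpio', channel, 'is now', GPIO.input(channel))

GPIO.add_event_detect(17, GPIO.BOTH)    # fire on both rising and falling edges
GPIO.add_event_callback(17, on_change)  # runs on RPi.GPIO's own callback thread

pause()                         # keep the process alive waiting for events
```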
MAPSWorks/pySlipQt | https://github.com/MAPSWorks/pySlipQt | df3682ece9ce1b7df849a5d3ceffb1e18b183a51 | 3c3c8f497916d311c976b2f23c125fd0fad852b4 | 77beba4991234879fd58d7b64a60467c84a785d5 | refs/heads/master | 2021-07-08T23:27:15.578637 | 2020-07-10T15:46:08 | 2020-07-10T15:46:08 | 155,961,211 | 0 | 0 | MIT | 2018-11-03T07:37:54 | 2020-03-28T22:03:21 | 2020-07-10T15:46:09 | Python | [
{
"alpha_fraction": 0.3264088034629822,
"alphanum_fraction": 0.390221506357193,
"avg_line_length": 35.76371383666992,
"blob_id": "8d1df379bab55bafc76a33c1632c9116c3e10f10",
"content_id": "0cd5ad17af7cd285d31bedcf969678b09cc43ff2",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8713,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 237,
"path": "/pySlipQt/examples/test_viewrel_point.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt view-relative points.\n\nUsage: test_viewrel_point.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test view-relative point placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (133.87, -23.7) # Alice Springs\n\nPointViewDataNW = [( 0, 0), ( 2, 0), ( 4, 0), ( 6, 0), ( 8, 0),\n (10, 0), ( 0, 2), ( 0, 4), ( 0, 6), ( 0, 8),\n ( 0,10), ( 2, 2), ( 4, 4), ( 6, 6), ( 8, 8),\n (10,10), (12,12), (14,14), (16,16), (18,18),\n (20,20)\n ]\n\nPointViewDataCN = [( 0, 0), ( -2, 2), ( -4, 4), ( -6, 6),\n ( -8, 8), (-10, 10), ( 2, 2), ( 4, 4),\n ( 6, 6), ( 8, 8), ( 10, 10), ( 0, 2),\n ( 0, 4), ( 0, 6), ( 0, 8), ( 0, 10),\n ( 0, 12), ( 0, 14), ( 0, 16), ( 0, 18),\n ( 0, 20)\n ]\n\nPointViewDataNE = [( 0, 0), ( -2, 0), ( -4, 0), ( -6, 0),\n ( -8, 0), (-10, 0), ( 0, 2), ( 0, 4),\n ( 0, 6), ( 0, 8), ( 0, 10), ( -2, 2),\n ( -4, 4), ( -6, 6), ( -8, 8), (-10, 10),\n (-12, 12), (-14, 14), (-16, 16), (-18, 18),\n (-20, 20)\n ]\n\nPointViewDataCE = [( 0, 0), ( -2, -2), ( -4, -4), ( -6, -6),\n ( -8, -8), (-10,-10), ( -2, 2), ( -4, 4),\n ( -6, 6), ( -8, 8), (-10, 10), ( -2, 0),\n ( -4, 0), ( -6, 0), ( -8, 0), (-10, 0),\n (-12, 0), (-14, 0), (-16, 0), (-18, 0),\n (-20, 0)\n ]\n\nPointViewDataSE = [( 0, 0), ( 0, -2), ( 0, -4), ( 0, -6),\n ( 0, -8), ( 0,-10), ( -2, 0), ( -4, 0),\n ( -6, 0), ( -8, 0), (-10, 0), ( -2, -2),\n ( -4, -4), ( -6, -6), ( -8, -8), (-10,-10),\n (-12,-12), (-14,-14), (-16,-16), (-18,-18),\n (-20,-20)\n ]\n\nPointViewDataCS = [( 0, 0), ( -2, -2), ( -4, -4), ( -6, -6),\n ( -8, -8), (-10,-10), ( 2, -2), ( 4, -4),\n ( 6, -6), ( 8, -8), ( 10,-10), ( 0, -2),\n ( 0, -4), ( 0, -6), ( 0, -8), ( 0,-10),\n ( 0,-12), ( 0,-14), ( 0,-16), ( 0,-18),\n ( 0,-20)\n ]\n\nPointViewDataSW = [( 0, 0), ( 0, -2), ( 0, -4), ( 0, -6),\n ( 0, -8), ( 0,-10), ( 2, 0), ( 4, 0),\n ( 6, 0), ( 8, 0), ( 10, 0), ( 2, -2),\n ( 4, -4), ( 6, -6), ( 8, -8), ( 10,-10),\n ( 12,-12), ( 14,-14), ( 16,-16), ( 18,-18),\n ( 20,-20)\n ]\n\nPointViewDataCW = [( 0, 0), ( 2, -2), ( 4, -4), ( 6, -6),\n ( 8, -8), ( 10,-10), ( 2, 2), ( 4, 4),\n ( 6, 6), ( 8, 8), ( 10, 10), ( 2, 0),\n ( 4, 0), ( 6, 0), ( 8, 0), ( 10, 0),\n ( 12, 0), ( 14, 0), ( 16, 0), ( 18, 0),\n ( 20, 0)\n ]\n\nPointViewDataCC = [( 0, 0), ( 2, -2), ( 4, -4), ( 6, -6),\n ( 8, -8), ( 10,-10),\n ( 0, 0), ( 2, 2), ( 4, 4), ( 6, 6),\n ( 8, 8), ( 10, 10),\n ( 0, 0), ( -2, -2), ( -4, -4), ( -6, -6),\n ( -8, -8), (-10,-10),\n ( 0, 0), ( -2, 2), ( -4, 4), ( -6, 6),\n ( -8, 8), (-10, 10),\n ]\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the 
demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # add test point layers\n self.pyslipqt.AddPointLayer(PointViewDataNW, placement='nw',\n map_rel=False, colour='blue', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataCN, placement='cn',\n map_rel=False, colour='red', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataNE, placement='ne',\n map_rel=False, colour='green', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataCE, placement='ce',\n map_rel=False, colour='black', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataSE, placement='se',\n map_rel=False, colour='yellow', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataCS, placement='cs',\n map_rel=False, colour='gray', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataSW, placement='sw',\n map_rel=False, colour='#7f7fff', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataCW, placement='cw',\n map_rel=False, colour='#ff7f7f', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n self.pyslipqt.AddPointLayer(PointViewDataCC, placement='cc',\n map_rel=False, colour='#7fff7f', radius=2,\n offset_x=0, offset_y=0,\n name='<point_map_layer>')\n\n # set initial view position\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n self.show()\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the app\nlog(DemoName)\ntile_dir = 'test_viewrel_point'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5520529747009277,
"alphanum_fraction": 0.5623738169670105,
"avg_line_length": 29.52739715576172,
"blob_id": "42ae2392ed4ad8159a719e8041c5dfb04bc73521",
"content_id": "f4cd2c9fbb08962d417fdc52ff3f30f69b424a2b",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8914,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 292,
"path": "/pySlipQt/examples/test_image_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nProgram to test image map-relative and view-relative placement.\nSelect which to show and experiment with placement parameters.\n\nUsage: test_image_placement.py [-h|--help] [-d] [(-t|--tiles) (GMT|OSM)]\n\"\"\"\n\n\nimport os\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,\n QGridLayout, QVBoxLayout, QHBoxLayout)\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n# set up logging\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\nfrom image_placement import ImagePlacementControl\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = 'Test image placement %s (pySlipQt %s)' % (DemoVersion, pySlipQt.__version__)\n\nDemoHeight = 800\nDemoWidth = 1000\n\n# initial values\nInitViewLevel = 4\nInitViewPosition = (145.0, -20.0)\n\n# tiles info\nTileDirectory = 'test_tiles'\nMinTileLevel = 0\n\n# the number of decimal places in a lon/lat display\nLonLatPrecision = 2\n\n# startup size of the application\nDefaultAppSize = (1000, 700)\n\n# initial values in map-relative LayerControl\nDefaultFilename = 'graphics/shipwreck.png'\nDefaultPlacement = 'ne'\nDefaultPointColour = 'red'\nDefaultPointRadius = 3\nDefaultX = 145.0\nDefaultY = -20.0\nDefaultOffsetX = 0\nDefaultOffsetY = 0\n\n# initial values in view-relative LayerControl\nDefaultViewFilename = 'graphics/compass_rose.png'\nDefaultViewPlacement = 'ne'\nDefaultPointColour = 'red'\nDefaultPointRadius = 0\nDefaultViewX = 0\nDefaultViewY = 0\nDefaultViewOffsetX = 0\nDefaultViewOffsetY = 0\n\n\n\n################################################################################\n# The main application window.\n################################################################################\n\nclass TestImagePlacement(QMainWindow):\n\n def __init__(self, tile_dir=TileDirectory):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # variables for layer IDs\n self.image_map_layer = None\n self.image_view_layer = None\n\n # build the GUI\n grid = QGridLayout()\n grid.setColumnStretch(0, 1)\n grid.setContentsMargins(2, 2, 2, 2)\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n # build the 'controls' part of GUI\n num_rows = self.make_gui_controls(grid)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt, 0, 0, num_rows + 1, 1)\n grid.setRowStretch(num_rows, 1)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # tie events from controls to handlers\n self.map_image.remove.connect(self.remove_image_map)\n self.map_image.change.connect(self.change_image_map)\n\n self.view_image.remove.connect(self.remove_image_view)\n self.view_image.change.connect(self.change_image_view)\n\n self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.handle_level_change)\n self.pyslipqt.events.EVT_PYSLIPQT_POSITION.connect(self.handle_position_event)\n\n self.show()\n\n # set initial view position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n def make_gui_controls(self, grid):\n \"\"\"Build the controls in the right side of the grid.\"\"\"\n\n # the 'grid_row' variable is row to add into\n grid_row = 0\n\n # put level and position into grid at top right\n self.map_level = 
DisplayText(title='', label='Level:',\n tooltip=None)\n grid.addWidget(self.map_level, grid_row, 1, 1, 1)\n self.mouse_position = DisplayText(title='',\n label='Lon/Lat:', text_width=100,\n tooltip='Shows the mouse longitude and latitude on the map',)\n grid.addWidget(self.mouse_position, grid_row, 2, 1, 1)\n grid_row += 1\n\n # now add the two image control widgets to right part of grid\n self.map_image = ImagePlacementControl('Map-relative Image')\n grid.addWidget(self.map_image, grid_row, 1, 1, 2)\n grid_row += 1\n\n self.view_image = ImagePlacementControl('View-relative Image')\n grid.addWidget(self.view_image, grid_row, 1, 1, 2)\n grid_row += 1\n\n return grid_row\n\n ######\n # event handlers\n ######\n\n##### map-relative image layer\n\n def change_image_map(self, image, placement, radius, colour,\n x, y, off_x, off_y):\n \"\"\"Display updated image.\"\"\"\n\n # remove any previous layer\n if self.image_map_layer:\n self.remove_image_map()\n\n # create the new layer\n image_data = [(x, y, image, {'placement': placement,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': off_x,\n 'offset_y': off_y})]\n self.image_map_layer = self.pyslipqt.AddImageLayer(image_data,\n map_rel=True,\n visible=True,\n name='<image_layer>')\n\n def remove_image_map(self):\n \"\"\"Delete the image map-relative layer.\"\"\"\n\n if self.image_map_layer:\n self.pyslipqt.DeleteLayer(self.image_map_layer)\n self.image_map_layer = None\n\n##### view-relative image layer\n\n def change_image_view(self, image, placement, radius, colour,\n x, y, off_x, off_y):\n \"\"\"Display updated image.\"\"\"\n\n if self.image_view_layer:\n self.remove_image_view()\n\n # create a new image layer\n image_data = [(x, y, image, {'placement': placement,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': off_x,\n 'offset_y': off_y})]\n self.image_view_layer = self.pyslipqt.AddImageLayer(image_data,\n map_rel=False,\n visible=True,\n name='<image_layer>')\n\n def remove_image_view(self):\n \"\"\"Delete the image view-relative layer.\"\"\"\n\n if self.image_view_layer:\n self.pyslipqt.DeleteLayer(self.image_view_layer)\n self.image_view_layer = None\n\n ######\n # Exception handlers\n ######\n\n def handle_position_event(self, event):\n \"\"\"Handle a pySlipQt POSITION event.\"\"\"\n\n posn_str = ''\n if event.mposn:\n (lon, lat) = event.mposn\n posn_str = ('%.*f / %.*f' % (LonLatPrecision, lon,\n LonLatPrecision, lat))\n\n self.mouse_position.set_text(posn_str)\n\n def handle_level_change(self, event):\n \"\"\"Handle a pySlipQt LEVEL event.\"\"\"\n\n self.map_level.set_text('%d' % event.level)\n\n###############################################################################\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n log(msg)\n print(msg)\n# tkinter_error.tkinter_error(msg)\n sys.exit(1)\n\ndef usage(msg=None):\n if msg:\n print(('*'*80 + '\\n%s\\n' + '*'*80) % msg)\n print(__doc__)\n\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# analyse the command line args\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'dht:', ['debug', 'help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_dir = 'test_tiles'\ntile_source = 'GMT'\ndebug = False\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ['-d', '--debug']:\n debug = True\n elif opt in 
('-t', '--tiles'):\n        tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n    import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n    import pySlipQt.open_street_map as Tiles\nelse:\n    usage('Bad tile source: %s' % tile_source)\n    sys.exit(3)\n\n# start the app\napp = QApplication(args)\nex = TestImagePlacement(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5194156169891357,
"alphanum_fraction": 0.5336409211158752,
"avg_line_length": 24.242717742919922,
"blob_id": "064d87c2030180694ea52719bc99ba434ce2ef13",
"content_id": "4c18d0a02bbec50bd0e194978297bfba326ce8c0",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2601,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 103,
"path": "/pySlipQt/examples/graphics/img2py.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\nA little program to take an image file (PNG, JPG, etc) and convert\nit to python data. Does the same thing as the wxPython tool \"img2py\"\nwhich, as of 9 Feb 2018, is really hard to use.\n\nUsage: img2py [-n <name>] <input_file> <output_file>\n\nwhere -n <name> sets the internal name for the image\n\"\"\"\n\nimport sys\nimport os\n\n\nLineLength = 60\n\n\ndef img2py(name, in_file, out_file):\n \"\"\"Convert 'in_file' to python data in 'out_file'.\n\n where name is the internal procedure name\n in_file is the path to the input file\n out_file is the path to the output file\n \"\"\"\n\n header = ['import base64',\n '',\n f'def get_{name}_image():',\n f' \"\"\"Generate \\'{name}\\' image from embedded data.\"\"\"',\n '',\n ' return base64.b64decode(',\n '',\n ]\n\n # make sure the input file *is* there\n if not os.path.exists(in_file):\n print(f\"The input file '{in_file}' doesn't exist.\")\n sys.exit(1)\n\n # make sure the output file isn't there\n if os.path.exists(out_file):\n print(f\"The output file '{out_file}' exists. Delete it before rerunning.\")\n sys.exit(1)\n\n # put raw base64 data in the output file temporarily\n os.system(f'base64 -i {in_file} -o {out_file}')\n\n # read the 'out_file' base64 data string\n with open(out_file, 'r') as fd:\n data = fd.read().strip() # get rid of trailing newline\n\n # generate python code and overwrite 'out_file'\n with open(out_file, 'w') as fd:\n fd.write('\\n'.join(header))\n\n while data:\n l_data = data[:LineLength]\n data = data[LineLength:]\n\n fd.write(f' \"{l_data}\"')\n if data:\n fd.write('\\n')\n\n fd.write(')\\n')\n\n\nif __name__ == '__main__':\n import getopt\n import traceback\n\n # to help the befuddled user\n def usage(msg=None):\n if msg:\n print(('*'*80 + '\\n%s\\n' + '*'*80) % msg)\n print(__doc__)\n\n # parse the CLI params\n argv = sys.argv[1:]\n\n try:\n (opts, args) = getopt.getopt(argv, 'hn:', ['help', 'name='])\n except getopt.GetoptError as err:\n usage(err)\n sys.exit(1)\n\n if len(args) != 2:\n usage()\n sys.exit(1)\n\n name = 'Name'\n\n for (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ['-n', '--name']:\n name = param\n\n # run the program code\n result = img2py(name, args[0], args[1])\n sys.exit(result)\n\n"
},
{
"alpha_fraction": 0.4286591410636902,
"alphanum_fraction": 0.44449901580810547,
"avg_line_length": 29.616540908813477,
"blob_id": "3e0959ac548a40cab04c05fc2db1a40156dac604",
"content_id": "721ba693c8420ee0b85380b5f3603d7fedccbe13",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8144,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 266,
"path": "/pySlipQt/examples/test_assumptions.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"\nTest PySlipQt assumptions.\n\nWe make some assumptions in pySlipQt about relative speeds\nof various operations. Make sure those assumptions hold.\n\"\"\"\n\n\nimport os\nimport time\nimport unittest\n\n\nclass TestAssumptions(unittest.TestCase):\n\n def test_copy_list(self):\n \"\"\"Check 'l_poly = list(poly)' gets us a new list.\n\n At a few places in pySlipQt we need a copy of a list, not the original.\n We do this by:\n l_poly = list(poly)\n new_poly = l_poly[:]\n Is the final ...[:] required?\n \"\"\"\n\n # try to make copy without [:]\n old_list = [1, 2, 3, 4]\n old_list_id = id(old_list)\n new_list = list(old_list)\n new_list_id = id(new_list)\n\n # make sure we DO have a copy\n msg = (\"'new_list = list(old_list)' DOESN'T give us a copy?\\n\"\n \"id(old_list)=%d, id(new_list)=%d\"\n % (old_list_id, new_list_id))\n self.assertTrue(old_list_id != new_list_id, msg)\n\n def test_copy(self):\n \"\"\"Test 'new_list = old_list[:]' does give us a copy.\n\n At a few places in pySlipQt we need a copy of a list, not the original.\n We do this by:\n new_poly = l_poly[:]\n \"\"\"\n\n # try to make a copy with [:]\n old_list = [1, 2, 3, 4]\n old_list_id = id(old_list)\n new_list = old_list[:]\n new_list_id = id(new_list)\n\n msg = (\"'new_list = old_list[:]' DOESN'T give us a copy?\\n\"\n \"id(old_list)=%d, id(new_list)=%d\"\n % (old_list_id, new_list_id))\n self.assertTrue(old_list_id != new_list_id, msg)\n\n def test_copy2(self):\n \"\"\"Check 'list(poly)' is faster than 'poly[:]'.\n\n At a few places in pySlipQt we need a copy of a list and we do:\n new_poly = list(poly)\n Is this faster than:\n new_poly = poly[:]\n \"\"\"\n\n loops = 100000\n\n # create the old list\n old_list = [1, 2, 3, 4, 5, 6]\n\n # time list() approach\n start = time.time()\n for _ in range(loops):\n new_list = list(old_list)\n list_delta = time.time() - start\n\n # time copy approach\n start = time.time()\n for _ in range(loops):\n new_list = old_list[:]\n copy_delta = time.time() - start\n\n msg = (\"'old_list[:]' is SLOWER than 'list(old_list)'?\\n\"\n \"old_list[:]=%.1f, list(old_list)=%.1f \"\n \"(list() is %.2f times faster)\"\n % (list_delta, copy_delta,\n (copy_delta/list_delta)))\n self.assertTrue(list_delta > copy_delta, msg)\n\n def test_dispatch_slower(self):\n \"\"\"Test that dispatch is faster than function if/elif/else.\n\n pySlipQt used to use code like this:\n x = ...\n y = ...\n test = {'ab': 'x+=1;y-=1',\n 'bc': 'x+=2;y+=3',\n ...\n }\n exec test['ab']\n\n Compare the above with something like:\n def test(x, y, place, x_off, y_off):\n if place == 'ab':\n x += 1\n y -= 1\n elif place == 'bc':\n ...\n\n return (x, y)\n\n x = ...\n y = ...\n (x, y) = test(x, y, place, x_off, y_off)\n\n The function approach (which we use) should be faster.\n \"\"\"\n\n LOOPS = 1000000\n\n # check exec timing\n test = {'cc': 'x+=x_off-w2; y+=y_off-h2',\n 'nw': 'x+=x_off; y+=y_off',\n 'cn': 'x+=x_off-w2; y+=y_off',\n 'ne': 'x+=x_off-w; y+=y_off',\n 'ce': 'x+=x_off-w; y+=y_off-h2',\n 'se': 'x+=x_off-w; y+=y_off-h',\n 'cs': 'x+=x_off-w2; y+=y_off-h',\n 'sw': 'x+=x_off; y+=y_off-h',\n 'cw': 'x+=x_off; y+=y_off-h2',\n None: '',\n False: '',\n '': ''}\n for key in test:\n test[key] = compile(test[key], 'string', 'exec')\n\n start = time.time()\n for _ in range(LOOPS):\n x = 0\n y = 0\n place = 'nw'\n x_off = 1\n y_off = 3\n w = 100\n w2 = w/2\n h = 100\n h2 = h/2\n exec(test[place])\n exec_delta = time.time() - start\n\n # now for function equivalent\n def test(x, y, place, w, h, x_off, 
y_off):\n            w2 = w/2\n            h2 = h/2\n            if place == 'cc':\n                x+=x_off-w2; y+=y_off-h2\n            elif place == 'nw':\n                x+=x_off; y+=y_off\n            elif place == 'cn':\n                x+=x_off-w2; y+=y_off\n            elif place == 'ne':\n                x+=x_off-w; y+=y_off\n            elif place == 'ce':\n                x+=x_off-w; y+=y_off-h2\n            elif place == 'se':\n                x+=x_off-w; y+=y_off-h\n            elif place == 'cs':\n                x+=x_off-w2; y+=y_off-h\n            elif place == 'sw':\n                x+=x_off; y+=y_off-h\n            elif place == 'cw':\n                x+=x_off; y+=y_off-h2\n            \n            return (x, y)\n\n        start = time.time()\n        for _ in range(LOOPS):\n            x = 0\n            y = 0\n            place = 'nw'\n            x_off = 1\n            y_off = 3\n            w = 100\n            h = 100\n            (x, y) = test(x, y, place, w, h, x_off, y_off)\n        func_delta = time.time() - start\n\n        msg = (\"Function if/else is slower than 'exec dispatch[i]'?\\n\"\n               \"exec=%.2fs, function=%.2fs (exec is %.1f times faster)\"\n               % (exec_delta, func_delta, func_delta/exec_delta))\n        self.assertTrue(exec_delta > func_delta, msg)\n\n    def test_copy_faster(self):\n        \"\"\"Test that a[:] copy is faster than copy.deepcopy(a).\"\"\"\n\n        import copy\n\n        loops = 100000\n\n        a = [1,2,3,4,5,6,7,8,9,0]    # fake a Z-order list\n\n        start = time.time()\n        for _ in range(loops):\n            b = copy.deepcopy(a)\n        copy_delta = time.time() - start\n\n        start = time.time()\n        for _ in range(loops):\n            b = a[:]\n        clone_delta = time.time() - start\n\n        msg = ('copy.deepcopy() is faster than clone[:]?\\n'\n               'copy=%.2fs, clone=%.2fs'\n               % (copy_delta, clone_delta))\n        self.assertTrue(clone_delta < copy_delta, msg)\n\n    def test_tuple_faster(self):\n        \"\"\"Test unpacking tuple is faster than data object attributes.\"\"\"\n\n        class DataObj(object):\n            def __init__(self, *args, **kwargs):\n                if len(args) > 0:\n                    msg = 'DataObj() must be called with keyword args ONLY!'\n                    raise RuntimeError(msg)\n\n                self.__dict__ = kwargs\n\n        tuple_obj = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n        data_obj = DataObj(one=1, two=2, three=3, four=4, five=5,\n                           six=6, seven=7, eight=8, nine=9, ten=10)\n\n        loops = 100000\n\n        # time tuple object\n        start = time.time()\n        for _ in range(loops):\n            (one, two, three, four, five, six, seven, eight, nine, ten) = tuple_obj\n        tuple_delta = time.time() - start\n\n        # time data object\n        start = time.time()\n        for _ in range(loops):\n            one = data_obj.one\n            two = data_obj.two\n            three = data_obj.three\n            four = data_obj.four\n            five = data_obj.five\n            six = data_obj.six\n            seven = data_obj.seven\n            eight = data_obj.eight\n            nine = data_obj.nine\n            ten = data_obj.ten\n        data_delta = time.time() - start\n\n        msg = ('Data object is faster than tuple?\\ndata=%.2fs, tuple=%.2fs'\n               % (data_delta, tuple_delta))\n        self.assertTrue(tuple_delta < data_delta, msg)\n\n################################################################################\n\nif __name__ == '__main__':\n    suite = unittest.makeSuite(TestAssumptions,'test')\n    runner = unittest.TextTestRunner()\n    runner.run(suite)\n"
},
{
"alpha_fraction": 0.5520058274269104,
"alphanum_fraction": 0.5676137208938599,
"avg_line_length": 34.812225341796875,
"blob_id": "751b0b19b91676d45285bef60d35d4faeab28403",
"content_id": "e594a08c6da995273b2987dfd75c009f192bc8dd",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8201,
"license_type": "permissive",
"max_line_length": 144,
"num_lines": 229,
"path": "/pySlipQt/examples/point_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe custom control for test_point_placement.py program.\n\"\"\"\n\nimport os\nimport sys\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import (QWidget, QGridLayout, QHBoxLayout, QGroupBox,\n QPushButton, QLabel, QComboBox, QLineEdit,\n QSizePolicy, QFileDialog, QColorDialog)\nfrom PyQt5.QtGui import QColor\n\n\n##################################\n# Custom PointPlacementControl widget.\n#\n# Constructor:\n#\n# ppc = PointPlacementControl('test title')\n#\n# Events:\n#\n# .change the contents were changed\n# .remove the image should be removed\n#\n# The '.change' event has attached attributes holding the values from the\n# widget, all checked so they are 'sane'.\n##################################\n\nclass PointPlacementControl(QWidget):\n \"\"\"\n Custom PointPlacementControl widget.\n\n Constructor:\n\n ipc = PointPlacementControl('test title')\n\n Events:\n\n .change the contents were changed\n .remove the image should be removed\n\n The '.change' event has attached attributes holding the values from the\n widget, all checked so they are 'sane'.\n \"\"\"\n\n # various sizes\n LineEditWidth = 40\n ButtonWidth = 40\n ComboboxWidth = 70\n\n # signals raised by this widget\n change = pyqtSignal(str, int, QColor, int, int, int, int)\n remove = pyqtSignal()\n\n # some stylesheets\n LabelStyle = 'QLabel { background-color : #f0f0f0; border: 1px solid gray; border-radius: 3px; }'\n GroupStyle = ('QGroupBox { background-color: rgb(230, 230, 230); }'\n 'QGroupBox::title { subcontrol-origin: margin; '\n ' background-color: rgb(215, 215, 215); '\n ' border-radius: 3px; '\n ' padding: 2 2px; '\n ' color: black; }')\n ButtonStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: white;'\n '}')\n ButtonColourStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: %s;'\n '}')\n\n def __init__(self, title):\n \"\"\"Initialise a LayerControl instance.\n\n title title to give the custom widget\n \"\"\"\n\n super().__init__()\n\n # create subwidgets used in this custom widget\n self.placement = QComboBox()\n for p in ['none', 'nw', 'cn', 'ne', 'ce', 'se', 'cs', 'sw', 'cw', 'cc']:\n self.placement.addItem(p)\n self.placement.setCurrentIndex(9)\n\n self.point_radius = QComboBox()\n for p in range(21):\n self.point_radius.addItem(str(p))\n self.point_radius.setCurrentIndex(3)\n self.point_radius.setFixedWidth(PointPlacementControl.ComboboxWidth)\n\n self.point_colour = QPushButton('')\n self.point_colour.setFixedWidth(PointPlacementControl.ButtonWidth)\n self.point_colour.setToolTip('Click here to change the point colour')\n\n self.x_posn = QComboBox()\n for p in range(0, 121, 10):\n self.x_posn.addItem(str(p - 60))\n self.x_posn.setCurrentIndex(6)\n self.x_posn.setFixedWidth(PointPlacementControl.ComboboxWidth)\n\n self.y_posn = QComboBox()\n for p in range(0, 121, 10):\n self.y_posn.addItem(str(p - 60))\n self.y_posn.setCurrentIndex(6)\n self.y_posn.setFixedWidth(PointPlacementControl.ComboboxWidth)\n\n self.x_offset = QComboBox()\n for p in range(0, 121, 10):\n self.x_offset.addItem(str(p - 60))\n self.x_offset.setCurrentIndex(6)\n self.x_offset.setFixedWidth(PointPlacementControl.ComboboxWidth)\n\n self.y_offset = QComboBox()\n for p in range(0, 121, 10):\n self.y_offset.addItem(str(p - 60))\n self.y_offset.setCurrentIndex(6)\n 
self.y_offset.setFixedWidth(PointPlacementControl.ComboboxWidth)\n\n btn_remove = QPushButton('Remove')\n btn_remove.resize(btn_remove.sizeHint())\n\n btn_update = QPushButton('Update')\n btn_update.resize(btn_update.sizeHint())\n\n # start the layout\n option_box = QGroupBox(title)\n option_box.setStyleSheet(PointPlacementControl.GroupStyle)\n\n box_layout = QGridLayout()\n box_layout.setContentsMargins(2, 2, 2, 2)\n box_layout.setHorizontalSpacing(1)\n box_layout.setColumnStretch(0, 1)\n\n # start layout\n row = 1\n label = QLabel('placement: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.placement, row, 1)\n\n row += 1\n label = QLabel('point radius: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.point_radius, row, 1)\n label = QLabel('colour: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.point_colour, row, 3)\n\n row += 1\n label = QLabel('X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.x_posn, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.y_posn, row, 3)\n\n row += 1\n label = QLabel('offset X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.x_offset, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.y_offset, row, 3)\n\n row += 1\n box_layout.addWidget(btn_remove, row, 1)\n box_layout.addWidget(btn_update, row, 3)\n\n option_box.setLayout(box_layout)\n\n layout = QHBoxLayout()\n layout.setContentsMargins(1, 1, 1, 1)\n layout.addWidget(option_box)\n\n self.setLayout(layout)\n\n # set size hints\n self.setMinimumSize(300, 200)\n size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setSizePolicy(size_policy)\n\n # connect internal widget events to handlers\n self.point_colour.clicked.connect(self.changePointColour)\n btn_remove.clicked.connect(self.removeImage)\n btn_update.clicked.connect(self.updateData)\n\n def changePointColour(self, event):\n color = QColorDialog.getColor()\n if color.isValid():\n colour = color.name()\n # set colour button background\n self.point_colour.setStyleSheet(PointPlacementControl.ButtonColourStyle % colour);\n \n def removeImage(self, event):\n self.remove.emit()\n\n def updateData(self, event):\n # get data from the widgets\n placement = str(self.placement.currentText())\n if placement == 'none':\n placement = None\n radius = int(self.point_radius.currentText())\n colour = self.point_colour.palette().color(1)\n x_posn = int(self.x_posn.currentText())\n y_posn = int(self.y_posn.currentText())\n x_offset = int(self.x_offset.currentText())\n y_offset = int(self.y_offset.currentText())\n\n print(f'updateData: placement={placement}, radius={radius}, x_posn={x_posn}, y_posn={y_posn}, x_offset={x_offset}, y_offset={y_offset}')\n \n self.change.emit(placement, radius, colour, x_posn, y_posn, x_offset, y_offset)\n"
},
{
"alpha_fraction": 0.5566594004631042,
"alphanum_fraction": 0.5721825361251831,
"avg_line_length": 31.200000762939453,
"blob_id": "3981403d7a90bfb3e4dbe076f01737835f08508f",
"content_id": "d3e9d5f63b80c2a310307b2b89f7329350fccb6f",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3221,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 100,
"path": "/pySlipQt/stamen_watercolor.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA tile source that serves Stamen Watercolor tiles from the internet.\n\nMap tiles by Stamen Design, under CC BY 3.0. Data by OpenStreetMap, under ODbL.\n\"\"\"\n\nimport math\nimport pySlipQt.tiles_net as tiles_net\n\n\n###############################################################################\n# Change values below here to configure an internet tile source.\n###############################################################################\n\n# attributes used for tileset introspection\n# names must be unique amongst tile modules\nTilesetName = 'Stamen Watercolor Tiles'\nTilesetShortName = 'STMW Tiles'\nTilesetVersion = '1.0'\n\n# the pool of tile servers used\nTileServers = ['http://c.tile.stamen.com',\n ]\n\n# the path on the server to a tile\n# {} params are Z=level, X=column, Y=row, origin at map top-left\nTileURLPath = '/watercolor/{Z}/{X}/{Y}.jpg'\n\n# tile levels to be used\nTileLevels = range(16)\n\n# maximum pending requests for each tile server\nMaxServerRequests = 2\n\n# set maximum number of in-memory tiles for each level\nMaxLRU = 10000\n\n# size of tiles\nTileWidth = 256\nTileHeight = 256\n\n# where earlier-cached tiles will be\n# this can be overridden in the __init__ method\nTilesDir = 'stamen_watercolor_tiles'\n\n################################################################################\n# Class for these tiles. Builds on tiles_net.Tiles.\n################################################################################\n\nclass Tiles(tiles_net.Tiles):\n \"\"\"An object to source internet tiles for pySlip.\"\"\"\n\n def __init__(self, tiles_dir=TilesDir, http_proxy=None):\n \"\"\"Override the base class for these tiles.\n\n Basically, just fill in the BaseTiles class with values from above\n and provide the Geo2Tile() and Tile2Geo() methods.\n \"\"\"\n\n super().__init__(TileLevels, TileWidth, TileHeight,\n servers=TileServers, url_path=TileURLPath,\n max_server_requests=MaxServerRequests,\n max_lru=MaxLRU, tiles_dir=tiles_dir,\n http_proxy=http_proxy)\n\n def Geo2Tile(self, geo):\n \"\"\"Convert geo to tile fractional coordinates for level in use.\n\n geo tuple of geo coordinates (xgeo, ygeo)\n\n Note that we assume the point *is* on the map!\n\n Code taken from [http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames]\n \"\"\"\n\n (xgeo, ygeo) = geo\n lat_rad = math.radians(ygeo)\n n = 2.0 ** self.level\n xtile = (xgeo + 180.0) / 360.0 * n\n ytile = ((1.0 - math.log(math.tan(lat_rad) + (1.0/math.cos(lat_rad))) / math.pi) / 2.0) * n\n\n return (xtile, ytile)\n\n def Tile2Geo(self, tile):\n \"\"\"Convert tile fractional coordinates to geo for level in use.\n\n tile a tupl;e (xtile,ytile) of tile fractional coordinates\n\n Note that we assume the point *is* on the map!\n\n Code taken from [http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames]\n \"\"\"\n\n (xtile, ytile) = tile\n n = 2.0 ** self.level\n xgeo = xtile / n * 360.0 - 180.0\n yrad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))\n ygeo = math.degrees(yrad)\n\n return (xgeo, ygeo)\n\n"
},
{
"alpha_fraction": 0.6300715804100037,
"alphanum_fraction": 0.6396181583404541,
"avg_line_length": 26.032258987426758,
"blob_id": "fb51e011d1282a937e73520eb7e477224347dabd",
"content_id": "ff8369a5747067192f082c9033e00bd5f8929b6f",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 31,
"path": "/pySlipQt/examples/test_display_text.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest the DisplayText custom widget used by pySlipQt.\n\"\"\"\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\nfrom display_text import DisplayText\n\nclass DisplayTextExample(QWidget):\n \"\"\"Application to demonstrate the pySlipQt 'DisplayText' widget.\"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.dt_group = DisplayText(title='Group title longer', label='Label:',\n tooltip='A tooltip')\n self.dt_group.set_text(\"14\")\n\n hbox = QHBoxLayout()\n hbox.setSpacing(5)\n hbox.setContentsMargins(1, 1, 1, 1)\n hbox.addWidget(self.dt_group)\n self.setLayout(hbox)\n\n self.setWindowTitle('DisplayText widget')\n self.show()\n\napp = QApplication(sys.argv)\nex = DisplayTextExample()\nsys.exit(app.exec())\n"
},
{
"alpha_fraction": 0.5357469320297241,
"alphanum_fraction": 0.5464585423469543,
"avg_line_length": 29.104999542236328,
"blob_id": "6c650589de5db6d63b181f0cdcb0b148a9c2ced5",
"content_id": "74d68ff1a0e2f2ed5165f8bb055894cdb5786dd6",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12043,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 400,
"path": "/pySlipQt/examples/make_gmt_tiles.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA program to generate a set of mapping tiles from GMT.\n\nUsage: make_gmt_tiles [-t] [-s <size>] [-v] <tile_dir> <stop_level> [<start_level>]\n\nwhere <size> is the tile width/height in pixels (default 256)\n <tile_dir> is the tile directory to create\n <stop_level> is the maximum level number to create\n <start_level> is the (optional) level to start at\n\nThe -t option forces the use of topo data in the tiles.\nThe -v option makes the process verbose.\n\nThis program attempts to use more than one core, if available.\n\nYou *must* have installed the GMT package (and data files)\n[http://gmt.soest.hawaii.edu/]\nas well as the GEBCO data file if you want oceanfloor topo\n[http://www.gebco.net/].\n\nNote: this requires python 3.x.\n\"\"\"\n\nimport sys\nimport os\nimport getopt\nimport tempfile\nimport shutil\nimport pickle\nimport multiprocessing\nimport queue\nimport traceback\nimport datetime\n\nfrom PIL import Image\nimport numofcpus\n\n# number of worker processes\nNumberOfWorkers = numofcpus.determineNumberOfCPUs()\n\n# where the GEBCO elevation file lives\nGEBCOElevationFile = '/home/r-w/GEBCO/gridone.nc'\n\n# default tile size, pixels\nDefaultTileSize = 256\n\n# name of info file for each tileset\nTileInfoFilename = 'tile.info'\n\n# name of the 'missing tile' picture file\nMissingTilePic = 'missing_tile.png'\n\n# various colours and widths (empty string means default)\nPoliticalBorderColour = '255/0/0'\nPoliticalBorderWidth = ''\nWaterColourTuple = (254,254,255)\nWaterColour = '%s/%s/%s' % WaterColourTuple\nLandColourTuple = (253,233,174)\nLandColour = '%s/%s/%s' % LandColourTuple\n\n# dictionary mapping level to detail\nLevel2Detail = {0: 'l', # low\n 1: 'l',\n 2: 'l',\n 3: 'i', # intermediate\n 4: 'i',\n 5: 'i',\n 6: 'h', # high\n 7: 'h',\n 8: 'h',\n 9: 'f', # full\n }\n\n\nclass Worker(multiprocessing.Process):\n def __init__(self, work_queue, w_num, tmp_dir):\n # base class initialization\n multiprocessing.Process.__init__(self)\n \n # job management stuff\n self.work_queue = work_queue\n self.w_num = w_num\n self.tmp_dir = os.path.join(tmp_dir, '%02d' % w_num)\n self.kill_received = False\n\n # set up logging\n self.logfile = 'worker_%02d.log' % w_num\n self.logf = open(self.logfile, 'w')\n\n # our own handler for uncaught exceptions\n def excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n self.log(msg)\n sys.exit(1)\n\n # plug our handler into the python system\n self.save_excepthook = sys.excepthook\n sys.excepthook = excepthook\n\n self.log('Started, UseTopo=%s' % str(UseTopo))\n\n def log(self, msg):\n # get time\n to = datetime.datetime.now()\n hr = to.hour\n min = to.minute\n sec = to.second\n msec = to.microsecond\n\n msg = ('%02d:%02d:%02d.%06d|%s\\n' % (hr, min, sec, msec, msg))\n\n self.logf.write('%s\\n' % msg)\n self.logf.flush()\n\n def run(self):\n self.log('%d starting\\n' % self.w_num)\n\n while not self.kill_received:\n # get a task\n try:\n (tile_file, tile_size, d_opt, r_opt) = self.work_queue.get(timeout=1)\n except queue.Empty as e:\n self.log('Empty queue: %s' % str(e))\n break\n \n # the actual processing - pathnames for temp files\n ps_file = os.path.join(self.tmp_dir, 'tile.ps')\n png_file = os.path.join(self.tmp_dir, 'tile.png')\n\n # draw the coastline tiles\n if UseTopo:\n cmd = ('gmt grdimage %s %s -JX17.5d -fig -P -C%s -I%s -K > %s'\n % (GEBCOElevationFile, r_opt, CptFile, GridFile, ps_file))\n 
self.do_cmd(cmd)\n cmd = ('gmt pscoast -P %s -JX17.5d %s '\n '-N1/%s,%s -N3/%s,%s -W0.5 -S%s -G%s -O >> %s'\n % (r_opt, d_opt,\n PoliticalBorderWidth, PoliticalBorderColour,\n PoliticalBorderWidth, PoliticalBorderColour,\n WaterColour, LandColour, ps_file))\n self.do_cmd(cmd)\n else:\n cmd = ('gmt pscoast -P %s -JX17.5d %s '\n '-N1/%s,%s -N3/%s,%s -W0.5 -S%s -G%s > %s'\n % (r_opt, d_opt,\n PoliticalBorderWidth, PoliticalBorderColour,\n PoliticalBorderWidth, PoliticalBorderColour,\n WaterColour, LandColour, ps_file))\n self.do_cmd(cmd)\n\n cmd = 'gmt psconvert %s -A -Tg' % ps_file\n self.do_cmd(cmd)\n\n# cmd = ('gmt convert -quality 100 -resize %dx%d! %s %s'\n# % (tile_size, tile_size, png_file, tile_file))\n cmd = ('gmt convert %s %s'\n % (png_file, tile_file))\n self.do_cmd(cmd)\n \n self.log('stopping')\n\n def do_cmd(self, cmd):\n \"\"\"Execute a command.\n \n cmd the command string to execute\n \"\"\"\n\n self.log(cmd)\n if Verbose:\n print(cmd)\n sys.stdout.flush()\n sys.stderr.flush()\n\n res = os.system(cmd)\n if res:\n self.log('Error doing above command: res=%d' % res)\n\n\ndef do_cmd(cmd):\n if Verbose:\n print(cmd)\n sys.stdout.flush()\n sys.stderr.flush()\n\n res = os.system(cmd)\n\n\ndef make_gmt_tiles(tile_dir, min_level, max_level, tile_size):\n \"\"\"Make a set of mapping tiles.\n\n tile_dir the directory for output tilesets\n min_level minimum tileset level number to create\n max_level maximum tileset level number to create\n tile_size size of tiles (width & height) in pixels\n \"\"\"\n\n # generate the topo grid file, if required\n if UseTopo:\n global GridFile, CptFile\n\n CptFile = './bath.cpt'\n cmd = 'gmt makecpt -Cglobe > %s' % CptFile\n do_cmd(cmd)\n print(cmd)\n GridFile = './IO_int.grd'\n cmd = 'gmt grdgradient %s -A0 -Nt -G%s' % (GEBCOElevationFile, GridFile)\n do_cmd(cmd)\n print(cmd)\n\n # prepare queue for workers\n work_queue = multiprocessing.Queue()\n\n # create a temporary working directory\n tmp_dir = tempfile.mkdtemp(prefix='make_gmt_tiles_')\n for i in range(NumberOfWorkers):\n os.mkdir(os.path.join(tmp_dir, '%02d' % i))\n\n # define the extent of the world we are mapping\n # this is the whole world, with the break through South America\n # so we have the South Sandwich Islands and points east in one piece\n # (W, E, S, N)\n extent = (-65.0, 295.0, -66.66, 66.66)\n\n # delete the output directory if it exists before recreating\n #shutil.rmtree(tile_dir, ignore_errors=True)\n try:\n os.mkdir(tile_dir)\n except OSError:\n pass # ignore error if directory already exists\n\n # create top-level info file - contains extent\n info_file = os.path.join(tile_dir, TileInfoFilename)\n fd = open(info_file, 'wb')\n obj = (extent, (DefaultTileSize, DefaultTileSize),\n WaterColourTuple, LandColourTuple)\n pickle.dump(obj, fd)\n fd.close()\n\n # generate each required tileset level\n for level in range(min_level, max_level+1):\n make_tileset(work_queue, tmp_dir, tile_dir, extent, level, tile_size)\n\n # start the workers and wait until all finished\n workers = []\n for i in range(NumberOfWorkers):\n worker = Worker(work_queue, i, tmp_dir)\n worker.start()\n workers.append(worker)\n\n for worker in workers:\n worker.join()\n\n # destroy the temporary working directory\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n\ndef make_tileset(q, tmp_dir, tile_dir, extent, level, tile_size):\n \"\"\"Make one tileset directory.\n\n q work queue\n tmp_dir temporary scratch directory\n tile_dir path to the base of the tileset directories\n extent global map extent (w, e, s, n)\n 
level the level of the tileset to generate\n tile_size size (width & height) of each tile in set\n \"\"\"\n\n # unpack the extent\n (w, e, s, n) = extent\n\n # get deltas for lon and lat\n d_lon = (e - w) / pow(2, level) / 2\n d_lat = (n - s) / pow(2, level)\n\n # figure out pixels/degree (for info file)\n ppd_x = tile_size / d_lon\n ppd_y = tile_size / d_lat\n\n # this should give us number of steps in X and Y directions\n num_tiles_x = int((e - w) / d_lon)\n num_tiles_y = int((n - s) / d_lat)\n\n # create the actual tileset directory\n tile_dir = os.path.join(tile_dir, '%02d' % level)\n try:\n os.mkdir(tile_dir)\n except OSError:\n pass # ignore error if directory already exists\n\n # calculate the detail appropriate for the level\n d_opt = '-D%s' % Level2Detail.get(level, 'f')\n\n w_num = 0\n # step through each tile\n for x in range(num_tiles_x):\n for y in range(num_tiles_y):\n # get a worker number\n w_num += 1\n if w_num > NumberOfWorkers:\n w_num = 0\n\n # get output tile filename\n tile_file = os.path.join(tile_dir, 'tile_%d_%d.png' % (x, y))\n\n # figure out -R bits\n r_w = w + x * d_lon\n r_e = r_w + d_lon\n r_n = n - y * d_lat\n r_s = r_n - d_lat\n r_opt = '-R%f/%f/%f/%f' % (r_w, r_e, r_s, r_n)\n\n # prepare data on queue\n q.put((tile_file, tile_size, d_opt, r_opt))\n\n # now create a tileset info file\n info_file = os.path.join(tile_dir, TileInfoFilename)\n obj = (num_tiles_x, num_tiles_y, ppd_x, ppd_y)\n with open(info_file, 'wb') as fd:\n pickle.dump(obj, fd)\n\n################################################################################\n# Program start\n################################################################################\n\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n\ndef main(argv=None):\n global Verbose\n global UseTopo\n\n Verbose = False\n UseTopo = False\n\n # parse the command line parameters\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'hs:tv',\n ['help', 'size=', 'topo', 'verbose'])\n except getopt.error as msg:\n usage()\n return 1\n\n # get all the options\n tile_size = DefaultTileSize\n Verbose = False\n for (opt, param) in opts:\n if opt in ['-s', '--size']:\n try:\n tile_size = int(param)\n except ValueError:\n usage('Tile size must be an integer > 0')\n return 1\n if tile_size < 1:\n usage('Tile size must be an integer > 0')\n return 1\n elif opt in ['-t', '--topo']:\n UseTopo = True\n elif opt in ['-v', '--verbose']:\n Verbose = True\n elif opt in ['-h', '--help']:\n usage()\n return 0\n\n # check we have required params\n if len(args) != 2 and len(args) != 3:\n usage()\n return 1\n\n tile_dir = args[0]\n min_level = 0\n try:\n max_level = int(args[1])\n except ValueError:\n usage('Stop level must be a positive integer')\n return 1\n if max_level < 0:\n usage('Stop level must be a positive integer')\n return 1\n if len(args) == 3:\n try:\n min_level = int(args[2])\n except ValueError:\n usage('Start level must be a positive integer')\n return 1\n if min_level < 0:\n usage('Start level must be a positive integer')\n return 1\n\n # go make the tileset\n make_gmt_tiles(tile_dir, min_level, max_level, tile_size)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n\n"
},
{
"alpha_fraction": 0.5091693997383118,
"alphanum_fraction": 0.5315834283828735,
"avg_line_length": 38.997711181640625,
"blob_id": "9acbfb8455784662ce0c51a7e4f6849f0be34284",
"content_id": "eeaac84c3ba1a6e025ffdfb2db42bc202635cdc4",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87493,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 2184,
"path": "/pySlipQt/examples/pyslipqt_demo.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\npySlipQt demonstration program with user-selectable tiles.\n\nUsage: pyslipqt_demo.py <options>\n\nwhere <options> is zero or more of:\n -d|--debug <level>\n where <level> is either a numeric debug level in the range [0, 50] or\n one of the symbolic debug level names:\n NOTSET 0 nothing is logged (default)\n DEBUG 10 everything is logged\n INFO 20 less than DEBUG, informational debugging\n WARNING 30 less than INFO, only non-fatal warnings\n ERROR 40 less than WARNING\n CRITICAL 50 less than ERROR\n -h|--help\n prints this help and stops\n\"\"\"\n\n\nimport os\nimport sys\nimport copy\nimport getopt\nimport traceback\nfrom functools import partial\n\ntry:\n from PyQt5.QtGui import QPixmap\n from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,\n QAction, QGridLayout, QErrorMessage)\nexcept ImportError:\n msg = '*'*60 + '\\nSorry, you must install PyQt5\\n' + '*'*60\n print(msg)\n sys.exit(1)\n\ntry:\n import pySlipQt.pySlipQt as pySlipQt\n import pySlipQt.log as log\nexcept ImportError:\n msg = '*'*60 + '\\nSorry, you must install pySlipQt\\n' + '*'*60\n print(msg)\n sys.exit(1)\n\n# initialize the logging system\ntry:\n log = log.Log('pyslip.log')\nexcept AttributeError:\n # already have a log file, ignore\n pass\n\n# get the bits of the demo program we need\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\n\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoName = 'pySlipQt %s - Demonstration' % pySlipQt.__version__\nDemoVersion = '1.2'\n\nDemoWidth = 1000\nDemoHeight = 800\n\n# the default place for the GMT tiles\nDefaultTilesDir = os.path.abspath(os.path.expanduser('~/gmt_local_tiles'))\n\n# initial view level and position\nInitViewLevel = 3\n\n# this will eventually be selectable within the app\n# a selection of cities, position from WikiPedia, etc\n#InitViewPosition = (0.0, 0.0) # \"Null\" Island\n#InitViewPosition = (0.0, 51.48) # Greenwich, England\n#InitViewPosition = (5.33, 60.389444) # Bergen, Norway\n#InitViewPosition = (153.033333, -27.466667) # Brisbane, Australia\n#InitViewPosition = (98.3786761, 7.8627326) # Phuket (ภูเก็ต), Thailand\n#InitViewPosition = (151.209444, -33.859972) # Sydney, Australia\n#InitViewPosition = (-77.036667, 38.895111) # Washington, DC, USA\n#InitViewPosition = (132.455278, 34.385278) # Hiroshima, Japan\n#InitViewPosition = (-8.008889, 31.63) # Marrakech (مراكش), Morocco\n#InitViewPosition = (18.95, 69.65) # Tromsø, Norway\n#InitViewPosition = (-70.933333, -53.166667) # Punta Arenas, Chile\n#InitViewPosition = (168.3475, -46.413056) # Invercargill, New Zealand\n#InitViewPosition = (-147.723056, 64.843611) # Fairbanks, AK, USA\nInitViewPosition = (103.851959, 1.290270) # Singapore\n\n# levels on which various layers show\nMRPointShowLevels = [3, 4]\nMRImageShowLevels = [3, 4]\nMRTextShowLevels = [3, 4]\nMRPolyShowLevels = [3, 4]\nMRPolylineShowLevels = [3, 4]\n\n# the number of decimal places in a lon/lat display\nLonLatPrecision = 3\n\n# default deltas for various layer types\nDefaultPointMapDelta = 40\nDefaultPointViewDelta = 40\nDefaultImageMapDelta = 40\nDefaultImageViewDelta = 40\nDefaultTextMapDelta = 40\nDefaultTextViewDelta = 40\nDefaultPolygonMapDelta = 40\nDefaultPolygonViewDelta = 40\nDefaultPolylineMapDelta = 40\nDefaultPolylineViewDelta = 40\n\n# image used for shipwrecks, glassy buttons, etc\nShipImg = 'graphics/shipwreck.png'\n\nGlassyImg2 = 'graphics/glassy_button_2.png'\nSelGlassyImg2 = 'graphics/selected_glassy_button_2.png'\nGlassyImg3 = 
'graphics/glassy_button_3.png'\nSelGlassyImg3 = 'graphics/selected_glassy_button_3.png'\nGlassyImg4 = 'graphics/glassy_button_4.png'\nSelGlassyImg4 = 'graphics/selected_glassy_button_4.png'\nGlassyImg5 = 'graphics/glassy_button_5.png'\nSelGlassyImg5 = 'graphics/selected_glassy_button_5.png'\nGlassyImg6 = 'graphics/glassy_button_6.png'\nSelGlassyImg6 = 'graphics/selected_glassy_button_6.png'\n\n# image used for shipwrecks\nCompassRoseGraphic = 'graphics/compass_rose.png'\n\n# logging levels, symbolic to numeric mapping\nLogSym2Num = {'CRITICAL': 50,\n 'ERROR': 40,\n 'WARNING': 30,\n 'INFO': 20,\n 'DEBUG': 10,\n 'NOTSET': 0}\n\n# list of modules containing tile sources\n# list of (<long_name>, <module_name>)\n# the <long_name>s go into the Tileselect menu\nTilesets = [\n ('BlueMarble tiles', 'blue_marble'),\n ('GMT tiles', 'gmt_local'),\n# ('ModestMaps tiles', 'modest_maps'), # can't access?\n# ('MapQuest tiles', 'mapquest'), # can't access?\n ('OpenStreetMap tiles', 'open_street_map'),\n ('Stamen Toner tiles', 'stamen_toner'),\n ('Stamen Transport tiles', 'stamen_transport'),\n ('Stamen Watercolor tiles', 'stamen_watercolor'),\n ]\n\n# index into Tilesets above to set default tileset: GMT tiles\nDefaultTilesetIndex = 1\n\n\n###############################################################################\n# A small class to manage tileset sources.\n###############################################################################\n\nclass TilesetManager:\n \"\"\"A class to manage multiple tileset objects.\n \n ts = TilesetManager(source_list) # 'source_list' is list of tileset source modules\n ts.get_tile_source(index) # 'index' into 'source_list' of source to use\n\n Features 'lazy' importing, only imports when the tileset is used\n the first time.\n \"\"\"\n\n def __init__(self, mod_list):\n \"\"\"Create a set of tile sources.\n \n mod_list list of module filenames to manage\n\n The list is something like: ['open_street_map.py', 'gmt_local.py']\n\n We can access tilesets using the index of the module in the 'mod_list'.\n \"\"\"\n\n self.modules = []\n for fname in mod_list:\n self.modules.append([fname, os.path.splitext(fname)[0], None])\n\n def get_tile_source(self, mod_index):\n \"\"\"Get an open tileset source for given name.\n\n mod_index index into self.modules of tileset to use\n \"\"\"\n\n tileset_data = self.modules[mod_index]\n (filename, modulename, tile_obj) = tileset_data\n if not tile_obj:\n # have never used this tileset, import and instantiate\n obj = __import__('pySlipQt', globals(), locals(), [modulename])\n tileset = getattr(obj, modulename)\n tile_obj = tileset.Tiles()\n tileset_data[2] = tile_obj\n return tile_obj\n\n###############################################################################\n# The main application frame\n###############################################################################\n\nclass PySlipQtDemo(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # initialize the tileset handler\n self.tileset_manager = self.init_tiles()\n self.tile_source = self.tileset_manager.get_tile_source(DefaultTilesetIndex)\n\n # start the GUI\n grid = QGridLayout()\n grid.setColumnStretch(0, 1)\n grid.setContentsMargins(2, 2, 2, 2)\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n # build the 'controls' part of GUI\n num_rows = self.make_gui_controls(grid)\n\n self.pyslip = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=InitViewLevel)\n grid.addWidget(self.pyslip, 0, 0, num_rows+1, 1)\n 
grid.setRowStretch(num_rows, 1)\n\n # add the menus\n self.initMenu()\n\n # do initialisation stuff - all the application stuff\n self.initData()\n\n # create select event dispatch directory\n self.demo_select_dispatch = {}\n\n # selected point, if not None\n self.point_layer = None\n\n # variables referencing various layers\n self.sel_text_highlight = None\n\n # bind events to handlers\n self.pyslip.events.EVT_PYSLIPQT_LEVEL.connect(self.level_change_event)\n self.pyslip.events.EVT_PYSLIPQT_POSITION.connect(self.mouse_posn_event)\n self.pyslip.events.EVT_PYSLIPQT_SELECT.connect(self.select_event)\n self.pyslip.events.EVT_PYSLIPQT_BOXSELECT.connect(self.select_event)\n\n # set the size of the demo window, etc\n self.setGeometry(300, 300, DemoWidth, DemoHeight)\n self.setWindowTitle('%s %s' % (DemoName, DemoVersion))\n self.show()\n\n # set initial view position\n self.pyslip.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n#####\n# Build the GUI\n#####\n\n def make_gui_controls(self, grid):\n \"\"\"Build the 'controls' part of the GUI\n\n grid reference to grid that we populate\n\n Returns the number of rows add ed to the 'grid' layout.\n \"\"\"\n\n # the 'grid_row' variable is row to add into\n grid_row = 0\n\n # put level and position into grid at top right\n self.map_level = DisplayText(title='', label='Level:',\n tooltip=None)\n grid.addWidget(self.map_level, grid_row, 1, 1, 1)\n self.mouse_position = DisplayText(title='',\n label='Lon/Lat:', text_width=100,\n tooltip='Shows the mouse longitude and latitude on the map',)\n grid.addWidget(self.mouse_position, grid_row, 2, 1, 1)\n grid_row += 1\n\n # controls for map-relative points layer\n self.lc_point = LayerControl(self, title='Points, map relative %s'\n % (str(MRPointShowLevels) if MRPointShowLevels else ''),\n selectable=True)\n self.lc_point.change_add.connect(self.pointOnOff) # tie to event handler(s)\n self.lc_point.change_show.connect(self.pointShowOnOff)\n self.lc_point.change_select.connect(self.pointSelectOnOff)\n grid.addWidget(self.lc_point, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for view-relative points layer\n self.lc_point_v = LayerControl(self, 'Points, view relative', selectable=True)\n self.lc_point_v.change_add.connect(self.pointViewOnOff) # tie to event handler(s)\n self.lc_point_v.change_show.connect(self.pointViewShowOnOff)\n self.lc_point_v.change_select.connect(self.pointViewSelectOnOff)\n grid.addWidget(self.lc_point_v, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for map-relative image layer\n self.lc_image = LayerControl(self, 'Images, map relative %s'\n % (str(MRImageShowLevels) if MRImageShowLevels else ''),\n selectable=True)\n self.lc_image.change_add.connect(self.imageOnOff) # tie to event handler(s)\n self.lc_image.change_show.connect(self.imageShowOnOff)\n self.lc_image.change_select.connect(self.imageSelectOnOff)\n grid.addWidget(self.lc_image, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for map-relative image layer\n self.lc_image_v = LayerControl(self, 'Images, view relative', selectable=True) \n self.lc_image_v.change_add.connect(self.imageViewOnOff) # tie to event handler(s)\n self.lc_image_v.change_show.connect(self.imageViewShowOnOff)\n self.lc_image_v.change_select.connect(self.imageViewSelectOnOff)\n grid.addWidget(self.lc_image_v, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for map-relative text layer\n self.lc_text = LayerControl(self, 'Text, map relative %s'\n % (str(MRTextShowLevels) if MRTextShowLevels else ''),\n selectable=True)\n 
self.lc_text.change_add.connect(self.textOnOff) # tie to event handler(s)\n self.lc_text.change_show.connect(self.textShowOnOff)\n self.lc_text.change_select.connect(self.textSelectOnOff)\n grid.addWidget(self.lc_text, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for view-relative text layer\n self.lc_text_v = LayerControl(self, 'Text, view relative', selectable=True)\n self.lc_text_v.change_add.connect(self.textViewOnOff) # tie to event handler(s)\n self.lc_text_v.change_show.connect(self.textViewShowOnOff)\n self.lc_text_v.change_select.connect(self.textViewSelectOnOff)\n grid.addWidget(self.lc_text_v, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for map-relative polygon layer\n self.lc_poly = LayerControl(self, 'Polygon, map relative %s'\n % (str(MRPolyShowLevels) if MRPolyShowLevels else ''),\n selectable=True)\n self.lc_poly.change_add.connect(self.polyOnOff) # tie to event handler(s)\n self.lc_poly.change_show.connect(self.polyShowOnOff)\n self.lc_poly.change_select.connect(self.polySelectOnOff)\n grid.addWidget(self.lc_poly, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for view-relative polygon layer\n self.lc_poly_v = LayerControl(self, 'Polygon, view relative', selectable=True)\n self.lc_poly_v.change_add.connect(self.polyViewOnOff) # tie to event handler(s)\n self.lc_poly_v.change_show.connect(self.polyViewShowOnOff)\n self.lc_poly_v.change_select.connect(self.polyViewSelectOnOff)\n grid.addWidget(self.lc_poly_v, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for map-relative polyline layer\n self.lc_poll = LayerControl(self, 'Polyline, map relative %s'\n % (str(MRPolyShowLevels) if MRPolyShowLevels else ''),\n selectable=True)\n self.lc_poll.change_add.connect(self.polylineOnOff) # tie to event handler(s)\n self.lc_poll.change_show.connect(self.polylineShowOnOff)\n self.lc_poll.change_select.connect(self.polylineSelectOnOff)\n grid.addWidget(self.lc_poll, grid_row, 1, 1, 2)\n grid_row += 1\n\n # controls for view-relative polyline layer\n self.lc_poll_v = LayerControl(self, 'Polyline, view relative', selectable=True)\n self.lc_poll_v.change_add.connect(self.polylineViewOnOff) # tie to event handler(s)\n self.lc_poll_v.change_show.connect(self.polylineViewShowOnOff)\n self.lc_poll_v.change_select.connect(self.polylineViewSelectOnOff)\n grid.addWidget(self.lc_poll_v, grid_row, 1, 1, 2)\n grid_row += 1\n\n return grid_row\n\n def initMenu(self):\n \"\"\"Add the 'Tilesets' menu to the app.\"\"\"\n\n # create tileset menuitems\n menubar = self.menuBar()\n tilesets = menubar.addMenu('Tilesets')\n\n # this dict: id -> (display_name, module_name, action, tileset_obj)\n self.id2tiledata = {}\n\n # create the tileset menuitems, add to menu and connect to handler\n for (action_id, (name, module_name)) in enumerate(Tilesets):\n # create menu, connect to handler\n new_action = QAction(name, self, checkable=True)\n tilesets.addAction(new_action)\n action_plus_menuid = partial(self.change_tileset, action_id)\n new_action.triggered.connect(action_plus_menuid)\n\n # prepare the dict that handles importing tileset object\n self.id2tiledata[action_id] = (name, module_name, new_action, None)\n\n # check the default tileset\n if action_id == DefaultTilesetIndex:\n # put a check on the default tileset\n new_action.setChecked(True)\n\n def init_tiles(self):\n \"\"\"Initialize the tileset manager.\n\n Return a reference to the manager object.\n \"\"\"\n\n modules = []\n for (action_id, (name, module_name)) in enumerate(Tilesets):\n modules.append(module_name)\n\n return 
TilesetManager(modules)\n\n def change_tileset(self, menu_id):\n \"\"\"Handle a tileset selection.\n\n menu_id the index in self.id2tiledata of the required tileset\n \"\"\"\n\n log('change_tileset: menu_id=%s' % str(menu_id))\n log('id2tiledata[]=%s' % str(self.id2tiledata))\n\n # ensure only one tileset is checked in the menu, the required one\n for (key, tiledata) in self.id2tiledata.items():\n (name, module_name, action, tile_obj) = tiledata\n action.setChecked(key == menu_id)\n\n # get information for the required tileset\n try:\n (name, module_name, action, new_tile_obj) = self.id2tiledata[menu_id]\n except KeyError:\n # badly formed self.id2tiledata element\n raise RuntimeError('self.id2tiledata is badly formed:\\n%s'\n % str(self.id2tiledata))\n\n log('name=%s, module_name=%s, new_tile_obj=%s'\n % (str(name), str(module_name), str(new_tile_obj)))\n\n if new_tile_obj is None:\n # haven't seen this tileset before, import and instantiate\n log(\"importing '%s' from pySlipQt\" % str(module_name))\n obj = __import__('pySlipQt', globals(), locals(), [module_name])\n log('imported module=%s' % str(obj))\n log('imported module=%s' % str(dir(obj)))\n tileset = getattr(obj, module_name)\n log('tileset=%s' % str(tileset))\n tile_name = tileset.TilesetName\n log('tile_name=%s' % str(tile_name))\n new_tile_obj = tileset.Tiles()\n\n # update the self.id2tiledata element\n self.id2tiledata[menu_id] = (name, module_name, action, new_tile_obj)\n\n log('Before .ChangeTileset, new_tile_obj=%s' % str(new_tile_obj))\n self.pyslip.ChangeTileset(new_tile_obj)\n\n def onClose(self):\n \"\"\"Application is closing.\"\"\"\n\n pass\n\n #self.Close(True)\n\n def make_gui_level(self, parent):\n \"\"\"Build the control that shows the level.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create objects\n txt = wx.StaticText(parent, wx.ID_ANY, 'Level: ')\n self.map_level = ROTextCtrl(parent, '', size=(30,-1),\n tooltip='Shows map zoom level')\n\n # lay out the controls\n sb = AppStaticBox(parent, 'Map level')\n box = wx.StaticBoxSizer(sb, orient=wx.HORIZONTAL)\n box.Add(txt, border=PackBorder, flag=(wx.ALIGN_CENTER_VERTICAL\n |wx.ALIGN_RIGHT|wx.LEFT))\n box.Add(self.map_level, proportion=0, border=PackBorder,\n flag=wx.LEFT|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n\n return box\n\n def make_gui_mouse(self, parent):\n \"\"\"Build the mouse part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create objects\n txt = wx.StaticText(parent, wx.ID_ANY, 'Lon/Lat: ')\n self.mouse_position = ROTextCtrl(parent, '', size=(120,-1),\n tooltip=('Shows the mouse '\n 'longitude and latitude '\n 'on the map'))\n\n # lay out the controls\n sb = AppStaticBox(parent, 'Mouse position')\n box = wx.StaticBoxSizer(sb, orient=wx.HORIZONTAL)\n box.Add(txt, border=PackBorder, flag=(wx.ALIGN_CENTER_VERTICAL\n |wx.ALIGN_RIGHT|wx.LEFT))\n box.Add(self.mouse_position, proportion=0, border=PackBorder,\n flag=wx.RIGHT|wx.TOP|wx.BOTTOM)\n\n return box\n\n def make_gui_point(self, parent):\n \"\"\"Build the points part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n point_obj = LayerControl(parent, 'Points, map relative %s'\n % str(MRPointShowLevels),\n selectable=True)\n\n # tie to event handler(s)\n point_obj.Bind(EVT_ONOFF, self.pointOnOff)\n point_obj.Bind(EVT_SHOWONOFF, self.pointShowOnOff)\n point_obj.Bind(EVT_SELECTONOFF, 
self.pointSelectOnOff)\n\n return point_obj\n\n def make_gui_point_view(self, parent):\n \"\"\"Build the view-relative points part of the GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n point_obj = LayerControl(parent, 'Points, view relative',\n selectable=True)\n\n # tie to event handler(s)\n point_obj.Bind(EVT_ONOFF, self.pointViewOnOff)\n point_obj.Bind(EVT_SHOWONOFF, self.pointViewShowOnOff)\n point_obj.Bind(EVT_SELECTONOFF, self.pointViewSelectOnOff)\n\n return point_obj\n\n def make_gui_image(self, parent):\n \"\"\"Build the image part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n image_obj = LayerControl(parent, 'Images, map relative %s'\n % str(MRImageShowLevels),\n selectable=True)\n\n # tie to event handler(s)\n image_obj.Bind(EVT_ONOFF, self.imageOnOff)\n image_obj.Bind(EVT_SHOWONOFF, self.imageShowOnOff)\n image_obj.Bind(EVT_SELECTONOFF, self.imageSelectOnOff)\n\n return image_obj\n\n def make_gui_image_view(self, parent):\n \"\"\"Build the view-relative image part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n image_obj = LayerControl(parent, 'Images, view relative',\n selectable=True)\n\n # tie to event handler(s)\n image_obj.Bind(EVT_ONOFF, self.imageViewOnOff)\n image_obj.Bind(EVT_SHOWONOFF, self.imageViewShowOnOff)\n image_obj.Bind(EVT_SELECTONOFF, self.imageViewSelectOnOff)\n\n return image_obj\n\n def make_gui_text(self, parent):\n \"\"\"Build the map-relative text part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n text_obj = LayerControl(parent,\n 'Text, map relative %s' % str(MRTextShowLevels),\n selectable=True, editable=False)\n\n # tie to event handler(s)\n text_obj.Bind(EVT_ONOFF, self.textOnOff)\n text_obj.Bind(EVT_SHOWONOFF, self.textShowOnOff)\n text_obj.Bind(EVT_SELECTONOFF, self.textSelectOnOff)\n\n return text_obj\n\n def make_gui_text_view(self, parent):\n \"\"\"Build the view-relative text part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n text_view_obj = LayerControl(parent, 'Text, view relative',\n selectable=True)\n\n # tie to event handler(s)\n text_view_obj.Bind(EVT_ONOFF, self.textViewOnOff)\n text_view_obj.Bind(EVT_SHOWONOFF, self.textViewShowOnOff)\n text_view_obj.Bind(EVT_SELECTONOFF, self.textViewSelectOnOff)\n\n return text_view_obj\n\n def make_gui_poly(self, parent):\n \"\"\"Build the map-relative polygon part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n poly_obj = LayerControl(parent,\n 'Polygon, map relative %s'\n % str(MRPolyShowLevels),\n selectable=True)\n\n # tie to event handler(s)\n poly_obj.Bind(EVT_ONOFF, self.polyOnOff)\n poly_obj.Bind(EVT_SHOWONOFF, self.polyShowOnOff)\n poly_obj.Bind(EVT_SELECTONOFF, self.polySelectOnOff)\n\n return poly_obj\n\n def make_gui_poly_view(self, parent):\n \"\"\"Build the view-relative polygon part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n poly_view_obj = LayerControl(parent, 'Polygon, view relative',\n selectable=True)\n\n # tie to event handler(s)\n 
poly_view_obj.Bind(EVT_ONOFF, self.polyViewOnOff)\n poly_view_obj.Bind(EVT_SHOWONOFF, self.polyViewShowOnOff)\n poly_view_obj.Bind(EVT_SELECTONOFF, self.polyViewSelectOnOff)\n\n return poly_view_obj\n\n def make_gui_polyline(self, parent):\n \"\"\"Build the map-relative polyline part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n poly_obj = LayerControl(parent,\n 'Polyline, map relative %s'\n % str(MRPolyShowLevels),\n selectable=True)\n\n # tie to event handler(s)\n poly_obj.Bind(EVT_ONOFF, self.polylineOnOff)\n poly_obj.Bind(EVT_SHOWONOFF, self.polylineShowOnOff)\n poly_obj.Bind(EVT_SELECTONOFF, self.polylineSelectOnOff)\n\n return poly_obj\n\n def make_gui_polyline_view(self, parent):\n \"\"\"Build the view-relative polyline part of the controls part of GUI.\n\n parent reference to parent\n\n Returns reference to containing sizer object.\n \"\"\"\n\n # create widgets\n poly_view_obj = LayerControl(parent, 'Polyline, view relative',\n selectable=True)\n\n # tie to event handler(s)\n poly_view_obj.Bind(EVT_ONOFF, self.polylineViewOnOff)\n poly_view_obj.Bind(EVT_SHOWONOFF, self.polylineViewShowOnOff)\n poly_view_obj.Bind(EVT_SELECTONOFF, self.polylineViewSelectOnOff)\n\n return poly_view_obj\n\n ######\n # demo control event handlers\n ######\n\n##### map-relative point layer\n\n def pointOnOff(self, event):\n \"\"\"Handle OnOff event for point layer control.\"\"\"\n\n if event:\n # event is True, so we are adding the maprel point layer\n self.point_layer = \\\n self.pyslip.AddPointLayer(PointData, map_rel=True,\n colour=PointDataColour, radius=3,\n # offset points to exercise placement\n offset_x=0, offset_y=0, visible=True,\n show_levels=MRPointShowLevels,\n delta=DefaultPointMapDelta,\n placement='nw', # check placement\n name='<pt_layer>')\n else:\n # event is False, so we are removing the maprel point layer\n self.lc_point.set_show(True) # set control state to 'normal'\n self.lc_point.set_select(False)\n\n self.pyslip.DeleteLayer(self.point_layer)\n self.point_layer = None\n\n if self.sel_point_layer:\n self.pyslip.DeleteLayer(self.sel_point_layer)\n self.sel_point_layer = None\n self.sel_point = None\n\n def pointShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for point layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.point_layer)\n if self.sel_point_layer:\n self.pyslip.ShowLayer(self.sel_point_layer)\n else:\n self.pyslip.HideLayer(self.point_layer)\n if self.sel_point_layer:\n self.pyslip.HideLayer(self.sel_point_layer)\n\n def pointSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for point layer control.\"\"\"\n\n layer = self.point_layer\n if event:\n self.add_select_handler(layer, self.pointSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def pointSelect(self, event):\n \"\"\"Handle map-relative point select exception from the widget.\n\n event.type the layer type the select occurred on\n event.layer_id ID of the layer the select occurred on\n event.mposn mouse click in view coordinates\n event.vposn ???\n event.selection list of tuples (x,y,kwargs) of selected point(s)\n (if None then no point(s) selected)\n event.relsel relative selection (unused?)\n\n The selection could be a single or box select.\n\n The point select is designed to be select point(s) for on, then select\n point(s) again for off. 
Clicking away from the already selected point\n doesn't remove previously selected point(s) if nothing is selected. We\n do this to show the selection/deselection of point(s) is up to the user,\n not the widget.\n\n This code also shows how to combine handling of EventSelect and\n EventBoxSelect events.\n \"\"\"\n\n if event.selection == self.sel_point:\n # same point(s) selected again, turn point(s) off\n self.pyslip.DeleteLayer(self.sel_point_layer)\n self.sel_point_layer = None\n self.sel_point = None\n elif event.selection:\n # some other point(s) selected, delete previous selection, if any\n if self.sel_point_layer:\n self.pyslip.DeleteLayer(self.sel_point_layer)\n\n # remember selection (need copy as highlight modifies attributes)\n self.sel_point = copy.deepcopy(event.selection)\n\n # choose different highlight colour for different type of selection\n selcolour = '#00ffff'\n if event.type == pySlipQt.PySlipQt.EVT_PYSLIPQT_SELECT: # TODO better visibility (like pySlip)\n selcolour = '#0000ff'\n\n # get selected points into form for display layer\n # delete 'colour' and 'radius' attributes as we want different values\n highlight = []\n for (x, y, d) in event.selection:\n del d['colour'] # AddLayer...() ensures keys exist\n del d['radius']\n highlight.append((x, y, d))\n\n # layer with highlight of selected poijnts\n self.sel_point_layer = \\\n self.pyslip.AddPointLayer(highlight, map_rel=True,\n colour=selcolour,\n radius=5, visible=True,\n show_levels=MRPointShowLevels,\n name='<sel_pt_layer>')\n\n # make sure highlight layer is BELOW selected layer\n self.pyslip.PlaceLayerBelowLayer(self.sel_point_layer,\n self.point_layer)\n # else: we ignore an empty selection\n\n return True\n\n##### view-relative point layer\n\n def pointViewOnOff(self, event):\n \"\"\"Handle OnOff event for point view layer control.\"\"\"\n\n if event:\n self.point_view_layer = \\\n self.pyslip.AddPointLayer(PointViewData, map_rel=False,\n placement=PointViewDataPlacement,\n colour=PointViewDataColour, radius=1,\n delta=DefaultPointViewDelta,\n visible=True,\n name='<point_view_layer>')\n else:\n self.lc_point_v.set_show(True) # set control state to 'normal'\n self.lc_point_v.set_select(False)\n\n self.pyslip.DeleteLayer(self.point_view_layer)\n self.point_view_layer = None\n if self.sel_point_view_layer:\n self.pyslip.DeleteLayer(self.sel_point_view_layer)\n self.sel_point_view_layer = None\n self.sel_point_view = None\n\n def pointViewShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for point view layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.point_view_layer)\n if self.sel_point_view_layer:\n self.pyslip.ShowLayer(self.sel_point_view_layer)\n else:\n self.pyslip.HideLayer(self.point_view_layer)\n if self.sel_point_view_layer:\n self.pyslip.HideLayer(self.sel_point_view_layer)\n\n def pointViewSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for point view layer control.\"\"\"\n\n layer = self.point_view_layer\n if event:\n self.add_select_handler(layer, self.pointViewSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def pointViewSelect(self, event):\n \"\"\"Handle view-relative point select exception from the widget.\n\n event.type the event type\n event.layer_id the ID of the layer that was selected\n event.selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n event.data userdata object of the selected point\n\n The selection could be a 
single or box select.\n\n The point select is designed to be click point for on, then any other\n select event turns that point off, whether there is a selection or not\n and whether the same point is selected or not.\n \"\"\"\n\n # if there is a previous selection, remove it\n if self.sel_point_view_layer:\n self.pyslip.DeleteLayer(self.sel_point_view_layer)\n self.sel_point_view_layer = None\n\n if event.selection and event.selection != self.sel_point_view:\n # it's a box selection\n self.sel_point_view = event.selection\n\n # get selected points into form for display layer\n highlight = []\n for (x, y, d) in event.selection:\n del d['colour']\n del d['radius']\n highlight.append((x, y, d))\n\n # assume a box selection\n self.sel_point_view_layer = \\\n self.pyslip.AddPointLayer(highlight, map_rel=False,\n placement='se',\n colour='#0000ff',\n radius=3, visible=True,\n name='<sel_pt_view_layer>')\n else:\n self.sel_point_view = None\n\n return True\n\n##### map-relative image layer\n\n def imageOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative image layer control.\"\"\"\n\n if event:\n self.image_layer = \\\n self.pyslip.AddImageLayer(ImageData, map_rel=True,\n visible=True,\n delta=DefaultImageMapDelta,\n show_levels=MRImageShowLevels,\n name='<image_layer>')\n else:\n self.lc_image.set_show(True) # set control state to 'normal'\n self.lc_image.set_select(False)\n\n self.pyslip.DeleteLayer(self.image_layer)\n self.image_layer = None\n if self.sel_image_layer:\n self.pyslip.DeleteLayer(self.sel_image_layer)\n self.sel_image_layer = None\n self.sel_image = None\n\n def imageShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for image layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.image_layer)\n if self.sel_image_layer:\n self.pyslip.ShowLayer(self.sel_image_layer)\n else:\n self.pyslip.HideLayer(self.image_layer)\n if self.sel_image_layer:\n self.pyslip.HideLayer(self.sel_image_layer)\n\n def imageSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for image layer control.\"\"\"\n\n layer = self.image_layer\n if event:\n self.add_select_handler(layer, self.imageSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def imageSelect(self, event):\n \"\"\"Select event from the widget.\n\n event.type the type of point selection: single or box\n event.selection tuple (selection, data, relsel)\n (if None then no point(s) selected)\n event.data userdata object of the selected point\n\n The selection could be a single or box select.\n \"\"\"\n\n #relsel = event.relsel\n\n # select again, turn selection off\n if event.selection == self.sel_image:\n self.pyslip.DeleteLayer(self.sel_image_layer)\n self.sel_image_layer = self.sel_image = None\n elif event.selection:\n # new image selected, show highlight\n if self.sel_image_layer:\n self.pyslip.DeleteLayer(self.sel_image_layer)\n self.sel_image = event.selection\n\n # get selected points into form for display layer\n new_points = []\n for p in event.selection:\n (x, y, d) = p\n del d['colour']\n del d['radius']\n new_points.append((x, y, d))\n\n self.sel_image_layer = \\\n self.pyslip.AddPointLayer(new_points, map_rel=True,\n colour='#0000ff',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_pt_layer>')\n self.pyslip.PlaceLayerBelowLayer(self.sel_image_layer,\n self.image_layer)\n\n return True\n\n def imageBSelect(self, id, selection=None):\n \"\"\"Select event from the widget.\"\"\"\n\n # remove any 
previous selection\n if self.sel_image_layer:\n self.pyslip.DeleteLayer(self.sel_image_layer)\n self.sel_image_layer = None\n\n if selection:\n # get selected points into form for display layer\n points = []\n for (x, y, f, d) in selection:\n del d['colour']\n del d['radius']\n points.append((x, y, d))\n\n self.sel_image_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#e0e0e0',\n radius=13, visible=True,\n show_levels=[3,4],\n name='<boxsel_img_layer>')\n self.pyslip.PlaceLayerBelowLayer(self.sel_image_layer,\n self.image_layer)\n\n return True\n\n##### view-relative image layer\n\n def imageViewOnOff(self, event):\n \"\"\"Handle OnOff event for view-relative image layer control.\n \n event the state of the leyer control master checkbox\n \"\"\"\n\n if event:\n self.image_view_layer = \\\n self.pyslip.AddImageLayer(ImageViewData, map_rel=False,\n delta=DefaultImageViewDelta,\n visible=True,\n name='<image_view_layer>')\n else:\n self.lc_image_v.set_show(True) # set control state to 'normal'\n self.lc_image_v.set_select(False)\n\n self.pyslip.DeleteLayer(self.image_view_layer)\n self.image_view_layer = None\n if self.sel_image_view_layer:\n self.pyslip.DeleteLayer(self.sel_image_view_layer)\n self.sel_image_view_layer = None\n if self.sel_imagepoint_view_layer:\n self.pyslip.DeleteLayer(self.sel_imagepoint_view_layer)\n self.sel_imagepoint_view_layer = None\n\n def imageViewShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for image layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.image_view_layer)\n if self.sel_image_view_layer:\n self.pyslip.ShowLayer(self.sel_image_view_layer)\n if self.sel_imagepoint_view_layer:\n self.pyslip.ShowLayer(self.sel_imagepoint_view_layer)\n else:\n self.pyslip.HideLayer(self.image_view_layer)\n if self.sel_image_view_layer:\n self.pyslip.HideLayer(self.sel_image_view_layer)\n if self.sel_imagepoint_view_layer:\n self.pyslip.HideLayer(self.sel_imagepoint_view_layer)\n\n def imageViewSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for image layer control.\"\"\"\n\n layer = self.image_view_layer\n if event:\n self.add_select_handler(layer, self.imageViewSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def imageViewSelect(self, event):\n \"\"\"View-relative image select event from the widget.\n\n event the event that contains these attributes:\n selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n relsel relative position of single point select,\n None if box select\n\n The selection could be a single or box select.\n\n The selection mode is different here. An empty selection will remove\n any current selection. This shows the flexibility that user code\n can implement.\n\n The code below doesn't assume a placement of the selected image, it\n figures out the correct position of the 'highlight' layers. 
This helps\n with debugging, as we can move the compass rose anywhere we like.\n \"\"\"\n\n selection = event.selection\n relsel = event.relsel # None if box select\n\n # only one image selectable, remove old selections (if any)\n if self.sel_image_view_layer:\n self.pyslip.DeleteLayer(self.sel_image_view_layer)\n self.sel_image_view_layer = None\n if self.sel_imagepoint_view_layer:\n self.pyslip.DeleteLayer(self.sel_imagepoint_view_layer)\n self.sel_imagepoint_view_layer = None\n\n if selection:\n # figure out compass rose attributes\n attr_dict = ImageViewData[0][3]\n img_placement = attr_dict['placement']\n\n self.sel_imagepoint_view_layer = None\n if relsel:\n # unpack event relative selection point\n (sel_x, sel_y) = relsel # select relative point in image\n\n# FIXME This should be cleaner, user shouldn't have to know internal structure\n# FIXME or fiddle with placement perturbations\n\n # add selection point\n point_place_coords = {'ne': '(sel_x - CR_Width, sel_y)',\n 'ce': '(sel_x - CR_Width, sel_y - CR_Height/2.0)',\n 'se': '(sel_x - CR_Width, sel_y - CR_Height)',\n 'cs': '(sel_x - CR_Width/2.0, sel_y - CR_Height)',\n 'sw': '(sel_x, sel_y - CR_Height)',\n 'cw': '(sel_x, sel_y - CR_Height/2.0)',\n 'nw': '(sel_x, sel_y)',\n 'cn': '(sel_x - CR_Width/2.0, sel_y)',\n 'cc': '(sel_x - CR_Width/2.0, sel_y - CR_Height/2.0)',\n '': '(sel_x, sel_y)',\n None: '(sel_x, sel_y)',\n }\n\n point = eval(point_place_coords[img_placement])\n self.sel_imagepoint_view_layer = \\\n self.pyslip.AddPointLayer((point,), map_rel=False,\n colour='green',\n radius=5, visible=True,\n placement=img_placement,\n name='<sel_image_view_point>')\n\n # add polygon outline around image\n p_dict = {'placement': img_placement, 'width': 3, 'colour': 'green', 'closed': True}\n poly_place_coords = {'ne': '(((-CR_Width,0),(0,0),(0,CR_Height),(-CR_Width,CR_Height)),p_dict)',\n 'ce': '(((-CR_Width,-CR_Height/2.0),(0,-CR_Height/2.0),(0,CR_Height/2.0),(-CR_Width,CR_Height/2.0)),p_dict)',\n 'se': '(((-CR_Width,-CR_Height),(0,-CR_Height),(0,0),(-CR_Width,0)),p_dict)',\n 'cs': '(((-CR_Width/2.0,-CR_Height),(CR_Width/2.0,-CR_Height),(CR_Width/2.0,0),(-CR_Width/2.0,0)),p_dict)',\n 'sw': '(((0,-CR_Height),(CR_Width,-CR_Height),(CR_Width,0),(0,0)),p_dict)',\n 'cw': '(((0,-CR_Height/2.0),(CR_Width,-CR_Height/2.0),(CR_Width,CR_Height/2.0),(0,CR_Height/2.0)),p_dict)',\n 'nw': '(((0,0),(CR_Width,0),(CR_Width,CR_Height),(0,CR_Height)),p_dict)',\n 'cn': '(((-CR_Width/2.0,0),(CR_Width/2.0,0),(CR_Width/2.0,CR_Height),(-CR_Width/2.0,CR_Height)),p_dict)',\n 'cc': '(((-CR_Width/2.0,-CR_Height/2.0),(CR_Width/2.0,-CR_Height/2.0),(CR_Width/2.0,CR_Height/2.0),(-CR_Width/2.0,CR_Height/2.0)),p_dict)',\n '': '(((x, y),(x+CR_Width,y),(x+CR_Width,y+CR_Height),(x,y+CR_Height)),p_dict)',\n None: '(((x, y),(x+CR_Width,y),(x+CR_Width,y+CR_Height),(x,y+CR_Height)),p_dict)',\n }\n pdata = eval(poly_place_coords[img_placement])\n self.sel_image_view_layer = \\\n self.pyslip.AddPolygonLayer((pdata,), map_rel=False,\n name='<sel_image_view_outline>',\n )\n\n return True\n\n##### map-relative text layer\n\n def textOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative text layer control.\"\"\"\n\n if event:\n self.text_layer = \\\n self.pyslip.AddTextLayer(TextData, map_rel=True,\n name='<text_layer>', visible=True,\n delta=DefaultTextMapDelta,\n show_levels=MRTextShowLevels,\n placement='ne')\n else:\n self.lc_text.set_show(True) # set control state to 'normal'\n self.lc_text.set_select(False)\n self.pyslip.DeleteLayer(self.text_layer)\n if 
self.sel_text_layer:\n self.pyslip.DeleteLayer(self.sel_text_layer)\n self.sel_text_layer = None\n self.sel_text_highlight = None\n\n def textShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for text layer control.\"\"\"\n\n if event:\n if self.text_layer:\n self.pyslip.ShowLayer(self.text_layer)\n if self.sel_text_layer:\n self.pyslip.ShowLayer(self.sel_text_layer)\n else:\n if self.text_layer:\n self.pyslip.HideLayer(self.text_layer)\n if self.sel_text_layer:\n self.pyslip.HideLayer(self.sel_text_layer)\n\n def textSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for text layer control.\"\"\"\n\n layer = self.text_layer\n if event:\n self.add_select_handler(layer, self.textSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n\n def textSelect(self, event):\n \"\"\"Map-relative text select event from the widget.\n\n event.type the type of point selection: single or box\n event.selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n\n The selection could be a single or box select.\n\n The selection mode here is more standard: empty select turns point(s)\n off, selected points reselection leaves points selected.\n \"\"\"\n\n selection = event.selection\n\n if self.sel_text_layer:\n # turn previously selected point(s) off\n self.pyslip.DeleteLayer(self.sel_text_layer)\n self.sel_text_layer = None\n\n if selection:\n # get selected points into form for display layer\n points = []\n for (x, y, d) in selection:\n del d['colour'] # remove point attributes, want different\n del d['radius']\n del d['offset_x'] # remove offsets, we want point not text\n del d['offset_y']\n points.append((x, y, d))\n\n self.sel_text_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#0000ff',\n radius=5, visible=True,\n show_levels=MRTextShowLevels,\n name='<sel_text_layer>')\n self.pyslip.PlaceLayerBelowLayer(self.sel_text_layer,\n self.text_layer)\n\n return True\n\n##### view-relative text layer\n\n def textViewOnOff(self, event):\n \"\"\"Handle OnOff event for view-relative text layer control.\"\"\"\n\n if event:\n self.text_view_layer = \\\n self.pyslip.AddTextLayer(TextViewData, map_rel=False,\n name='<text_view_layer>',\n delta=DefaultTextViewDelta,\n placement=TextViewDataPlace,\n visible=True,\n fontsize=24, textcolour='#0000ff',\n offset_x=TextViewDataOffX,\n offset_y=TextViewDataOffY)\n else:\n self.lc_text_v.set_show(True) # set control state to 'normal'\n self.lc_text_v.set_select(False)\n\n self.pyslip.DeleteLayer(self.text_view_layer)\n self.text_view_layer = None\n if self.sel_text_view_layer:\n self.pyslip.DeleteLayer(self.sel_text_view_layer)\n self.sel_text_view_layer = None\n\n def textViewShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for view text layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.text_view_layer)\n if self.sel_text_view_layer:\n self.pyslip.ShowLayer(self.sel_text_view_layer)\n else:\n self.pyslip.HideLayer(self.text_view_layer)\n if self.sel_text_view_layer:\n self.pyslip.HideLayer(self.sel_text_view_layer)\n\n def textViewSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for view text layer control.\"\"\"\n\n layer = self.text_view_layer\n if event:\n self.add_select_handler(layer, self.textViewSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def textViewSelect(self, event):\n 
\"\"\"View-relative text select event from the widget.\n\n event the event that contains these attributes:\n type the type of point selection: single or box\n selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n\n The selection could be a single or box select.\n\n The selection mode here is more standard: empty select turns point(s)\n off, selected points reselection leaves points selected.\n \"\"\"\n\n selection = event.selection\n\n # turn off any existing selection\n if self.sel_text_view_layer:\n self.pyslip.DeleteLayer(self.sel_text_view_layer)\n self.sel_text_view_layer = None\n\n if selection:\n # get selected points into form for point display layer\n points = []\n for (x, y, d) in selection:\n del d['colour'] # want to override colour, radius\n del d['radius']\n points.append((x, y, d))\n\n self.sel_text_view_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=False,\n colour='black',\n radius=5, visible=True,\n show_levels=MRTextShowLevels,\n name='<sel_text_view_layer>')\n self.pyslip.PlaceLayerBelowLayer(self.sel_text_view_layer,\n self.text_view_layer)\n\n return True\n\n##### map-relative polygon layer\n\n def polyOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative polygon layer control.\"\"\"\n\n if event:\n self.poly_layer = \\\n self.pyslip.AddPolygonLayer(PolyData, map_rel=True,\n visible=True,\n delta=DefaultPolygonMapDelta,\n show_levels=MRPolyShowLevels,\n name='<poly_layer>')\n else:\n self.lc_poly.set_show(True) # set control state to 'normal'\n self.lc_poly.set_select(False)\n\n self.pyslip.DeleteLayer(self.poly_layer)\n self.poly_layer = None\n\n if self.sel_poly_layer:\n self.pyslip.DeleteLayer(self.sel_poly_layer)\n self.sel_poly_layer = None\n self.sel_poly_point = None\n\n def polyShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for polygon layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.poly_layer)\n if self.sel_poly_layer:\n self.pyslip.ShowLayer(self.sel_poly_layer)\n else:\n self.pyslip.HideLayer(self.poly_layer)\n if self.sel_poly_layer:\n self.pyslip.HideLayer(self.sel_poly_layer)\n\n def polySelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for polygon layer control.\"\"\"\n\n layer = self.poly_layer\n if event:\n self.add_select_handler(layer, self.polySelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def polySelect(self, event):\n \"\"\"Map- and view-relative polygon select event from the widget.\n\n event the event that contains these attributes:\n type the type of point selection: single or box\n selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n\n The selection could be a single or box select.\n\n Select a polygon to turn it on, any other polygon selection turns\n it off, unless previous selection again selected.\n \"\"\"\n\n # .seletion: [(poly,attr), ...]\n selection = event.selection\n\n # turn any previous selection off\n if self.sel_poly_layer:\n self.pyslip.DeleteLayer(self.sel_poly_layer)\n self.sel_poly_layer = None\n\n # box OR single selection\n if selection:\n # get selected polygon points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_poly_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#ff00ff',\n 
radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_poly>')\n\n return True\n\n##### view-relative polygon layer\n\n def polyViewOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative polygon layer control.\"\"\"\n\n if event:\n self.poly_view_layer = \\\n self.pyslip.AddPolygonLayer(PolyViewData, map_rel=False,\n delta=DefaultPolygonViewDelta,\n name='<poly_view_layer>',\n placement='cn', visible=True,\n fontsize=24, colour='#0000ff')\n else:\n self.lc_poly_v.set_show(True) # set control state to 'normal'\n self.lc_poly_v.set_select(False)\n\n self.pyslip.DeleteLayer(self.poly_view_layer)\n self.poly_view_layer = None\n\n if self.sel_poly_view_layer:\n self.pyslip.DeleteLayer(self.sel_poly_view_layer)\n self.sel_poly_view_layer = None\n self.sel_poly_view_point = None\n\n def polyViewShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for polygon layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.poly_view_layer)\n if self.sel_poly_view_layer:\n self.pyslip.ShowLayer(self.sel_poly_view_layer)\n else:\n self.pyslip.HideLayer(self.poly_view_layer)\n if self.sel_poly_view_layer:\n self.pyslip.HideLayer(self.sel_poly_view_layer)\n\n def polyViewSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for polygon layer control.\"\"\"\n\n layer = self.poly_view_layer\n if event:\n self.add_select_handler(layer, self.polyViewSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def polyViewSelect(self, event):\n \"\"\"View-relative polygon select event from the widget.\n\n event the event that contains these attributes:\n type the type of point selection: single or box\n selection tuple (sel, udata, None) defining the selected\n polygon (if None then no point(s) selected)\n\n The selection could be a single or box select.\n \"\"\"\n\n selection = event.selection\n\n # point select, turn any previous selection off\n if self.sel_poly_view_layer:\n self.pyslip.DeleteLayer(self.sel_poly_view_layer)\n self.sel_poly_view_layer = None\n\n # for box OR single selection\n if selection:\n # get selected polygon points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_poly_view_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=False,\n colour='#ff00ff',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_view_poly>')\n\n return True\n\n##### map-relative polyline layer\n\n def polylineOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative polyline layer control.\"\"\"\n\n if event:\n self.polyline_layer = \\\n self.pyslip.AddPolylineLayer(PolylineData, map_rel=True,\n visible=True,\n delta=DefaultPolylineMapDelta,\n show_levels=MRPolyShowLevels,\n name='<polyline_layer>')\n else:\n self.lc_poll.set_show(True) # set control state to 'normal'\n self.lc_poll.set_select(False)\n\n self.pyslip.DeleteLayer(self.polyline_layer)\n self.polyline_layer = None\n\n if self.sel_polyline_layer:\n self.pyslip.DeleteLayer(self.sel_polyline_layer)\n self.sel_polyline_layer = None\n self.sel_polyline_point = None\n if self.sel_polyline_layer2:\n self.pyslip.DeleteLayer(self.sel_polyline_layer2)\n self.sel_polyline_layer2 = None\n\n def polylineShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for polycwlinegon layer control.\"\"\"\n\n if event:\n 
self.pyslip.ShowLayer(self.polyline_layer)\n if self.sel_polyline_layer:\n self.pyslip.ShowLayer(self.sel_polyline_layer)\n if self.sel_polyline_layer2:\n self.pyslip.ShowLayer(self.sel_polyline_layer2)\n else:\n self.pyslip.HideLayer(self.polyline_layer)\n if self.sel_polyline_layer:\n self.pyslip.HideLayer(self.sel_polyline_layer)\n if self.sel_polyline_layer2:\n self.pyslip.HideLayer(self.sel_polyline_layer2)\n\n def polylineSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for polyline layer control.\"\"\"\n\n layer = self.polyline_layer\n if event:\n self.add_select_handler(layer, self.polylineSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def polylineSelect(self, event):\n \"\"\"Map- and view-relative polyline select event from the widget.\n\n event the event that contains these attributes:\n type the type of point selection: single or box\n selection [list of] tuple (xgeo,ygeo) of selected point\n (if None then no point(s) selected)\n relsel a tuple (p1,p2) of polyline segment\n\n The selection could be a single or box select.\n\n Select a polyline to turn it on, any other polyline selection turns\n it off, unless previous selection again selected.\n \"\"\"\n\n # .seletion: [(poly,attr), ...]\n selection = event.selection\n relsel = event.relsel\n\n # turn any previous selection off\n if self.sel_polyline_layer:\n self.pyslip.DeleteLayer(self.sel_polyline_layer)\n self.sel_polyline_layer = None\n if self.sel_polyline_layer2:\n self.pyslip.DeleteLayer(self.sel_polyline_layer2)\n self.sel_polyline_layer2 = None\n\n # box OR single selection\n if selection:\n # show segment selected first, if any\n if relsel:\n self.sel_polyline_layer2 = \\\n self.pyslip.AddPointLayer(relsel, map_rel=True,\n colour='#40ff40',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_polyline2>')\n\n # get selected polygon points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_polyline_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#ff00ff',\n radius=3, visible=True,\n show_levels=[3,4],\n name='<sel_polyline>')\n return True\n\n##### view-relative polyline layer\n\n def polylineViewOnOff(self, event):\n \"\"\"Handle OnOff event for map-relative polyline layer control.\"\"\"\n\n if event:\n self.polyline_view_layer = \\\n self.pyslip.AddPolylineLayer(PolylineViewData, map_rel=False,\n delta=DefaultPolylineViewDelta,\n name='<polyline_view_layer>',\n placement='cn', visible=True,\n fontsize=24, colour='#0000ff')\n else:\n self.lc_poll_v.set_show(True) # set control state to 'normal'\n self.lc_poll_v.set_select(False)\n\n self.pyslip.DeleteLayer(self.polyline_view_layer)\n self.polyline_view_layer = None\n\n if self.sel_polyline_view_layer:\n self.pyslip.DeleteLayer(self.sel_polyline_view_layer)\n self.sel_polyline_view_layer = None\n self.sel_polyline_view_point = None\n\n if self.sel_polyline_view_layer2:\n self.pyslip.DeleteLayer(self.sel_polyline_view_layer2)\n self.sel_polyline_view_layer2 = None\n\n def polylineViewShowOnOff(self, event):\n \"\"\"Handle ShowOnOff event for polyline layer control.\"\"\"\n\n if event:\n self.pyslip.ShowLayer(self.polyline_view_layer)\n if self.sel_polyline_view_layer:\n self.pyslip.ShowLayer(self.sel_polyline_view_layer)\n if 
self.sel_polyline_view_layer2:\n self.pyslip.ShowLayer(self.sel_polyline_view_layer2)\n else:\n self.pyslip.HideLayer(self.polyline_view_layer)\n if self.sel_polyline_view_layer:\n self.pyslip.HideLayer(self.sel_polyline_view_layer)\n if self.sel_polyline_view_layer2:\n self.pyslip.HideLayer(self.sel_polyline_view_layer2)\n\n def polylineViewSelectOnOff(self, event):\n \"\"\"Handle SelectOnOff event for polyline layer control.\"\"\"\n\n layer = self.polyline_view_layer\n if event:\n self.add_select_handler(layer, self.polylineViewSelect)\n self.pyslip.SetLayerSelectable(layer, True)\n else:\n self.del_select_handler(layer)\n self.pyslip.SetLayerSelectable(layer, False)\n\n def polylineViewSelect(self, event):\n \"\"\"View-relative polyline select event from the widget.\n\n event the event that contains these attributes:\n type the type of point selection: single or box\n selection tuple (sel, udata, None) defining the selected\n polyline (if None then no point(s) selected)\n\n The selection could be a single or box select.\n \"\"\"\n\n selection = event.selection\n relsel = event.relsel\n\n # point select, turn any previous selection off\n if self.sel_polyline_view_layer:\n self.pyslip.DeleteLayer(self.sel_polyline_view_layer)\n self.sel_polyline_view_layer = None\n if self.sel_polyline_view_layer2:\n self.pyslip.DeleteLayer(self.sel_polyline_view_layer2)\n self.sel_polyline_view_layer2 = None\n\n # for box OR single selection\n if selection:\n # first, display selected segment\n if relsel:\n # get original polyline attributes, get placement and offsets\n (_, attributes) = PolylineViewData[0]\n place = attributes.get('placement', None)\n offset_x = attributes.get('offset_x', 0)\n offset_y = attributes.get('offset_y', 0)\n\n self.sel_polyline_view_layer2 = \\\n self.pyslip.AddPointLayer(relsel, map_rel=False,\n placement=place,\n offset_x=offset_x,\n offset_y=offset_y,\n colour='#4040ff',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_view_polyline2>')\n\n # get selected polyline points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_polyline_view_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=False,\n colour='#ff00ff',\n radius=3, visible=True,\n show_levels=[3,4],\n name='<sel_view_polyline>')\n\n return True\n\n def level_change_event(self, event):\n \"\"\"Handle a \"level change\" event from the pySlipQt widget.\n \n event.type the type of event\n event.level the new map level\n \"\"\"\n\n self.map_level.set_text(str(event.level))\n\n def mouse_posn_event(self, event):\n \"\"\"Handle a \"mouse position\" event from the pySlipQt widget.\n \n The 'event' object has these attributes:\n event.etype the type of event\n event.mposn the new mouse position on the map (xgeo, ygeo)\n event.vposn the new mouse position on the view (x, y)\n \"\"\"\n\n if event.mposn:\n (lon, lat) = event.mposn\n # we clamp the lon/lat to zero here since we don't want small\n # negative values displaying as \"-0.00\"\n if abs(lon) < 0.01:\n lon = 0.0\n if abs(lat) < 0.01:\n lat = 0.0\n self.mouse_position.set_text('%.2f/%.2f' % (lon, lat))\n else:\n self.mouse_position.set_text('')\n\n def select_event(self, event):\n \"\"\"Handle a single select click.\n\n event.type the event type number\n event.mposn select point tuple in map (geo) coordinates: (xgeo, ygeo)\n event.vposn select point tuple in view 
coordinates: (xview, yview)\n event.layer_id the ID of the layer containing the selected object (or None)\n event.selection a tuple (x,y,attrib) defining the position of the object selected (or [] if no selection)\n event.data the user-supplied data object for the selected object (or [] if no selection)\n event.relsel relative selection point inside a single selected image (or [] if no selection)\n\n Just look at 'event.type' to decide what handler to call and pass\n 'event' through to the handler.\n \"\"\"\n\n self.dump_event('select_event: event:', event)\n\n self.demo_select_dispatch.get(event.layer_id, self.null_handler)(event)\n\n ######\n # Small utility routines\n ######\n\n def unimplemented(self, msg):\n \"\"\"Issue an \"Sorry, ...\" message.\"\"\"\n\n self.pyslip.warn('Sorry, %s is not implemented at the moment.' % msg)\n\n def dump_event(self, msg, event):\n \"\"\"Dump an event to the log.\n\n Print attributes and values for non_dunder attributes.\n \"\"\"\n\n log('dump_event: %s' % msg)\n for attr in dir(event):\n if not attr.startswith('__'):\n log(' event.%s=%s' % (attr, getattr(event, attr)))\n\n ######\n # Finish initialization of data, etc\n ######\n\n def initData(self):\n global PointData, PointDataColour, PointViewDataPlacement\n global PointViewData, PointViewDataColour\n global ImageData\n global ImageViewData\n global TextData\n global TextViewData\n global TextViewDataPlace, TextViewDataOffX, TextViewDataOffY\n global PolyData, PolyViewData\n global PolylineData, PolylineViewData\n global CR_Width, CR_Height\n\n # create PointData - lots of it to test handling\n PointData = []\n for lon in range(-70, 290+1, 5):\n for lat in range(-65, 65+1, 5):\n udata = 'point(%s,%s)' % (str(lon), str(lat))\n PointData.append((lon, lat, {'data': udata}))\n PointDataColour = '#ff000080'\t# semi-transparent\n\n # create PointViewData - a point-rendition of 'PYSLIP'\n PointViewData = [(-66,-14),(-66,-13),(-66,-12),(-66,-11),(-66,-10),\n (-66,-9),(-66,-8),(-66,-7),(-66,-6),(-66,-5),(-66,-4),\n (-66,-3),(-65,-7),(-64,-7),(-63,-7),(-62,-7),(-61,-8),\n (-60,-9),(-60,-10),(-60,-11),(-60,-12),(-61,-13),\n (-62,-14),(-63,-14),(-64,-14),(65,-14), # P\n (-59,-14),(-58,-13),(-57,-12),(-56,-11),(-55,-10),\n (-53,-10),(-52,-11),(-51,-12),(-50,-13),(-49,-14),\n (-54,-9),(-54,-8),(-54,-7),(-54,-6),(-54,-5),\n (-54,-4),(-54,-3), # Y\n (-41,-13),(-42,-14),(-43,-14),(-44,-14),(-45,-14),\n (-46,-14),(-47,-13),(-48,-12),(-48,-11),(-47,-10),\n (-46,-9),(-45,-9),(-44,-9),(-43,-9),(-42,-8),\n (-41,-7),(-41,-6),(-41,-5),(-42,-4),(-43,-3),\n (-44,-3),(-45,-3),(-46,-3),(-47,-3),(-48,-4), # S\n (-39,-14),(-39,-13),(-39,-12),(-39,-11),(-39,-10),\n (-39,-9),(-39,-8),(-39,-7),(-39,-6),(-39,-5),\n (-39,-4),(-39,-3),(-38,-3),(-37,-3),(-36,-3),\n (-35,-3),(-34,-3),(-33,-3),(-32,-3), # L\n (-29,-14),(-29,-13),(-29,-12),\n (-29,-11),(-29,-10),(-29,-9),(-29,-8),(-29,-7),\n (-29,-6),(-29,-5),(-29,-4),(-29,-3), # I\n (-26,-14),(-26,-13),(-26,-12),(-26,-11),(-26,-10),\n (-26,-9),(-26,-8),(-26,-7),(-26,-6),(-26,-5),(-26,-4),\n (-26,-3),(-25,-7),(-24,-7),(-23,-7),(-22,-7),(-21,-8),\n (-20,-9),(-20,-10),(-20,-11),(-20,-12),(-21,-13),\n (-22,-14),(-23,-14),(-24,-14),(25,-14)] # P\n PointViewDataColour = '#00000040'\t# transparent\n PointViewDataPlacement = 'se'\n\n # create image data - shipwrecks off the Australian east coast\n ImageData = [# Agnes Napier - 1855\n (160.0, -30.0, ShipImg, {'placement': 'cc'}),\n # Venus - 1826\n (145.0, -11.0, ShipImg, {'placement': 'ne'}),\n # Wolverine - 1879\n (156.0, -23.0, ShipImg, 
{'placement': 'nw'}),\n # Thomas Day - 1884\n (150.0, -15.0, ShipImg, {'placement': 'sw'}),\n # Sybil - 1902\n (165.0, -19.0, ShipImg, {'placement': 'se'}),\n # Prince of Denmark - 1863\n (158.55, -19.98, ShipImg),\n # Moltke - 1911\n (146.867525, -19.152185, ShipImg)\n ]\n ImageData2 = []\n ImageData3 = []\n ImageData4 = []\n ImageData5 = []\n ImageData6 = []\n self.map_level_2_img = {0: ImageData2,\n 1: ImageData3,\n 2: ImageData4,\n 3: ImageData5,\n 4: ImageData6}\n self.map_level_2_selimg = {0: SelGlassyImg2,\n 1: SelGlassyImg3,\n 2: SelGlassyImg4,\n 3: SelGlassyImg5,\n 4: SelGlassyImg6}\n self.current_layer_img_layer = None\n\n ImageViewData = [(0, 0, CompassRoseGraphic, {'placement': 'ne',\n 'data': 'compass rose'})]\n\n text_placement = {'placement': 'se'}\n transparent_placement = {'placement': 'se', 'colour': '#00000040'}\n capital = {'placement': 'se', 'fontsize': 14, 'colour': 'red',\n 'textcolour': 'red'}\n capital_sw = {'placement': 'sw', 'fontsize': 14, 'colour': 'red',\n 'textcolour': 'red'}\n TextData = [\n (151.20, -33.85, 'Sydney', text_placement),\n (144.95, -37.84, 'Melbourne', {'placement': 'ce'}),\n (153.08, -27.48, 'Brisbane', text_placement),\n (115.86, -31.96, 'Perth', transparent_placement),\n (138.30, -35.52, 'Adelaide', text_placement),\n (130.98, -12.61, 'Darwin', text_placement),\n (147.31, -42.96, 'Hobart', text_placement),\n (174.75, -36.80, 'Auckland', text_placement),\n (174.75, -41.29, 'Wellington', capital),\n (172.61, -43.51, 'Christchurch', text_placement),\n (168.74, -45.01, 'Queenstown', text_placement),\n (147.30, -09.41, 'Port Moresby', capital),\n (143.1048, -5.4646, 'Porgera', text_placement),\n (103.833333, 1.283333, 'Singapore', capital),\n (101.683333, 3.133333, 'Kuala Lumpur', capital_sw),\n (106.822922, -6.185451, 'Jakarta', capital),\n (110.364444, -7.801389, 'Yogyakarta', text_placement),\n (121.050, 14.600, 'Manila', capital),\n (271.74, +40.11, 'Champaign', text_placement),\n (160.0, -30.0, 'Agnes Napier - 1855',\n {'placement': 'cw', 'offset_x': 20, 'colour': 'red'}),\n (145.0, -11.0, 'Venus - 1826',\n {'placement': 'sw', 'colour': 'red'}),\n (156.0, -23.0, 'Wolverine - 1879',\n {'placement': 'ce', 'colour': 'red'}),\n (150.0, -15.0, 'Thomas Day - 1884',\n {'colour': 'red'}),\n (165.0, -19.0, 'Sybil - 1902',\n {'placement': 'cw', 'colour': 'red'}),\n (158.55, -19.98, 'Prince of Denmark - 1863',\n {'placement': 'nw', 'offset_x': 20, 'colour': 'red'}),\n (146.867525, -19.152182, 'Moltke - 1911',\n {'placement': 'ce', 'offset_x': 20, 'colour': 'red'}),\n ]\n if sys.platform != 'win32':\n # TODO: check if this works under Windows\n TextData.extend([\n (110.490, 24.780, '阳朔县 (Yangshuo)', {'placement': 'sw'}),\n (117.183333, 39.133333, '天津市 (Tianjin)', {'placement': 'sw'}),\n (106.36, +10.36, 'Mỹ Tho', {'placement': 'ne'}),\n (105.85, +21.033333, 'Hà Nội', capital),\n (109.18333, 12.25, 'Nha Trang', {'placement': 'sw'}),\n (106.681944, 10.769444, 'Thành phố Hồ Chí Minh',\n {'placement': 'sw'}),\n (132.47, +34.44, '広島市 (Hiroshima City)',\n {'placement': 'nw'}),\n (114.000, +22.450, '香港 (Hong Kong)', text_placement),\n (98.392, 7.888, 'ภูเก็ต (Phuket)', text_placement),\n ( 96.16, +16.80, 'ရန်ကုန် (Yangon)', capital),\n (104.93, +11.54, ' ភ្នំពេញ (Phnom Penh)', capital),\n (100.49, +13.75, 'กรุงเทพมหานคร (Bangkok)', capital),\n ( 77.56, +34.09, 'གླེ་(Leh)', text_placement),\n (84.991275, 24.695102, 'बोधगया (Bodh Gaya)', text_placement)\n ])\n\n TextViewData = [(0, 0, '%s %s' % (DemoName, DemoVersion))]\n TextViewDataPlace = 'cn'\n 
TextViewDataOffX = 0\n TextViewDataOffY = 3\n\n PolyData = [(((150.0,10.0),(160.0,20.0),(170.0,10.0),(165.0,0.0),(155.0,0.0)),\n {'width': 3, 'colour': 'blue', 'closed': True}),\n (((165.0,-35.0),(175.0,-35.0),(175.0,-45.0),(165.0,-45.0)),\n {'width': 10, 'colour': '#00ff00c0', 'filled': True,\n 'fillcolour': '#ffff0040'}),\n (((190.0,-30.0),(220.0,-50.0),(220.0,-30.0),(190.0,-50.0)),\n {'width': 3, 'colour': 'green', 'filled': True,\n 'fillcolour': 'yellow'}),\n (((190.0,+50.0),(220.0,+65.0),(220.0,+50.0),(190.0,+65.0)),\n {'width': 10, 'colour': '#00000040'})]\n\n PolyViewData = [(((230,0),(230,40),(-230,40),(-230,0)),\n {'width': 3, 'colour': '#00ff00ff', 'closed': True,\n 'placement': 'cn', 'offset_y': 1})]\n\n PolylineData = [(((150.0,10.0),(160.0,20.0),(170.0,10.0),(165.0,0.0),(155.0,0.0)),\n {'width': 3, 'colour': 'blue'}),\n (((185.0,10.0),(185.0,20.0),(180.0,10.0),(175.0,0.0),(185.0,0.0)),\n {'width': 3, 'colour': 'red'})]\n\n PolylineViewData = [(((50,100),(100,50),(150,100),(100,150)),\n {'width': 3, 'colour': '#00ffffff', 'placement': 'cn'}),\n (((100,250),(50,300),(100,350),(150,300)),\n {'width': 3, 'colour': '#0000ffff', 'placement': 'cn'})]\n\n # define layer ID variables & sub-checkbox state variables\n self.point_layer = None\n self.sel_point_layer = None\n self.sel_point = None\n\n self.point_view_layer = None\n self.sel_point_view_layer = None\n self.sel_point_view = None\n\n self.image_layer = None\n self.sel_image_layer = None\n self.sel_image = None\n\n self.image_view_layer = None\n self.sel_image_view_layer = None\n self.sel_image_view = None\n self.sel_imagepoint_view_layer = None\n\n self.text_layer = None\n self.sel_text_layer = None\n self.sel_text = None\n\n self.text_view_layer = None\n self.sel_text_view_layer = None\n\n self.poly_layer = None\n self.sel_poly_layer = None\n self.sel_poly = None\n\n self.poly_view_layer = None\n self.sel_poly_view_layer = None\n self.sel_poly = None\n\n self.polyline_layer = None\n self.sel_polyline_layer = None\n self.sel_polyline_layer2 = None\n self.sel_polyline = None\n\n self.polyline_view_layer = None\n self.sel_polyline_view_layer = None\n self.sel_polyline_view_layer2 = None\n self.sel_polyline = None\n\n # get width and height of the compass rose image\n cr_img = QPixmap(CompassRoseGraphic)\n size = cr_img.size()\n CR_Height = size.height()\n CR_Width = size.width()\n\n # set initial view position\n self.map_level.set_text('%d' % InitViewLevel)\n\n ######\n # Exception handlers\n ######\n\n def null_handler(self, event):\n \"\"\"Routine to handle unexpected events.\"\"\"\n\n print('ERROR: null_handler!?')\n log('ERROR: null_handler!?')\n\n ######\n # Handle adding/removing select handler functions.\n ######\n\n def add_select_handler(self, id, handler):\n \"\"\"Add handler for select in layer 'id'.\"\"\"\n\n self.demo_select_dispatch[id] = handler\n\n def del_select_handler(self, id):\n \"\"\"Remove handler for select in layer 'id'.\"\"\"\n\n del self.demo_select_dispatch[id]\n\n ######\n # Warning and information dialogs\n ######\n\n def info(self, msg):\n \"\"\"Display an information message, log and graphically.\"\"\"\n\n log_msg = '# ' + msg\n length = len(log_msg)\n prefix = '#### Information '\n banner = prefix + '#'*(80 - len(log_msg) - len(prefix))\n log(banner)\n log(log_msg)\n log(banner)\n\n info_dialog = QErrorMessage(self)\n info_dialog.showMessage(msg)\n\n def warn(self, msg):\n \"\"\"Display a warning message, log and graphically.\"\"\"\n\n log_msg = '# ' + msg\n length = len(log_msg)\n prefix = '#### 
Warning '\n banner = prefix + '#'*(80 - len(log_msg) - len(prefix))\n log(banner)\n log(log_msg)\n log(banner)\n\n warn_dialog = QErrorMessage(self)\n warn_dialog.showMessage(msg)\n\n###############################################################################\n# Main code\n###############################################################################\n\ndef usage(msg=None):\n if msg:\n print(('*'*80 + '\\n%s\\n' + '*'*80) % msg)\n print(__doc__)\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tback):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tback))\n msg += '=' * 80 + '\\n'\n log(msg)\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# parse the CLI params\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'd:h', ['debug=', 'help'])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ndebug = 10\n\nfor (opt, param) in opts:\n if opt in ['-d', '--debug']:\n debug = param\n elif opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n\n# convert any symbolic debug level to a number\ntry:\n debug = int(debug)\nexcept ValueError:\n # possibly a symbolic debug name\n try:\n debug = LogSym2Num[debug.upper()]\n except KeyError:\n usage('Unrecognized debug name: %s' % debug)\n sys.exit(1)\nlog.set_level(debug)\n\n# start the app\napp = QApplication(args)\nex = PySlipQtDemo()\nsys.exit(app.exec_())\n\n"
},
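The demo file in the record above routes every select event from the widget through one dispatcher, self.demo_select_dispatch, keyed by layer ID (see select_event, add_select_handler and del_select_handler in the text). A minimal standalone sketch of that pattern follows; the Event class and the handler bodies are illustrative stand-ins, not pySlipQt API:

class Event:
    """Illustrative stand-in for the widget's select event object."""
    def __init__(self, layer_id):
        self.layer_id = layer_id

class Dispatcher:
    def __init__(self):
        self.select_dispatch = {}                # layer_id -> handler

    def add_select_handler(self, layer_id, handler):
        self.select_dispatch[layer_id] = handler

    def del_select_handler(self, layer_id):
        del self.select_dispatch[layer_id]

    def null_handler(self, event):
        # catches selects on layers with no registered handler
        print('unexpected select on layer', event.layer_id)

    def select_event(self, event):
        # single- and box-selects share this one entry point, as in the demo
        self.select_dispatch.get(event.layer_id, self.null_handler)(event)

d = Dispatcher()
d.add_select_handler(1, lambda e: print('point layer selected'))
d.select_event(Event(1))    # -> point layer selected
d.select_event(Event(2))    # -> unexpected select on layer 2

The same dictionary-dispatch keeps the per-layer enable/disable logic simple: turning selection off for a layer is just deleting its entry, and stray events fall through to the null handler instead of raising.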
{
"alpha_fraction": 0.45125627517700195,
"alphanum_fraction": 0.4693467319011688,
"avg_line_length": 29.15151596069336,
"blob_id": "0586883a496fa28df3651cf89c1d6dc7f0c41534",
"content_id": "7fddd0bc35d018524f1c0235e6f2778337c9eec8",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2985,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 99,
"path": "/pySlipQt/examples/display_text.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA PyQt5 custom widget used by pySlipQt.\n\nUsed to display text. The layout and components:\n\n +-----------------------------------------+\n | <title> |\n | |\n | +----------------+ |\n | <label> | <text> | |\n | +----------------+ |\n | |\n +-----------------------------------------+\n\nThe constructor:\n\n dt = DisplayText(parent, title='', label='', textwidth=None, tooltip=None)\n\n where title is the text to display at the top of the widget\n label is the text to the left of the displayed <text>\n textwidth is the width (in pixels) of the <text> field\n tooltip is the text of a tooltip for the widget\n\nMethods:\n\n dt.set_text(\"some text\")\n dt.clear()\n\n\"\"\"\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt\n\nclass DisplayText(QWidget):\n\n # some subwidget sizes\n LabelWidth = 30\n\n # styles strings\n TextStyle = ('QLabel {'\n 'background-color: white; '\n 'border:1px solid rgb(128, 128, 128); '\n 'border-radius: 3px;'\n '}'\n )\n LabelStyle = ('QLabel {'\n 'background-color: white; '\n 'border:1px solid rgb(128, 128, 128); '\n 'border-radius: 3px;'\n '}'\n )\n GroupStyle = (#'QGroupBox { background-color: rgb(230, 230, 230); };'\n 'QGroupBox::title { subcontrol-origin: margin; '\n# ' background-color: rgb(215, 215, 215); '\n ' border-radius: 3px; '\n ' padding: 5px; '\n ' color: black; '\n '};'\n )\n\n def __init__(self, title, label, tooltip=None, text_width=None):\n super().__init__()\n\n lbl_label = QLabel(label)\n lbl_label.setFixedHeight(DisplayText.LabelWidth)\n\n self.lbl_text = QLabel()\n self.lbl_text.setStyleSheet(DisplayText.TextStyle)\n if text_width:\n self.lbl_text.setFixedWidth(text_width)\n self.lbl_text.setFixedHeight(20)\n\n option_box = QGroupBox(title)\n option_box.setStyleSheet(DisplayText.GroupStyle)\n\n box_layout = QHBoxLayout()\n box_layout.setContentsMargins(0, 0, 1, 1)\n\n box_layout.addWidget(lbl_label)\n box_layout.addWidget(self.lbl_text)\n box_layout.addStretch(1)\n\n option_box.setLayout(box_layout)\n\n layout = QVBoxLayout()\n layout.addWidget(option_box)\n\n self.setLayout(layout)\n\n if tooltip:\n self.setToolTip(tooltip)\n\n def set_text(self, text):\n \"\"\"Set the text of the display field.\n\n text the text to show\n \"\"\"\n\n self.lbl_text.setText(text)\n"
},
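A short usage sketch for the DisplayText widget above. Note a discrepancy worth fixing upstream: the module docstring advertises `DisplayText(parent, title='', label='', textwidth=None, tooltip=None)` plus a `dt.clear()` method, but the class as implemented takes `(title, label, tooltip=None, text_width=None)` with no parent argument and defines no `clear()`. The sketch follows the implementation; PyQt5 and an importable `display_text` module are assumed.

```python
# Embed one DisplayText read-out in a bare window and update it once.
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout
from display_text import DisplayText

app = QApplication(sys.argv)

window = QWidget()
layout = QVBoxLayout(window)

# one read-only "label: value" display, 150px wide, with a tooltip
dt = DisplayText(title='Mouse position', label='Lon/Lat:',
                 tooltip='Current cursor position on the map',
                 text_width=150)
layout.addWidget(dt)

dt.set_text('100.494, 13.752')   # update the value field at any time

window.show()
sys.exit(app.exec_())
```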
{
"alpha_fraction": 0.651379406452179,
"alphanum_fraction": 0.7579890489578247,
"avg_line_length": 50.22602844238281,
"blob_id": "02f89c9ea6920dc7ed8e7de90c655607dc26a6ea",
"content_id": "939b8de5314bb838d7f4427afdb7c810798672ec",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22437,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 438,
"path": "/pySlipQt/sys_tile_data.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSource of the 'error' and 'pending' tiles data.\n\nThe code here is generated from the:\n error_tile.png\n pending_tile.png\nfiles in the pyslipqt/examples/graphics directory using:\n /usr/local/bin/img2py <file>\n\nThe captured output is cleaned up and saved in this file as data.\n\"\"\"\n\nimport base64\n\n\n# the 'pending' tile data\ndef getPendingImage():\n \"\"\"Generate 'pending' image from embedded data.\"\"\"\n\n return base64.b64decode(\n \"iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAIAAADTED8xAAAACXBIWXMAAAsT\"\n \"AAALEwEAmpwYAAAAB3RJTUUH4gIJAxggK6MKCwAAIABJREFUeNrsvWlwXNeV\"\n \"JnjOufdtmUgsiY0ACJIAuK8StVGWbJVU8tJVtmZctqpcsstR1TUTMRUVPdM/\"\n \"JmY6JmI6OmJ+TfT8618z86NnpvYql2W7ynZJlmXtCzdREkESJEGCWAgQ+5bL\"\n \"e+/ec+bHSyQBrkmKbovg+yIlIm9mvnzv5fnuPefcs+DLL78MKVI8qKD0FqRI\"\n \"CZAiRUqAFClSAqRIkRIgRYqUAClSpARIkSIlQIoUKQFSpEgJkCJFSoAUKVIC\"\n \"pEiREiBFipQAKVKkBEiRIiVAihQpAVKkSAmQIkVKgBQpUgKkSJESIEWKlAAp\"\n \"UqQESJEiJUCKFCkBUqRICZAiRUqAFClSAqRIkRIgRYqUAClSpARIkSIlQIoU\"\n \"KQFSpEgJkCJFSoAUKVICpEiREiBFipQAKVKkBEiRIiVAihQpAVKkSAmQIkVK\"\n \"gBQpUgKkSJESIEWKlAApUqQESJEiJUCKFCkBUqRICZAiRUqAFClSAqRICZAi\"\n \"RUqAFClSAqRIkRIgRYqUAClSpARIkSIlQIoUKQFSpEgJkCJFSoAUKVICpEiR\"\n \"EiBFipQAKVKkBEiRIiVAihT3I3R6C2rB8vT08tJkuDSzVAhLYRhFRsSCgFak\"\n \"HTcIvFxDkMnk65ra/Fw+vV0pAdYDwuW5qaGBy9OzczNzU/NLc4sLS4tLYVSI\"\n \"CssmjsRYACBHa8f1MlkvyOXqcvlcfWtTrqU139re3rWpB/zG9DamBLjPYIyZ\"\n \"vnj8/IXLI6NXRq6Mz89cDpcKhESoEZGQiNDVfuXOCUDM4dxSeWZ+DuGijQEh\"\n \"aGxoynds6tywuauju6enbfNOolTV/JwCX3755fQuVFCeHzz98ZlzQ6cHL82M\"\n \"XzShUeQopSjnou86WU/5LvkkHoIG1CAAGCPEArHYguUoipcjLoVciERsbGIn\"\n \"G3R09+3Y0r1vZ0/vtp1lJ9WOUgJ8LhHH8djJIx+dOv3pmYGlqXFNmrSLdY6X\"\n \"r3MafW4D2yzShKY5Ludi9tn4pvpZVVK6qINFV80TzSBOA01JNF+M54u8FMU2\"\n \"RiVN7Zv379x64JGHO3v2ImJ6w1MCfI4wNXLuyAfvH/v09OLUZU0aPcfNZ/y2\"\n \"et4CZiOUu0tR1rpAzERAifiuFmJBAQYRYWBQ1ojoRZ0Z8vUI4jCXphbMfFmi\"\n \"0JK0dGw59PDuh598KtfUlUpeSoDfPKIoOnf87dfePXx56CxZQN9zm7Pepnqz\"\n \"TcIdYbkhdkKHiJQG6wIqJEcIEREBEZTEKCioGEQEGMQKRMAhihUr1mgTzLru\"\n \"GVefk+LIAs+VTFQm3+3dvverX3xs4+5DqWGQEuA3ieLs5bdf/9UHJ44vT0+7\"\n \"rk+NXmZL3uyHYl8onriiRJPyATwBDQJAgsAAAKwEDSAiKwEAEhSU1cuCWLEh\"\n \"qjJwxBFaXULvjKs+luLoLC9H1oa5lq5nn3ri8ae+5GSbUhH8zUJ95zvfeQAv\"\n \"e/byhX98+Z+OHf8gXi66uUywMa+fypafisM245IiT0EWtY/gCApiTGgQAAQE\"\n \"AMgQMYEAAqAhYCQmZAIDYBGZUEhpRA/JJW0VCpY6Yt5EgV9HBYIYivOzQ5dH\"\n \"lhcWNnW1O0EulcKUAP9Fcfnimb//wcsXzn6ihFRjkNnZar5Epa2Rg9pxNeZQ\"\n \"uQSMwiiMyhIKoiAwVASdAQFREC2iIDFVniYjjIAgIiCAhBQgatJGMUK8MXZa\"\n \"Am85I5ExS+Wx8bHJqfkt3Z1+tj4VxJQA/6Wkf/DE3/7Dj8YuDTjKU/mM/0Te\"\n \"PM62ybrgSB1KBkSQDAgAsSATMwgDCAIjMSIgMFgAYBRBEkRGISFDyChKQKDK\"\n \"EGAAC0iIHoImXdRhQ4wd5NmsXbZYMpNTY+NXZvu62/xcqgulBPj1Y/zCwN/9\"\n \"44/HL51znMBpybhfagj3MiChT5BB1KgNogAIgCAxCKAGJEAEEUHFaBGEEQGF\"\n \"BQRYQBiASQREQNkKQxAwUZNYEEWQUSmEACjUTAKd4DoZnjNQlpmZy+NTC9u2\"\n \"bvSCdB1ICfBr1ftHLvz9D18euzjguIHbllHP1Ee97BhH16FkgACEQTEioxUg\"\n \"BhZUDMAADMyoAIABDGpAFnCYFKMCBCEFAIyKgQWFERiFwQKDEAqKgGIEA4hI\"\n \"PhAglSluYTeb4akYS3Z2dmJurtDbu8X1M6lEpgT4taA0N/7yj/9p8Mwnjvac\"\n \"loz6Yr3ZJK7RVI82ABQEiwhIFoCBGJFRAIEAmIghmccJEAQRQASBkQHAIlok\"\n \"BrKICGCSDwoiakZiAEE0KICMK1qTD6KIihDn2Q8ydsZI0UzNTUgU9vVuRsdP\"\n \"hTIlwD1GGIZvvvLTYyc+VEyqKXC+0GA3g8uaciQeggVlVKKxCKJ1BVwRV8Bh\"\n \"BCCLYkgbIqOEQSGIIQAQFkQkEUFAEUSwFjSi1cweo8viiWgRABTEGNGiTgxr\"\n \"AXBBo8ISxE3iSYbnQi7E47PT9b7XuWXb3V3jm7/8p6nJ0ShcQFLWguu6qXDX\"\n \"ggciGO78iXc++Ogol43TEPgHmso97MREOQREHQIAMoqtN+yyOx7QCMqcQARA\"\n \"QDmENgq3FiPNzqKLMYpFUoKGRBCtAAMqAUAEkIa4pKRhMMfjIvMMFkEDNSB2\"\n \"4PL2RQCgeRcsakBrQQJRRtl5ifeyv9wcnpsuzS/88p0Puza2d2x95C6ukdlM\"\n 
\"Tc4sl2hoeD4ZqaurC4IgCIJczvP9esdxXNdVSqVC/2ARYHr0/K/ePVKcnXXq\"\n \"sv6W5nibaEPkOICCMTJA1Bj5BUe97ZVOh9PzV5aWClEhtLEgopPRmbpMY2Nj\"\n \"XW8dPgrhhrKe8RAFAEAQEQQRIzDNoSpq/5dB6VT58uzY0uJyWIiZWWlyAydb\"\n \"X9fY2Oj3+vg4hBvKPOe4VolmCCAou+UwNAfAWWiwocxODP3yzSPfbN2SbWi+\"\n \"08tEUH42BwCImOwxh2EYhuHi4uLk5NXwDa0pCDzXdR0pOFqhk/M8x3U0ub4D\"\n \"ShNYnVVKPTi71OucAHEcH37/vZGLA9rx3bY6+xCIEo0aHcSYxLHgiH/EWz66\"\n \"ODJ0eWls6fojzMH8GFwO+oPOUx0tj7eYh62ySBGBBraoCOL6yPsoWH5n+dL5\"\n \"c0vjNzjCNMxeguHMyUznxx2tD7XIk2KCWC85rAEDUJFjspE85OplHybN6TMf\"\n \"bz268dBvf/1Or9Sray6Xo+vHRQQRE1Yk/4ahLS9PxFFIAKCmCIEIBFAhasWg\"\n \"CC2SIiImIqWUQnR85Vgrkqwepm3rl1IC3B8YHTh8/OQptIgNHu0ITJ0Qk3IV\"\n \"GLLZWC2p+G179tiZxRXRr6+v37JlS76lJQgCY8zi/Pzw8PD4+HhprjR49MKV\"\n \"0cmd4zv0F7XJGBUqQrGO1a+75944N3NhNjlCU1NTT09PPp93PC8Ow7m5ueHh\"\n \"4ampqeJ08fz04OWhy9subM9+ORtuKTrTvjjieEoWlW217pZ6Lkbl+eUPP/q0\"\n \"d9v2tk3b7+hKibRIWEuoaRwu2LhEQIBAAqiQSAgQkISUAlEKCQEdhYIAAAI2\"\n \"ioEQOQYkim26AtwfMMX5Y8dPL0yOu5lM0NEQdwvGqB0NAOJYmqTCa4UzR8+a\"\n \"ogGAvr6+np6e7du3d3d3t6wQYGFhYWRk5Ny5c+fPn+/v71+eWP7k1U/2lvd6\"\n \"z7niCHuMb+On/3KyOF0EgK6url27du3evXvTpk35fN513TAM5+fnR0ZGLly4\"\n \"MDQ8/MmJE8WZ0qdvfbJ5YfOGf9Ue7wy9hAOOY8s23iHeZD0XzPjwuU+PHXt2\"\n \"49Zfhx4SRws2KpGiJK4pxXomwOCpj/oH+h3l6EbfbCUgISEiEhSYh8VXF899\"\n \"OGhjW1dXt3v37ueff/6hhx5yHGf1ERoaGjZt2vTUU0+dPHnyxz/+8ZkzZ5aX\"\n \"l0++dXK/vz96Mfbf9E794lQi/bt27fr617/+2GOPrT5CNpvN5/O9vb3PPPPM\"\n \"qVOn2lpaTpw4MT09ffH4kDWmi7qiLZG76CpHqVBFgdGbXJx1ZC76eGBw18VT\"\n \"nX177+0NsdGCicsKUVgQ0rSEde0GjaLovbfePH+6X2cDv7vJ7gSMUDkKNQJD\"\n \"+fXwzAdnbWRbWlqefPLJP/7jP+7r67uFh6StrW3fvn1j4+OzMzPlQrlULrVL\"\n \"65VjV6bOTwPAnj17/uzP/mzPnj23OEJra+v+/fuVUtPT04uLiwtXFoNyUNeV\"\n \"E1+QlbBI0XIjOHOaF6Olxel8Q+OW7Xtqv96xsbE4jnEFVau3agCYeNGEJSQk\"\n \"RMCr/yEhISAgrPxNyUtq5XVAJCFEFABEZK5r7UkJ8HnH2OkPX3/3iCmVdEuA\"\n \"e3wOhICUUuijPcpn3huIl+PGxsann376u9/9bi53+5DMIAh27dw5ODg4MTER\"\n \"LUfzo/PL08umbPbt2/fd7363t7f39qut1jt37lRKXb58eWlpaWFyoVW10HYi\"\n \"gyDAsVgtuqzNfMSFstFB7+b2TF3+nhDAhIs2DpEIEVICrLGd1uvSdv7C8MLU\"\n \"CDqO25S1rYIxIiIQmEkePTlani0DwP79+1988cUgCGo8Zj6ff/755zdu3MiG\"\n \"l68USvNlrfX+/ft37txZ+4k9//zzTz31VENDg43tuf5BGRB2GRWSJgwl3she\"\n \"U6Adf3T4/PC5s5/9PiBiFC3HYSFVeR4gAsxdGbkwOgkWMOtIXif2Hgqih+Z4\"\n \"NH1+GgAOHjz4wgsv1NXV3dGRH3nkkb5t21Y/feihh+5szVXq61//esKZxdHF\"\n \"xeOLGCMKEhEycQao3kdflRYWLo2Ox3Fcq35vb6x9mbhoowJgmn32IBHgyvC5\"\n \"scuXlHLcep83AJYBgYDAztmpiWkbsdZ69+7dfX19d3pkz/P6enq0rjgPmltb\"\n \"t27deqcHqa+vf/rppzdu3AgAE+NXzDkrKISICjEUbgNV53quNzQyMTN8usZj\"\n \"BvoXgs51Pp9SVF5M5/4HjgATV6aXpyfRc6jO5aygAWQADfa0XbiykEz/+/fv\"\n \"v7uDNzQ0tLS0JHN5S/4uK508+uijyUqyOLYUno+AIEmpkQhsk6g6l0jPzoxO\"\n \"jF+u1ehfPOzKB7zKrReXCyZeFEnF/wEjgFmcuDwxCwLga8gptgIAIgIA5dli\"\n \"caoEAC1tbdu23WXYWV1jY319PQBks9na7Ydr4Pv+1t5e13VtbAuLixIKAJBC\"\n \"tMiOYOCAr0sLS5NXZu/A8bX8nqc+FHESzcfEi7+W+7u+POfrcB9gZnZ8cm5W\"\n \"KUdnNNcDxklmFnIoy4VSInwd7e23Psj8/Pzhw4eXlpZ6tm3bt2vXau++S5S4\"\n \"O5VSVV2oisHBwZMnTyqlDhw40N3dfYuvaG1t3bBhw/Dw8HKhkJ9tpnpKJiQ2\"\n \"AFmFnoNLODE7X1y8nKnvrHE2ixfe9RqgFB4w0RLpVO9/IAkwPTE7Nz+lSJGv\"\n \"bU4gqWGlAJckLkeJM6ep6VYpiOPj43//gx+c/OST2dnZbdu2zX35y88++2x1\"\n \"XzYJMgOAcrlcLpdXf/D4J5+88tOfnjhxgoguXrz4la98ZceOHTf7lpaWlpaW\"\n \"luHh4bAYyYxIHUCMAAAxcoMon4j0zNzc3MSVWgmQaD5L78YcCe5KN3sfUALM\"\n \"Ly6XF2a18snVxhMqAipEES6jMQYAMplMJnOr3Ks333zznbfeSt48MDAQBEF3\"\n \"d/f27ZXgnIWFhYWFBQAolUoJExIUCoVXf/azw4cPJ09ff/11EdmyZYvneTf8\"\n \"luppmMhISUQEE1cVM/uIviaixcXZxeX5Oy2jFdCREEDMbtQ2FfEHzgYoLBVM\"\n \"ZMAn0IQMYjFJaxS+OiMmJsGNP14oAEAi/QmGhoYuXbpUfTo1NTUzM1NxN01N\"\n \"Vd85ODiYfLaKUqk0MjJS00kziBVkEEEwII6gJlSqtDi3MFe4i5vgwRGSU2LT\"\n 
\"6P8HjAAYLhTKJQRErcBBiBFFJBEvDYkaUywWi8XiTafPIJibm1s90tjYWN0u\"\n \"mJ2dTab/ChmuXDlz5kzVO3TNroKfyTQ0NNzsi6qnQZrQQbAiVlAEAAQFHYWu\"\n \"CoulsFy4u1vhuUcU9v86FnlmTgnwOcXSUlwox0gKCFkDJC4gK2IAXNCeSoR4\"\n \"fn7+pneE6Kmnnjp48GDyNJfL7d27d9++fcnTkydPVpUcAPj0009Pn6646ru7\"\n \"u/fs2dO+Yl5v3rz5wL59ra2tN/uiqamp6elpAPACR+oZDXCyUolAjEk9dgAo\"\n \"l81d3w1fH1VyjM095YC51fqZ2gC/YRgul+MIEVEhKAQjYgURgYED8QMvMV5H\"\n \"R0ettTeLXdu/f7/WetOWLXEcd7S3P/7448nUvry8/MHhwzMzM6TJz7umzNFi\"\n \"dPbs2cHBwb6+PiJ65plncrnc4OAgIu7Zs+fWm8Sjo6Ojo6MA4Gd8zCHHjFbE\"\n \"ClgUEiAQQkIK49AYc727qaapWiBwPy4bFHkM4N7YA5a91Aj+/MLGoTVWEQIi\"\n \"oIAVZAIUAcYyZuozQatXmgpHR0f7+/tvsRe2e/fu3bt3XzP4i1/84sypUwCQ\"\n \"afN792+5PDQxvTh77Nixxnz+X//xHwdBkMvlnnnmmWeeeea25zk5OTkxMcHM\"\n \"ylPZXJZF0FSK7AIDMQgBIBBRbITiZdB32WyGGXx9IrLA9nH1mW3isJDzGv/f\"\n \"V078+9955LVUBfo8gplZGACQoKJNJzYAg5RBdTjZ1iwA9Pf3v/Pee1EU1X7k\"\n \"995774033pibm0OFrZubcZdq62xxc46InDh+/Gc/+5m1dyBe77///rvvvgsA\"\n \"ua6s35mBMrABsFipMs1X1QxrrOXPqnL4dMLRR6x1PstBSsVc44a/JBhKjeDP\"\n \"8QrAwpYBQBgABQGQKw82DI7kNzQ6dRoAPv7oo1deeaV2ef3hD384PDwMAPlt\"\n \"DfVbGnEZ3B6vbUcrEExPT7/++us/+clPVntFb4Fjx46988475XIZFbZsyFML\"\n \"SARgRThxAyXFGKXqsPrM8g+M4NFHvvrAyl0qMFEx07Thb0rRIADAlVQF+twS\"\n \"epV/InH+iMBK/XLgkngdQfP2/MSJycnJyddee42Zv/rVr/r+TctRLS4uvvrq\"\n \"q++8807iCa3rynZt74SsSAjgQr6nuVQsz5yZvXz58s9//vPJycmvfe1rmzdv\"\n \"vsUC9d577/3zP//z+fPnASC/tbF+cwOHAixIABbEALIAreT2IsC9Mzg1nSCA\"\n \"mJ8kdQeGNSkOTTbf+vfl6HxllqlXKQE+p0Ba0UNQiMEyEACIACIyAIAoaN2S\"\n \"D5fLc2cXh4eHf/rTnw4PDx86dGj79u3XuCyHhoYuXLhw5MiRU6dOLS4uAkB2\"\n \"Y6Zn30ZoISkCoGARIScbd3QA88zZ+ampqVdffXV+fn7Hjh07d+7ctm3baiPb\"\n \"GHPq1KnjJ04cO3IksX0zGzJd27okJ1IGJBBGsYCCLFhpOfNrcDZqOYEOxvZJ\"\n \"wFoVtnKcaWz4hzA+B+sR63AnuJrtKsDClOgSyJAsAxAD1FHn9g0AMD+4NDU1\"\n \"9frrr589e/bhhx/WrpsNAiKy1ibbvZ988kni9UeFjVvru/raoVVDQYBAWAAQ\"\n \"CoIN2Lm70/XdqbOzpmg++OCDY8eOHThwoKWlpbn5anmfyenp8bGxU6dOJU70\"\n \"uq5s795N1I5QQkARTs4X2AqCMFeacdQ+/ZdLVGPsg4MfkYLIfoHw9utAbIOm\"\n \"hn+MymfJvSoyNsKUAJ9X6VdOEgTGVpSt7IIRojAAJuIkUBZqUht3d/p1s7MX\"\n \"5sP5qOqRRESl1OptYCDItAfNGxsbuxswUFBmEUw8igKMiFxkDLB1R2uQ86aG\"\n \"ZwtjpTiOjx49Wvk0EazdOdIZ3bSlvqN3A7UqKYIAAwAhCoHYFQOABFCS4CPU\"\n \"SpnbC9zpiwd6O0843u0ZwwgKPvJdKMdPu3ArDoQ225j5QWwH1nHthPV2Za7n\"\n \"u45rWZQIxCKcVDkXAECpZoaglAECbN3eUlefmZ1aKM2VwrnYlqyIJNKPCt16\"\n \"7Ta62cZMU3uDbnKAQSLGin29YmYkibchCEFddy7bmJnfsLg8VyjPhdFCbCOu\"\n \"ij655DW6mbzf2FJf11kPPkhBAEAIAMCKoCBYARZARkEwwJZZ2HOUZLO3v3Kh\"\n \"0xf37OnrJ6+mVUPJR4GHUfy0vgkH4lJQn/uByACsa6w3AmSculzgCVuOGWKL\"\n \"oCv+RERhhtV1o2IQkKAj6GoPovmotFSKwtjGNpm2tau9rJdp8CBQyJDE619V\"\n \"SQhABAQBOXHbAIMYQJ/yfU2N5YZoPioVSnFkrGEAUJoczwmygZd3wSWIQUoV\"\n \"7iBXirdVDiICAqCAjSELzJz1vGuKtdzCXu0f3LOnr//264ACtqDlOLlQNl9S\"\n \"dK3zKgzrsrkfWh4g5waOQuunKtDn9noaG5vqsyxMBiQyQmsV3Wv28AUlFEBx\"\n \"6l0374KquItgpfBtMutfn1SFViApEXrNS4bFABJ4zZ7X5qOGajoyiEgsACBl\"\n \"Fq5sU4ggEiAAWyFEQREGAmArEjFE7Pp+XUOtWcsioohOX9y3s++k69zeHmAG\"\n \"guMZDyLzJaKrycdRmMnWvUx05qafLEBKgM8vWlryQUODRJZLlpJwfUQQEBC8\"\n \"Rl4RWARBxDCYVfRYaROTzPW8Mu9XP5zkmOF1UaWVJwYBrURYKaNbeT9WVCYU\"\n \"EQSp6PpiK6xjRgJAEUbAInPZMJuG1o62ljvIumQBBDwzuHfPtk/JqUkXInvc\"\n \"dyA0z2g0ABCbIOP9SOkzt1pmTXHdSMs6DIfe0LWpfcNmtpEtxLDITCDMyfaq\"\n \"FUkeImJFmBmlqnjI6qkURFCSbeTKH5i8Z9UjSba8kSUuKyRaMz0DijCzFQC2\"\n \"VkBErIDY5Nww2cUWESVQsFyycRx3dHS1dt1xER5Eder8/rikqAZVhRmIjwfu\"\n \"m4ZdNq6n/0mrM7fxwK6jxpbrcAVo3bRj66Zjw+dOupFwIZR6DyBxNTICJVK5\"\n \"YpmCMAgwrtlAS7zyFf/M2gUj8cvI1YmfCarrAMpVjSgZoVVfhii2SoWkwXyi\"\n \"BQkSiVgBQgJgwFB4KcZQSFNfV2uu5W7ayiOqgYv7dvR9GmRsTRwwx7M+CZcc\"\n \"PM18y3nRgOuU0xXg8wul1M5dvfVtGyWK7XxMC1ZQwAoICjOseohhkVuNw0qP\"\n 
\"sGpKTRJZtHpeT+IXIIljq64PiRQyIK8yP1ZeQk5ItPLOJHyJmQ2DElyKedlw\"\n \"VG7r3LJ15/a7uwkiAkBnBveH5Vp3bSk66mB/Le90VJgS4HONru2PPbx7h+UY\"\n \"I+H5CEMRrOysyqpHosQzs9zpeBJdl8jxim2Aax9X1w++OoKrDImr74SEXcAo\"\n \"UBKeizEUi/LQjq0bevZ/pl8XaWDwQFS+x7+y48QpAT7XcF33kccfa+3qM6Wi\"\n \"LBuYjYQBV6bhKqwk8dKAqxT9ytxc27hUx0XYrnmI3PSPysOArDwqdogBmQml\"\n \"YE1U3Ni788AjT3zGCunJCZ4efNhE9+yHNpBpDtZPNNy63eLr6N3zzKGhn8xf\"\n \"saXQzqHSxA0KpJIecK3Neo13VKpuebxmfGW6vm4cRVa2xa4TQ1zlJmJciXC7\"\n \"OiwIKIBIczEvmNb8ho5drQ/v7WjNL/D0hyxAwKjsig0BzJA4tUQhsgUBRAdJ\"\n \"3WIdODN4cPe2467/WaOLOK7L6RezXZcB3JQAn3c8/MRT41em3v/wXSpbnokQ\"\n \"tNRrQSt2JdQSKx1QWMlamRYBQEa+dt5MNPgbjVsAQLl+nmVYG9CDkuz+8lW3\"\n \"qRCDAC0ano2gxJ072776xVaWcG7stLAFZhQbWUIRawUIjAUAsIIcAyNYQBS5\"\n \"hceHGZSiM+cP7tr5kafsZ5H+jPtSXHQ9iUScWrrRpCrQb5Tcmcbnv/KV3XsO\"\n \"Wo6laHkqxLlYkqg4FECp2Ly4sue1oiAl5qwQr3Z6VscZbjSOAmvHK48VbQlW\"\n \"P5gFuJr9hQw0b2Q6wrIISUtjBlabEPcIAurMmYNxrOiu4qtZ6nx6qVyuY6aV\"\n \"ret0BfjcI5Pv/MY3vxGaePD0x1TSMhMTs8k5olCYCQgAeEXWUKoey6SHClyj\"\n \"G1XHWW48fn22OCLeMIccK3M/QAxq0ciCoQhE095dBw8+sgWiqTVzVHxvGICk\"\n \"zpx/dOe2o15wZ+uAmGygXoriTHLe6ykpfv1Xz2ts3vj7v/97u/Y+xsRSimXO\"\n \"qpkQywYJGLiqlCf7tAQVTiQWc/K0+rjT8UoERFLxapXPJ9n7ZRQsMk1GMmvQ\"\n \"oLFRa3v3bz/7bKaxRa6l3r1bBwTPnn30jjINOHZR/jSymXUpHg9E+cj6/MYX\"\n \"v/1fPXXoKZ3x46USLgJORnregBWLwlWvEFT+hOSPFadnZYN27fgaACeTIt9k\"\n \"PHmWHApELDFYcRYsTkdYrBRvFLb5pnymsREYECUJ7r/nbfIEVHvL4B0dlpyo\"\n \"ruH1uBisdi6lXqD7CSISNHV87Rv/dXtb6+vvHp6+POTGvlirSpayygRKNICV\"\n \"lVC4FX0oCdpZ/ZMn3p7rNeCV8eviHxAwsQSwOuGwEbXEVLJSEowRmFVWAxKU\"\n \"wRiDHAMBY0VE2VQ9OfxZC50jMFBbbrCta/pOP7q8eLKlW6bHX3CcEABilaZE\"\n \"3ldIAtB0pvGxZ393Y9eG1946PHD203Cx4EYZKBsnY9kn65PVQlDZuxVZFcIG\"\n \"1859Ijcex2tj41Y0GREhgFh0KFhkLIuKkK2Ah35ni97ghhcWebrilSJiYEYl\"\n \"YpgUswUgAPOZZwHA9voLnZsnGIHoDqsEaVhY6m/pgOkrL2httDEpAe5XdGx/\"\n \"5Fsbek4e7Tp84tTo0FlZFh37WGTHZfHJBsgOJiGgCAA3z0mUGgYFKtGkFIoO\"\n \"BSOGsCL6lsBpzfg9DfHjYIuCFxGrC0simgaAgZlQ2XtR0ora6oc2dE+ABbjb\"\n \"6Xthrr+lExYu/a6zjuThgSMAAGTq848/90LPtl39Hx07MXBhYuS8lK3jBlJm\"\n \"KYjjEztoHWQHRQMBCN9BkyGpliQyQLGoWDBiCQUjUBbZCmtRed/vaLD7sLQl\"\n \"No4EU06iV139GsOCAgQkhg19RpcjIrY2XuzsvvLZhWVhvr9hIxQd2M0sAAAd\"\n \"HklEQVTNvpQA9z1au7c9s3Hrzn2nz546eeri6MjQuXBxyfV8LGl0gDQoB8FB\"\n \"cZAVigZQN9rnWi1nDGCFDGAs2grHArGgARWjCIBl65HTnPVac3YrxJvA+KyW\"\n \"lO+grFTbTAoAoVilrNhkc5kQP9P8L6LaGi92dE3dq/u2tNA/XtjS0jKdEmA9\"\n \"2AYbenZv6Nm9+8rQ0JkzQ8OjF4ZH5yZHzGKktUtKg0OiGQlQoRCQQiYQxNU9\"\n \"F5OQOEoKOqxUOQeD2kBF7l3SOddtyGKLa7dA1CGCSBHoiICBQVSESVjo1c1c\"\n \"AwBMwIwMTAIWCO9ia0xEtTdd6Ng4zXzvHEoayK6f6tAPNAGqaGnf0tK+ZV8Y\"\n \"Tl84MTw6cfnK9Njk1PTU5bhYUOAlm8IrXkQQWBMvjwIoSBYoic8RQSsAwFml\"\n \"Mx5lPV3vSzvaTrANQgagDFjJThNgALPGwCUiEiMoYKu7BgwCd+ECElDtTRc7\"\n \"u2udqlnVWYuOWrrN+wwkvqCUAOsNnud17XqiaxdwaX5yfHJyYuitD0+MXjyj\"\n \"XE8cFGvZGI6Zk5yBJLIfUXsueR6DoEbQjva18jX5LtSRNKNtlSiLAICx4JIk\"\n \"vdaTzEwEZGCMMImFhmpsEgAIIwpbQKpUtrsLq7e18eKGrprmfiIQCGL4Q9dl\"\n \"tn+jqXg7ClBKgPUMCho39DaWlivdX5AwXCps2bH38Yf3RmEUR6GJjBUBBN/V\"\n \"p4fGLp0/TVq7LXXYF7ALtpE5hwDARsgAFLmSCIzVil1S5Q8gcCwrSTdgjXLr\"\n \"fMviIouuxGYkFnLti4AItueHOrtmWKC2rEjt+C+UIz8mcei7LH91Gw6sHy9o\"\n \"SoAbYXFq9MgH77535MTSzLhfl4nD2G+o/9ITj+578tnr36xe+dHgqY9dR2Pe\"\n \"j/sESwIMspQER1ezxwTWRhZJ9V8BFZNlACRERAdNZm9zR3F+4iQlEQucpG7W\"\n \"Kv0I2J6/1LFhpsaSuogKnW+VwxZAYCYjvnJe4vivybk5B3RqA6xTlObGB/o/\"\n \"PvzxmaHBU2xMIv0A8IVHHt/z0MM3ng3DMhuDCtGAFASj6jTMVa3mxkFylXEU\"\n \"U1F7NJLvagCA/OONYOZG+5XCCgdqs+kRsTU/0t42wwx0O38/EYgodL8dmfxq\"\n \"ncaYjOd81/JfK7pJ/ROTEmDdYWlu7Oynn5w8c2HwwunS4pKbybiBjosl8tzH\"\n \"Dj7x7Je/TMGNW1REMVfUGxSIBaJqXWdMgv9hlYdfKpJayYavREZbEREktVQu\"\n 
\"TVw407x5R5BrwvwXmgCmR04prDUwTgTbmkY6Omdr3DizrNB/MYzz10ZICxjj\"\n \"6+AlsX99wxpA6KSxQOsI86MXzpw9+enAxZGLA6Wlgpvxg1zWRKZUKDW0dT9z\"\n \"6OFHn3rOr2++uRjZRN5BBKysDl1OKlKvjo+o7C4nNeGqHQAMs1jtuhMTl3/4\"\n \"6hvPfWF535NfFhHMf6FNeGr4dC16vAC25Yc7O+Zqkn4LiAq9b8WlJtQ35pIx\"\n \"GVd9j81fkbt83cvpCrA+dP3Z0f5jx06cOj9yaSAulZxM1stlbRSHhUK2qfmh\"\n \"/Y8++fi+zp2Pq5vHfllrLVsElMTMNWvUFa4EDl2rqawYthU7gSygFQQqL8wX\"\n \"Z6bntvetSKFw89MtYKcuDdzC75Lsl7XWjTXk52ry+QiwaHS+acpNt35vbAJF\"\n \"f8rmr0hPX8OflAD3N3h+vv/0kfeP91+6cNqUyzrIuNk6G4Vx0eZaOw4e3LZz\"\n \"d++OXQ+72dt05gqWlnhlOhQRLiPFV2PMkW8aMSQVYggAshZq9MU3Oqlm3eZU\"\n \"bQZmpuZnWgEmhs7eLEFejGrtOd1pRpcWa/L5WKvA/WZczhPePhyehcD+EcBf\"\n \"kHuVA6JSFeh+xuzIhbfeeeP4JydKc3NuXZ3yg6hYVFpt6N66o2fz9u2berbt\"\n \"u5nGf+0ciWgMq4qSzxiTsJBFIVOtnrWmjlCSDZOUFWUQMBCC0QgdgqySlMsr\"\n \"m+avWWSONv2Ph+D/mLh4Fq4LSmLRbZv7g6YxFgKyt7UWhJTobxrTVPvOgkIB\"\n \"/h7Hf0nOCgdSN+j9i9GTR19+5ZeXh86A0l59LioUvbps3/5Hd/R2b93e29q9\"\n \"R91JsHvsiLGQ6D9JXRO0yGi3mAN1+QwRoIAD4BC4BC6RC+w67AO7DueUbGn7\"\n \"1hMDzxVyxSNPHF2rgq8R5Eflg/NN398m/3ns0uCqiDkQUO1b+t36y5YVALgM\"\n \"WgEAJC5TigEUUKWcHZABJte6L3DceEcBoWyJFJP9HsNfkjMNGlDblAD3JYZP\"\n \"vvc3P3plemzIrcuAgI3i7fsOfuHhvd19O+6yAqENxVpYyQUDK2BZiFFKWkgJ\"\n \"KABF4CrUBEqAHAQCJrYgReDB+b8vHJq1XhNZwhXc8Is28+BI0x904d+NXrhQ\"\n \"1Xzae/tVdpwt+Y75NNaHx1qmy0HgRXt3XlGOTUL3qtUZFSKWezJysc4P8irX\"\n \"YJuDUgNK0pO4BpWGgOz3WP0l0XRqA9x/QMSJoU/+9ievzoxfcnwvKha11gLY\"\n \"kMnu3H+wRoXnBj5Q0SwrTn1msQAWRcQBzqLVCpCBiFy0WkApdAA1gAJxiLUI\"\n \"GJm7+PNNu59bwq7b1llol3Hb+ExnH4yevQBC7dv7vboxY8gl859OOG/051FJ\"\n \"k1faeeBKJbBipfIKEpCgjTegKiyY4oKBy8QCjBoUNNZRUO9kW6i+CfL15Twq\"\n \"giQtiNcoXAKIIBR/n+X/SlMi7z8UZsd//srbM2MXSSk/l9+1afPY5fGZiUsf\"\n \"nzzWXJ99+nd+z3XvptKTrKQ8VnR9A8yWhTWxj6JEiICAScjVQCQOsaNFISoQ\"\n \"x0kaLcmVs79o2/07BW6/LQeYGeu/1LrZA+dHXm6MmQDg8LjzxuEmVCKC23dP\"\n \"3sBFKchmA+JqvYcQCAUZCotRYSmeGUGbVMdwTUPOzzR7mTYn10gt9cvtSjEx\"\n \"Vs3levp90zwB0J4S4H7CJ8cOnz/7CSBkGtq+/ttf2n3w0GD/Rz9+5ZczY5fe\"\n \"Pnqka3PHjoef+SzHFxEWa00S4IbaY58MIhKRIlHIhOAoUQq1sFLooEUBElCu\"\n \"ZYbpk//cuu93C9xx2y+6dGnzf/NH/8Prv5pFRAHJeuZHbzUksr2jeQHVDbac\"\n \"JW4HvJ27RxSBQkQrpflSab40c54YmAVFq4bGbNCmM+11dc3Y6s60YG79iM0D\"\n \"QYDF2dFTg8OJd//LX3zioS9+jYh2P/HcUqn089dK81cuv3v40+7N2zL5zjvW\"\n \"rEAwab+RqEAGAUCANYCDAoBErAkUgUIhAS2iUJQwASgSUEAMCILC8/0/adzz\"\n \"wq05MDi448//9KuvvT7vOhAbAICMHw3MEzogQk1ti0nvmdXSb+P2tXP/rfX8\"\n \"q9sYJApQMQFzeXapPLM8e6Zi6diMNG0D1dXubHB62kdfuq9l44EoizJ2/tzo\"\n \"6BCSemjv/ie/9CxRpbbZoSeffmTfPtf3B8/3n/zkxN2ZFonrHQHYWmZjjTFs\"\n \"HLAawdPiIjgkDokmdpRoxYpEEytlAUAljbwJAMDEMN//kyyN3+y73njj+T//\"\n \"06+++face21O7op8E1+j+dyR9Neg7xGCJvTKUXT2VPSrXxX+v3/55H6XjQeC\"\n \"AFPTMwuT403t3Y8c2Ad+I1Qj7/3Gp774zKbe3cvT0+fPDxcXZ+/0yIoQV4J1\"\n \"pFryn0GBaAdIgVasERy0jhKFVgtr4OTbFVqCNSFrVQ5cbwy8Mfz8f/zfdrzz\"\n \"3mwlPO720orAbb+u33eFaFphSoDPO0RkabkQlku7tvW0bztwzavNXX2/9eTB\"\n \"hg0dg8OXrgyfvtODu5ocrSqtHVfEgkUUigOigR0SRZYQCNhRgiBAlSigG976\"\n \"G64DR48e+o9/suPNt+d0jbO5oJg2FgUpUgJguFAKY6WdDa0tnudd/4a9Tz76\"\n \"6IGHZi5fnBi/40Rvo+uUo0QEWQSAEZkAFBJAYvUiiKOEqFJPGhUSAJIovKkn\"\n \"0cQwc/KnzXSpKv3/4d8dfOfd2VrdVIJi2uAm0o/EVG3dh9VeTQ+GJvBgGsFl\"\n \"8EWkrimfb6q/yRLR8PgTh070n5mYumMCKKV85SYWhQoFwYIBBMZGTBwyiYZT\"\n \"UXVqiNFPYFmGT73etvt3fvXBN/7Dvzv4znuzSt9e2bAACm8i/QSlCVyYc3ta\"\n \"urd3NNebvDIe+mHBn5+EyeHlCcYSXO+EJaCkhunaZDRKCXAfwfd919VepqGh\"\n \"4ab91jf07P7CwYfmlkpcmq9lUyyO4+mRs9NTk8tLiwNDI27gAwMUmYoADBgL\"\n \"9FWEXt1dkVthYZg88S///n/6X995b97RYmwtqY3EcTPeKM7hdL/qxpZv7don\"\n \"7OgYBEF0jJZypZZ63brD2X8xe/LswiCv4oCgRNOoAifX5JXCUCBel+LxQLhB\"\n \"s66XCXxQt5LsfQd2Tc/MstxmeosLcwP9x86dvzQyMTUzfaW0MEdE2vU5ZopB\"\n 
\"WJCQ+R5slEaxu/nAR8eOzziapKbCXFakXSmVhGasxqlTug0afmfnAcNEuNLT\"\n \"TAEIMYECAG16y3ua803vzR2peo3E0PFB89zu3m/So0rjsab3+6eH7r6sXEqA\"\n \"3yDqmurr6xuVcysxauza3dReAvdWbdmX56ff/sXP3zt+uDgz6zieIuV4gTgo\"\n \"Gkk7opAUIpFyUOxn4oBYtXHfJ9mmS9bWLnAEqJiIAKocUIgLl9Syke/u2R2v\"\n \"KulLQKFantKTG+NeBQoAEKWx2L07P3dq9nzCAVsSYO5uzpsgAp88reVqi6iU\"\n \"APcVNja3jDVesctzt3iP53lwIxP56p2an3/r1Z+9/stXEdDJeJQLKKPRU+gS\"\n \"OkpcAAdFAys0LuPi3fsHBVXbnrO55gt3Iv0rNq7Iag4YA6dm8Qst3R4H1blb\"\n \"BMez58+V+mcG9MuLF7//8FMtTpYIALkn3DegxpnLqDCchsDJbGtqwDIyrfF4\"\n \"pjbAfYa2nQeDsxcaOrNzEx8hWdfvtJirVv2vdD4VsdbyWlQHrbUi0rPvsX+9\"\n \"/5BhkHDp3NG3S3Fc6RocMRhgTspioYjYFrlr6W/ZeTbXeg4RiZj5NsKWz1Ws\"\n \"37W2ABGAiC2MKSA+sLFTjFTjqI2OBoqnEJzmXpw5aX/6yYk/e/ypOGnsjbCn\"\n \"ceOncwMAenpRWrTXvNQQOknrg8rHk23EdSMbD4T7y3XdrT2bTMSEoN0tohqS\"\n \"wGMiStSC6tME1ZeISClVHbTWmqhswwID9R48pLU2ABbEEBgBi8IIFsTKPZD+\"\n \"Wt4fBLhHvQhQvpE9TEqpC8vY5zR5cd0qlxdeUIO04ibalJeReG7AVLYdWGAz\"\n \"bCkMUThJfa2bnzy4vexV9B2XiNAhrcFbV2bAg+L/7dh+wNpYed3oBPfkgALO\"\n \"9ocPec49KxVelf4ai3j6PjW3/Hn5L/77m73BGCqX7cZ8kxW7+veet0vVxGGv\"\n \"VcDw5fmrae9u7MZL+Xgu+MOeg49nurlSss5uW977/dbvf6fvTw4daBWxKQHu\"\n \"v0XAy3bUKP1JIbcaRNbZceCJOs//Ly/9no9N+X9zYeShW7yHlwk0NbdnV//I\"\n \"ghLbsHp16AhoWp4vVFNiLKum5iyLxHQ1BBoIJBNIptUpNTvaXU+C8aAQgJmZ\"\n \"6mt8s43DGpUQIadn/5NZN4DPEBTDMd2R9LseZf1/c2Fk/3W/5ZrPm1AAZEOu\"\n \"1XUyq0sL8VqVKaeoUCjLilIkjJ09rgUzDqVYGQAQEscJjNcGToF1eZ0JxgO7\"\n \"BX5zmTYFKA5x+bJIrepN7/5DGce/a+nv2DdwB3O/h2j/ZGgkr/jaEs12bXgF\"\n \"RwAAWahTXs5zs1UKMK75oKdwIY50tVYj4eZG39Ly//7mP5yPhgnZQ0/c7svt\"\n \"P/3rsf/8g6n/+/2Pxu9hhGlKgM+Z9EcFLlwWUCYuSzQCNXdE7DvwZNYN7tRB\"\n \"LqA6dg/kmmuVfiRl4/92emYTMpryBGLppr9r0gcZwAcXALRf7wZ1wEnywZo9\"\n \"XZewJCFeXSLY54zvAoDnAHkUcHajGLISmzgMw9DwetoGSAmweso0y1QeBgWg\"\n \"ABFNXLblcak5prJ3/6E638eaYx8EVPv2gVxbrT4fYcxlvjU3ezV5X5bGL2y6\"\n \"GjeqVv2YLGgNAAAZndBY6aybqbdo4NrAHjFrt7Y0+KQBQJSvbaZrPTk9UwLc\"\n \"1OoFu6yjYVBrFOk4Ck35MnGtd6l376HArcnOZtbt2wdq93giYl32OyPDa5pz\"\n \"MemR/Plq7vo12cCVZ3xVkVM662T821r8DlBStYvr9Q16wqYEWH/SL2ZJ8aUb\"\n \"vSRsynE0XqMQEMDW/YcC/zYcEKvatw3kWs/VeP8JIee9ODa859pxNuoOS5Qo\"\n \"z7umeCJWunasspKZkshQy+tfPFICANhl5BG+edq4ictxYUpqKJiVVL3dtu+J\"\n \"bO6mURUs1LFjoL7tfI03HxGz7h8Mj++vwUK4wU9rro3i1ESEK9eCiAxItKb7\"\n \"hs5UWtTLTaJ+0p3g9SX98aXbTvDWhHZpvPajdu58Lpdx+LrZmS119A5kWwZr\"\n \"U5OICLLuH1y6fKAmI2GVXBJKYhKIs+YkVOSLCBAhVWwDQVCr+tg7ypPmFZfo\"\n \"A/D7P9AEQLsE0aiIsrfrPoQKLcdhodZmo0yU2fbthpxjV1cyRNXeO5BtH6zR\"\n \"50MErn5pePShWjuzs4EVTYYFSTMAhGudnoSI4ggDKIQk6lPABUo60GjHx6Ar\"\n \"okVMVgBOCbBOYYyLvBQXrxDVHL1PIBzGxZp0oQRB34sNWWdlDVEbtpzNdQzW\"\n \"Yk4gCjMF7h+Ojey70wuDFTdlEv5dguXrLAqvyjAkHRnOKl9QlOvpYIMSKcUF\"\n \"YQKA1fPCmtMWTAlwf8OhGVsaR0S+QzvP2tgW7qDlutf7Yl3giqj2nnPZtsGa\"\n \"PSoUuH84PLb/Li4NrU1aCpMLADgbF9aqSajIvWr/IpYBM66rXM8N2pLg1uW4\"\n \"kKwAkBrB61TzWYiX78DBv1bRJmtsWJyp/ROl5j/rOzhwR9Kfdf5gdHi/ju+2\"\n \"DHlsCUXlBAzPLl/b99fzvNXmcjEydfk6CTqqjSuvzIVIBAxX+2Ere7Xjn5WU\"\n \"APe39JcL04yfIRFCgE0UlWoqIjQ/3/LtF/6toaFaPZ4EPr10cfThuzqxVfZG\"\n \"ZJTLytNT43PO2piOhmrWm6ApAjA3tbYCVZr7WQznCoWkC5is7uAtDqAwA0dX\"\n \"R1HSukD3nfSXpuleZAGxjaLSLN7SIRhFmW+/8G/ffuci1hYrRwRavj8yeuCe\"\n \"XCwbszmAk4sT5K+JYNvsbBYJAQAJihOAju5rulqPMaqfLJUXyUFgsSu9WUWk\"\n \"jvMCBm0sYqsLiEVJCXC/Sf+9u2K2Ubl403Vgaanha8/9L2+/c1HRHUj/2Ni+\"\n \"e3jJ+Q0mDuH48qk1K8Dy1qzXmjh5Bmbt/roNXfHmlRldDfMJAa0CBKAoLlbV\"\n \"tsbJ/U3Y5Re6D7bvYInXjfQ8KARAxKg4Qff6cplNXJy5Xrm31vvGV/7nt94a\"\n \"0gqwhmlSq3sh/dd9kZOVtgx+1H/RW3WGsciTwbNaBedOLu7KNn390PNVV6lV\"\n \"pZMj4wQEShzHmZmdQ9YAQLaMi+O/V3/ou50Ht2fbr65797+f9AFqkPHrOKwg\"\n \"GjZcmPGCJrkq/U7vpv/n7Xcuep7U0rYREbV8d+Sezv2VMxHZtMUePVN4d+HD\"\n 
\"x+qfRmUBANHqUsOz/u89/mShSTUAV8KfCPRp7+fGhgqVUtjboE/3z6jeKLKG\"\n \"o1FFaFkYVUQRr6P5Mw2FuDfrQKk0l0SVWetl9OvDF07UUssNAETQVS/dK73/\"\n \"eqhAHtqIvzx9fphOVlO8mBhAN0iDrNTbInYv51/vHx1SqBKlv2k3tdYX/s/X\"\n \"/+J8+HbBXyhl5sdy5z/Uh3984QSBm64AKdasA8BxuTDr+K0Z/VpY+EAaasqP\"\n \"IQLfeWl0eP89WZ9uNpl57fZhUn/7/rHnd5d/K/9oKHRN73lR8QnnXy6cH0Vw\"\n \"EnNdEAmgfpviaPrw1BSCRgIRKwQoBOsoISYlwD1SsUSsFV1+JbRHoeZIh/rs\"\n \"SxcH9/3aT47BbbWP5XFiov8/DY5+4dEte+Txq+HSuvyzub9FEaSrdRxRBBCZ\"\n \"WRQCIIgwCCAi4zUqVqoCpQAEseK46jhJrdKvNezf88K5sW/SvZuDhG7FAVCY\"\n \"2aLzdcV/fPdEOTdyVQKMn/XrbjipExGiWt8ikhLgXsywVrt03KFjtd50hJ07\"\n \"vv3qO98TkTjouTccuGVIkwAhabaS6RIw8vGVS86qartP538rCAIUuUFIOBHc\"\n \"fN9E0n2AdO4XVq467tAx5pqMQiLYsevFX73/4ooRLHHQo+5F0dkbZjRIEv2s\"\n \"K0KsPWmrc/r7x4JVU7633Pav3O/9dt9vWy7f0MLBm8T/ecpJCfCgz/2aTrjq\"\n \"aK23m2Dbjj98+4NvXSOjoX9vOHBDNYYctWqdwM3bcbS8/NryYbUqGsqidU3d\"\n \"TTNgFKobyXpXj0oJ8AA7f1j5zglPHWGGWjITEXHHtj94/8g3bvjqr4MDRCSk\"\n \"rwnrVz48upNeO37irfn3fQAyPrBLxle3jg4kuIYDlvm3dnw59QLdJ8J6z/0V\"\n \"Qp57Gu2HK2IFwAAEJMAChJGwz3h1F0wp2br9O+8e+/otDhn6PV75ooW7LTxI\"\n \"a3QwQsVImKhGspq3oOvhsUf11Pypv4tOt7Q3udqNTVSaLCsMbn180pqNIYVR\"\n \"bP/odx9uHno6JcCDQQAGESQiAECFYLTnfgzmPUve3EIQxRlUTfVZP9PQWA47\"\n \"PX9bwaLjvVeXuTB4wXZvHHfd6MBD33rj3d+9rb8/4YAxdxEIjUircrgU8u22\"\n \"oN08AcB8cR4LAlaYOcmNR0ROCgZTJcuZLFi0YGOkTFNj/sCjemvrU+tA+uEB\"\n \"DIWoFIJ2AWIWUeQyGUGyLAp8oyyUluKlQrxc4lIpDKM4Co1htlasiWNbqZau\"\n \"lPIzXYj/Xa4uqG/MtGSzdXV+JuMGQeB5nuM4RBTLjpkldrPUf1GXlqYOn5hu\"\n \"aJiM4xgAurq6nFVVda8hZ+j3OOYi33UyAAAoFNLViwUBNgaBxQoggiBYCwoF\"\n \"kYiYCJQCTYq0IKJCJEAREQso+ab6jZuguyNoMVv9hceyw7sxU3YKERTWiWA8\"\n \"KATQWi8uw8mzF6an58NyGIURA5dKoYgwmygyK9tC4LracRzXdR3P8zzPcz3P\"\n \"dYJM4Hmevwqe57mum4i71lprXa2ovsZP4kFTEzC3MOeNMWEYFgqFc+fOLSws\"\n \"lMOSo926+owmcpygznVbOjuTPpax7mlRZyMUU45DcSw5NTiYBJPiDihiREyZ\"\n \"ks05QUEkrdHV5CrUBKhAITkIiEjgElgGFivWNjTWdW7GLV1Om9rsyrb82BOV\"\n \"Y1dz4DoX15tgPCAEUEqJ07y4eHZ2fjEpt6Act7ExIK0dx3GUqgi947jXofqS\"\n \"1loptVrcVzcZuI0rhkhr7XlefX19W1ubMSaKonK5XCwWl5eXZ2ZmLi4uLhw+\"\n \"vLi8LDZ2XTfptOEFnuu6QeC4ikhrRynHVSCQCTQAGANZg+g5wAAkZn4ZYxRN\"\n \"oBxyFGgHlCJHASpwkBQCImkQBhbLxmZyQdcW6u1y2v2NCje3XXq2crrj8ODg\"\n \"AVKBcrlcX19fe3t7siBorR3HIdf1VmR6NZK+GEoppVS1d0YiyvAZYkuTDyZf\"\n \"4XleNpttamqy1iZ8CMMwDMNyuZz8P/kjDMNiMVyI4zCugI0xhplN0t7mC+7m\"\n \"93FUBCmTEdCoNSqFSpGDoBAQBa0Y62X8rk168ya90etQ2NM6/FzlnP7/du3Y\"\n \"CAAQBIJgRAPff4VWYWBq7OCwWwOXAGv0Km/QL1CSJK2aPIFVPTwnzR736+YM\"\n \"BAACAAGAAEAAIAAQAAgABAACAAGAAEAAIAAQAAgABAACAAGAAEAAIAAQAAgA\"\n \"BAACAAGAAEAAIAAQAAgABAACAAGAAEAAIAAQAAgABAACAAGAAEAAIAAQAAgA\"\n \"BAACAAEgABAACAAEAAIAAYAAQAAgABAACAAEAN/b0mZho3Q2iFYAAAAASUVO\"\n \"RK5CYII=\")\n\ndef getErrorImage():\n \"\"\"Generate 'pending' image from embedded data.\"\"\"\n\n return base64.b64decode(\n \"iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAAAXNSR0IArs4c6QAAAAlwSFlz\"\n \"AAALEwAACxMBAJqcGAAAAAd0SU1FB9wDCQACMfBoIRYAAAAZdEVYdENvbW1lbnQAQ3JlYXRl\"\n \"ZCB3aXRoIEdJTVBXgQ4XAAAMd0lEQVR42u3b23Pb5oGG8RcnAiQIgpRdUbJ8jONNk4tM053O\"\n \"XvZv701vsrvqtJMeMtvYUZSKlHWgxBNAEOBeGGKhCKQkS4oZ6/ldaUSK/AYiHnz4ABrHf/3r\"\n \"rBfHAnC/NF1Xdi+O9f3JCVsDuG/CUCZbAbi/CABAAAAQAAAEAAABAEAAABAAAAQAAAEAQAAA\"\n \"EAAABAAAAQBAAAAQAAAEAAABAEAAABAAAAQAAAEAQAAAEAAABAAAAQBAAAAQAAAEAAABAEAA\"\n \"ABAAAAQAAAEAQAAAEAAABAAAAQAIAJsAIAAACAAAAgCAAAAgAAAIAAACAIAAACAAAAgAAAIA\"\n \"gAAAIAAACAAAAgCAAAAgAAAIAAACAIAAACAAAAgAAAIAgAAAIAAACAAAAgCAAAAgAAAIAAAC\"\n \"AIAAACAAAAgAAAIAgAAABAAAAQBAAAAQAAAEAAABAEAAABAAAAQAAAEAQAAAEAAABAAAAQCw\"\n \"2mw2wWr4/dOn85//sLNz7ccBAvABd9r3xc4MAgDcwawJBOBOLfvQXffDyQcYHwKLgAABAMAp\"\n 
\"AH7x57OmYehxEKheqch3HLmWpWGSaDCZqDMcqj+Z3Po412s1rVWranmestlMg8lEPw4G6kXR\"\n \"pa9XtW1tBYFqjiPfcSRJwyTRMEn0Y7+vaDpdOoZlv+O0igDcK03P02dra/Ls8//Whuuq4bra\"\n \"qNf1utfTbr9/a+/52dqaNur1c7/zbFsPqlV9d8l7bdXretFsyjLPT0QrlqWW52nT9/Vdr6d/\"\n \"DQb8cwkAlml5nr5cX5ckzWYzvTk50cF4rHg6VcN19arVUtVx9LLVkqRbicAnzaZanqdv3r7V\"\n \"SRzLNAxt1Ot6HoYyDEOfNJt6OxopTtOLO38Q6NN8LNMs0/8dH+s4imTkIfu01ZJtmnq1tiZJ\"\n \"5yJwdmTnKgBrAJBkSHqV70yS9M3BgXZOTzVKEqWzmY6jSNvdrqZZJkl6HoaqWNaN33fD9/U/\"\n \"nY4OxmMlWaY4TfX9yYk6+c5qGIaeNBoX/s4xTT0PQ0lSNptpu9tVdzjUJE0Vp6m6w6H+1O0q\"\n \"m83ejbfZlG3yUSUAKJ9KB4Gq+flzP451OB5feE6SZdofDiVJlmnqacmOeV0/9PtK8qgUdfL3\"\n \"kaR6pXLh8cdBMN+hO8OhRkly4TnDJFE3fx3HNPU4CPhHEwCUabju/Oed09OFz9srTKPLdszr\"\n \"OioJjSSNCzu0VzLT8AvvvV+IxU91LwkJWAOA3q2kn/ni4cPFpwqG8e+dMJ8x3ETZCr0kTQqz\"\n \"grJTjVphvIte46ePVW0+qgQAlwaguJMv/cffwjl1mp+jL12fKBlPMQqTkgXCssfcW1izAAH4\"\n \"KMVpqlq+Q/9xd7f0vHxVx1uxrNKrBD8NRbwkFGAN4F4rLqLdxtT+ro0LU3tvydS++Nh4yakC\"\n \"CMC9Vry7L/S8lR/vsDDe9Vpt4fPavj//eVByB2NamOkYfAwIwH31Y7+vOD9CPgmCpUfVVbDb\"\n \"78/vSWj7fukCX81x5gFIsqz0xqXiIqHLIiEBuK/S2UzfHh1JeneN/7fttp42GvIdR5ZhyDQM\"\n \"VW1bTc/TJ82mfre5+UHHm2SZ3pyczMf71caG1ms1VSxLFctS2/f1VbstM19AfNPrzYNRNCic\"\n \"+jxrNFgofA9k8yNxFEX6y/6+XuXfBXjRbOpFs7nSsxZJehGGckxTn5dcvkyzbOl3AV73enpY\"\n \"rcoyTW3U6xe+k8DtwQTg3kXg6709bdXrClxXNdtW1XGUZpkmaarxdKrjKNLRFb6l93NF4Gg8\"\n \"1lYQyHcc1fIFzFH+bcDdBd8GPBOnqb7e29PjRkOh68q1LDmmeeVLoZCM19vbs+/z6RiA++NZ\"\n \"GLIGANxnBAAgAAAIAAACAIAAACAAAAgAAAIAgAAAIAAACAAAAgCAAAAgAAAIAAACAIAAACAA\"\n \"AAgAAAIAgAAAIAAACAAAAgCAAAAgAAAIAAACAIAAACAAAK7LZhN8/H7/9On85z/s7Fz78Z9j\"\n \"DD/364AZAEAA2AQApwD4iDFVBjMAAAQAAKcAl/JsWy3PU8vzVLVtuZYl0zAUp6mi6VQncazd\"\n \"fl/ZbHbhb/9zY0P1SkWS9O3RkfYGg4Xvs1mv6z/W1iRJ/clE/9vp3No4ztx05fw2xnDhyGMY\"\n \"Wq/V1PI8NT1PaZapP5lofzTS4Xh88yObYehxEKheqch3HLmWpWGSaDCZqDMcqj+Z8CEnAIv9\"\n \"16NHpb+vmaZqjqO1alVbQaB/HB7qKIrOPWdvMNCrfKfe9P1LA3CmU/K8m4xjFbbFIr9+8EC/\"\n \"qtX+/QvLUtVxtO77+uH0VN/1eu893qbn6bO1NXn2+Y93w3XVcF1t1Ot63etpt98nAOzq5YZJ\"\n \"ooPRSMdRpDhNNUlTebat0HUVuq7WazVVLEu/fvBAX+/tKcmy+d/uj0Z62WrJNAwFrivfcTRM\"\n \"kgvv4TuOgnymkM1m2h+NbnUcq7AtyrxsNtX0PP398FDHeTBanqdPWy3ZpqknjYay2UxvTk6u\"\n \"PdaW5+nL9XVJ0ix/jYPxWPF0qobr6lWrparj6GWrJUn3PgKsASzw33t7enNyopM4VjSdKpvN\"\n \"NEoS7Q0G+vvhof6ZH6Ecy9KzMDz3t9Ms00FhZ97w/dL32Cgc/Q9GI01LdpybjGMVtsWiWc92\"\n \"p6PucKhJHpTucKjtbnd+GvE4COSY1/t4GpJe5Tu2JH1zcKCd01ONkkTpbKbjKNJ2tzvfzs/D\"\n \"UBXLIgC4vu5wOP/57Hz/3GlA4fG278so+bC2C1PgZacJNxnHKmyLsuePp9MLvx8liTr5a1mm\"\n \"qceNxrXGsRUEqjrOu/WUOC5dS0iyTPuF93h6zffgFOA+bRzT1Fa9rtDz5FqWXMuSVXJUqtkX\"\n \"N2MvihRNp/JsW45l6WGtpreFWcHDWk1OfvSJplP14vhOxrEK2+JCAEpOdeanT8OhHuUzo3q+\"\n \"M19Vw3XnP++cni583t5goEdB8EGjSQBW3Ga9rpfNZumH/KecBdPIzmCg583m/DTg7YLTgs4l\"\n \"i4Q3HccqbIuiuOTofyYqPFa9ZtCKz//i4cPFpwqGcW4dhgDgnDBfLDIMQ9lspt1+XwejkeI0\"\n \"VZKmOrvYVby8VhqA4VDPwlCGYaiVHznjNJVrWWp53nyhqlOYQt/FOFZhWxRN0vRKj133/LwY\"\n \"gOJOftnMhgDgwrnk2Qfon8fH+lfJEdq9ypEuTXUcRVqrVmUYhtq+r53TU234/vz1z1bW73Ic\"\n \"q7Atiip5CBc9dpVQLNretXyH/uPu7p1cEfnYsAhYIiicFx4suCnlqtPT4tH9bNW/Xbz2v+Do\"\n \"f9vjWIVtMQ/GkucXr92Pl5wqlBkVLrXe96k9AbjJtKgwLbQWTCWLl/CWORiNlORHsqpt63kY\"\n \"zneYJE3PXS68y3GswrY4s168AWjJY4OSeyeWKd7dF+anWCAA11Y8krRLruE3KpWlH+Kimc6v\"\n \"ehcvO3VHI81+pnGswraYB8P3S2cNNduexyTNMu0uWckv82O/P19gfBIEF+4EBAG4kuIdeVtB\"\n \"MD9qe5alzXpdX66vX+sOsuIqf3FxqnPJtf/bHscqbAvp3T0SX7Xb8zsIK5al9VpNv9nYkJlv\"\n \"n91+/9rn8Olspm+Pjt7NVkxTv2239bTRkO84sgxDpmGoattqep4+aTb1u81NZrvs7uVHkgfV\"\n \"qlqeJ9s09SwMz93h1hkM9F2vpydXvIlkmCTqTybnzqf7cVx6e/BdjmMVtoX0bjHRsyx9vuBS\"\n \"3Q+np+91G7AkHUWR/rK/r1f5dwFeNJt6kV+KBQG4sj/v7+tRva5131fNcTRNU53EsXpxfO7O\"\n 
\"tysf9QYDBfkXhM6Ogh9iHKuwLSTpb4eHakeRWp6n0HVv9duAR1Gkr/f2tFWvK3Bd1WxbVcdR\"\n \"mmWapKnG06mOo+jOvjz1S2K83t6eff+etQXwy/UsDFkDAO4zAgAQAAAEAAABAEAAABAAAAQA\"\n \"AAEAQAAAEAAABAAAAQBAAAAQAAAEAAABAEAAABAAAAQAAAEAQAAAEAAABAAAAQBAAAAQAAAE\"\n \"AAABAEAAABAAAAQAAAEAQAAAEAAABAAAAQBAAAACwCYACAAAAgCAAAAgAAAIAAACAIAAACAA\"\n \"AAgAAAIAgAAAIAAACAAAAgCAAAAgAAAIAAACAIAAACAAAAgAAAIAgAAAIAAACAAAAgCAAAAg\"\n \"AAAIAAACAIAAACAAAAgAAAIAgAAAIAAAAQBAAAAQAAAEAAABAEAAABAAAAQAAAEAQAAAEAAA\"\n \"BAAAAQBAAACsNrvpulIYsiWAe6bpuvp/KMHzybJ4Z08AAAAASUVORK5CYII=\")\n"
},
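Both functions above return raw PNG bytes after base64 decoding (small nit: `getErrorImage()` reuses the 'pending' docstring, though it actually generates the 'error' image). A minimal consumer looks like the sketch below, which loads the bytes into `QPixmap` objects; it assumes pySlipQt is installed so that `pySlipQt.sys_tile_data` is importable.

```python
# Decode the embedded tile data into pixmaps ready for drawing.
import sys
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication

import pySlipQt.sys_tile_data as std

app = QApplication(sys.argv)        # QPixmap requires a running QApplication

pending = QPixmap()
ok = pending.loadFromData(std.getPendingImage())    # raw PNG bytes -> pixmap
print('pending tile loaded:', ok, pending.width(), 'x', pending.height())

error = QPixmap()
print('error tile loaded:', error.loadFromData(std.getErrorImage()))
```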
{
"alpha_fraction": 0.7617657780647278,
"alphanum_fraction": 0.7683327198028564,
"avg_line_length": 42.507938385009766,
"blob_id": "6c4b1390470b7c2c165e26a7a67b9f389e6e66f9",
"content_id": "6240116b97097262b25492d1c870fe7f249d6d00",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2741,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 63,
"path": "/RELEASE.rst",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "pySlipQt\n========\n\npySlipQt is a 'slip map' widget for PyQt5. You can use it to embed a tiled map\ninto your PyQt5 application. The map can be overlayed with points, images, text\nand other layers. The map tiles can come from a local source of pre-generated\ntiles or from a tile server. The tiles may have any desired coordinate system\nas tile coordinates are translated to pySlipQt internal coordinates.\n\npySlipQt works on Linux and Mac. It only works with PyQt5 and Python 3.6+.\n\npySlipQt used to work on Windows, but I no longer run Windows and I can't test it.\n\nFor more information visit the\n`GitHub repository <https://github.com/rzzzwilson/pySlipQt/>`_ or view the API\ndocumentation in\n`the wiki <https://github.com/rzzzwilson/pySlipQt/wiki/The-pySlipQt-API>`_.\n\nRelease Notes\n-------------\n\nRelease 0.5.2 of pySlipQt contains improved error reporting in the case you\ntry to run *pyslipqt_demo.py* without installing the GMT tiles in your\nhome directory.\n\nRelease 0.5 of pySlipQt is early-release and is considered BETA software.\nIt is being released so anyone interested in pySlipQt can run the\n\"pyslipqt_demo.py\" program and get comfortable with the way pySlipQt\nworks. Note that testing has only been under macOS and no testing has been\ndone on either of Linux or Windows, though the aim is to make pySlipQt\ncross-platform.\n\nThis release has these notes:\n\n1. Some testing has been done, but not comprehensive testing, so please report\n any errors to me at [email protected] and attach the \"pyslipqt.log\" file.\n\n2. \"Box selection\" now works.\n\n3. Wrap-around of tiles doesn't work yet, but I *hope* to have it working even\n though it will come with some limitations.\n\n4. The included GMT tileset is very old and has a different zoom compared to any\n tiles from the 'net, such as OSM tiles, for instance. I hope to fix this\n later. The GMT tiles can still be used an example of how to use locally\n generated tiles.\n\n5. All the \"examples/test_*.py\" programs have been converted to python3\n and PyQt5, but there may still be problems.\n\n6. Some bugs found and removed.\n\nThe GMT example tileset is included in the \"examples\" subdirectory. The\ngmt_local.py tileset code assumes that the zip file has been unzipped in\nthe user's home directory (ie, ~/gmt_local_tiles). If you put the tiles in any\nother place, please make the appropriate changes in gmt_local.py or make\nyour own version of gmt_local.py.\n\nSee the API documentation for the details on how to use pySlipQt. The\ndemonstration program \"examples/pyslipqt_demo.py\" does require that the pySlipQt\npackage has been installed, though you make run pyslipqt_demo.py from any place\nas long as it is moved along with its required files from the \"pySlipQt/examples\"\ndirectory.\n"
},
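The notes above assume the GMT example tileset has been unzipped to `~/gmt_local_tiles`, and release 0.5.2 specifically improved the error reported when it is missing. A pre-flight check along these lines is all a caller needs; the path follows the notes, while the message text is illustrative rather than the library's actual output.

```python
# Fail early, with a readable message, if the GMT example tiles are absent.
import os
import sys

tiles_dir = os.path.expanduser('~/gmt_local_tiles')
if not os.path.isdir(tiles_dir):
    sys.exit(f'GMT tiles not found at {tiles_dir}; '
             'unzip the example tileset there or edit gmt_local.py')
```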
{
"alpha_fraction": 0.6052066087722778,
"alphanum_fraction": 0.6173871755599976,
"avg_line_length": 27.29054069519043,
"blob_id": "2657b6bc54e60e76ee34dd910769051022cb545c",
"content_id": "037ed5737214fea6e1f498e367ae3b4123d57339",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4187,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 148,
"path": "/pySlipQt/examples/test_displayable_levels.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest if we can have a list of \"allowable levels\" and if a user requests\nthe display of a level not in that list we CANCEL the zoom operation.\n\nUsage: test_displayable_levels.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QLabel,\n QHBoxLayout)\nfrom display_text import DisplayText\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log(\"pyslipqt.log\")\n\n\n######\n# Various constants\n######\n\nDemoName = 'PySlipQt %s - Zoom undo test' % pySlipQt.__version__\nDemoWidth = 1000\nDemoHeight = 800\n\nInitViewLevel = 2\nInitViewPosition = (100.494167, 13.7525) # Bangkok\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass AppFrame(QMainWindow):\n def __init__(self, tiles):\n super().__init__()\n\n self.setGeometry(300, 300, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n self.show()\n\n # create the tile source object\n self.tile_src = tiles.Tiles()\n\n # build the GUI\n box = QHBoxLayout()\n box.setContentsMargins(1, 1, 1, 1)\n\n qwidget = QWidget(self)\n qwidget.setLayout(box)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_src, start_level=InitViewLevel)\n box.addWidget(self.pyslipqt)\n\n self.show()\n\n # bind the pySlipQt widget to the \"zoom undo\" method\n self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.on_zoom)\n\n # set initial view position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n def on_zoom(self, event):\n \"\"\"Catch and undo a zoom.\n\n Simulate the amount of work a user handler might do before deciding to\n undo a zoom.\n\n We must check the level we are zooming to. 
If we don't, the GotoLevel()\n method below will trigger another exception, which we catch, etc, etc.\n \"\"\"\n\n log('Waiting a bit')\n for _ in range(1000000):\n pass\n\n l = [InitViewLevel, InitViewLevel, InitViewLevel, InitViewLevel,\n InitViewLevel, InitViewLevel, InitViewLevel, InitViewLevel,\n InitViewLevel, InitViewLevel, InitViewLevel, InitViewLevel,\n InitViewLevel, InitViewLevel, InitViewLevel, InitViewLevel,\n InitViewLevel, InitViewLevel, InitViewLevel, InitViewLevel,\n ]\n\n log(f'Trying to zoom to level {event.level}, allowed level={InitViewLevel}')\n if event.level not in l:\n # undo zoom\n log('New level NOT in allowed list, undoing zoom')\n self.pyslipqt.GotoLevel(InitViewLevel)\n # set initial view position\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the app\napp = QApplication(sys.argv)\nex = AppFrame(Tiles)\nsys.exit(app.exec_())\n"
},
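The essential logic being tested above, minus the GUI and the busy-loop that simulates handler work: keep a set of displayable levels and, in the level-change handler, jump back whenever the requested level is not in it. The 20-element list in the test is just `InitViewLevel` repeated, so a one-member set captures the same check. `FakeWidget` and its `GotoLevel()` are stand-ins for the real pySlipQt widget.

```python
# GUI-free distillation of the "cancel a zoom" pattern.
ALLOWED_LEVELS = {2}            # the test only ever allows InitViewLevel

class FakeWidget:
    """Stub with just enough surface to demonstrate the handler."""
    def __init__(self, level):
        self.level = level
    def GotoLevel(self, level):
        print(f'GotoLevel({level}) called - zoom undone')
        self.level = level

def on_zoom(widget, new_level):
    """Handler body: undo the zoom if the new level is not displayable."""
    if new_level not in ALLOWED_LEVELS:
        widget.GotoLevel(min(ALLOWED_LEVELS))
    else:
        widget.level = new_level

w = FakeWidget(level=2)
on_zoom(w, 5)        # disallowed -> prints the undo message, level stays 2
on_zoom(w, 2)        # allowed -> level stays 2, nothing printed
```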
{
"alpha_fraction": 0.5606021881103516,
"alphanum_fraction": 0.5771880745887756,
"avg_line_length": 29.6171875,
"blob_id": "1fe9d26c3d071d828cad14347e2752ac2172b4db",
"content_id": "01bc531625701477107a6c27146ba69365805ed0",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3919,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 128,
"path": "/pySlipQt/examples/test_gmt_local_tiles.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest the local GMT tiles code.\n\nRequires a PyQt5 application to be created before use.\nIf we can create a bitmap without PyQt5, we could remove this dependency.\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport pickle\nimport shutil\nimport unittest\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow)\n\nimport pySlipQt.gmt_local as tiles\n\nDemoName = 'GMT Tiles Cache Test'\nDemoVersion = '0.1'\n\nDemoWidth = 300\nDemoHeight = 250\n\n\nclass AppFrame(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(300, 300, DemoWidth, DemoHeight)\n self.setWindowTitle('%s %s' % (DemoName, DemoVersion))\n self.show()\n\n unittest.main()\n\n def onClose(self):\n \"\"\"Application is closing.\"\"\"\n\n pass\n\n\nclass TestGMTTiles(unittest.TestCase):\n\n # for GMT tiles\n TileWidth = 256\n TileHeight = 256\n\n def testSimple(self):\n \"\"\"Simple tests.\"\"\"\n\n # read all tiles in all rows of all levels\n cache = tiles.Tiles()\n for level in cache.levels:\n cache.UseLevel(level)\n info = cache.GetInfo(level)\n if info:\n width_px = self.TileWidth * cache.num_tiles_x\n height_px = self.TileHeight * cache.num_tiles_y\n ppd_x = cache.ppd_x\n ppd_y = cache.ppd_y\n num_tiles_width = int(width_px / self.TileWidth)\n num_tiles_height = int(height_px / self.TileHeight)\n for x in range(num_tiles_width):\n for y in range(num_tiles_height):\n bmp = cache.GetTile(x, y)\n msg = \"Can't find tile (%d,%d,%d)!?\" % (level, x, y)\n self.assertFalse(bmp is None, msg)\n\n def testErrors(self):\n \"\"\"Test possible errors.\"\"\"\n\n # check that using level outside map levels returns False\n cache = tiles.Tiles()\n level = cache.levels[-1] + 1 # get level # that DOESN'T exist\n msg = \"Using bad level (%d) didn't raise exception?\" % level\n result = cache.UseLevel(level)\n self.assertFalse(result, msg)\n\n # check that reading tile outside map returns None\n cache = tiles.Tiles()\n level = cache.levels[0] # known good level\n cache.UseLevel(level)\n width_px = self.TileWidth * cache.num_tiles_x\n height_px = self.TileHeight * cache.num_tiles_y\n ppd_x = cache.ppd_x\n ppd_y = cache.ppd_y\n num_tiles_width = int(width_px / self.TileWidth)\n num_tiles_height = int(height_px / self.TileHeight)\n msg = (\"Using bad coords (%d,%d), didn't raise KeyError\"\n % (num_tiles_width, num_tiles_height))\n with self.assertRaises(KeyError, msg=msg):\n bmp = cache.GetTile(num_tiles_width, num_tiles_height)\n\n def XtestConvert(self):\n \"\"\"Test geo2map conversions.\n\n This is normally turned off as it is a \"by hand\" sort of check.\n \"\"\"\n\n cache = tiles.Tiles()\n\n # get tile covering Greenwich observatory\n# xgeo = -0.0005 # Greenwich observatory\n# ygeo = 51.4768534\n xgeo = 7.605916 # Deutsches Eck\n ygeo = 50.364444\n for level in [0, 1, 2, 3, 4]:\n cache.UseLevel(level)\n (xtile, ytile) = cache.Geo2Tile(xgeo, ygeo)\n bmp = cache.GetTile(int(xtile), int(ytile))\n\n pt_px_x = int((xtile - int(xtile)) * cache.tile_size_x)\n pt_px_y = int((ytile - int(ytile)) * cache.tile_size_y)\n\n dc = wx.MemoryDC()\n dc.SelectObject(bmp)\n text = \"o\"\n (tw, th) = dc.GetTextExtent(text)\n dc.DrawText(text, pt_px_x-tw/2, pt_px_y-th/2)\n dc.SelectObject(wx.NullBitmap)\n\n bmp.SaveFile('xyzzy_%d.jpg' % level, wx.BITMAP_TYPE_JPEG)\n\n\napp = QApplication(sys.argv)\nex = AppFrame()\nsys.exit(app.exec_())\n"
},
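Two unittest idioms carry the tests above: `assertFalse` with a custom failure message, and `assertRaises` as a context manager for out-of-range tile access. Also worth flagging: the disabled `XtestConvert` still calls `wx.MemoryDC`, `wx.NullBitmap` and `wx.BITMAP_TYPE_JPEG` with no `import wx`, a leftover from the wxPython original, so re-enabling it as-is would fail with a `NameError`. The self-contained example below reproduces both idioms against a dict-backed stand-in for `tiles.Tiles()`.

```python
# Isolate the assertFalse-with-message and assertRaises idioms.
import unittest

class TinyCache:
    """Stand-in for tiles.Tiles(): a 2x2 grid of fake tiles."""
    def __init__(self):
        self.tiles = {(x, y): f'tile-{x}-{y}'
                      for x in range(2) for y in range(2)}
    def GetTile(self, x, y):
        return self.tiles[(x, y)]      # raises KeyError outside the grid

class TestTinyCache(unittest.TestCase):
    def test_all_tiles_present(self):
        cache = TinyCache()
        for x in range(2):
            for y in range(2):
                msg = "Can't find tile (%d,%d)!?" % (x, y)
                self.assertFalse(cache.GetTile(x, y) is None, msg)

    def test_bad_coords_raise(self):
        cache = TinyCache()
        with self.assertRaises(KeyError, msg='bad coords should raise'):
            cache.GetTile(2, 2)

if __name__ == '__main__':
    unittest.main()
```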
{
"alpha_fraction": 0.7589743733406067,
"alphanum_fraction": 0.7589743733406067,
"avg_line_length": 20.66666603088379,
"blob_id": "e55b338972dfaeb02c837058994628cebe833d1e",
"content_id": "2e18f9a8effaf3ab115e01f32c99ac648045ede1",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 195,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 9,
"path": "/Makefile",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "install: clean\n\tclear; python setup.py build; python setup.py install\n\nrelease: clean\n\tclear; python setup.py sdist bdist_wheel\n\ttwine upload dist/*\n\nclean:\n\trm -Rf build dist pySlipQt.egg-info/\n"
},
{
"alpha_fraction": 0.5265366435050964,
"alphanum_fraction": 0.5497407913208008,
"avg_line_length": 27.730497360229492,
"blob_id": "53cdc8baea0240994823ef5ffe66b3ed02c8633d",
"content_id": "3807a4e974f09633ea7f375180ab8e87250b7cdf",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4051,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 141,
"path": "/pySlipQt/examples/test_maprel_image.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"Test PySlipQt map-relative images.\n\nUsage: test_maprel_image.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\nimport pySlipQt.pySlipQt as pySlipQt\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\nfrom image_placement import ImagePlacementControl\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\n######\n# Various constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test map-relative image placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 3\nInitViewPosition = (158.0, -20.0)\n\narrow = 'graphics/arrow_right.png'\n\nImageMapData = [(158, -17, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -18, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -19, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -20, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -21, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -22, arrow, {'offset_x': 0, 'offset_y': 0}),\n (158, -23, arrow, {'offset_x': 0, 'offset_y': 0})\n ]\n\nPolygonMapData = [(((158,-17),(158,-23)),\n {'width': 1, 'colour': 'black', 'filled': False})\n ]\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # add test layers\n self.poly_layer = self.pyslipqt.AddPolygonLayer(PolygonMapData)\n self.image_layer = self.pyslipqt.AddImageLayer(ImageMapData,\n map_rel=True,\n placement='ce',\n name='<image_map_layer>')\n\n self.show()\n\n # set initial view position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the app\nlog(DemoName)\ntile_dir 
= 'test_maprel_tiles'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n"
},
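`ImageMapData` above repeats one `(lon, lat, image, attributes)` tuple across seven latitudes; the same table can be generated with a comprehension, which keeps the test pattern in one place when it changes. The tuple layout and the polygon entry mirror the test exactly; only the generation is new.

```python
# Build the test's image and polygon layer data programmatically.
arrow = 'graphics/arrow_right.png'

ImageMapData = [
    (158, lat, arrow, {'offset_x': 0, 'offset_y': 0})
    for lat in range(-17, -24, -1)          # latitudes -17 .. -23 inclusive
]

PolygonMapData = [
    (((158, -17), (158, -23)),
     {'width': 1, 'colour': 'black', 'filled': False}),
]

if __name__ == '__main__':
    for entry in ImageMapData:
        print(entry)
```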
{
"alpha_fraction": 0.5780836343765259,
"alphanum_fraction": 0.5947591066360474,
"avg_line_length": 28.279069900512695,
"blob_id": "a580997d23e3607b7d5587fccaa5b0e7a003cefe",
"content_id": "7002b8b2083c6dc966733cb33922a34ccae52fdb",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3778,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 129,
"path": "/pySlipQt/examples/test_multi_widget.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt with multiple widget instances.\n\nUsage: test_multi_widget.py [-h]\n\nUses the GMT and OSM tiles. Look for interactions of any sort between\nthe widget instances.\n\"\"\"\n\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout, QGridLayout\n\nimport pySlipQt.pySlipQt as pySlipQt\nimport pySlipQt.gmt_local as GMTTiles\nimport pySlipQt.open_street_map as NetTiles\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\n######\n# Various demo constants\n######\n\nDemoVersion = '1.0'\nDemoName = f'Test multi-widget use {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (100.51, 13.75) # Bangkok\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n\n # note that we need a unique Tile source for each widget\n # sharing directories is OK\n gmt_tile_src_1 = GMTTiles.Tiles()\n gmt_tile_src_2 = GMTTiles.Tiles()\n osm_tile_src_1 = NetTiles.Tiles()\n osm_tile_src_2 = NetTiles.Tiles()\n\n # build the GUI\n grid = QGridLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt1 = pySlipQt.PySlipQt(self, tile_src=gmt_tile_src_1,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt1, 0, 0)\n self.pyslipqt2 = pySlipQt.PySlipQt(self, tile_src=osm_tile_src_1,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt2, 0, 1)\n self.pyslipqt3 = pySlipQt.PySlipQt(self, tile_src=osm_tile_src_2,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt3, 1, 0)\n self.pyslipqt4 = pySlipQt.PySlipQt(self, tile_src=gmt_tile_src_2,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt4, 1, 1)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position\n# gmt_tile_src_1.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n# gmt_tile_src_2.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n# osm_tile_src_1.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n# osm_tile_src_2.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n self.show()\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'h', ['help'])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n\n# start the app\nlog(DemoName)\ntile_dir = 'test_multi_widget'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n\n"
},
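The key constraint the multi-widget test exercises is stated in its own comment: every `PySlipQt` instance needs its own `Tiles()` object (sharing tile *directories* is fine, sharing the source object is not). Incidentally, the commented-out `GotoLevelAndPosition` calls in the test target the tile-source objects rather than the widgets, which is likely why they are disabled. A small factory makes the one-source-per-widget rule hard to break; the constructor arguments mirror the test, and everything else here is a sketch rather than documented pySlipQt API.

```python
# One Tiles() per widget, enforced by construction.
import pySlipQt.pySlipQt as pySlipQt

def make_map_widget(parent, tiles_module, start_level=0):
    """Return a PySlipQt widget wired to a fresh, unshared tile source."""
    tile_src = tiles_module.Tiles()     # fresh source per widget, never shared
    return pySlipQt.PySlipQt(parent, tile_src=tile_src,
                             start_level=start_level)

# usage, inside a QMainWindow/QGridLayout setup like the test's:
#   import pySlipQt.gmt_local as GMTTiles
#   widget = make_map_widget(self, GMTTiles)
```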
{
"alpha_fraction": 0.5313721299171448,
"alphanum_fraction": 0.5353471040725708,
"avg_line_length": 38.05443572998047,
"blob_id": "3f72a2d4b6e8e00c79c4a2927dcf73296ebf811b",
"content_id": "261f89a2910cff60757db3392e219bd764250cd5",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 135598,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 3472,
"path": "/pySlipQt/pySlipQt.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA \"slip map\" widget for PyQt5.\n\nSo why is this widget called 'pySlip'?\n\nWell, in the OpenStreetMap world[1], a 'slippy map' is a browser map view\nserved by a tile server that can be panned and zoomed in the same way as\npopularised by Google maps. Such a map feels 'slippery', I guess.\n\nRather than 'slippy' I went for the slightly more formal 'pySlip' since the\nthing is written in Python and therefore must have the obligatory 'py' prefix.\n\nEven though this was originally written for a geographical application, the\n*underlying* system only assumes a cartesian 2D coordinate system. The tile\nsource must translate between the underlying coordinates and whatever coordinate\nsystem the tiles use. So pySlip could be used to present a game map, 2D CAD\nview, etc, as well as Mercator tiles provided either locally from the filesystem\nor from the internet (OpenStreetMap, for example).\n\n[1] http://wiki.openstreetmap.org/index.php/Slippy_Map\n\nSome semantics:\n map the whole map\n view is the view of the map through the widget\n (view may be smaller than map, or larger)\n\"\"\"\n\n\nimport sys\nfrom PyQt5.QtCore import Qt, QTimer, QPoint, QPointF, QObject, pyqtSignal\nfrom PyQt5.QtWidgets import QLabel, QSizePolicy, QWidget, QMessageBox\nfrom PyQt5.QtGui import (QPainter, QColor, QPixmap, QPen, QFont, QFontMetrics,\n QPolygon, QBrush, QCursor)\n\ntry:\n import pySlipQt.log as log\n log = log.Log('pyslipqt.log')\nexcept AttributeError:\n # means log already set up\n pass\nexcept ImportError as e:\n # if we don't have log.py, don't crash\n # fake all log(), log.debug(), ... calls\n def logit(*args, **kwargs):\n pass\n log = logit\n log.debug = logit\n log.info = logit\n log.warn = logit\n log.error = logit\n log.critical = logit\n\nimport platform\nif platform.python_version_tuple()[0] != '3':\n msg = ('You must run pySlipQt with python 3.x, you are running version %s.x.'\n % platform.python_version_tuple()[0])\n log(msg)\n print(msg)\n sys.exit(1)\n\n# version number of the widget\n__version__ = '0.5'\n\n\n######\n# A layer class - encapsulates all layer data.\n######\n\nclass _Layer(object):\n \"\"\"A Layer object.\"\"\"\n\n DefaultDelta = 50 # default selection delta\n\n def __init__(self, id=0, painter=None, data=None, map_rel=True,\n visible=False, show_levels=None, selectable=False,\n name=\"<no name given>\", ltype=None):\n \"\"\"Initialise the Layer object.\n\n id unique layer ID\n painter render function\n data the layer data\n map_rel True if layer is map-relative, else layer-relative\n visible layer visibility\n show_levels list of levels at which to auto-show the level\n selectable True if select operates on this layer, else False\n name the name of the layer (for debug)\n ltype a layer 'type' flag\n \"\"\"\n\n self.painter = painter # routine to draw layer\n self.data = data # data that defines the layer\n self.map_rel = map_rel # True if layer is map relative\n self.visible = visible # True if layer visible\n self.show_levels = show_levels # None or list of levels to auto-show\n self.selectable = selectable # True if we can select on this layer\n self.delta = self.DefaultDelta # minimum distance for selection\n self.name = name # name of this layer\n self.type = ltype # type of layer\n self.id = id # ID of this layer\n\n def __str__(self):\n return ('<pySlipQt Layer: id=%d, name=%s, map_rel=%s, visible=%s>'\n % (self.id, self.name, str(self.map_rel), str(self.visible)))\n\n\n######\n# The pySlipQt widget.\n######\n\n\nclass PySlipQt(QWidget):\n\n # events 
the widget will emit\n class Events(QObject):\n EVT_PYSLIPQT_LEVEL = pyqtSignal(object)\n EVT_PYSLIPQT_POSITION = pyqtSignal(object)\n EVT_PYSLIPQT_SELECT = pyqtSignal(object)\n EVT_PYSLIPQT_BOXSELECT = pyqtSignal(object)\n EVT_PYSLIPQT_POLYSELECT = pyqtSignal(object)\n EVT_PYSLIPQT_POLYBOXSELECT = pyqtSignal(object)\n\n # event numbers\n (EVT_PYSLIPQT_LEVEL, EVT_PYSLIPQT_POSITION,\n EVT_PYSLIPQT_SELECT, EVT_PYSLIPQT_BOXSELECT,\n EVT_PYSLIPQT_POLYSELECT, EVT_PYSLIPQT_POLYBOXSELECT) = range(6)\n\n event_name = {EVT_PYSLIPQT_LEVEL: 'EVT_PYSLIPQT_LEVEL',\n EVT_PYSLIPQT_POSITION: 'EVT_PYSLIPQT_POSITION',\n EVT_PYSLIPQT_SELECT: 'EVT_PYSLIPQT_SELECT',\n EVT_PYSLIPQT_BOXSELECT: 'EVT_PYSLIPQT_BOXSELECT',\n EVT_PYSLIPQT_POLYSELECT: 'EVT_PYSLIPQT_POLYSELECT',\n EVT_PYSLIPQT_POLYBOXSELECT: 'EVT_PYSLIPQT_POLYBOXSELECT',\n }\n\n # list of valid placement values\n valid_placements = ['cc', 'nw', 'cn', 'ne', 'ce',\n 'se', 'cs', 'sw', 'cw']\n\n # default point attributes - map relative\n DefaultPointPlacement = 'cc'\n DefaultPointRadius = 3\n DefaultPointColour = 'red'\n DefaultPointOffsetX = 0\n DefaultPointOffsetY = 0\n DefaultPointData = None\n\n # default point attributes - view relative\n DefaultPointViewPlacement = 'cc'\n DefaultPointViewRadius = 3\n DefaultPointViewColour = 'red'\n DefaultPointViewOffsetX = 0\n DefaultPointViewOffsetY = 0\n DefaultPointViewData = None\n\n # default image attributes - map relative\n DefaultImagePlacement = 'nw'\n DefaultImageRadius = 0\n DefaultImageColour = 'black'\n DefaultImageOffsetX = 0\n DefaultImageOffsetY = 0\n DefaultImageData = None\n\n # default image attributes - view relative\n DefaultImageViewPlacement = 'nw'\n DefaultImageViewRadius = 0\n DefaultImageViewColour = 'black'\n DefaultImageViewOffsetX = 0\n DefaultImageViewOffsetY = 0\n DefaultImageViewData = None\n\n # default text attributes - map relative\n DefaultTextPlacement = 'nw'\n DefaultTextRadius = 2\n DefaultTextColour = 'black'\n DefaultTextTextColour = 'black'\n DefaultTextOffsetX = 5\n DefaultTextOffsetY = 1\n DefaultTextFontname = 'Helvetica'\n DefaultTextFontSize = 10\n DefaultTextData = None\n\n # default text attributes - view relative\n DefaultTextViewPlacement = 'nw'\n DefaultTextViewRadius = 0\n DefaultTextViewColour = 'black'\n DefaultTextViewTextColour = 'black'\n DefaultTextViewOffsetX = 0\n DefaultTextViewOffsetY = 0\n DefaultTextViewFontname = 'Helvetica'\n DefaultTextViewFontSize = 10\n DefaultTextViewData = None\n\n # default polygon attributes - map view\n DefaultPolygonPlacement = 'cc'\n DefaultPolygonWidth = 1\n DefaultPolygonColour = 'red'\n DefaultPolygonClose = False\n DefaultPolygonFilled = False\n DefaultPolygonFillcolour = 'blue'\n DefaultPolygonOffsetX = 0\n DefaultPolygonOffsetY = 0\n DefaultPolygonData = None\n\n # default polygon attributes - view relative\n DefaultPolygonViewPlacement = 'cc'\n DefaultPolygonViewWidth = 1\n DefaultPolygonViewColour = 'red'\n DefaultPolygonViewClose = False\n DefaultPolygonViewFilled = False\n DefaultPolygonViewFillcolour = 'blue'\n DefaultPolygonViewOffsetX = 0\n DefaultPolygonViewOffsetY = 0\n DefaultPolygonViewData = None\n\n # default polyline attributes - map view\n DefaultPolylinePlacement = 'cc'\n DefaultPolylineWidth = 1\n DefaultPolylineColour = 'red'\n DefaultPolylineOffsetX = 0\n DefaultPolylineOffsetY = 0\n DefaultPolylineData = None\n\n # default polyline attributes - view relative\n DefaultPolylineViewPlacement = 'cc'\n DefaultPolylineViewWidth = 1\n DefaultPolylineViewColour = 'red'\n DefaultPolylineViewOffsetX = 0\n 
    DefaultPolylineViewOffsetY = 0
    DefaultPolylineViewData = None

    # layer type values
    (TypePoint, TypeImage, TypeText, TypePolygon, TypePolyline) = range(5)

    # cursor types
    StandardCursor = Qt.ArrowCursor
    BoxSelectCursor = Qt.CrossCursor
    WaitCursor = Qt.WaitCursor
    DragCursor = Qt.OpenHandCursor

    # box select constants
    BoxSelectPenColor = QColor(255, 0, 0, 128)
    BoxSelectPenStyle = Qt.DashLine
    BoxSelectPenWidth = 2

    def __init__(self, parent, tile_src, start_level, **kwargs):
        """Initialize the pySlipQt widget.

        parent       the GUI parent widget
        tile_src     a Tiles object, source of tiles
        start_level  level to initially display
        kwargs       keyword args passed through to the underlying QLabel
        """

        super().__init__(parent, **kwargs)    # inherit all parent object setup

        # remember the tile source object
        self.tile_src = tile_src

        # the tile coordinates
        self.level = start_level

        # view and map limits
        self.view_width = 0     # width/height of the view
        self.view_height = 0    # changes when the widget changes size

        # set tile and levels stuff
        self.max_level = max(tile_src.levels)     # max level displayed
        self.min_level = min(tile_src.levels)     # min level displayed
        self.tile_width = tile_src.tile_size_x    # width of tile in pixels
        self.tile_height = tile_src.tile_size_y   # height of tile in pixels
        self.num_tiles_x = tile_src.num_tiles_x   # number of map tiles in X direction
        self.num_tiles_y = tile_src.num_tiles_y   # number of map tiles in Y direction
# TODO: implement map wrap-around
#        self.wrap_x = tile_src.wrap_x             # True if tiles wrap in X direction
#        self.wrap_y = tile_src.wrap_y             # True if tiles wrap in Y direction
        self.wrap_x = False                       # True if tiles wrap in X direction
        self.wrap_y = False                       # True if tiles wrap in Y direction

        self.map_width = self.num_tiles_x * self.tile_width     # virtual map width
        self.map_height = self.num_tiles_y * self.tile_height   # virtual map height

        self.next_layer_id = 1    # source of unique layer IDs

        self.tiles_max_level = max(tile_src.levels)   # maximum level in tile source
        self.tiles_min_level = min(tile_src.levels)   # minimum level in tile source

        # box select state
        self.sbox_w = None      # width/height of box select rectangle
        self.sbox_h = None
        self.sbox_1_x = None    # view coords of start corner of select box
        self.sbox_1_y = None    # if selecting, self.sbox_1_x != None

        # define position and tile coords of the "key" tile
        self.key_tile_left = 0      # tile coordinates of key tile
        self.key_tile_top = 0
        self.key_tile_xoffset = 0   # view coordinates of key tile wrt view
        self.key_tile_yoffset = 0

        # we keep track of the cursor coordinates if cursor on map
        self.mouse_x = None
        self.mouse_y = None

        # state variables holding mouse buttons state
        self.left_mbutton_down = False
        self.mid_mbutton_down = False
        self.right_mbutton_down = False

        # keyboard state variables
        self.shift_down = False

        # when dragging, remember the initial start point
        self.start_drag_x = None
        self.start_drag_y = None

        # layer state variables
        self.layer_mapping = {}    # maps layer ID to layer data
        self.layer_z_order = []    # layer Z order, contains layer IDs

        # some cursors
        self.standard_cursor = QCursor(self.StandardCursor)
        self.box_select_cursor = QCursor(self.BoxSelectCursor)
        self.wait_cursor = QCursor(self.WaitCursor)
        self.drag_cursor = QCursor(self.DragCursor)

        # set up dispatch dictionaries for layer select handlers
        # for point select
        self.layerPSelHandler = {self.TypePoint: self.sel_point_in_layer,
                                 self.TypeImage: self.sel_image_in_layer,
                                 self.TypeText: self.sel_text_in_layer,
                                 self.TypePolygon: self.sel_polygon_in_layer,
                                 self.TypePolyline: self.sel_polyline_in_layer}

        # for box select
        self.layerBSelHandler = {self.TypePoint: self.sel_box_points_in_layer,
                                 self.TypeImage: self.sel_box_images_in_layer,
                                 self.TypeText: self.sel_box_texts_in_layer,
                                 self.TypePolygon: self.sel_box_polygons_in_layer,
                                 self.TypePolyline: self.sel_box_polylines_in_layer}

        # create the events raised by PySlipQt
        self.events = PySlipQt.Events()

        # a dictionary to map event number to raising function
        self.pyslipqt_event_dict = {
           PySlipQt.EVT_PYSLIPQT_LEVEL: self.events.EVT_PYSLIPQT_LEVEL.emit,
           PySlipQt.EVT_PYSLIPQT_POSITION: self.events.EVT_PYSLIPQT_POSITION.emit,
           PySlipQt.EVT_PYSLIPQT_SELECT: self.events.EVT_PYSLIPQT_SELECT.emit,
           PySlipQt.EVT_PYSLIPQT_BOXSELECT: self.events.EVT_PYSLIPQT_BOXSELECT.emit,
           PySlipQt.EVT_PYSLIPQT_POLYSELECT: self.events.EVT_PYSLIPQT_POLYSELECT.emit,
           PySlipQt.EVT_PYSLIPQT_POLYBOXSELECT: self.events.EVT_PYSLIPQT_POLYBOXSELECT.emit,
          }

        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setMinimumSize(self.tile_width, self.tile_height)

        tile_src.setCallback(self.on_tile_available)

        self.setMouseTracking(True)
        self.setEnabled(True)       # to receive key events?

        self.default_cursor = self.standard_cursor
        self.setCursor(self.standard_cursor)

        # do a "resize" after this function
        QTimer.singleShot(10, self.resizeEvent)

    def on_tile_available(self, level, x, y, image, error):
        """Called when a new 'net tile is available.

        level  the level the tile is for
        x, y   tile coordinates of the tile
        image  the tile image data
        error  True if there was an error

        We have enough information to redraw a specific tile,
        but we just redraw the widget.
        """

        self.update()

    ######
    # Code for events raised by this widget
    ######

    class PySlipQtEvent:
        def __init__(self, etype, **kwargs):
            """Create a PySlipQtEvent with attributes in 'kwargs'."""

            self.type = etype
            for (attr, value) in kwargs.items():
                setattr(self, attr, value)

    def dump_event(self, msg, event):
        """Dump an event to the log.

        Print attributes and values for non-dunder attributes.
        """

        log('dump_event: %s:' % msg)
        for attr in dir(event):
            if not attr.startswith('__'):
                log('    event.%s=%s' % (attr, str(getattr(event, attr))))

    def raise_event(self, etype, **kwargs):
        """Raise event with attributes in 'kwargs'.

        etype   type of event to raise
        kwargs  a dictionary of attributes to attach to event
        """

        event = PySlipQt.PySlipQtEvent(etype, **kwargs)
        self.pyslipqt_event_dict[etype](event)

    ######
    # Override the PyQt5 mouse/keyboard/etc events
    ######

    def mousePressEvent(self, event):
        """Mouse button pressed."""

        click_x = event.x()
        click_y = event.y()

        # assume we aren't dragging
        self.start_drag_x = self.start_drag_y = None

        b = event.button()
        if b == Qt.NoButton:
            pass
        elif b == Qt.LeftButton:
            self.left_mbutton_down = True
            if self.shift_down:
                (self.sbox_w, self.sbox_h) = (0, 0)
                (self.sbox_1_x, self.sbox_1_y) = (click_x, click_y)
        elif b == Qt.MidButton:
            self.mid_mbutton_down = True
        elif b == Qt.RightButton:
            self.right_mbutton_down = True
        else:
            log('mousePressEvent: unknown button')

    def mouseReleaseEvent(self, event):
        """Mouse button was released.

        event.x & event.y  view coords when released

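# --- Illustrative aside: the kwargs-to-attributes event pattern ---------------
# A self-contained demo (assumed sample values) of the same technique
# PySlipQtEvent uses above: every keyword argument becomes an attribute of
# the event object, so raise_event(etype, mposn=..., vposn=...) produces an
# object with .mposn and .vposn attributes.
class _DemoEvent:
    def __init__(self, etype, **kwargs):
        self.type = etype
        for (attr, value) in kwargs.items():
            setattr(self, attr, value)

_ev = _DemoEvent(0, mposn=(151.21, -33.87), vposn=(120, 45), layer_id=None)
assert _ev.mposn == (151.21, -33.87) and _ev.vposn == (120, 45)
# ------------------------------------------------------------------------------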
Could be end of a drag or point or box selection. If it's the end of\n a drag we don't do a lot. If a selection we process that.\n \"\"\"\n\n x = event.x()\n y = event.y()\n clickpt_v = (x, y)\n\n # cursor back to normal in case it was a box select\n self.setCursor(self.default_cursor)\n\n # we need a repaint to remove any selection box, but NOT YET!\n delayed_paint = self.sbox_1_x # True if box select active\n\n b = event.button()\n if b == Qt.NoButton:\n pass\n elif b == Qt.LeftButton:\n self.left_mbutton_down = False\n# legacy code from pySlip, leave just in case we need it\n# # if required, ignore this event\n# if self.ignore_next_up:\n# self.ignore_next_up = False\n# return\n# # we need a repaint to remove any selection box, but NOT YET!\n# delayed_paint = self.sbox_1_x # True if box select active\n\n if self.sbox_1_x:\n # we are doing a box select,\n # get canonical selection box in view coordinates\n (ll_vx, ll_vy, tr_vx, tr_vy) = self.sel_box_canonical()\n\n # get lower-left and top-right view tuples\n ll_v = (ll_vx, ll_vy)\n tr_v = (tr_vx, tr_vy)\n\n # convert view to geo coords\n ll_g = self.view_to_geo(ll_v)\n tr_g = self.view_to_geo(tr_v)\n\n # check each layer for a box select event, work on copy of\n # '.layer_z_order' as user response could change layer order\n for lid in self.layer_z_order[:]:\n l = self.layer_mapping[lid]\n # if layer visible and selectable\n if l.selectable and l.visible:\n if l.map_rel:\n # map-relative, get all points selected (if any)\n result = self.layerBSelHandler[l.type](l, ll_g, tr_g)\n else:\n # view-relative\n result = self.layerBSelHandler[l.type](l, ll_v, tr_v)\n if result:\n (sel, data, relsel) = result\n self.raise_event(PySlipQt.EVT_PYSLIPQT_BOXSELECT,\n mposn=None, vposn=None, layer_id=lid,\n selection=sel, relsel=relsel)\n else:\n # raise an empty EVT_PYSLIPQT_BOXSELECT event\n self.raise_event(PySlipQt.EVT_PYSLIPQT_BOXSELECT,\n mposn=None, vposn=None,\n layer_id=lid, selection=None, relsel=None)\n\n # user code possibly updated screen, must repaint\n delayed_paint = True\n self.sbox_1_x = self.sbox_1_y = None\n else:\n if self.start_drag_x is None:\n # not dragging, possible point selection\n # get click point in view & global coords\n clickpt_g = self.view_to_geo(clickpt_v)\n# if clickpt_g is None:\n# return # we clicked off the map\n \n # check each layer for a point select handler, we work on a\n # copy as user click-handler code could change order\n for lid in self.layer_z_order[:]:\n l = self.layer_mapping[lid]\n # if layer visible and selectable\n if l.selectable and l.visible:\n result = self.layerPSelHandler[l.type](l, clickpt_v,\n clickpt_g)\n if result:\n (sel, relsel) = result\n \n # raise the EVT_PYSLIPQT_SELECT event\n self.raise_event(PySlipQt.EVT_PYSLIPQT_SELECT,\n mposn=clickpt_g,\n vposn=clickpt_v,\n layer_id=lid,\n selection=sel, relsel=relsel)\n else:\n # raise an empty EVT_PYSLIPQT_SELECT event\n self.raise_event(PySlipQt.EVT_PYSLIPQT_SELECT,\n mposn=clickpt_g,\n vposn=clickpt_v,\n layer_id=lid,\n selection=None, relsel=None)\n\n # turn off dragging, if we were\n self.start_drag_x = self.start_drag_y = None\n\n # turn off box selection mechanism\n self.sbox_1_x = self.sbox_1_y = None\n\n # force PAINT event if required\n if delayed_paint:\n self.update()\n\n elif b == Qt.MidButton:\n self.mid_mbutton_down = False\n elif b == Qt.RightButton:\n self.right_mbutton_down = False\n else:\n log('mouseReleaseEvent: unknown button')\n\n def mouseDoubleClickEvent(self, event):\n b = event.button()\n if b == Qt.NoButton:\n 
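# --- Illustrative aside: consuming the select events raised above ------------
# A sketch of a client-side handler for the EVT_PYSLIPQT_SELECT events that
# mouseReleaseEvent() raises.  The handler name is hypothetical; the
# attribute names match the raise_event() calls above.
def on_select(event):
    if event.selection is None:
        print('click selected nothing in layer %s' % str(event.layer_id))
    else:
        # .selection is a list of (x, y, attributes) tuples; .relsel is the
        # position of the click relative to the selected object (or None)
        print('selected %d object(s), relsel=%s'
              % (len(event.selection), str(event.relsel)))

# widget.events.EVT_PYSLIPQT_SELECT.connect(on_select)
# ------------------------------------------------------------------------------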
pass\n elif b == Qt.LeftButton:\n pass\n elif b == Qt.MidButton:\n pass\n elif b == Qt.RightButton:\n pass\n else:\n log('mouseDoubleClickEvent: unknown button')\n\n def mouseMoveEvent(self, event):\n \"\"\"Handle a mouse move event.\n \n If left mouse down, either drag the map or start a box selection.\n If we are off the map, ensure self.mouse_x, etc, are None.\n \"\"\"\n\n x = event.x()\n y = event.y()\n mouse_view = (x, y)\n mouse_geo = self.view_to_geo(mouse_view)\n\n # update remembered mouse position in case of zoom\n self.mouse_x = self.mouse_y = None\n if mouse_geo:\n self.mouse_x = x\n self.mouse_y = y\n\n if self.left_mbutton_down:\n if self.shift_down:\n # we are starting a box select\n if self.sbox_1_x == -1:\n # mouse down before SHIFT down, fill in box start point\n self.sbox_1_x = x\n self.sbox_1_y = y\n\n # set select box start point at mouse position\n (self.sbox_w, self.sbox_h) = (x - self.sbox_1_x, y - self.sbox_1_y)\n else:\n # we are dragging\n if self.start_drag_x == None:\n # start of drag, set drag state\n (self.start_drag_x, self.start_drag_y) = (x, y)\n\n # we don't move much - less than a tile width/height\n # drag the key tile in the X direction\n delta_x = self.start_drag_x - x\n self.key_tile_xoffset -= delta_x\n if self.key_tile_xoffset < -self.tile_width: # too far left\n self.key_tile_xoffset += self.tile_width\n self.key_tile_left += 1\n if self.key_tile_xoffset > 0: # too far right\n self.key_tile_xoffset -= self.tile_width\n self.key_tile_left -= 1\n\n # drag the key tile in the Y direction\n delta_y = self.start_drag_y - y\n self.key_tile_yoffset -= delta_y\n if self.key_tile_yoffset < -self.tile_height: # too far up\n self.key_tile_yoffset += self.tile_height\n self.key_tile_top += 1\n if self.key_tile_yoffset > 0: # too far down\n self.key_tile_yoffset -= self.tile_height\n self.key_tile_top -= 1\n\n # set key tile stuff so update() shows drag\n self.rectify_key_tile()\n\n # get ready for more drag\n (self.start_drag_x, self.start_drag_y) = (x, y)\n\n self.update() # force a repaint\n\n # emit the event for mouse position\n self.raise_event(PySlipQt.EVT_PYSLIPQT_POSITION,\n mposn=mouse_geo, vposn=mouse_view)\n\n def keyPressEvent(self, event):\n \"\"\"Capture a key press.\"\"\"\n\n if event.key() == Qt.Key_Shift:\n self.shift_down = True\n self.default_cursor = self.box_select_cursor\n self.setCursor(self.default_cursor)\n if self.left_mbutton_down:\n # start of a box select\n self.sbox_1_x = -1 # special value, means fill X,Y on mouse down\n event.accept()\n\n def keyReleaseEvent(self, event):\n \"\"\"Capture a key release.\"\"\"\n\n key = event.key()\n if event.key() == Qt.Key_Shift:\n self.shift_down = False\n self.default_cursor = self.standard_cursor\n self.setCursor(self.default_cursor)\n event.accept()\n\n def wheelEvent(self, event):\n \"\"\"Handle a mouse wheel rotation.\"\"\"\n\n if event.angleDelta().y() < 0:\n new_level = self.level + 1\n else:\n new_level = self.level - 1\n\n log(f'wheelEvent: new_level={new_level}')\n\n view = None\n if self.mouse_x:\n view = (self.mouse_x, self.mouse_y)\n\n log(f'calling .zoom_level({new_level}, view={view})')\n\n self.zoom_level(new_level, view=view)\n\n def resizeEvent(self, event=None):\n \"\"\"Widget resized, recompute some state.\"\"\"\n\n # new widget size\n self.view_width = self.width()\n self.view_height = self.height()\n\n # recalculate the \"key\" tile stuff\n self.rectify_key_tile()\n\n def enterEvent(self, event):\n self.setFocus()\n\n def leaveEvent(self, event):\n \"\"\"The mouse is 
leaving the widget.

        Raise an EVT_PYSLIPQT_POSITION event with positions set to None.
        We do this so user code can clear any mouse position data, for example.
        """

        self.mouse_x = None
        self.mouse_y = None

        self.raise_event(PySlipQt.EVT_PYSLIPQT_POSITION, mposn=None, vposn=None)

    def paintEvent(self, event):
        """Draw the base map and then the layers on top."""

        # The "key" tile position is maintained by other code, we just
        # assume it's set.  Figure out how to draw tiles, set up 'row_list'
        # and 'col_list' which are lists of tile coords to draw (rows and
        # columns).

        col_list = []
        x_coord = self.key_tile_left
        x_pix_start = self.key_tile_xoffset
        while x_pix_start < self.view_width:
            col_list.append(x_coord)
            if not self.wrap_x and x_coord >= self.num_tiles_x-1:
                break
            x_coord = (x_coord + 1) % self.num_tiles_x
            x_pix_start += self.tile_width

        row_list = []
        y_coord = self.key_tile_top
        y_pix_start = self.key_tile_yoffset
        while y_pix_start < self.view_height:
            row_list.append(y_coord)
            if not self.wrap_y and y_coord >= self.num_tiles_y-1:
                break
            y_coord = (y_coord + 1) % self.num_tiles_y
            y_pix_start += self.tile_height

        # Ready to update the view
        # prepare the canvas
        painter = QPainter()
        painter.begin(self)

        # paste all background tiles onto the view
        x_pix = self.key_tile_xoffset
        for x in col_list:
            y_pix = self.key_tile_yoffset
            for y in row_list:
                painter.drawPixmap(x_pix, y_pix, self.tile_src.GetTile(x, y))
                y_pix += self.tile_height
            x_pix += self.tile_width

        # now draw the layers
        for id in self.layer_z_order:
            l = self.layer_mapping[id]
            if l.visible and self.level in l.show_levels:
                l.painter(painter, l.data, map_rel=l.map_rel)

        # draw selection rectangle, if any
        if self.sbox_1_x:
            # draw the select box, transparent fill
            painter.setBrush(QBrush(QColor(0, 0, 0, 0)))
            pen = QPen(PySlipQt.BoxSelectPenColor, PySlipQt.BoxSelectPenWidth,
                       PySlipQt.BoxSelectPenStyle)
            painter.setPen(pen)
            painter.drawRect(self.sbox_1_x, self.sbox_1_y,
                             self.sbox_w, self.sbox_h)

        painter.end()

    ######
    #
    ######

# UNUSED
    def normalize_key_after_drag(self, delta_x=None, delta_y=None):
        """After drag, set "key" tile correctly.

        delta_x  the X amount dragged (pixels), None if not dragged in X
        delta_y  the Y amount dragged (pixels), None if not dragged in Y

        The 'key' tile was correct, but we've moved the map in the X and Y
        directions. 
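# --- Illustrative aside: which tile columns does paintEvent() draw? ----------
# A self-contained rework of the column loop above, with assumed numbers:
# 256-pixel tiles, key tile column 3 drawn starting at view x = -100, a
# 700-pixel wide view, 10 tile columns and no X wrap-around.
def _visible_columns(key_left, key_xoffset, tile_w, view_w, num_tiles_x):
    cols = []
    x_coord = key_left
    x_pix = key_xoffset
    while x_pix < view_w:
        cols.append(x_coord)
        if x_coord >= num_tiles_x - 1:      # no wrap: stop at the map edge
            break
        x_coord += 1
        x_pix += tile_w
    return cols

# tiles 3..6 cover view pixels -100..923, spanning the 700-pixel view
assert _visible_columns(3, -100, 256, 700, 10) == [3, 4, 5, 6]
# ------------------------------------------------------------------------------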
Normalize the 'key' tile taking into account whether\n we are wrapping X or Y directions.\n\n Dragging left gets a positive delta_x, up gets a positive delta_y.\n We call this routine to initialize things after zoom (for instance),\n passing 0 drag deltas.\n \"\"\"\n\n if self.wrap_x:\n # wrapping in X direction, move 'key' tile in X\n self.key_tile_xoffset -= delta_x\n\n # normalize .key_tile_left value\n while self.key_tile_xoffset > 0:\n # 'key' tile too far right, move one left\n self.key_tile_left -= 1\n self.key_tile_xoffset -= self.tile_width\n\n while self.key_tile_xoffset <= -self.tile_width:\n # 'key' tile too far left, move one right\n self.key_tile_left += 1\n self.key_tile_xoffset += self.tile_width\n self.key_tile_left = (self.key_tile_left + self.num_tiles_x) % self.num_tiles_x\n else:\n # not wrapping in X direction\n if self.map_width <= self.view_width:\n # if map <= view, don't drag, ensure centred\n self.key_tile_xoffset = (self.view_width - self.map_width) // 2\n else:\n # maybe drag, but don't expose background on left or right sides\n # remember old 'key' tile left value\n old_left = self.key_tile_left\n\n # move key tile by amount of X drag\n self.key_tile_xoffset -= delta_x\n\n while self.key_tile_xoffset > 0:\n # 'key' tile too far right\n self.key_tile_left -= 1\n self.key_tile_xoffset -= self.tile_width\n\n while self.key_tile_xoffset <= -self.tile_width:\n # 'key' tile too far left\n self.key_tile_left += 1\n self.key_tile_xoffset += self.tile_width\n self.key_tile_left = (self.key_tile_left + self.num_tiles_x) % self.num_tiles_x\n\n if delta_x < 0:\n # was dragged to the right, don't allow left edge to show\n if self.key_tile_left > old_left:\n self.key_tile_left = 0\n self.key_tile_xoffset = 0\n else:\n # if dragged too far, reset key tile data\n if self.key_tile_left > self.max_key_left:\n self.key_tile_left = self.max_key_left\n self.key_tile_xoffset = self.max_key_xoffset\n elif self.key_tile_left == self.max_key_left:\n if self.key_tile_xoffset < self.max_key_xoffset:\n self.key_tile_xoffset = self.max_key_xoffset\n\n if self.wrap_y:\n # wrapping in Y direction, move 'key' tile\n self.key_tile_yoffset -= delta_y\n\n # normalize .key_tile_top value\n while self.key_tile_yoffset > 0:\n # 'key' tile too far right, move one left\n self.key_tile_top -= 1\n self.key_tile_yoffset -= self.tile_height\n\n while self.key_tile_yoffset <= -self.tile_height:\n # 'key' tile too far left, move one right\n self.key_tile_top += 1\n self.key_tile_yoffset += self.tile_height\n self.key_tile_top = (self.key_tile_top + self.num_tiles_y) % self.num_tiles_y\n else:\n # not wrapping in the Y direction\n if self.map_height <= self.view_height:\n # if map <= view, don't drag, ensure centred\n self.key_tile_yoffset = (self.view_height - self.map_height) // 2\n else:\n # remember old 'key' tile left value\n old_top = self.key_tile_top\n\n # map > view, allow drag, but don't go past the edge\n self.key_tile_yoffset -= delta_y\n\n while self.key_tile_yoffset > 0:\n # 'key' tile too far right\n self.key_tile_top -= 1\n self.key_tile_yoffset -= self.tile_height\n\n while self.key_tile_yoffset <= -self.tile_height:\n # 'key' tile too far left\n self.key_tile_top += 1\n self.key_tile_yoffset += self.tile_height\n self.key_tile_top = (self.key_tile_top + self.num_tiles_y) % self.num_tiles_y\n\n if delta_y < 0:\n # was dragged to the top, don't allow bottom edge to show\n if self.key_tile_top > old_top:\n self.key_tile_top = 0\n self.key_tile_yoffset = 0\n else:\n # if dragged too far, 
reset key tile data
                    if self.key_tile_top > self.max_key_top:
                        self.key_tile_top = self.max_key_top
                        self.key_tile_yoffset = self.max_key_yoffset
                    elif self.key_tile_top == self.max_key_top:
                        if self.key_tile_yoffset < self.max_key_yoffset:
                            self.key_tile_yoffset = self.max_key_yoffset

    ######
    #
    ######

# UNUSED
    def tile_frac_to_parts(self, t_frac, length):
        """Split a tile coordinate into integer and fractional parts.

        t_frac  a fractional tile coordinate
        length  size of tile width or height

        Return (int, frac) parts of 't_frac'.
        """

        int_part = int(t_frac)
        frac_part = int((t_frac - int_part) * length)

        return (int_part, frac_part)

# UNUSED
    def tile_parts_to_frac(self, t_coord, t_offset, length):
        """Convert a tile coord plus offset to a fractional tile value.

        t_coord   the tile integer coordinate
        t_offset  the pixel further offset
        length    the width or height of the tile

        Returns a fractional tile coordinate.
        """

        return t_coord + t_offset/length

# UNUSED
    def zoom_tile(self, c_tile, scale):
        """Zoom into centre tile at given scale.

        c_tile  tuple (x_frac, y_frac) of fractional tile coords for point
        scale   2.0 if zooming in, 0.5 if zooming out

        Returns a tuple (zx_frac, zy_frac) of fractional coordinates of the
        point after the zoom.
        """

        # unpack the centre tile coords
        (x_frac, y_frac) = c_tile

        # convert tile fractional coords to tile # + offset
        (tile_left, tile_xoff) = self.tile_frac_to_parts(x_frac, self.tile_width)
        (tile_top, tile_yoff) = self.tile_frac_to_parts(y_frac, self.tile_height)

        if scale > 1:
            # assume scale is 2
            # a simple doubling of fractional coordinates
            if tile_xoff < self.tile_width // 2:
                tile_left = tile_left * 2
                tile_xoff = tile_xoff * 2
            else:
                tile_left = tile_left*2 + 1
                tile_xoff = tile_xoff*2 - self.tile_width

            if tile_yoff < self.tile_height // 2:
                tile_top = tile_top * 2
                tile_yoff = tile_yoff * 2
            else:
                tile_top = tile_top*2 + 1
                tile_yoff = tile_yoff*2 - self.tile_height
        else:
            # assume scale is 0.5
            # a simple halving of fractional coordinates
            tile_left = tile_left // 2
            if tile_left % 2 == 0:
                # point in left half of 2x2
                tile_xoff = tile_xoff // 2
            else:
                # point in right half of 2x2
                tile_xoff = (tile_xoff + self.tile_width) // 2

            tile_top = tile_top // 2
            if tile_top % 2 == 0:
                # point in top half of 2x2
                tile_yoff = tile_yoff // 2
            else:
                # point in bottom half of 2x2
                tile_yoff = (tile_yoff + self.tile_height) // 2

        zx_frac = self.tile_parts_to_frac(tile_left, tile_xoff, self.tile_width)
        zy_frac = self.tile_parts_to_frac(tile_top, tile_yoff, self.tile_height)

        return (zx_frac, zy_frac)

    ######
    # Layer manipulation routines.
    ######

    def add_layer(self, painter, data, map_rel, visible, show_levels,
                  selectable, name, ltype):
        """Add a generic layer to the system.

        painter      the function used to paint the layer
        data         actual layer data (depends on layer type)
        map_rel      True if points are map relative, else view relative
        visible      True if layer is to be immediately shown, else False
        show_levels  list of levels at which to auto-show the layer
        selectable   True if select operates on this layer
        name         name for this layer
        ltype        flag for layer 'type'

        Returns unique ID of the new layer.
        """

        # get unique layer ID
        id = self.next_layer_id
        self.next_layer_id += 1

        # prepare the show_level value
        if show_levels is None:
            show_levels = list(range(self.tiles_min_level, self.tiles_max_level+1))
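# --- Illustrative aside: fractional tile coordinates --------------------------
# A tiny self-contained check of the split/join arithmetic used by
# tile_frac_to_parts() and tile_parts_to_frac() above, assuming 256-pixel
# tiles: 3.25 means "tile 3, one quarter of the way across".
_TILE = 256
_t_frac = 3.25
_int_part = int(_t_frac)                          # -> 3
_offset = int((_t_frac - _int_part) * _TILE)      # -> 64 pixels into tile 3
assert (_int_part, _offset) == (3, 64)
assert _int_part + _offset / _TILE == 3.25        # the inverse conversion
# ------------------------------------------------------------------------------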

        # create layer, add unique ID to Z order list
        l = _Layer(id=id, painter=painter, data=data, map_rel=map_rel,
                   visible=visible, show_levels=show_levels,
                   selectable=selectable, name=name, ltype=ltype)

        self.layer_mapping[id] = l
        self.layer_z_order.append(id)

        # force display of new layer if it's visible
        if visible:
            self.update()

        return id

    def SetLayerSelectable(self, lid, selectable=False):
        """Update the .selectable attribute for a layer.

        lid         ID of the layer we are going to update
        selectable  new .selectable attribute value (True or False)
        """

        # just in case id is None
        if lid:
            layer = self.layer_mapping[lid]
            layer.selectable = selectable

    ######
    # Layer drawing routines
    ######

    def draw_point_layer(self, dc, data, map_rel):
        """Draw a points layer.

        dc       the active device context to draw on
        data     an iterable of point tuples:
                     (x, y, place, radius, colour, x_off, y_off, udata)
        map_rel  points relative to map if True, else relative to view
        """

        # get correct pex function - this handles map or view
        # we do this once here rather than many times inside the loop
        pex = self.pex_point_view
        if map_rel:
            pex = self.pex_point

        # speed up drawing by caching the current pen colour
        cache_pcolour = None

        # draw points on map/view
        for (x, y, place, radius, pcolour, x_off, y_off, udata) in data:
            (pt, ex) = pex(place, (x,y), x_off, y_off, radius)

            if pt and radius:  # don't draw if not on screen or zero radius
                if cache_pcolour != pcolour:
                    qcolour = QColor(*pcolour)
                    pen = QPen(qcolour, radius, Qt.SolidLine)
                    dc.setPen(pen)
                    dc.setBrush(qcolour)
                    cache_pcolour = pcolour
                (pt_x, pt_y) = pt
                dc.drawEllipse(QPoint(pt_x, pt_y), radius, radius)

    def draw_image_layer(self, dc, images, map_rel):
        """Draw an image Layer on the view.

        dc       the active device context to draw on
        images   a sequence of image tuples:
                     (x, y, pmap, w, h, placement,
                      offset_x, offset_y, radius, colour, idata)
        map_rel  points relative to map if True, else relative to view
        """

        # get correct pex function
        # we do this once here rather than many times inside the loop
        pex = self.pex_extent_view
        if map_rel:
            pex = self.pex_extent

        # speed up drawing by caching previous point colour
        cache_pcolour = None

        # draw the images
        for (lon, lat, pmap, w, h, place,
                x_off, y_off, pradius, pcolour, idata) in images:
            (pt, ex) = pex(place, (lon, lat), x_off, y_off, w, h, image=True)

            if pt and pradius:
                # if we need to change colours
                if cache_pcolour != pcolour:
                    qcolour = QColor(*pcolour)
                    pen = QPen(qcolour, pradius, Qt.SolidLine)
                    dc.setPen(pen)
                    dc.setBrush(qcolour)
                    cache_pcolour = pcolour

                # draw the image 'point'
                (px, py) = pt
                dc.drawEllipse(QPoint(px, py), pradius, pradius)

            if ex:
                # draw the image itself
                (ix, _, iy, _) = ex
                dc.drawPixmap(QPoint(ix, iy), pmap)

    def draw_text_layer(self, dc, text, map_rel):
        """Draw a text Layer on the view.

        dc       the active device context to draw on
        text     a sequence of tuples:
                     (lon, lat, tdata, placement, radius, colour, textcolour,
                      fontname, fontsize, offset_x, offset_y, udata)
        map_rel  points relative to map if True, else relative to view
        """

        # get correct pex function for mode (map/view)
        pex = self.pex_extent_view
        if map_rel:
            pex = self.pex_extent

        # set some caching to speed up mostly unchanging data
        cache_textcolour = None
        cache_font = None
        cache_colour = None

        # draw text on map/view
        for (lon, lat, tdata, place, radius, colour,
                textcolour, fontname, fontsize, x_off, y_off, data) in text:
            # set font characteristics so we can calculate text width/height
            if cache_font != (fontname, fontsize):
                font = QFont(fontname, fontsize)
                dc.setFont(font)
                cache_font = (fontname, fontsize)
                font_metrics = QFontMetrics(font)

            qrect = font_metrics.boundingRect(tdata)
            w = qrect.width()    # text string width and height
            h = qrect.height()

            # get point + extent information (each can be None if off-view)
            (pt, ex) = pex(place, (lon, lat), x_off, y_off, w, h)
            if pt and radius:   # don't draw point if off screen or zero radius
                (pt_x, pt_y) = pt
                if cache_colour != colour:
                    qcolour = QColor(*colour)
                    pen = QPen(qcolour, radius, Qt.SolidLine)
                    dc.setPen(pen)
                    dc.setBrush(qcolour)
                    cache_colour = colour
                dc.drawEllipse(QPoint(pt_x, pt_y), radius, radius)

            if ex:              # don't draw text if off screen
                (lx, _, _, by) = ex
                if cache_textcolour != textcolour:
                    qcolour = QColor(*textcolour)
                    pen = QPen(qcolour, radius, Qt.SolidLine)
                    dc.setPen(pen)
                    cache_textcolour = textcolour
                dc.drawText(QPointF(lx, by), tdata)

    def draw_polygon_layer(self, dc, data, map_rel):
        """Draw a polygon layer.

        dc       the active device context to draw on
        data     an iterable of polygon tuples:
                     (p, placement, width, colour, closed,
                      filled, fillcolour, offset_x, offset_y, udata)
                 where p is an iterable of points: (x, y)
        map_rel  points relative to map if True, else relative to view
        """

        # get the correct pex function for mode (map/view)
        pex = self.pex_polygon_view
        if map_rel:
            pex = self.pex_polygon

        # draw polygons
        cache_colour_width = None         # speed up mostly unchanging data
        cache_fillcolour = (0, 0, 0, 0)

        dc.setBrush(QBrush(QColor(*cache_fillcolour)))  # initial brush is transparent

        for (p, place, width, colour, closed,
                filled, fillcolour, x_off, y_off, udata) in data:
            (poly, extent) = pex(place, p, x_off, y_off)
            if poly:
                if (colour, width) != cache_colour_width:
                    dc.setPen(QPen(QColor(*colour), width, Qt.SolidLine))
                    cache_colour_width = (colour, width)

                if filled and (fillcolour != cache_fillcolour):
                    dc.setBrush(QBrush(QColor(*fillcolour), Qt.SolidPattern))
                    cache_fillcolour = fillcolour

                qpoly = [QPoint(*p) for p in poly]
                dc.drawPolygon(QPolygon(qpoly))

    def draw_polyline_layer(self, dc, data, map_rel):
        """Draw a polyline layer.

        dc       the active device context to draw on
        data     an iterable of polyline tuples:
                     (p, placement, width, colour, offset_x, offset_y, udata)
                 where p is an iterable of points: (x, y)
        map_rel  points relative to map if True, else relative to view
        """

        # get the correct pex function for mode (map/view)
        pex = self.pex_polygon_view
        if map_rel:
            pex = self.pex_polygon

        # brush is always transparent
        dc.setBrush(QBrush(QColor(0, 0, 0, 0)))

        # draw polyline(s)
        cache_colour_width = None     # speed up mostly unchanging data

        for (p, place, width, colour, x_off, y_off, udata) in data:
            (poly, extent) = pex(place, p, x_off, y_off)
            if poly:
                if cache_colour_width != (colour, width):
                    dc.setPen(QPen(QColor(*colour), width, Qt.SolidLine))
                    cache_colour_width = (colour, width)
                qpoly = [QPoint(*p) for p in poly]
                dc.drawPolyline(QPolygon(qpoly))

######
# Convert between geo and view coordinates
######

    def geo_to_view(self, geo):
        """Convert a geo coord to view.

        geo  tuple (xgeo, ygeo)

        Return a tuple (xview, yview) in view coordinates.
        Assumes point is in view.
        """

        # 
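# --- Illustrative aside: the point-layer data format ---------------------------
# A sketch of the per-point tuples that draw_point_layer() above iterates
# over.  The geo coordinates are made-up sample values; each colour is an
# internal (r, g, b, a) tuple as produced by colour_to_internal().
_sample_point_data = [
    # (x,      y,      place, radius, colour,           x_off, y_off, udata)
    (151.21, -33.87,  'cc',  3,     (255, 0, 0, 255),  0,     0,     None),
    (144.96, -37.81,  'cc',  5,     (0, 0, 255, 255),  0,     0,     'user data'),
]
# such data is normally wrapped in a layer via add_layer(), for example:
# lid = widget.add_layer(widget.draw_point_layer, _sample_point_data,
#                        map_rel=True, visible=True, show_levels=None,
#                        selectable=True, name='points', ltype=PySlipQt.TypePoint)
# -------------------------------------------------------------------------------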
convert the Geo position to tile coordinates\n (tx, ty) = self.tile_src.Geo2Tile(geo)\n\n # using the key_tile_* variables to convert to view coordinates\n xview = (tx - self.key_tile_left) * self.tile_width + self.key_tile_xoffset\n yview = (ty - self.key_tile_top) * self.tile_height + self.key_tile_yoffset\n\n return (xview, yview)\n\n# UNUSED\n def geo_to_view_masked(self, geo):\n \"\"\"Convert a geo (lon+lat) position to view pixel coords.\n\n geo tuple (xgeo, ygeo)\n\n Return a tuple (xview, yview) of point if on-view,or None\n if point is off-view.\n \"\"\"\n\n (xgeo, ygeo) = geo\n\n if (self.view_llon <= xgeo <= self.view_rlon and\n self.view_blat <= ygeo <= self.view_tlat):\n return self.geo_to_view(geo)\n\n return None\n\n def view_to_geo(self, view):\n \"\"\"Convert a view coords position to a geo coords position.\n\n view tuple of view coords (xview, yview)\n\n Returns a tuple of geo coords (xgeo, ygeo) if the cursor is over map\n tiles, else returns None.\n\n Note: the 'key' tile information must be correct.\n \"\"\"\n\n (xview, yview) = view\n (min_xgeo, max_xgeo, min_ygeo, max_ygeo) = self.tile_src.GetExtent()\n\n x_from_key = xview - self.key_tile_xoffset\n y_from_key = yview - self.key_tile_yoffset\n\n # get view point as tile coordinates\n xtile = self.key_tile_left + x_from_key/self.tile_width\n ytile = self.key_tile_top + y_from_key/self.tile_height\n\n result = (xgeo, ygeo) = self.tile_src.Tile2Geo((xtile, ytile))\n\n if self.wrap_x and self.wrap_y:\n return result\n\n if not self.wrap_x:\n if not (min_xgeo <= xgeo <= max_xgeo):\n return None\n\n if not self.wrap_y:\n if not (min_ygeo <= ygeo <= max_ygeo):\n return None\n\n return result\n\n######\n# PEX - Point & EXtension.\n#\n# These functions encapsulate the code that finds the extent of an object.\n# They all return a tuple (point, extent) where 'point' is the placement\n# point of an object (or list of points for a polygon) and an 'extent'\n# tuple (lx, rx, ty, by) [left, right, top, bottom].\n######\n\n def pex_point(self, place, geo, x_off, y_off, radius):\n \"\"\"Convert point object geo position to point & extent in view coords.\n\n place placement string\n geo point position tuple (xgeo, ygeo)\n x_off, y_off X and Y offsets\n radius radius of the point\n\n Return a tuple of point and extent origins (point, extent) where 'point'\n is (px, py) and extent is (elx, erx, ety, eby) (both in view coords).\n Return None for extent if extent is completely off-view.\n\n The 'extent' here is the extent of the point+radius.\n \"\"\"\n\n # get point view coords\n (xview, yview) = self.geo_to_view(geo)\n point = self.point_placement(place, xview, yview, x_off, y_off)\n (px, py) = point\n\n # extent = (left, right, top, bottom) in view coords\n elx = px - radius\n erx = px + radius\n ety = py - radius\n eby = py + radius\n extent = (elx, erx, ety, eby)\n\n # decide if point extent is off-view\n if erx < 0 or elx > self.view_width or eby < 0 or ety > self.view_height:\n extent = None\n\n return (point, extent)\n\n def pex_point_view(self, place, view, x_off, y_off, radius):\n \"\"\"Convert point object view position to point & extent in view coords.\n\n place placement string\n view point position tuple (xview, yview)\n x_off, y_off X and Y offsets\n\n Return a tuple of point and extent origins (point, extent) where 'point'\n is (px, py) and extent is (elx, erx, ety, eby) (both in view coords).\n Return None for point or extent if completely off-view.\n\n The 'extent' here is the extent of the point+radius.\n \"\"\"\n\n # 
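# --- Illustrative aside: the tile -> view arithmetic in geo_to_view() ---------
# Self-contained numbers (assumed): 256-pixel tiles with the key tile at
# tile coords (2, 5), drawn at view offset (-40, -10).  A geo position that
# converts to tile coords (3.5, 6.0) then lands at view (344.0, 246.0):
_tile_w = _tile_h = 256
_key_left, _key_top = 2, 5
_key_xoff, _key_yoff = -40, -10
_tx, _ty = 3.5, 6.0
_xview = (_tx - _key_left) * _tile_w + _key_xoff    # (3.5-2)*256 - 40
_yview = (_ty - _key_top) * _tile_h + _key_yoff     # (6.0-5)*256 - 10
assert (_xview, _yview) == (344.0, 246.0)
# ------------------------------------------------------------------------------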
get point view coords and perturb point to placement\n (xview, yview) = view\n point = self.point_placement_view(place, xview, yview, x_off, y_off)\n (px, py) = point\n\n # extent = (left, right, top, bottom) in view coords\n elx = px - radius\n erx = px + radius\n ety = py - radius\n eby = py + radius\n extent = (elx, erx, ety, eby)\n\n # decide if extent is off-view\n if erx < 0 or elx > self.view_width or eby < 0 or ety > self.view_height:\n extent = None\n\n return (point, extent)\n\n def pex_extent(self, place, geo, x_off, y_off, w, h, image=False):\n \"\"\"Convert object geo position to position & extent in view coords.\n\n place placement string\n geo point position tuple (xgeo, ygeo)\n x_off, y_off X and Y offsets\n w, h width and height of extent in pixels\n image True if we are placing an image. Required because an image\n and text extents have DIFFERENT ORIGINS!\n\n Return a tuple ((px, py), (elx, erx, ety, eby)) of point and extent\n data where '(px, py)' is the point and '(elx, erx, ety, eby)' is the\n extent. Both point and extent are in view coordinates.\n\n Return None for point or extent if either is completely off-view.\n\n An extent object can be either an image object or a text object.\n \"\"\"\n\n # get point view coords\n vpoint = self.geo_to_view(geo)\n (vpx, vpy) = vpoint\n\n # get extent limits\n # must take into account 'place', 'x_off' and 'y_off'\n point = self.extent_placement(place, vpx, vpy, x_off, y_off, w, h, image=image)\n (px, py) = point\n\n # extent = (left, right, top, bottom) in view coords\n # this is different for images\n elx = px\n erx = px + w\n if image:\n ety = py\n eby = py + h\n else:\n ety = py - h\n eby = py\n\n extent = (elx, erx, ety, eby)\n\n # decide if point is off-view\n if vpx < 0 or vpx > self.view_width or vpy < 0 or vpy > self.view_height:\n vpoint = None\n\n # decide if extent is off-view\n if erx < 0 or elx > self.view_width or eby < 0 or ety > self.view_height:\n # no extent if ALL of extent is off-view\n extent = None\n\n return (vpoint, extent)\n\n def pex_extent_view(self, place, view, x_off, y_off, w, h, image=False):\n \"\"\"Convert object view position to point & extent in view coords.\n\n place placement string\n view point position tuple (xview, yview) (view coords)\n x_off, y_off X and Y offsets\n w, h width and height of extent in pixels\n image True if we are placing an image. 
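# --- Illustrative aside: the point-extent test in the pex_*() methods ---------
# A self-contained rework of the off-view test above with sample numbers:
# a placed point at view (10, 20) with radius 5 in an assumed 400x300 view.
_px, _py, _radius = 10, 20, 5
_view_w, _view_h = 400, 300
_extent = (_px - _radius, _px + _radius, _py - _radius, _py + _radius)
(_elx, _erx, _ety, _eby) = _extent                  # (5, 15, 15, 25)
_off_view = _erx < 0 or _elx > _view_w or _eby < 0 or _ety > _view_h
assert _extent == (5, 15, 15, 25) and not _off_view
# a point at view (-20, 20) would give _erx == -15 < 0, so its extent would
# be reported as None (completely off-view)
# ------------------------------------------------------------------------------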
Required because an image\n and text extents have DIFFERENT ORIGINS!\n\n Return a tuple of point and extent origins (point, extent) where 'point'\n is (px, py) and extent is (elx, erx, ety, eby) (both in view coords).\n Return None for extent if extent is completely off-view.\n\n Takes size of extent object into consideration.\n \"\"\"\n\n # get point view coords and perturb point to placement origin\n # we ignore offsets for the point as they apply to the extent only\n (xview, yview) = view\n point = self.point_placement_view(place, xview, yview, 0, 0)\n\n # get extent view coords (ix and iy)\n (px, py) = point\n (ix, iy) = self.extent_placement(place, px, py, x_off, y_off,\n w, h, image=False)\n\n # extent = (left, right, top, bottom) in view coords\n # this is different for images\n if image:\n # perturb extent coords to edges of image\n if place == 'cc': elx = px - w/2; ety = py - h/2\n elif place == 'cn': elx = px - w/2; ety = py + y_off\n elif place == 'ne': elx = px - w - x_off; ety = py + y_off\n elif place == 'ce': elx = px - w - x_off; ety = py - h/2\n elif place == 'se': elx = px - w - x_off; ety = py - h - y_off\n elif place == 'cs': elx = px - w/2; ety = py - h - y_off\n elif place == 'sw': elx = px + x_off; ety = py - h - y_off\n elif place == 'cw': elx = px + x_off; ety = py - h/2\n elif place == 'nw': elx = px + x_off; ety = py + y_off\n erx = elx + w\n eby = ety + h\n else:\n elx = ix\n erx = ix + w\n ety = iy - h\n eby = iy\n\n extent = (elx, erx, ety, eby)\n\n # decide if point is off-view\n if px < 0 or px > self.view_width or py < 0 or py > self.view_height:\n point = None\n\n # decide if extent is off-view\n if erx < 0 or elx > self.view_width or eby < 0 or ety > self.view_height:\n extent = None\n\n return (point, extent)\n\n def pex_polygon(self, place, poly, x_off, y_off):\n \"\"\"Convert polygon/line obj geo position to points & extent in view coords.\n\n place placement string\n poly list of point position tuples (xgeo, ygeo)\n x_off, y_off X and Y offsets\n\n Return a tuple of point and extent (point, extent) where 'point' is a\n list of (px, py) and extent is (elx, erx, ety, eby) (both in view coords).\n Return None for extent if extent is completely off-view.\n \"\"\"\n\n # get polygon/line points in perturbed view coordinates\n view_points = []\n for geo in poly:\n (xview, yview) = self.geo_to_view(geo)\n point = self.point_placement(place, xview, yview, x_off, y_off)\n view_points.append(point)\n\n # extent = (left, right, top, bottom) in view coords\n elx = min(view_points, key=lambda x: x[0])[0]\n erx = max(view_points, key=lambda x: x[0])[0]\n ety = min(view_points, key=lambda x: x[1])[1]\n eby = max(view_points, key=lambda x: x[1])[1]\n extent = (elx, erx, ety, eby)\n\n # decide if extent is off-view\n res_ex = None # assume extent is off-view\n for (px, py) in view_points:\n if ((px >= 0 and px < self.view_width)\n and (py >= 0 and py < self.view_height)):\n res_ex = extent # at least some of extent is on-view\n break\n\n return (view_points, res_ex)\n\n def pex_polygon_view(self, place, poly, x_off, y_off):\n \"\"\"Convert polygon/line obj view position to points & extent in view coords.\n\n place placement string\n poly list of point position tuples (xview, yview)\n x_off, y_off X and Y offsets\n\n Return a tuple of point and extent origins (point, extent) where 'point'\n is a list of (px, py) and extent is (elx, erx, ety, eby) (both in view\n coords). 
Return None for extent if extent is completely off-view.\n \"\"\"\n\n # get polygon/line points in view coordinates\n view = []\n for (xview, yview) in poly:\n point = self.point_placement_view(place, xview, yview, x_off, y_off)\n view.append(point)\n\n # get extent - max/min x and y\n # extent = (left, right, top, bottom) in view coords\n elx = min(view, key=lambda x: x[0])[0]\n erx = max(view, key=lambda x: x[0])[0]\n ety = min(view, key=lambda x: x[1])[1]\n eby = max(view, key=lambda x: x[1])[1]\n extent = (elx, erx, ety, eby)\n\n # decide if polygon/line or extent are off-view\n res_ex = None\n for (px, py) in view:\n if ((px >= 0 and px < self.view_width)\n and (py >= 0 and py < self.view_height)):\n res_ex = extent\n break\n\n return (view, res_ex)\n\n######\n# Placement routines instead of original 'exec' code.\n# Code in test_assumptions.py shows this is faster.\n######\n\n def point_placement(self, place, x, y, x_off, y_off):\n \"\"\"Perform map-relative placement for a single point.\n\n place placement key string\n x, y point view coordinates\n x_off, y_off the X and Y offset values\n\n Returns a tuple (x, y) in view coordinates.\n \"\"\"\n\n # adjust the X, Y coordinates relative to the origin\n if place == 'cc': pass \n elif place == 'nw': x += x_off; y += y_off\n elif place == 'cn': y += y_off\n elif place == 'ne': x += -x_off; y += y_off\n elif place == 'ce': x += -x_off\n elif place == 'se': x += -x_off; y += -y_off\n elif place == 'cs': y += -y_off\n elif place == 'sw': x += x_off; y += -y_off\n elif place == 'cw': x += x_off; \n\n return (x, y)\n\n def point_placement_view(self, place, x, y, x_off, y_off):\n \"\"\"Perform view-relative placement for a single point.\n\n place placement key string\n x, y point view coordinates\n x_off, y_off the X and Y offset values\n\n Returns a tuple (x, y) in view coordinates.\n \"\"\"\n\n dcw = self.view_width\n dch = self.view_height\n dcw2 = dcw / 2\n dch2 = dch / 2\n\n # adjust the X, Y coordinates relative to the origin\n # offsets are always away from the nearest edge\n if place == 'cc': x += dcw2; y += dch2\n elif place == 'nw': x += x_off; y += y_off\n elif place == 'cn': x += dcw2; y += y_off\n elif place == 'ne': x += dcw - x_off; y += y_off\n elif place == 'ce': x += dcw - x_off; y += dch2\n elif place == 'se': x += dcw - x_off; y += dch - y_off\n elif place == 'cs': x += dcw2; y += dch - y_off\n elif place == 'sw': x += x_off; y += dch - y_off\n elif place == 'cw': x += x_off; y += dch2\n\n return (x, y)\n\n def extent_placement(self, place, x, y, x_off, y_off, w, h, image=False):\n \"\"\"Perform map-relative placement of an extent.\n\n place placement key string\n x, y view coords of point\n x_off, y_off offset from point (pixels)\n w, h width, height of the extent (pixels)\n image True if we are placing an image. 
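# --- Illustrative aside: view-relative placement arithmetic -------------------
# point_placement_view() above anchors a point to a view corner or edge, with
# offsets measured inward from the nearest edge.  A self-contained check of
# the 'se' (south-east) case in an assumed 400x300 view with offsets (5, 5):
_view_w, _view_h = 400, 300
_x = _y = 0
_x_off = _y_off = 5
_x += _view_w - _x_off      # 'se': inward from the right edge  -> 395
_y += _view_h - _y_off      # 'se': inward from the bottom edge -> 295
assert (_x, _y) == (395, 295)
# ------------------------------------------------------------------------------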
Required because an image\n and text extents have DIFFERENT ORIGINS!\n\n Returns a tuple (x, y).\n \"\"\"\n\n w2 = w / 2\n h2 = h / 2\n\n if image:\n if place == 'cc': x += -w2; y += -h2\n elif place == 'nw': x += x_off; y += y_off\n elif place == 'cn': x += -w2; y += y_off\n elif place == 'ne': x += -x_off - w; y += y_off\n elif place == 'ce': x += -x_off - w; y += -h2\n elif place == 'se': x += -x_off - w; y += -y_off - h\n elif place == 'cs': x += -w2; y += -y_off - h\n elif place == 'sw': x += x_off; y += -y_off - h\n elif place == 'cw': x += x_off; y += -h2\n else:\n if place == 'cc': x += -w2; y += h2\n elif place == 'nw': x += x_off; y += y_off + h\n elif place == 'cn': x += -w2; y += y_off + h\n elif place == 'ne': x += -x_off - w; y += y_off + h\n elif place == 'ce': x += -x_off - w; y += h2\n elif place == 'se': x += -x_off - w; y += -y_off\n elif place == 'cs': x += -w2; y += -y_off\n elif place == 'sw': x += x_off; y += -y_off\n elif place == 'cw': x += x_off; y += h2\n\n return (x, y)\n\n def zoom_level(self, level, view=None):\n \"\"\"Zoom to a map level.\n\n level map level to zoom to\n view view coords of cursor\n (if not given, assume view centre)\n\n Change the map zoom level to that given. Returns True if the zoom\n succeeded, else False. If False is returned the method call has no effect.\n Same operation as .GotoLevel() except we try to maintain the geo position\n under the cursor.\n \"\"\"\n\n log(f'zoom_level: level={level}, view={view}')\n\n # if not given cursor coords, assume view centre\n if view is None:\n view = (self.view_width // 2, self.view_height // 2)\n (view_x, view_y) = view\n\n # get geo coords of view point\n geo = self.view_to_geo(view)\n\n # get tile source to use the new level\n result = self.tile_src.UseLevel(level)\n\n if result:\n # zoom worked, adjust state variables\n self.level = level\n\n # move to new level\n (self.num_tiles_x, self.num_tiles_y, _, _) = self.tile_src.GetInfo(level)\n self.map_width = self.num_tiles_x * self.tile_width\n self.map_height = self.num_tiles_y * self.tile_height\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tile_src.extent\n\n # finally, pan to original map centre (updates widget)\n self.pan_position(geo, view=view)\n\n # to set some state variables\n self.resizeEvent()\n\n # raise the EVT_PYSLIPQT_LEVEL event\n self.raise_event(PySlipQt.EVT_PYSLIPQT_LEVEL, level=level)\n\n return result\n\n def pan_position(self, geo, view=None):\n \"\"\"Pan the given geo position in the current map zoom level.\n\n geo a tuple (xgeo, ygeo)\n view a tuple of view coordinates (view_x, view_y)\n (if not given, assume view centre)\n\n We just adjust the key tile to place the required geo position at the\n given view coordinates. 
If that is not possible, just centre in either\n the X or Y directions, or both.\n \"\"\"\n\n log(f'pan_position: geo={geo}, view={view}')\n\n # if not given a \"view\", assume the view centre coordinates\n if view is None:\n view = (self.view_width // 2, self.view_height // 2)\n (view_x, view_y) = view\n\n log(f'view_x={view_x}, view_y={view_y}')\n\n # convert the geo posn to a tile position\n (tile_x, tile_y) = self.tile_src.Geo2Tile(geo)\n\n # determine what the new key tile should be\n # figure out number of tiles from centre point to edges\n tx = view_x / self.tile_width\n ty = view_y / self.tile_height\n\n # calculate tile coordinates of the top-left corner of the view\n key_tx = tile_x - tx\n key_ty = tile_y - ty\n\n (key_tile_left, x_offset) = divmod(key_tx, 1)\n self.key_tile_left = int(key_tile_left)\n self.key_tile_xoffset = -int(x_offset * self.tile_width)\n\n (key_tile_top, y_offset) = divmod(key_ty, 1)\n self.key_tile_top = int(key_tile_top)\n self.key_tile_yoffset = -int(y_offset * self.tile_height)\n\n # adjust key tile, if necessary\n self.rectify_key_tile()\n\n # redraw the widget\n self.update()\n\n def rectify_key_tile(self):\n \"\"\"Adjust state variables to ensure map centred if map is smaller than\n view. Otherwise don't allow edges to be exposed.\n\n Adjusts the \"key\" tile variables to ensure proper presentation.\n\n Relies on .map_width, .map_height and .key_tile_* being set.\n \"\"\"\n\n # check map in X direction\n if self.map_width < self.view_width:\n # map < view, fits totally in view, centre in X\n self.key_tile_left = 0\n self.key_tile_xoffset = (self.view_width - self.map_width) // 2\n else:\n # if key tile out of map in X direction, rectify\n if self.key_tile_left < 0:\n self.key_tile_left = 0\n self.key_tile_xoffset = 0\n else:\n # if map left/right edges showing, cover them\n show_len = (self.num_tiles_x - self.key_tile_left)*self.tile_width + self.key_tile_xoffset\n if show_len < self.view_width:\n # figure out key tile X to have right edge of map and view equal\n tiles_showing = self.view_width / self.tile_width\n int_tiles = int(tiles_showing)\n self.key_tile_left = self.num_tiles_x - int_tiles - 1\n self.key_tile_xoffset = -int((1.0 - (tiles_showing - int_tiles)) * self.tile_width)\n\n # now check map in Y direction\n if self.map_height < self.view_height:\n # map < view, fits totally in view, centre in Y\n self.key_tile_top = 0\n self.key_tile_yoffset = (self.view_height - self.map_height) // 2\n else:\n if self.key_tile_top < 0:\n # map top edge showing, cover\n self.key_tile_top = 0\n self.key_tile_yoffset = 0\n else:\n # if map bottom edge showing, cover\n show_len = (self.num_tiles_y - self.key_tile_top)*self.tile_height + self.key_tile_yoffset\n if show_len < self.view_height:\n # figure out key tile Y to have bottom edge of map and view equal\n tiles_showing = self.view_height / self.tile_height\n int_tiles = int(tiles_showing)\n self.key_tile_top = self.num_tiles_y - int_tiles - 1\n self.key_tile_yoffset = -int((1.0 - (tiles_showing - int_tiles)) * self.tile_height)\n\n def zoom_level_position(self, level, posn):\n \"\"\"Zoom to a map level and pan to the given position in the map.\n\n level map level to zoom to\n posn a tuple (xgeo, ygeo)\n \"\"\"\n\n if self.zoom_level(level):\n self.pan_position(posn)\n\n def get_i18n_kw(self, kwargs, kws, default):\n \"\"\"Get alternate international keyword value.\n\n kwargs dictionary to look for keyword value\n kws iterable of keyword spelling strings\n default default value if no keyword found\n\n 
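# --- Illustrative aside: deriving the key tile in pan_position() --------------
# Self-contained numbers (assumed): 256-pixel tiles and a 512x512 view, so
# the view centre (256, 256) is exactly one tile from the view's top-left
# corner.  Panning to a geo position at tile coords (10.75, 4.5) gives:
_tile_w = _tile_h = 256
_view_x = _view_y = 256
_tile_x, _tile_y = 10.75, 4.5
_key_tx = _tile_x - _view_x / _tile_w       # 9.75
_key_ty = _tile_y - _view_y / _tile_h       # 3.5
(_key_left, _x_frac) = divmod(_key_tx, 1)   # (9.0, 0.75)
(_key_top, _y_frac) = divmod(_key_ty, 1)    # (3.0, 0.5)
_key_xoffset = -int(_x_frac * _tile_w)      # -192
_key_yoffset = -int(_y_frac * _tile_h)      # -128
assert (int(_key_left), _key_xoffset, int(_key_top), _key_yoffset) == (9, -192, 3, -128)
# ------------------------------------------------------------------------------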
Returns the keyword value.
        """

        result = None
        for kw_str in kws[:-1]:
            result = kwargs.get(kw_str, None)
            if result:
                break
        else:
            result = kwargs.get(kws[-1], default)

        return result

    def get_level_and_position(self, place='cc'):
        """Get the level and geo position of a cardinal point within the view.

        place  a placement string specifying the point in the view
               for which we require the geo position

        Returns a tuple (level, geo) where 'geo' is (geo_x, geo_y).
        """

        view_coords = self.point_placement_view(place, 0, 0, 0, 0)
        geo = self.view_to_geo(view_coords)

        return (self.level, geo)

    def set_key_from_centre(self, geo):
        """Set 'key' tile stuff from given geo at view centre.

        geo  geo coords of centre of view

        We need to assume little about which state variables are set.
        Only assume these are set:
            self.tile_width
            self.tile_height
        """

        (ctile_tx, ctile_ty) = self.tile_src.Geo2Tile(geo)

        int_ctile_tx = int(ctile_tx)
        int_ctile_ty = int(ctile_ty)

        frac_ctile_tx = ctile_tx - int_ctile_tx
        frac_ctile_ty = ctile_ty - int_ctile_ty

        ctile_xoff = self.view_width // 2 - self.tile_width * frac_ctile_tx
        ctile_yoff = self.view_height // 2 - self.tile_height * frac_ctile_ty

        num_whole_x = ctile_xoff // self.tile_width
        num_whole_y = ctile_yoff // self.tile_height

        xmargin = ctile_xoff - num_whole_x*self.tile_width
        ymargin = ctile_yoff - num_whole_y*self.tile_height

        # update the 'key' tile state variables
        self.key_tile_left = int_ctile_tx - num_whole_x - 1
        self.key_tile_top = int_ctile_ty - num_whole_y - 1
        self.key_tile_xoffset = self.tile_width - xmargin
        self.key_tile_yoffset = self.tile_height - ymargin

        # centre map in view if map < view
        if self.key_tile_left < 0:
            self.key_tile_left = 0
            self.key_tile_xoffset = (self.view_width - self.map_width) // 2

        if self.key_tile_top < 0:
            self.key_tile_top = 0
            self.key_tile_yoffset = (self.view_height - self.map_height) // 2

######
#
######

    def colour_to_internal(self, colour):
        """Convert a colour in one of various forms to an internal format.

        colour  either a HEX string ('#RRGGBBAA')
                or a tuple (r, g, b, a)
                or a colour name ('red')

        Returns internal form:  (r, g, b, a)
        """

        if isinstance(colour, str):
            # expect '#RRGGBBAA' form
            if len(colour) != 9 or colour[0] != '#':
                # assume it's a colour *name*
                # we should do more checking of the name here, though it looks
                # like PyQt5 defaults to a colour if the name isn't recognized
                c = QColor(colour)
                result = (c.red(), c.green(), c.blue(), c.alpha())
            else:
                # we try for a colour like '#RRGGBBAA'
                r = int(colour[1:3], 16)
                g = int(colour[3:5], 16)
                b = int(colour[5:7], 16)
                a = int(colour[7:9], 16)
                result = (r, g, b, a)
        elif isinstance(colour, QColor):
            # if it's a QColor, get float RGBA values, convert to ints
            result = tuple(int(v*255) for v in colour.getRgbF())
        else:
            # we assume a list or tuple
            try:
                len_colour = len(colour)
            except TypeError:
                msg = ("Colour value '%s' is not in the form '(r, g, b, a)'"
                       % str(colour))
                raise Exception(msg)

            if len_colour != 4:
                msg = ("Colour value '%s' is not in the form '(r, g, b, a)'"
                       % str(colour))
                raise Exception(msg)
            result = []
            for v in colour:
                try:
                    v = int(v)
                except ValueError:
                    msg = ("Colour value '%s' is not in the form '(r, g, b, a)'"
                           % str(colour))
                    raise Exception(msg)
                if v < 0 or v > 255:
                    msg = ("Colour value '%s' is not in the form '(r, g, b, a)'"
                           % 
str(colour))\n raise Exception(msg)\n result.append(v)\n result = tuple(result)\n\n return result\n\n def sel_box_canonical(self):\n \"\"\"'Canonicalize' a selection box limits.\n\n Uses instance variables (all in view coordinates):\n self.sbox_1_x X position of box select start\n self.sbox_1_y Y position of box select start\n self.sbox_w width of selection box (start to finish)\n self.sbox_h height of selection box (start to finish)\n\n Four ways to draw the selection box (starting in each of the four\n corners), so four cases.\n\n The sign of the width/height values are decided with respect to the\n origin at view top-left corner. That is, a negative width means\n the box was started at the right and swept to the left. A negative\n height means the selection started low and swept high in the view.\n\n Returns a tuple (llx, llr, urx, ury) where llx is lower left X, ury is\n upper right corner Y, etc. All returned values in view coordinates.\n \"\"\"\n\n if self.sbox_h >= 0:\n if self.sbox_w >= 0:\n # 2\n ll_corner_vx = self.sbox_1_x\n ll_corner_vy = self.sbox_1_y + self.sbox_h\n tr_corner_vx = self.sbox_1_x + self.sbox_w\n tr_corner_vy = self.sbox_1_y\n else:\n # 1\n ll_corner_vx = self.sbox_1_x + self.sbox_w\n ll_corner_vy = self.sbox_1_y + self.sbox_h\n tr_corner_vx = self.sbox_1_x\n tr_corner_vy = self.sbox_1_y\n else:\n if self.sbox_w >= 0:\n # 3\n ll_corner_vx = self.sbox_1_x\n ll_corner_vy = self.sbox_1_y\n tr_corner_vx = self.sbox_1_x + self.sbox_w\n tr_corner_vy = self.sbox_1_y + self.sbox_h\n else:\n # 4\n ll_corner_vx = self.sbox_1_x + self.sbox_w\n ll_corner_vy = self.sbox_1_y\n tr_corner_vx = self.sbox_1_x\n tr_corner_vy = self.sbox_1_y + self.sbox_h\n\n return (ll_corner_vx, ll_corner_vy, tr_corner_vx, tr_corner_vy)\n\n######\n# Select helpers - get objects that were selected\n######\n\n def sel_point_in_layer(self, layer, view_pt, map_pt):\n \"\"\"Determine if clicked location selects a point in layer data.\n\n layer layer object we are looking in\n view_pt click location tuple (view coords)\n map_pt click location tuple (geo coords)\n\n We must look for the nearest point to the selection point.\n\n Return None (no selection) or (point, data, None) of selected point\n where point is [(x,y,attrib)] where X and Y are map or view relative\n depending on layer.map_rel. 'data' is the data object associated with\n each selected point. The None is a placeholder for the relative\n selection point, which is meaningless for point selection.\n \"\"\"\n\n # TODO: speed this up? 
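# --- Illustrative aside: canonicalizing a select box ---------------------------
# sel_box_canonical() above must cope with a drag started in any corner.
# Self-contained numbers: a drag started at view (200, 50) and swept 120px
# left and 90px down gives a negative width and a positive height (the view
# origin is the top-left corner):
_sbox_1_x, _sbox_1_y = 200, 50              # start corner
_sbox_w, _sbox_h = -120, 90                 # width < 0, height >= 0
_ll = (_sbox_1_x + _sbox_w, _sbox_1_y + _sbox_h)    # lower-left  = (80, 140)
_tr = (_sbox_1_x, _sbox_1_y)                        # top-right   = (200, 50)
assert _ll == (80, 140) and _tr == (200, 50)
# -------------------------------------------------------------------------------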
Do we need to??\n # http://en.wikipedia.org/wiki/Kd-tree\n # would need to create kd-tree in AddLayer() (slower)\n\n result = None\n delta = layer.delta\n dist = 9999999.0 # more than possible\n\n # get correct pex function (map-rel or view-rel)\n pex = self.pex_point_view\n if layer.map_rel:\n pex = self.pex_point\n\n # find selected point on map/view\n (view_x, view_y) = view_pt\n for (x, y, place, radius, colour, x_off, y_off, udata) in layer.data:\n (vp, _) = pex(place, (x,y), x_off, y_off, radius)\n if vp:\n (vx, vy) = vp\n d = (vx - view_x)*(vx - view_x) + (vy - view_y)*(vy - view_y)\n if d < dist:\n rpt = (x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})\n result = ([rpt], None)\n dist = d\n\n if dist <= layer.delta:\n return result\n return None\n\n def sel_box_points_in_layer(self, layer, ll, ur):\n \"\"\"Get list of points inside box.\n\n layer reference to layer object we are working on\n ll lower-left corner point of selection box (geo or view)\n ur upper-right corner point of selection box (geo or view)\n\n Return a tuple (selection, data, relsel) where 'selection' is a list of\n selected point positions (xgeo,ygeo), 'data' is a list of userdata\n objects associated with the selected points and 'relsel' is always None\n as this is meaningless for box selects.\n\n If nothing is selected return None.\n \"\"\"\n\n # get a list of points inside the selection box\n selection = []\n data = []\n\n # get correct pex function and box limits in view coords\n pex = self.pex_point_view\n (blx, bby) = ll\n (brx, bty) = ur\n if layer.map_rel:\n pex = self.pex_point\n (blx, bby) = self.geo_to_view(ll)\n (brx, bty) = self.geo_to_view(ur)\n\n # get points selection\n for (x, y, place, radius, colour, x_off, y_off, udata) in layer.data:\n (vp, _) = pex(place, (x,y), x_off, y_off, radius)\n if vp:\n (vpx, vpy) = vp\n if blx <= vpx <= brx and bby >= vpy >= bty:\n selection.append((x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': x_off,\n 'offset_y': y_off}))\n data.append(udata)\n\n if selection:\n return (selection, data, None)\n return None\n\n def sel_image_in_layer(self, layer, view_pt, geo_pt):\n \"\"\"Decide if click location selects image object(s) in layer data.\n\n layer layer object we are looking in\n view_pt click location tuple (view coords)\n geo_pt click location (geo coords)\n\n Returns either None if no selection or a tuple (selection, relsel)\n where 'selection' is a tuple (xgeo,ygeo) or (xview,yview) of the object\n placement view_pt and 'relsel' is the relative position within the\n selected object of the mouse click.\n\n Note that there could conceivably be more than one image selectable in\n the layer at the mouse click position but only the first found is\n returned as selected.\n \"\"\"\n\n result = None\n\n # get correct pex function and click view_pt into view coords\n clickpt = view_pt\n pex = self.pex_extent_view\n if layer.map_rel:\n clickpt = geo_pt\n pex = self.pex_extent\n (xclick, yclick) = clickpt\n\n (view_x, view_y) = view_pt\n\n # selected an image?\n for (x, y, bmp, w, h, place,\n x_off, y_off, radius, colour, udata) in layer.data:\n (_, e) = pex(place, (x,y), x_off, y_off, w, h)\n if e:\n (lx, rx, ty, by) = e\n if lx <= view_x <= rx and ty <= view_y <= by:\n selection = [(x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})]\n relsel = (int(xclick - lx), int(yclick - ty))\n 
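# --- Illustrative aside: nearest-point selection --------------------------------
# sel_point_in_layer() above compares *squared* distances (no sqrt needed) and
# accepts the nearest point only if that squared distance is within
# layer.delta.  A tiny self-contained version with assumed numbers:
_click = (100, 100)
_points = [(103, 104), (90, 90), (150, 100)]    # placed points (view coords)
_delta = 50                                     # acceptance threshold
_best = min(_points,
            key=lambda p: (p[0] - _click[0])**2 + (p[1] - _click[1])**2)
_best_d = (_best[0] - _click[0])**2 + (_best[1] - _click[1])**2
assert _best == (103, 104) and _best_d == 25 and _best_d <= _delta
# --------------------------------------------------------------------------------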
result = (selection, relsel)\n break\n\n return result\n\n def sel_box_images_in_layer(self, layer, ll, ur):\n \"\"\"Get list of images inside selection box.\n\n layer reference to layer object we are working on\n ll lower-left corner point of selection box (geo or view coords)\n ur upper-right corner point of selection box (geo or view coords)\n\n Return a tuple (selection, data) where 'selection' is a list of\n selected point positions (xgeo,ygeo) and 'data' is a list of userdata\n objects associated withe selected points.\n\n If nothing is selected return None.\n \"\"\"\n\n # get correct pex function and box limits in view coords\n pex = self.pex_extent_view\n if layer.map_rel:\n pex = self.pex_extent\n ll = self.geo_to_view(ll)\n ur = self.geo_to_view(ur)\n (vboxlx, vboxby) = ll\n (vboxrx, vboxty) = ur\n\n # select images in map/view\n selection = []\n data = []\n for (x, y, bmp, w, h, place,\n x_off, y_off, radius, colour, udata) in layer.data:\n (_, e) = pex(place, (x,y), x_off, y_off, w, h)\n if e:\n (li, ri, ti, bi) = e # image extents (view coords)\n if (vboxlx <= li and ri <= vboxrx\n and vboxty <= ti and bi <= vboxby):\n selection.append((x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'offset_x': x_off,\n 'offset_y': y_off}))\n data.append(udata)\n\n if not selection:\n return None\n return (selection, data, None)\n\n def sel_text_in_layer(self, layer, view_point, geo_point):\n \"\"\"Determine if clicked location selects a text object in layer data.\n\n layer layer object we are looking in\n view_point click location tuple (view coordinates)\n geo_point click location tuple (geo coordinates)\n\n Return ([(x, y, attr)], None) for the selected text object, or None if\n no selection. The x and y coordinates are view/geo depending on\n the layer.map_rel value.\n\n ONLY SELECTS ON POINT, NOT EXTENT.\n \"\"\"\n\n result = None\n delta = layer.delta\n dist = 9999999.0\n\n # get correct pex function and mouse click in view coords\n pex = self.pex_point_view\n clickpt = view_point\n if layer.map_rel:\n pex = self.pex_point\n clickpt = geo_point\n (xclick, yclick) = clickpt\n (view_x, view_y) = view_point\n\n # select text in map/view layer\n for (x, y, text, place, radius, colour,\n tcolour, fname, fsize, x_off, y_off, udata) in layer.data:\n (vp, ex) = pex(place, (x,y), 0, 0, radius)\n if vp:\n (px, py) = vp\n d = (px - view_x)**2 + (py - view_y)**2\n if d < dist:\n selection = (x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'textcolour': tcolour,\n 'fontname': fname,\n 'fontsize': fsize,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})\n result = ([selection], None)\n dist = d\n\n if dist <= delta:\n return result\n\n return None\n\n def sel_box_texts_in_layer(self, layer, ll, ur):\n \"\"\"Get list of text objects inside box ll-ur.\n\n layer reference to layer object we are working on\n ll lower-left corner point of selection box (geo or view)\n ur upper-right corner point of selection box (geo or view)\n\n The 'll' and 'ur' points are in view or geo coords, depending on\n the layer.map_rel value.\n\n Returns (selection, data, None) where 'selection' is a list of text\n positions (geo or view, depending on layer.map_rel) plus attributes\n and 'data' is a list of userdata objects associated with the selected\n text objects.\n\n Returns None if no selection.\n\n Selects on text extent and point.\n \"\"\"\n\n selection = []\n data = []\n\n # get correct pex function and box limits in view coords\n pex = self.pex_point_view\n if 
layer.map_rel:\n pex = self.pex_point\n ll = self.geo_to_view(ll)\n ur = self.geo_to_view(ur)\n (lx, by) = ll\n (rx, ty) = ur\n\n # get texts inside box\n for (x, y, text, place, radius, colour,\n tcolour, fname, fsize, x_off, y_off, udata) in layer.data:\n (vp, ex) = pex(place, (x,y), x_off, y_off, radius)\n if vp:\n (px, py) = vp\n if lx <= px <= rx and ty <= py <= by:\n sel = (x, y, {'placement': place,\n 'radius': radius,\n 'colour': colour,\n 'textcolour': tcolour,\n 'fontname': fname,\n 'fontsize': fsize,\n 'offset_x': x_off,\n 'offset_y': y_off})\n selection.append(sel)\n data.append(udata)\n\n if selection:\n return (selection, data, None)\n return None\n\n def sel_polygon_in_layer(self, layer, view_pt, map_pt):\n \"\"\"Get first polygon object clicked in layer data.\n\n layer layer object we are looking in\n view_pt tuple of click position (xview,yview)\n map_pt tuple of click position (xgeo,ygeo)\n\n Returns an iterable: ((x,y), udata) of the first polygon selected.\n Returns None if no polygon selected.\n \"\"\"\n\n result = None\n\n # get correct 'view_pt in polygon' routine\n sel_pt = view_pt\n pip = self.point_in_polygon_view\n if layer.map_rel:\n sel_pt = map_pt\n pip = self.point_in_polygon_geo\n\n # check polyons in layer, choose first view_pt is inside\n for (poly, place, width, colour, close,\n filled, fcolour, x_off, y_off, udata) in layer.data:\n if pip(poly, sel_pt, place, x_off, y_off):\n sel = (poly, {'placement': place,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})\n result = ([sel], None)\n break\n\n return result\n\n def sel_box_polygons_in_layer(self, layer, p1, p2):\n \"\"\"Get list of polygons inside box p1-p2 in given layer.\n\n layer reference to layer object we are working on\n p1 bottom-left corner point of selection box (geo or view)\n p2 top-right corner point of selection box (geo or view)\n\n Return a tuple (selection, data, None) where 'selection' is a list of\n iterables of vertex positions and 'data' is a list of data objects\n associated with each polygon selected.\n \"\"\"\n\n selection = []\n data = []\n\n # get correct pex function and box limits in view coords\n pex = self.pex_polygon_view\n if layer.map_rel:\n pex = self.pex_polygon\n p1 = self.geo_to_view(p1)\n p2 = self.geo_to_view(p2)\n (lx, by) = p1\n (rx, ty) = p2\n\n # check polygons in layer\n for (poly, place, width, colour, close,\n filled, fcolour, x_off, y_off, udata) in layer.data:\n (pt, ex) = pex(place, poly, x_off, y_off)\n if ex:\n (plx, prx, pty, pby) = ex\n if lx <= plx and prx <= rx and ty <= pty and pby <= by:\n sel = (poly, {'placement': place,\n 'offset_x': x_off,\n 'offset_y': y_off})\n selection.append(sel)\n data.append(udata)\n\n if not selection:\n return None\n return (selection, data, None)\n\n def sel_polyline_in_layer(self, layer, view_pt, map_pt):\n \"\"\"Get first polyline object clicked in layer data.\n\n layer layer object we are looking in\n view_pt tuple of click position in view coords\n map_pt tuple of click position in geo coords\n\n Returns a tuple (sel, seg) if a polyline was selected. 'sel' is the\n tuple (poly, attrib) and 'seg' is a tuple (pt1, pt2) of nearest segment\n endview_pts. 
Returns None if no polyline selected.\n \"\"\"\n\n result = None\n delta = layer.delta\n\n # get correct 'view_pt in polyline' routine\n pip = self.point_near_polyline_view\n point = view_pt\n if layer.map_rel:\n pip = self.point_near_polyline_geo\n point = map_pt\n\n # check polylines in layer, choose first where view_pt is close enough\n for (polyline, place, width, colour, x_off, y_off, udata) in layer.data:\n seg = pip(point, polyline, place, x_off, y_off, delta=delta)\n if seg:\n sel = (polyline, {'placement': place,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})\n result = ([sel], seg)\n break\n\n return result\n\n def sel_box_polylines_in_layer(self, layer, p1, p2):\n \"\"\"Get list of polylines inside box p1-p2 in given layer.\n\n layer reference to layer object we are working on\n p1 bottom-left corner point of selection box (geo or view)\n p2 top-right corner point of selection box (geo or view)\n\n Return a tuple (selection, data, None) where 'selection' is a list of\n iterables of vertex positions plus attributes and 'data' is a list of\n data objects associated with each polyline selected.\n \"\"\"\n\n selection = []\n\n # get correct pex function and box limits in view coords\n pex = self.pex_polygon_view\n if layer.map_rel:\n pex = self.pex_polygon\n p1 = self.geo_to_view(p1)\n p2 = self.geo_to_view(p2)\n (lx, by) = p1\n (rx, ty) = p2\n\n # check polygons in layer\n for (poly, place, width, colour, x_off, y_off, udata) in layer.data:\n (pt, ex) = pex(place, poly, x_off, y_off)\n if ex:\n (plx, prx, pty, pby) = ex\n if lx <= plx and prx <= rx and ty <= pty and pby <= by:\n sel = (poly, {'placement': place,\n 'offset_x': x_off,\n 'offset_y': y_off,\n 'data': udata})\n selection.append(sel)\n\n if not selection:\n return None\n return (selection, None, None)\n\n######\n# Polygon/polyline utility routines\n######\n\n @staticmethod\n def point_inside_polygon(point, poly):\n \"\"\"Decide if point is inside polygon.\n\n point tuple of (x,y) coordnates of point in question (geo or view)\n poly polygon in form [(x1,y1), (x2,y2), ...]\n\n Returns True if point is properly inside polygon.\n May return True or False if point on edge of polygon.\n\n Slightly modified version of the 'published' algorithm found on the 'net.\n Instead of indexing into the poly, create a new poly that 'wraps around'.\n Even with the extra code, it runs in 2/3 the time.\n \"\"\"\n\n (x, y) = point\n\n # we want a *copy* of original iterable plus extra wraparound point\n l_poly = list(poly)\n l_poly.append(l_poly[0]) # ensure poly wraps around\n\n inside = False\n\n (p1x, p1y) = l_poly[0]\n\n for (p2x, p2y) in l_poly:\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n (p1x, p1y) = (p2x, p2y)\n\n return inside\n\n def point_in_polygon_geo(self, poly, geo, placement, offset_x, offset_y):\n \"\"\"Decide if a point is inside a map-relative polygon.\n\n poly an iterable of (x,y) where x,y are in geo coordinates\n geo tuple (xgeo, ygeo) of point position\n placement a placement string\n offset_x X offset in pixels\n offset_y Y offset in pixels\n\n The 'geo' point, while in geo coordinates, must be a click point\n within the view.\n\n Returns True if point is inside the polygon.\n \"\"\"\n\n return self.point_inside_polygon(geo, poly)\n\n def point_in_polygon_view(self, poly, view, place, x_off, y_off):\n \"\"\"Decide if a point is inside a view-relative 
polygon.\n\n poly an iterable of (x,y) where x,y are in view (pixel) coordinates\n ptx point X coordinate (view)\n pty point Y coordinate (view)\n place a placement string\n offset_x X offset in pixels\n offset_y Y offset in pixels\n\n Returns True if point is inside the polygon.\n \"\"\"\n\n # convert polygon and placement into list of (x,y) tuples\n view_poly = []\n for (x, y) in poly:\n (x, y) = self.point_placement_view(place, x, y, x_off, y_off)\n view_poly.append((x, y))\n\n # decide if (ptx,pty) is inside polygon\n return self.point_inside_polygon(view, view_poly)\n\n def point_near_polyline_geo(self, point, poly, placement,\n offset_x, offset_y, delta):\n \"\"\"Decide if a point is near a map-relative polyline.\n\n point tuple (xgeo, ygeo) of point position\n poly an iterable of (x,y) where x,y are in geo coordinates\n placement a placement string\n offset_x X offset in pixels\n offset_y Y offset in pixels\n delta distance (squared) before selection allowed\n\n The 'geo' point, while in geo coordinates, must be a click point\n within the view.\n\n Returns nearest line segment of polyline that is 'close enough'\n to the point. Returns None if no segment close enough.\n \"\"\"\n\n return self.point_near_polyline(point, poly, delta=delta)\n\n def point_near_polyline_view(self, point, polyline, place,\n x_off, y_off, delta):\n \"\"\"Decide if a point is near a view-relative polyline.\n\n point a tuple (viewx, viewy) of selection point in view coordinates\n polyline an iterable of (x,y) where x,y are in view (pixel) coordinates\n place a placement string\n offset_x X offset in pixels\n offset_y Y offset in pixels\n delta distance (squared) before selection allowed\n\n Returns nearest line segment of polyline that is 'close enough'\n to the point. Returns None if no segment close enough.\n \"\"\"\n\n # dict to convert selected segment back to orig coords\n back_to_orig = {}\n\n # convert polyline and placement into list of (x,y) tuples\n view_poly = []\n for (x, y) in polyline:\n (vx, vy) = self.point_placement_view(place, x, y, x_off, y_off)\n view_poly.append((vx, vy))\n back_to_orig[(vx, vy)] = (x, y)\n\n # decide if (ptx,pty) is inside polyline (gets nearest segment)\n seg = self.point_near_polyline(point, view_poly, delta=delta)\n\n if seg:\n (s1, s2) = seg\n s1 = back_to_orig[s1]\n s2 = back_to_orig[s2]\n return (s1, s2)\n return None\n\n def point_near_polyline(self, point, polyline, delta=50):\n \"\"\"Decide if point is within 'delta' of the given polyline.\n\n point point (x, y)\n polyline iterable of (x, y) point tuples\n delta maximum distance before 'not close enough'\n\n Returns nearest line segment of polyline that is 'close enough'\n to the point. 
Returns None if no segment close enough.\n \"\"\"\n\n result = None\n last_delta = delta + 1\n\n last_pp = polyline[0]\n for pp in polyline[1:]:\n d = self.point_segment_distance(point, last_pp, pp)\n if d < last_delta:\n result = (last_pp, pp)\n last_delta = d\n last_pp = pp\n\n if last_delta > delta:\n result = None\n\n return result\n\n def point_segment_distance(self, point, s1, s2):\n \"\"\"Get distance from a point to segment (s1, s2).\n\n point tuple (x, y)\n s1, s2 tuples (x, y) of segment endpoints\n\n Returns the distance squared.\n \"\"\"\n\n (ptx, pty) = point\n (s1x, s1y) = s1\n (s2x, s2y) = s2\n\n px = s2x - s1x\n py = s2y - s1y\n\n u = ((ptx - s1x)*px + (pty - s1y)*py) / float(px**2 + py**2)\n\n if u > 1:\n u = 1\n elif u < 0:\n u = 0\n\n dx = s1x + u*px - ptx\n dy = s1y + u*py - pty\n\n return dx**2 + dy**2\n\n def info(self, msg):\n \"\"\"Display an information message, log and graphically.\"\"\"\n\n log_msg = '# ' + msg\n length = len(log_msg)\n prefix = '#### Information '\n banner = prefix + '#'*(80 - len(log_msg) - len(prefix))\n log(banner)\n log(log_msg)\n log(banner)\n\n QMessageBox.information(self, 'Information', msg)\n\n def warn(self, msg):\n \"\"\"Display a warning message, log and graphically.\"\"\"\n\n log_msg = '# ' + msg\n length = len(log_msg)\n prefix = '#### Warning '\n banner = prefix + '#'*(80 - len(log_msg) - len(prefix))\n log(banner)\n log(log_msg)\n log(banner)\n\n QMessageBox.warning(self, 'Information', msg)\n\n################################################################################\n# Below are the \"external\" API methods.\n################################################################################\n\n ######\n # \"add a layer\" routines\n ######\n\n def AddPointLayer(self, points, map_rel=True, visible=True,\n show_levels=None, selectable=False,\n name='<points_layer>', **kwargs):\n \"\"\"Add a layer of points, map or view relative.\n\n points iterable of point data:\n (x, y[, attributes])\n where x & y are either lon&lat (map) or x&y (view) coords\n and attributes is an optional dictionary of attributes for\n _each point_ with keys like:\n 'placement' a placement string\n 'radius' radius of point in pixels\n 'colour' colour of point\n 'offset_x' X offset\n 'offset_y' Y offset\n 'data' point user data object\n map_rel points are map relative if True, else view relative\n visible True if the layer is visible\n show_levels list of levels at which layer is auto-shown (or None==all)\n selectable True if select operates on this layer\n name the 'name' of the layer - mainly for debug\n kwargs a layer-specific attributes dictionary, has keys:\n 'placement' a placement string\n 'radius' radius of point in pixels\n 'colour' colour of point\n 'offset_x' X offset\n 'offset_y' Y offset\n 'data' point user data object\n \"\"\"\n\n # merge global and layer defaults\n if map_rel:\n default_placement = kwargs.get('placement', self.DefaultPointPlacement)\n default_radius = kwargs.get('radius', self.DefaultPointRadius)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultPointColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPointOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPointOffsetY)\n default_data = kwargs.get('data', self.DefaultPointData)\n else:\n default_placement = kwargs.get('placement', self.DefaultPointViewPlacement)\n default_radius = kwargs.get('radius', self.DefaultPointViewRadius)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n 
self.DefaultPointViewColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPointViewOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPointViewOffsetY)\n default_data = kwargs.get('data', self.DefaultPointData)\n\n # create draw data iterable for draw method\n draw_data = [] # list to hold draw data\n\n for pt in points:\n if len(pt) == 3:\n (x, y, attributes) = pt\n elif len(pt) == 2:\n (x, y) = pt\n attributes = {}\n else:\n msg = ('Point data must be iterable of tuples: '\n '(x, y[, dict])\\n'\n 'Got: %s' % str(pt))\n raise Exception(msg)\n\n # plug in any required polygon values (override globals+layer)\n placement = attributes.get('placement', default_placement)\n radius = attributes.get('radius', default_radius)\n colour = self.get_i18n_kw(attributes, ('colour', 'color'),\n default_colour)\n offset_x = attributes.get('offset_x', default_offset_x)\n offset_y = attributes.get('offset_y', default_offset_y)\n udata = attributes.get('data', default_data)\n\n # check values that can be wrong\n if not placement:\n placement = default_placement\n placement = placement.lower()\n if placement not in self.valid_placements:\n msg = (\"Point placement value is invalid, got '%s'\"\n % str(placement))\n raise Exception(msg)\n\n # convert various colour formats to internal (r, g, b, a)\n colour = self.colour_to_internal(colour)\n\n # append another point to draw data list\n draw_data.append((float(x), float(y), placement,\n radius, colour, offset_x, offset_y, udata))\n\n return self.add_layer(self.draw_point_layer, draw_data, map_rel,\n visible=visible, show_levels=show_levels,\n selectable=selectable, name=name,\n ltype=self.TypePoint)\n\n def AddImageLayer(self, data, map_rel=True, visible=True,\n show_levels=None, selectable=False,\n name='<image_layer>', **kwargs):\n \"\"\"Add a layer of images, map or view relative.\n\n data list of (lon, lat, fname[, attributes]) (map_rel)\n or list of (x, y, fname[, attributes]) (view relative)\n attributes is a dictionary of attributes:\n placement a placement string\n radius object point radius\n colour object point colour\n offset_x X offset\n offset_y Y offset\n data image user data\n map_rel points drawn relative to map if True, else view relative\n visible True if the layer is to be immediately visible\n show_levels list of levels at which layer is auto-shown (or None)\n selectable True if select operates on this layer\n name name of this layer\n kwargs dictionary of extra params:\n placement string describing placement wrt hotspot\n radius object point radius\n colour object point colour\n offset_x hotspot X offset in pixels\n offset_y hotspot Y offset in pixels\n data image user data\n\n The hotspot is placed at (lon, lat) or (x, y). 
'placement' controls\n        where the image is displayed relative to the hotspot.\n        \"\"\"\n\n        # merge global and layer defaults\n        if map_rel:\n            default_placement = kwargs.get('placement', self.DefaultImagePlacement)\n            default_radius = kwargs.get('radius', self.DefaultImageRadius)\n            default_colour = kwargs.get('colour', self.DefaultImageColour)\n            default_offset_x = kwargs.get('offset_x', self.DefaultImageOffsetX)\n            default_offset_y = kwargs.get('offset_y', self.DefaultImageOffsetY)\n            default_data = kwargs.get('data', self.DefaultImageData)\n        else:\n            default_placement = kwargs.get('placement', self.DefaultImageViewPlacement)\n            default_radius = kwargs.get('radius', self.DefaultImageViewRadius)\n            default_colour = kwargs.get('colour', self.DefaultImageViewColour)\n            default_offset_x = kwargs.get('offset_x', self.DefaultImageViewOffsetX)\n            default_offset_y = kwargs.get('offset_y', self.DefaultImageViewOffsetY)\n            default_data = kwargs.get('data', self.DefaultImageViewData)\n\n        # define cache variables for the image information\n        # used to minimise file access - just caches previous file information\n        fname_cache = None\n        pmap_cache = None\n        w_cache = None\n        h_cache = None\n\n        # load all image files, convert to bitmaps, create draw_data iterable\n        draw_data = []\n        for d in data:\n            if len(d) == 4:\n                (lon, lat, fname, attributes) = d\n            elif len(d) == 3:\n                (lon, lat, fname) = d\n                attributes = {}\n            else:\n                msg = ('Image data must be iterable of tuples: '\n                       '(x, y, fname[, dict])\\nGot: %s' % str(d))\n                raise Exception(msg)\n\n            # get image-specific values, if any\n            placement = attributes.get('placement', default_placement)\n            radius = attributes.get('radius', default_radius)\n            colour = attributes.get('colour', default_colour)\n            offset_x = attributes.get('offset_x', default_offset_x)\n            offset_y = attributes.get('offset_y', default_offset_y)\n            udata = attributes.get('data', default_data)\n\n            if fname == fname_cache:\n                pmap = pmap_cache\n                w = w_cache\n                h = h_cache\n            else:\n                fname_cache = fname\n                pmap_cache = pmap = QPixmap(fname)\n                size = pmap.size()\n                h = h_cache = size.height()\n                w = w_cache = size.width()\n\n            # check values that can be wrong\n            if not placement:\n                placement = default_placement\n            placement = placement.lower()\n            if placement not in self.valid_placements:\n                msg = (\"Image placement value is invalid, got '%s'\"\n                       % str(placement))\n                raise Exception(msg)\n\n            # convert various colour formats to internal (r, g, b, a)\n            colour = self.colour_to_internal(colour)\n\n            draw_data.append((float(lon), float(lat), pmap, w, h, placement,\n                              offset_x, offset_y, radius, colour, udata))\n\n        return self.add_layer(self.draw_image_layer, draw_data, map_rel,\n                              visible=visible, show_levels=show_levels,\n                              selectable=selectable, name=name,\n                              ltype=self.TypeImage)\n\n    def AddTextLayer(self, text, map_rel=True, visible=True, show_levels=None,\n                     selectable=False, name='<text_layer>', **kwargs):\n        \"\"\"Add a text layer to the map or view.\n\n        text         list of (lon, lat, text[, dict]) tuples\n                     (optional 'dict' contains point-specific attributes)\n        map_rel      points drawn relative to map if True, else view relative\n        visible      True if the layer is to be immediately visible\n        show_levels  list of levels at which layer is auto-shown\n        selectable   True if select operates on this layer\n        name         name of this layer\n        kwargs       a dictionary of changeable text attributes\n                     (placement, radius, fontname, fontsize, colour, data)\n                     these supply any data missing in 'data'\n        \"\"\"\n\n        # merge global and layer defaults\n        if map_rel:\n
default_placement = kwargs.get('placement', self.DefaultTextPlacement)\n default_radius = kwargs.get('radius', self.DefaultTextRadius)\n default_fontname = kwargs.get('fontname', self.DefaultTextFontname)\n default_fontsize = kwargs.get('fontsize', self.DefaultTextFontSize)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultTextColour)\n default_textcolour = self.get_i18n_kw(kwargs,\n ('textcolour', 'textcolor'),\n self.DefaultTextTextColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultTextOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultTextOffsetY)\n default_data = kwargs.get('data', self.DefaultTextData)\n else:\n default_placement = kwargs.get('placement', self.DefaultTextViewPlacement)\n default_radius = kwargs.get('radius', self.DefaultTextViewRadius)\n default_fontname = kwargs.get('fontname', self.DefaultTextViewFontname)\n default_fontsize = kwargs.get('fontsize', self.DefaultTextViewFontSize)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultTextViewColour)\n default_textcolour = self.get_i18n_kw(kwargs,\n ('textcolour', 'textcolor'),\n self.DefaultTextViewTextColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultTextViewOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultTextViewOffsetY)\n default_data = kwargs.get('data', self.DefaultTextData)\n\n # create data iterable ready for drawing\n draw_data = []\n for t in text:\n if len(t) == 4:\n (lon, lat, tdata, attributes) = t\n elif len(t) == 3:\n (lon, lat, tdata) = t\n attributes = {}\n else:\n msg = ('Text data must be iterable of tuples: '\n '(lon, lat, text, [dict])\\n'\n 'Got: %s' % str(t))\n raise Exception(msg)\n\n # plug in any required defaults\n placement = attributes.get('placement', default_placement)\n radius = attributes.get('radius', default_radius)\n fontname = attributes.get('fontname', default_fontname)\n fontsize = attributes.get('fontsize', default_fontsize)\n colour = self.get_i18n_kw(attributes, ('colour', 'color'),\n default_colour)\n textcolour = self.get_i18n_kw(attributes,\n ('textcolour', 'textcolor'),\n default_textcolour)\n offset_x = attributes.get('offset_x', default_offset_x)\n offset_y = attributes.get('offset_y', default_offset_y)\n udata = attributes.get('data', default_data)\n\n # check values that can be wrong\n if not placement:\n placement = default_placement\n placement = placement.lower()\n if placement not in self.valid_placements:\n msg = (\"Text placement value is invalid, got '%s'\"\n % str(placement))\n raise Exception(msg)\n\n # convert various colour formats to internal (r, g, b, a)\n colour = self.colour_to_internal(colour)\n textcolour = self.colour_to_internal(textcolour)\n\n draw_data.append((float(lon), float(lat), tdata, placement.lower(),\n radius, colour, textcolour, fontname, fontsize,\n offset_x, offset_y, udata))\n\n return self.add_layer(self.draw_text_layer, draw_data, map_rel,\n visible=visible, show_levels=show_levels,\n selectable=selectable, name=name,\n ltype=self.TypeText)\n\n def AddPolygonLayer(self, data, map_rel=True, visible=True,\n show_levels=None, selectable=False,\n name='<polygon_layer>', **kwargs):\n \"\"\"Add a layer of polygon data to the map.\n\n data iterable of polygon tuples:\n (points[, attributes])\n where points is another iterable of (x, y) tuples and\n attributes is a dictionary of polygon attributes:\n placement a placement string (view-relative only)\n width width of polygon edge lines\n colour colour of edge lines\n close if True 
closes polygon\n filled polygon is filled (implies closed)\n fillcolour fill colour\n offset_x X offset\n offset_y Y offset\n data polygon user data object\n map_rel points drawn relative to map if True, else view relative\n visible True if the layer is to be immediately visible\n show_levels list of levels at which layer is auto-shown (or None)\n selectable True if select operates on this layer\n name name of this layer\n kwargs extra keyword args, layer-specific:\n placement placement string (view-rel only)\n width width of polygons in pixels\n colour colour of polygon edge lines\n close True if polygon is to be closed\n filled if True, fills polygon\n fillcolour fill colour\n offset_x X offset\n offset_y Y offset\n data polygon user data object\n \"\"\"\n\n # merge global and layer defaults\n if map_rel:\n default_placement = kwargs.get('placement',\n self.DefaultPolygonPlacement)\n default_width = kwargs.get('width', self.DefaultPolygonWidth)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultPolygonColour)\n default_close = kwargs.get('closed', self.DefaultPolygonClose)\n default_filled = kwargs.get('filled', self.DefaultPolygonFilled)\n default_fillcolour = self.get_i18n_kw(kwargs,\n ('fillcolour', 'fillcolor'),\n self.DefaultPolygonFillcolour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPolygonOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPolygonOffsetY)\n default_data = kwargs.get('data', self.DefaultPolygonData)\n else:\n default_placement = kwargs.get('placement',\n self.DefaultPolygonViewPlacement)\n default_width = kwargs.get('width', self.DefaultPolygonViewWidth)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultPolygonViewColour)\n default_close = kwargs.get('closed', self.DefaultPolygonViewClose)\n default_filled = kwargs.get('filled', self.DefaultPolygonViewFilled)\n default_fillcolour = self.get_i18n_kw(kwargs,\n ('fillcolour', 'fillcolor'),\n self.DefaultPolygonViewFillcolour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPolygonViewOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPolygonViewOffsetY)\n default_data = kwargs.get('data', self.DefaultPolygonViewData)\n\n # create draw_data iterable\n draw_data = []\n for d in data:\n if len(d) == 2:\n (p, attributes) = d\n elif len(d) == 1:\n p = d\n attributes = {}\n else:\n msg = ('Polygon data must be iterable of tuples: '\n '(points, [attributes])\\n'\n 'Got: %s' % str(d))\n raise Exception(msg)\n\n # get polygon attributes\n placement = attributes.get('placement', default_placement)\n width = attributes.get('width', default_width)\n colour = self.get_i18n_kw(attributes, ('colour', 'color'),\n default_colour)\n close = attributes.get('closed', default_close)\n filled = attributes.get('filled', default_filled)\n if filled:\n close = True\n fillcolour = self.get_i18n_kw(attributes,\n ('fillcolour', 'fillcolor'),\n default_fillcolour)\n offset_x = attributes.get('offset_x', default_offset_x)\n offset_y = attributes.get('offset_y', default_offset_y)\n udata = attributes.get('data', default_data)\n\n # if polygon is to be filled, ensure closed\n if close:\n p = list(p) # must get a *copy*\n p.append(p[0])\n\n # check values that can be wrong\n if not placement:\n placement = default_placement\n placement = placement.lower()\n if placement not in self.valid_placements:\n msg = (\"Polygon placement value is invalid, got '%s'\"\n % str(placement))\n raise Exception(msg)\n\n # convert various colour formats to 
internal (r, g, b, a)\n colour = self.colour_to_internal(colour)\n fillcolour = self.colour_to_internal(fillcolour)\n\n # append this polygon to the layer data\n draw_data.append((p, placement, width, colour, close,\n filled, fillcolour, offset_x, offset_y, udata))\n\n return self.add_layer(self.draw_polygon_layer, draw_data, map_rel,\n visible=visible, show_levels=show_levels,\n selectable=selectable, name=name,\n ltype=self.TypePolygon)\n\n def AddPolylineLayer(self, data, map_rel=True, visible=True,\n show_levels=None, selectable=False,\n name='<polyline>', **kwargs):\n \"\"\"Add a layer of polyline data to the map.\n\n data iterable of polyline tuples:\n (points[, attributes])\n where points is another iterable of (x, y) tuples and\n attributes is a dictionary of polyline attributes:\n placement a placement string (view-relative only)\n width width of polyline edge lines\n colour colour of edge lines\n offset_x X offset\n offset_y Y offset\n data polyline user data object\n map_rel points drawn relative to map if True, else view relative\n visible True if the layer is to be immediately visible\n show_levels list of levels at which layer is auto-shown (or None)\n selectable True if select operates on this layer\n name name of this layer\n kwargs extra keyword args, layer-specific:\n placement placement string (view-rel only)\n width width of polyline in pixels\n colour colour of polyline edge lines\n offset_x X offset\n offset_y Y offset\n data polygon user data object\n \"\"\"\n\n # merge global and layer defaults\n if map_rel:\n default_placement = kwargs.get('placement',\n self.DefaultPolygonPlacement)\n default_width = kwargs.get('width', self.DefaultPolygonWidth)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultPolygonColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPolygonOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPolygonOffsetY)\n default_data = kwargs.get('data', self.DefaultPolygonData)\n else:\n default_placement = kwargs.get('placement',\n self.DefaultPolygonViewPlacement)\n default_width = kwargs.get('width', self.DefaultPolygonViewWidth)\n default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),\n self.DefaultPolygonViewColour)\n default_offset_x = kwargs.get('offset_x', self.DefaultPolygonViewOffsetX)\n default_offset_y = kwargs.get('offset_y', self.DefaultPolygonViewOffsetY)\n default_data = kwargs.get('data', self.DefaultPolygonViewData)\n\n # create draw_data iterable\n draw_data = []\n for d in data:\n if len(d) == 2:\n (p, attributes) = d\n elif len(d) == 1:\n p = d\n attributes = {}\n else:\n msg = ('Polyline data must be iterable of tuples: '\n '(polyline, [attributes])\\n'\n 'Got: %s' % str(d))\n raise Exception(msg)\n\n # get polygon attributes\n placement = attributes.get('placement', default_placement)\n width = attributes.get('width', default_width)\n colour = self.get_i18n_kw(attributes, ('colour', 'color'),\n default_colour)\n offset_x = attributes.get('offset_x', default_offset_x)\n offset_y = attributes.get('offset_y', default_offset_y)\n udata = attributes.get('data', default_data)\n\n # check values that can be wrong\n if not placement:\n placement = default_placement\n placement = placement.lower()\n if placement not in self.valid_placements:\n msg = (\"Polyline placement value is invalid, got '%s'\"\n % str(placement))\n raise Exception(msg)\n\n # convert various colour formats to internal (r, g, b, a)\n colour = self.colour_to_internal(colour)\n\n draw_data.append((p, placement, 
width, colour,\n offset_x, offset_y, udata))\n\n return self.add_layer(self.draw_polyline_layer, draw_data, map_rel,\n visible=visible, show_levels=show_levels,\n selectable=selectable, name=name,\n ltype=self.TypePolyline)\n\n ######\n # Layer manipulation\n ######\n\n def ShowLayer(self, id):\n \"\"\"Show a layer.\n\n id the layer id\n \"\"\"\n\n self.layer_mapping[id].visible = True\n self.update()\n\n def HideLayer(self, id):\n \"\"\"Hide a layer.\n\n id the layer id\n \"\"\"\n\n self.layer_mapping[id].visible = False\n self.update()\n\n def DeleteLayer(self, id):\n \"\"\"Delete a layer.\n\n id the layer id\n \"\"\"\n\n # just in case we got None\n if id:\n # see if what we are about to remove might be visible\n layer = self.layer_mapping[id]\n visible = layer.visible\n\n del layer\n self.layer_z_order.remove(id)\n\n # if layer was visible, refresh display\n if visible:\n self.update()\n\n def PushLayerToBack(self, id):\n \"\"\"Make layer specified be drawn at back of Z order.\n\n id ID of the layer to push to the back\n \"\"\"\n\n self.layer_z_order.remove(id)\n self.layer_z_order.insert(0, id)\n self.update()\n\n def PopLayerToFront(self, id):\n \"\"\"Make layer specified be drawn at front of Z order.\n\n id ID of the layer to pop to the front\n \"\"\"\n\n self.layer_z_order.remove(id)\n self.layer_z_order.append(id)\n self.update()\n\n def PlaceLayerBelowLayer(self, below, top):\n \"\"\"Place a layer so it will be drawn behind another layer.\n\n below ID of layer to place underneath 'top'\n top ID of layer to be drawn *above* 'below'\n \"\"\"\n\n self.layer_z_order.remove(below)\n i = self.layer_z_order.index(top)\n self.layer_z_order.insert(i, below)\n self.update()\n\n def SetLayerShowLevels(self, id, show_levels=None):\n \"\"\"Update the show_levels list for a layer.\n\n id ID of the layer we are going to update\n show_levels new layer show list\n\n If 'show_levels' is None reset the displayable levels to\n all levels in the current tileset.\n \"\"\"\n\n # if we actually got an 'id' change the .show_levels value\n if id:\n layer = self.layer_mapping[id]\n\n # if not given a 'show_levels' show all levels available\n if not show_levels:\n show_levels = range(self.tiles_min_level,\n self.tiles_max_level+1)[:]\n\n layer.show_levels = show_levels\n\n # always update the display, there may be a change\n self.update()\n\n ######\n # Zoom and pan\n ######\n\n def GotoLevel(self, level):\n \"\"\"Use a new tile level.\n\n level the new tile level to use.\n\n Returns True if all went well.\n \"\"\"\n\n if not self.tile_src.UseLevel(level):\n return False # couldn't change level\n\n self.level = level\n (self.num_tiles_x, self.num_tiles_y, _, _) = self.tile_src.GetInfo(level)\n self.map_width = self.num_tiles_x * self.tile_width\n self.map_height = self.num_tiles_y * self.tile_height\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tile_src.extent\n\n # to set some state variables\n self.resizeEvent()\n\n # raise level change event\n self.raise_event(PySlipQt.EVT_PYSLIPQT_LEVEL, level=level)\n\n return True\n\n def GotoPosition(self, geo):\n \"\"\"Set view to centre on a geo position in the current level.\n\n geo a tuple (xgeo,ygeo) to centre view on\n\n Recalculates the key tile info.\n \"\"\"\n\n # get fractional tile coords of required centre of view\n (xtile, ytile) = self.tile_src.Geo2Tile(geo)\n\n # get view size in half widths and height\n w2 = self.view_width / 2\n h2 = self.view_height / 2\n\n # get tile coords of view left and top edges\n view_tile_x = xtile 
- (w2 / self.tile_width)\n view_tile_y = ytile - (h2 / self.tile_height)\n\n # calculate the key tile coords and offsets\n keytile_x = int(view_tile_x)\n keytile_y = int(view_tile_y)\n\n keyoffset_x = - int((view_tile_x - keytile_x) * self.tile_width)\n keyoffset_y = - int((view_tile_y - keytile_y) * self.tile_height)\n\n # update the key tile info\n self.key_tile_left = keytile_x\n self.key_tile_top = keytile_y\n self.key_tile_xoffset = keyoffset_x\n self.key_tile_yoffset = keyoffset_y\n\n # centre map in view if map < view\n if self.key_tile_left < 0 or self.key_tile_xoffset > 0:\n self.key_tile_left = 0\n self.key_tile_xoffset = (self.view_width - self.map_width) // 2\n\n if self.key_tile_top < 0 or self.key_tile_yoffset > 0:\n self.key_tile_top = 0\n self.key_tile_yoffset = (self.view_height - self.map_height) // 2\n\n # redraw the display\n self.update()\n\n def GotoLevelAndPosition(self, level, geo):\n \"\"\"Goto a map level and set view to centre on a position.\n\n level the map level to use\n geo a tuple (xgeo,ygeo) to centre view on\n\n Does nothing if we can't use desired level.\n \"\"\"\n\n if self.GotoLevel(level):\n self.GotoPosition(geo)\n\n def ZoomToArea(self, geo, size):\n \"\"\"Set view to level and position to view an area.\n\n geo a tuple (xgeo,ygeo) to centre view on\n size a tuple (width,height) of area in geo coordinates\n\n Centre an area and zoom to view such that the area will fill\n approximately 50% of width or height, whichever is greater.\n\n Use the ppd_x and ppd_y values in the level 'tiles' file.\n \"\"\"\n\n # unpack area width/height (geo coords)\n (awidth, aheight) = size\n\n # step through levels (smallest first) and check view size (degrees)\n for l in self.tile_src.levels:\n level = l\n (_, _, ppd_x, ppd_y) = self.tile_src.getInfo(l)\n view_deg_width = self.view_width / ppd_x\n view_deg_height = self.view_height / ppd_y\n\n # if area >= 50% of view, finished\n if awidth >= view_deg_width / 2 or aheight >= view_deg_height / 2:\n break\n\n self.GotoLevelAndPosition(level, geo)\n\n ######\n # Change the tileset\n ######\n\n def ChangeTileset(self, tile_src):\n \"\"\"Change the source of tiles.\n\n tile_src the new tileset object to use\n\n Returns the previous tileset object, None if none.\n\n Refreshes the display and tries to maintain the same position\n and zoom level. 
May change the zoom level if the current level doesn't\n exist in the new tileset.\n \"\"\"\n\n log('ChangeTileset: tile_src=%s' % str(tile_src))\n\n # get level and geo position of view centre\n (level, geo) = self.get_level_and_position()\n log('level=%s, geo=%s' % (str(level), str(geo)))\n\n # remember old tileset\n old_tileset = self.tile_src\n\n # get levels in new tileset and see if we can display at the current level\n new_levels = tile_src.levels\n new_max_level = tile_src.max_level\n new_min_level = tile_src.min_level\n if level > new_max_level:\n level = new_max_level\n if level < new_min_level:\n level = new_min_level\n\n # set new tile source and set some state\n self.tile_src = tile_src\n self.tile_size_x = tile_src.tile_size_x\n self.tile_size_y = tile_src.tile_size_y\n self.level = level\n\n result = self.tile_src.GetInfo(level)\n (num_tiles_x, num_tiles_y, ppd_x, ppd_y) = result\n self.map_width = self.tile_size_x * num_tiles_x\n self.map_height = self.tile_size_y * num_tiles_y\n self.ppd_x = ppd_x\n self.ppd_y = ppd_y\n\n # set tile levels stuff - allowed levels, etc\n self.tiles_max_level = max(tile_src.levels)\n self.tiles_min_level = min(tile_src.levels)\n\n # set callback from Tile source object when tile(s) available\n self.tile_src.setCallback(self.on_tile_available)\n\n # set the new zoom level to the old\n if not tile_src.UseLevel(self.level):\n # can't use old level, make sensible choice\n if self.level < self.tiles_min_level:\n self.level = self.tiles_min_level\n elif self.level > self.tiles_max_level:\n self.level = self.tiles_max_level\n\n # if we can't change level now, raise an error exception\n if not tile_src.UseLevel(self.level):\n raise Exception('Trying to use level %s in tile obj %s, '\n 'levels available are %s'\n % (str(self.level),\n str(tile_src), str(tile_src.levels)))\n\n# TODO: MUST SET KEY TILE STUFF HERE\n self.set_key_from_centre(geo)\n\n # back to old level+centre, and refresh the display\n# self.GotoLevelAndPosition(level, geo)\n self.zoom_level_position(level, geo)\n\n return old_tileset\n\n"
},
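A note for readers of the selection helpers above: every hit-test in the widget reduces to one of two geometric primitives, point-in-polygon or squared point-to-segment distance, compared against the layer's squared 'delta' threshold. Below is a minimal standalone sketch of the distance primitive that point_segment_distance() computes; the function name and the worked check are illustrative, and only the arithmetic mirrors the widget code.

def point_segment_distance_sq(point, s1, s2):
    """Squared distance from 'point' to the segment (s1, s2).

    Working in squared distances avoids a sqrt() per segment, which is
    why the widget's layer.delta thresholds are squared distances too.
    """
    (ptx, pty) = point
    (s1x, s1y) = s1
    (s2x, s2y) = s2
    px = s2x - s1x      # segment direction vector
    py = s2y - s1y
    # parametric position of the projection of 'point' onto the segment,
    # clamped to [0, 1] so positions beyond an endpoint measure to that endpoint
    u = ((ptx - s1x)*px + (pty - s1y)*py) / float(px**2 + py**2)
    u = max(0.0, min(1.0, u))
    dx = s1x + u*px - ptx
    dy = s1y + u*py - pty
    return dx**2 + dy**2

# the point (0, 1) lies distance 1 from the unit X segment, so squared distance is 1.0
assert point_segment_distance_sq((0, 1), (0, 0), (1, 0)) == 1.0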
{
"alpha_fraction": 0.4555748999118805,
"alphanum_fraction": 0.5074042081832886,
"avg_line_length": 29.81208038330078,
"blob_id": "9d0693e76d769b573f0390f753d8038d832c8bea",
"content_id": "7e502b4f99572a1660aac3693338ac8f95919d34",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4592,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 149,
"path": "/pySlipQt/examples/test_viewrel_polygon.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt view-relative polygons.\n\nUsage: test_maprel_image.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n\n######\n# Various demo constants\n######\n\nDemoVersion = '1.0'\nDemoName = f'Test view-relative point placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (133.87, -23.7) # Alice Springs\n\narrow_cn = ((0,0),(10,10),(5,10),(5,20),(-5,20),(-5,10),(-10,10))\narrow_ne = ((-1,0),(-1,10),(-4,8),(-9,13),(-14,8),(-9,3),(-11,0))\narrow_ce = ((-1,0),(-11,10),(-11,5),(-21,5),(-21,-5),(-11,-5),(-11,-10))\narrow_se = ((-1,-1),(-1,-10),(-4,-8),(-9,-13),(-14,-8),(-9,-3),(-11,-1))\narrow_cs = ((0,-1),(-10,-11),(-5,-11),(-5,-21),(5,-21),(5,-11),(10,-11))\narrow_sw = ((0,-1),(0,-10),(3,-8),(8,-13),(13,-8),(8,-3),(10,-1))\narrow_cw = ((0,0),(10,10),(10,5),(20,5),(20,-5),(10,-5),(10,-10))\narrow_nw = ((0,0),(0,10),(3,8),(8,13),(13,8),(8,3),(10,0))\nfilled_poly = ((-100,100),(-100,-100),(0,150),(100,-100),(100,100))\n\nPolyViewData = [(arrow_cn, {'placement': 'cn'}),\n (arrow_ne, {'placement': 'ne'}),\n (arrow_ce, {'placement': 'ce'}),\n (arrow_se, {'placement': 'se'}),\n (arrow_cs, {'placement': 'cs'}),\n (arrow_sw, {'placement': 'sw'}),\n (arrow_cw, {'placement': 'cw'}),\n (arrow_nw, {'placement': 'nw'}),\n (filled_poly, {'placement': 'cc', 'width': 8,\n 'fillcolour': '#ff000020',\n 'colour': '#00ff0040',\n 'filled': True}),\n ]\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n # add test test layer\n self.text_layer = self.pyslipqt.AddPolygonLayer(PolyViewData,\n map_rel=False,\n name='<poly_map_layer>',\n offset_x=0, offset_y=0,\n closed=True)\n\n self.show()\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', 
'--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the app\nlog(DemoName)\ntile_dir = 'test_viewrel_polygon'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n\n"
},
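The test program above drives AddPolygonLayer() with view-relative arrow polygons. As a complement, here is a hedged sketch of building the same data shape from scratch, using only attribute keys documented in the AddPolygonLayer docstring; the coordinates, colours and names are invented for illustration.

# one filled triangle anchored at the view centre ('cc' placement)
triangle = ((0, 0), (20, 0), (10, -20))
poly_data = [
    (triangle, {'placement': 'cc',        # a placement string (view-relative)
                'colour': '#ff0000ff',    # edge colour, '#RRGGBBAA' form
                'filled': True,           # filling implies the polygon is closed
                'fillcolour': '#00ff0040'}),
]
# assuming 'widget' is a constructed PySlipQt instance:
# layer_id = widget.AddPolygonLayer(poly_data, map_rel=False, name='<demo_polys>')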
{
"alpha_fraction": 0.7202796936035156,
"alphanum_fraction": 0.7255244851112366,
"avg_line_length": 29.752687454223633,
"blob_id": "a416ba74e9517fc4d0f0380be901978af5483538",
"content_id": "647b609e96786d3d865ab813770074d5b3d1168b",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2861,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 93,
"path": "/README.rst",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": ".. image:: pySlipQt/examples/graphics/pyslipqt_logo.png\n\npySlipQt\n========\n\npySlipQt is a 'slip map' widget for PyQt5.\n\nDuring my work writing geophysical applications in python I often wanted to\ndisplay a map that was very large - many hundreds of thousands of pixels in\nwidth. I searched around for a GUI solution that would work rather like Google\nmaps: tiled, layers, etc. I couldn't find anything that didn't assume\nbrowser+map server. So I wrote my own PyQt5 widget. This worked well for\ncartesian self-generated maps and has been extended to handle non-cartesian\nmaps and tiles sourced from places like OpenStreetMap.\n\nIt's a poor thing, but solves my problem. I'm placing it here in the hope that\nsomeone else may find it useful. If you find it useful, or make improvements\nto it, drop me a line.\n\npySlipQt works on Linux and Mac. It only works with PyQt5 and Python 3.6+.\n\npySlipQt used to work on Windows, but I no longer run Windows and I can't test it.\n\n\nThe widget API is documented in\n`the wiki <https://github.com/rzzzwilson/pySlipQt/wiki/The-pySlipQt-API>`_.\n\nScreenshots\n===========\n\nA few screenshots of the demonstration program *pyslipqt_demo.py*, the first\nshowing OpenStreetMap tiles:\n\n.. image:: pySlipQt/examples/graphics/pyslip_demo_osm.png\n\nNext, the pre-generated GMT tiles:\n\n.. image:: pySlipQt/examples/graphics/pyslip_demo_gmt.png\n\nGetting pySlipQt\n================\n\nYou can clone this repository, of course, and then do this in the top directory\ncontaining the *setup.py* program:\n\n::\n\n python setup.py install\n\nOr you could install through PyPI:\n\n::\n\n pip install pySlipQt\n\nUsing pip is the recommended way to install pySlipQt as the cheese shop code\nis guaranteed to work. The code in the GitHib repository is, unfortunately,\na moving target.\n\nMap Tiles Licensing\n===================\n\nOpenStreetMap Tiles\n-------------------\n\n© OpenStreetMap contributors\n\nSee the licence `here <http://www.openstreetmap.org/copyright>`_.\n\nStamen Toner Tiles\n------------------\n\nMap tiles by `Stamen Design <http://stamen.com/>`_, under\n`CC BY 3.0 <http://creativecommons.org/licenses/by/3.0>`_. Data by\n`OpenStreetMap <http://openstreetmap.org>`_, under\n`ODbL <http://www.openstreetmap.org/copyright>`_.\n\nStamen Watercolor and Transport Tiles\n-------------------------------------\n\nMap tiles by `Stamen Design <http://stamen.com/>`_, under\n`CC BY 3.0 <http://creativecommons.org/licenses/by/3.0>`_. Data by\n`OpenStreetMap <http://openstreetmap.org>`_, under\n`CC BY SA <http://creativecommons.org/licenses/by-sa/3.0>`_.\n\nTile Usage\n==========\n\nBefore using any tiles provided by pySlipQt modules, make sure you are not\ntransgressing any usage rules applied by the tiles provider.\n\nHeavy usage of tiles probably means you should set up your own tile cache\nserver and write a Tiles module that gets tiles from your own server(s).\n"
},
{
"alpha_fraction": 0.5619872212409973,
"alphanum_fraction": 0.5655404329299927,
"avg_line_length": 36.20913314819336,
"blob_id": "155285a2e8edc45257cb666673c87d9f8b21ea30",
"content_id": "b4b0099277cdfd6dfaa96e4cb31a55e15c5d5c5f",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15479,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 416,
"path": "/pySlipQt/tiles_net.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA server Tiles object for pySlipQt tiles.\n\nAll server tile sources should inherit from this class.\nFor example, see osm_tiles.py.\n\"\"\"\n\nimport os\nimport time\nimport math\nimport traceback\nimport urllib\nfrom urllib import request\nimport queue\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QThread\nimport pySlipQt.tiles as tiles\nimport pySlipQt.sys_tile_data as std\nimport pySlipQt.log as log\n\ntry:\n log = log.Log('pyslipqt.log')\nexcept AttributeError:\n # means log already set up\n pass\n\n# set how old disk-cache tiles can be before we re-request them from the\n# server. this is the number of days old a tile is before we re-request.\n# if 'None', never re-request tiles after first satisfied request.\nRefreshTilesAfterDays = 60\n\n# define the error messages for various failures\nStatusError = {401: 'Looks like you need to be authorised for this server.',\n 404: 'You might need to check the tile addressing for this server.',\n 429: 'You are asking for too many tiles.',\n }\n\n################################################################################\n# Worker class for server tile retrieval\n################################################################################\n\nclass TileWorker(QThread):\n \"\"\"Thread class that gets request from queue, loads tile, calls callback.\"\"\"\n\n def __init__(self, id_num, server, tilepath, requests, callback,\n error_tile, content_type, rerequest_age, error_image):\n \"\"\"Prepare the tile worker.\n\n id_num a unique numer identifying the worker instance\n server server URL\n tilepath path to tile on server\n requests the request queue\n callback function to call after tile available\n error_tile image of error tile\n content_type expected Content-Type string\n rerequest_age number of days in tile age before re-requesting\n (0 means don't update tiles)\n error_image the image to return on some error\n\n Results are returned in the callback() params.\n \"\"\"\n\n QThread.__init__(self)\n\n self.id_num = id_num\n self.server = server\n self.tilepath = tilepath\n self.requests = requests\n self.callback = callback\n self.error_tile_image = error_tile\n self.content_type = content_type\n self.rerequest_age = rerequest_age\n self.error_image = error_image\n self.daemon = True\n\n def run(self):\n while True:\n # get zoom level and tile coordinates to retrieve\n (level, x, y) = self.requests.get()\n\n # try to retrieve the image\n error = False\n pixmap = self.error_image\n try:\n tile_url = self.server + self.tilepath.format(Z=level, X=x, Y=y)\n response = request.urlopen(tile_url)\n content_type = response.info().get_content_type()\n if content_type == self.content_type:\n data = response.read()\n pixmap = QPixmap()\n pixmap.loadFromData(data)\n else:\n # show error, don't cache returned error tile\n error = True\n except Exception as e:\n error = True\n log('%s exception getting tile (%d,%d,%d)'\n % (type(e).__name__, level, x, y))\n\n # call the callback function passing level, x, y and pixmap data\n # error is False if we want to cache this tile on-disk\n self.callback(level, x, y, pixmap, error)\n\n # finally, removes request from queue\n self.requests.task_done()\n\n###############################################################################\n# Class for a server tile source. 
Extend the BaseTiles class.\n###############################################################################\n\nclass Tiles(tiles.BaseTiles):\n \"\"\"A tile object to source server tiles for the widget.\"\"\"\n\n # maximum number of outstanding requests per server\n MaxServerRequests = 2\n\n # maximum number of in-memory cached tiles\n MaxLRU = 1000\n\n # allowed file types and associated values\n AllowedFileTypes = {\n 'png': 'PNG',\n 'jpg': 'JPG',\n }\n\n # the number of seconds in a day\n SecondsInADay = 60 * 60 * 24\n\n def __init__(self, levels, tile_width, tile_height, tiles_dir, max_lru,\n servers, url_path, max_server_requests, http_proxy,\n refetch_days=RefreshTilesAfterDays):\n \"\"\"Initialise a Tiles instance.\n\n levels a list of level numbers that are to be served\n tile_width width of each tile in pixels\n tile_height height of each tile in pixels\n tiles_dir path to on-disk tile cache directory\n max_lru maximum number of cached in-memory tiles\n servers list of tile servers\n url_path path on server to each tile\n max_server_requests maximum number of requests per server\n http_proxy proxy to use if required\n refetch_days fetch new server tile if older than this in days\n (0 means don't ever update tiles)\n \"\"\"\n\n # prepare the tile cache directory, if required\n # we have to do this *before* the base class initialization!\n for level in levels:\n level_dir = os.path.join(tiles_dir, '%d' % level)\n if not os.path.isdir(level_dir):\n os.makedirs(level_dir)\n\n # perform the base class initialization\n super().__init__(levels, tile_width, tile_height, tiles_dir, max_lru)\n\n # save params not saved in super()\n self.servers = servers\n self.url_path = url_path\n self.max_requests = max_server_requests\n self.http_proxy = http_proxy\n\n # callback must be set by higher-level copde\n self.callback = None\n\n # calculate a re-request age, if specified\n self.rerequest_age = None\n if refetch_days:\n self.rerequest_age = (time.time() - refetch_days*self.SecondsInADay)\n\n # tiles extent for tile data (left, right, top, bottom)\n self.extent = (-180.0, 180.0, -85.0511, 85.0511)\n\n # figure out tile filename extension from 'url_path'\n tile_extension = os.path.splitext(url_path)[1][1:]\n tile_extension_lower = tile_extension.lower() # ensure lower case\n\n # determine the file bitmap type\n try:\n self.filetype = self.AllowedFileTypes[tile_extension_lower]\n except KeyError as e:\n raise TypeError(\"Bad tile_extension value, got '%s', \"\n \"expected one of %s\"\n % (str(tile_extension),\n str(self.AllowedFileTypes.keys()))) from None\n\n # compose the expected 'Content-Type' string on request result\n # if we get here we know the extension is in self.AllowedFileTypes\n if tile_extension_lower == 'jpg':\n self.content_type = 'image/jpeg'\n elif tile_extension_lower == 'png':\n self.content_type = 'image/png'\n\n # set the list of queued unsatisfied requests to 'empty'\n self.queued_requests = {}\n\n # prepare the \"pending\" and \"error\" images\n self.pending_tile = QPixmap()\n self.pending_tile.loadFromData(std.getPendingImage())\n\n self.error_tile = QPixmap()\n self.error_tile.loadFromData(std.getErrorImage())\n\n # test for firewall - use proxy (if supplied)\n test_url = self.servers[0] + self.url_path.format(Z=0, X=0, Y=0)\n try:\n request.urlopen(test_url)\n except urllib.error.HTTPError as e:\n # if it's fatal, log it and die, otherwise try a proxy\n status_code = e.code\n log('Error: test_url=%s, status_code=%s'\n % (test_url, str(status_code)))\n error_msg = 
StatusError.get(status_code, None)\n if status_code:\n msg = '\\n'.join(['You got a %d error from: %s' % (status_code, test_url),\n error_msg])\n log(msg)\n raise RuntimeError(msg) from None\n\n log('%s exception doing simple connection to: %s'\n % (type(e).__name__, test_url))\n log(''.join(traceback.format_exc()))\n\n if http_proxy:\n proxy = request.ProxyHandler({'http': http_proxy})\n opener = request.build_opener(proxy)\n request.install_opener(opener)\n try:\n request.urlopen(test_url)\n except:\n msg = (\"Using HTTP proxy %s, \"\n \"but still can't get through a firewall!\")\n raise Exception(msg) from None\n else:\n msg = (\"There is a firewall but you didn't \"\n \"give me an HTTP proxy to get through it?\")\n raise Exception(msg) from None\n\n # set up the request queue and worker threads\n self.request_queue = queue.Queue() # entries are (level, x, y)\n self.workers = []\n for server in self.servers:\n for num_thread in range(self.max_requests):\n worker = TileWorker(num_thread, server, self.url_path,\n self.request_queue, self.tile_is_available,\n self.error_tile, self.content_type,\n self.rerequest_age, self.error_tile)\n self.workers.append(worker)\n worker.start()\n\n def UseLevel(self, level):\n \"\"\"Prepare to serve tiles from the required level.\n\n level the required level\n\n Return True if level change occurred, else False if not possible.\n \"\"\"\n\n # first, CAN we zoom to this level?\n if level not in self.levels:\n return False\n\n # get tile info\n info = self.GetInfo(level)\n if info is None:\n return False\n\n # OK, save new level\n self.level = level\n (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info\n\n # flush any outstanding requests.\n # we do this to speed up multiple-level zooms so the user doesn't\n # sit waiting for tiles to arrive that won't be shown.\n self.FlushRequests()\n\n return True\n\n def GetTile(self, x, y):\n \"\"\"Get bitmap for tile at tile coords (x, y) and current level.\n\n x X coord of tile required (tile coordinates)\n y Y coord of tile required (tile coordinates)\n\n Returns bitmap object for the tile image.\n Tile coordinates are measured from map top-left.\n\n We override the existing GetTile() method to add code to retrieve\n tiles from the servers if not in on-disk cache.\n\n We also check the date on the tile from disk-cache. If \"too old\",\n return old tile after starting the process to get new tile from servers.\n \"\"\"\n\n try:\n # get tile from cache\n tile = self.cache[(self.level, x, y)]\n if self.tile_on_disk(self.level, x, y):\n tile_date = self.cache.tile_date((self.level, x, y))\n if self.rerequest_age and (tile_date < self.rerequest_age):\n self.get_server_tile(self.level, x, y)\n except KeyError as e:\n # not cached, start process of getting tile from 'net, return 'pending' image\n self.get_server_tile(self.level, x, y)\n tile = self.pending_tile\n\n return tile\n\n def GetInfo(self, level):\n \"\"\"Get tile info for a particular level.\n\n level the level to get tile info for\n\n Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y) or None if 'level'\n doesn't exist.\n\n Note that ppd_? may be meaningless for some tiles, so its\n value will be None.\n\n This method is for server tiles. 
It will be overridden for GMT tiles.\n \"\"\"\n\n # is required level available?\n if level not in self.levels:\n return None\n\n # otherwise get the information\n self.num_tiles_x = int(math.pow(2, level))\n self.num_tiles_y = int(math.pow(2, level))\n\n return (self.num_tiles_x, self.num_tiles_y, None, None)\n\n def FlushRequests(self):\n \"\"\"Delete any outstanding tile requests.\"\"\"\n\n # if we are serving server tiles ...\n if self.servers:\n with self.request_queue.mutex:\n self.request_queue.queue.clear()\n self.queued_requests.clear()\n\n def get_server_tile(self, level, x, y):\n \"\"\"Start the process to get a server tile.\n\n level, x, y identify the required tile\n\n If we don't already have this tile (or getting it), queue a request and\n also put the request into a 'queued request' dictionary. We\n do this since we can't peek into a queue to see what's there.\n \"\"\"\n\n tile_key = (level, x, y)\n if tile_key not in self.queued_requests:\n # add tile request to the server request queue\n self.request_queue.put(tile_key)\n self.queued_requests[tile_key] = True\n\n def tile_on_disk(self, level, x, y):\n \"\"\"Return True if tile at (level, x, y) is on-disk.\"\"\"\n\n tile_path = self.cache.tile_path((level, x, y))\n return os.path.exists(tile_path)\n\n def setCallback(self, callback):\n \"\"\"Set the \"tile available\" callback.\n\n callback reference to object to call when tile is found.\n \"\"\"\n\n self.callback = callback\n\n def tile_is_available(self, level, x, y, image, error):\n \"\"\"Callback routine - a 'net tile is available.\n\n level level for the tile\n x x coordinate of tile\n y y coordinate of tile\n image tile image data\n error True if image is 'error' image, don't cache in that case\n \"\"\"\n\n # put image into in-memory cache, but error images don't go to disk\n self.cache[(level, x, y)] = image\n if not error:\n self.cache._put_to_back((level, x, y), image)\n\n # remove the request from the queued requests\n # note that it may not be there - a level change can flush the dict\n try:\n del self.queued_requests[(level, x, y)]\n except KeyError:\n pass\n\n # tell the world a new tile is available\n if self.callback:\n self.callback(level, x, y, image, True)\n else:\n msg = f'tile_is_available: self.callback is NOT SET!'\n log.error(msg)\n raise RuntimeError(msg) from None\n\n def SetAgeThresholdDays(self, num_days):\n \"\"\"Set the tile refetch threshold time.\n\n num_days number of days before refetching tiles\n\n If 'num_days' is 0 refetching is inhibited.\n \"\"\"\n\n # update the global in case we instantiate again\n global RefreshTilesAfterDays\n RefreshTilesAfterDays = num_days\n\n # recalculate this instance's age threshold in UNIX time\n self.rerequest_age = (time.time() -\n RefreshTilesAfterDays * self.SecondsInADay)\n"
},
{
"alpha_fraction": 0.42854413390159607,
"alphanum_fraction": 0.43083682656288147,
"avg_line_length": 64.42500305175781,
"blob_id": "5a69899c89eb81cb4a225712ae3e590f59463f59",
"content_id": "e7a5ff90ea799884e01f6fd0a50b7374b15ad8aa",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5234,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 80,
"path": "/setup.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\ndef readme():\n with open('RELEASE.rst') as f:\n return f.read()\n\nsetup(name='pySlipQt',\n version='0.5.2',\n description='A slipmap widget for PyQt5',\n long_description=readme(),\n url='http://github.com/rzzzwilson/pySlipQt',\n author='Ross Wilson',\n author_email='[email protected]',\n license='MIT',\n packages=['pySlipQt'],\n install_requires=['pyqt5'],\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only'],\n keywords='python pyqt5 slipmap map',\n data_files=[\n ('pySlipQt/examples', [\n 'pySlipQt/examples/README.rst',\n 'pySlipQt/examples/display_text.py',\n 'pySlipQt/examples/image_placement.py',\n 'pySlipQt/examples/layer_control.py',\n 'pySlipQt/examples/make_gmt_tiles.py',\n 'pySlipQt/examples/numofcpus.py',\n 'pySlipQt/examples/point_placement.py',\n 'pySlipQt/examples/pyslipqt_demo.py',\n 'pySlipQt/examples/test_assumptions.py',\n 'pySlipQt/examples/test_display_text.py',\n 'pySlipQt/examples/test_displayable_levels.py',\n 'pySlipQt/examples/test_gmt_local_tiles.py',\n 'pySlipQt/examples/test_gotoposition.py',\n 'pySlipQt/examples/test_image_placement.py',\n 'pySlipQt/examples/test_layer_control.py',\n 'pySlipQt/examples/test_maprel_image.py',\n 'pySlipQt/examples/test_maprel_polygon.py',\n 'pySlipQt/examples/test_maprel_text.py',\n 'pySlipQt/examples/test_multi_widget.py',\n 'pySlipQt/examples/test_osm_tiles.py',\n 'pySlipQt/examples/test_point_placement.py',\n 'pySlipQt/examples/test_polygon_placement.py',\n 'pySlipQt/examples/test_polyline_placement.py',\n 'pySlipQt/examples/test_text_placement.py',\n 'pySlipQt/examples/test_viewrel_image.py',\n 'pySlipQt/examples/test_viewrel_point.py',\n 'pySlipQt/examples/test_viewrel_polygon.py',\n 'pySlipQt/examples/test_viewrel_text.py',\n 'pySlipQt/examples/text_placement.py',\n 'pySlipQt/examples/tkinter_error.py',\n 'pySlipQt/examples/utils.py',\n ]),\n ('pySlipQt/examples/graphics', [\n 'pySlipQt/examples/graphics/Qt_logo.png',\n 'pySlipQt/examples/graphics/arrow_down.png',\n 'pySlipQt/examples/graphics/arrow_left.png',\n 'pySlipQt/examples/graphics/arrow_leftdown.png',\n 'pySlipQt/examples/graphics/arrow_leftup.png',\n 'pySlipQt/examples/graphics/arrow_right.png',\n 'pySlipQt/examples/graphics/arrow_rightdown.png',\n 'pySlipQt/examples/graphics/arrow_rightup.png',\n 'pySlipQt/examples/graphics/arrow_up.png',\n 'pySlipQt/examples/graphics/compass_rose.png',\n 'pySlipQt/examples/graphics/error_tile.png',\n 'pySlipQt/examples/graphics/image_place_target.png',\n 'pySlipQt/examples/graphics/img2py.py',\n 'pySlipQt/examples/graphics/pending_image.png',\n 'pySlipQt/examples/graphics/pyslip_demo_gmt.png',\n 'pySlipQt/examples/graphics/pyslip_demo_osm.png',\n 'pySlipQt/examples/graphics/pyslipqt_logo.png',\n 'pySlipQt/examples/graphics/shipwreck.png',\n ]),\n ],\n download_url='https://github.com/rzzzwilson/pySlipQt/releases/tag/0.5.2',\n include_package_data=True,\n zip_safe=False)\n"
},
{
"alpha_fraction": 0.5081863403320312,
"alphanum_fraction": 0.5457545518875122,
"avg_line_length": 31.829166412353516,
"blob_id": "7007ab6c4c5fbbd2b895c0df9dbe562fccd26d16",
"content_id": "15378f197808223c9fc39e4f7e3781ee2b2fad45",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7897,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 240,
"path": "/pySlipQt/examples/test_gotoposition.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt GototPosition() function.\n\nThe idea is to have a set of buttons selecting various geo positions on the OSM\ntile map. When selected, the view would be moved with GotoPosition() and a\nmap-relative marker would be drawn at that position. At the same time, a\nview-relative marker would be drawn at the centre of the view. The difference\nbetween the two markers shows errors in the Geo2Tile() & Tile2Geo() functions.\n\n\"\"\"\n\n\nimport os\nimport sys\nimport traceback\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,\n QHBoxLayout, QGridLayout,\n QPushButton)\n\nimport pySlipQt.pySlipQt as pySlipQt\nimport pySlipQt.open_street_map as tiles\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\n\n# set up logging\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = \"pySlip %s - GotoPosition() test %s\" % (pySlipQt.__version__, DemoVersion)\nDemoWidth = 800\nDemoHeight = 665\n\n# initial level and position\nInitViewLevel = 3\nInitViewPosition = (0, 0)\n\n# the number of decimal places in a lon/lat display\nLonLatPrecision = 2\n\n# a selection of cities, position from WikiPedia, etc\n# format is ((<lon>,<lat>),<name>)\n# lat+lon from Google Maps\nCities = [((0.0, 51.4778), 'Greenwich, United Kingdom'),\n ((5.33, 60.389444), 'Bergen, Norway'),\n ((151.209444, -33.865), 'Sydney, Australia'),\n ((-77.036667, 38.895111), 'Washington DC, USA'),\n ((132.472638, 34.395359), 'Hiroshima (広島市), Japan'),\n ((-8.008273, 31.632488), 'Marrakech (مراكش), Morocco'),\n ((18.955321, 69.649208), 'Tromsø, Norway'),\n ((-70.917058, -53.163863), 'Punta Arenas, Chile'),\n ((168.347217, -46.413020), 'Invercargill, New Zealand'),\n ((-147.8094268, 64.8282982), 'Fairbanks AK, USA'),\n ((103.8508548, 1.2848402), \"Singapore (One Raffles Place)\"),\n ((-3.2056135, 55.9552474), \"Maxwell's Birthplace\"),\n ((7.6059011, 50.3644454), \"Deutsches Eck, Koblenz, Germany\"),\n ((116.391667, 39.903333), \"Beijing (北京市)\"),\n ]\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass AppFrame(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setGeometry(300, 300, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n self.show()\n\n self.tile_source = tiles.Tiles()\n self.tile_directory = self.tile_source.tiles_dir\n\n # the data objects for map and view layers\n self.map_layer = None\n self.view_layer = None\n\n # build the GUI\n self.make_gui()\n\n self.show()\n\n # bind events to handlers\n self.pyslipqt.events.EVT_PYSLIPQT_POSITION.connect(self.handle_position_event)\n self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.handle_level_change)\n\n # finally, goto desired level and position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n#####\n# Build the GUI\n#####\n\n def make_gui(self):\n \"\"\"Create application GUI.\"\"\"\n\n # build the GUI\n grid = QGridLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n # add controls to right of spacer\n rows = self.make_gui_controls(grid)\n# grid.addLayout(controls)\n\n # put map view in left of horizontal box\n self.pyslipqt = pySlipQt.PySlipQt(self, start_level=InitViewLevel, tile_src=self.tile_source)\n grid.addWidget(self.pyslipqt, 0, 
0, rows+1, 1)\n\n def make_gui_controls(self, grid):\n \"\"\"Build the 'controls' part of the GUI\n\n grid reference to the grid layout to fill\n Returns reference to containing sizer object.\n \"\"\"\n\n # row to put controls into\n row = 0\n\n # add the map level in use widget\n level_mouse = self.make_gui_level_mouse()\n grid.addLayout(level_mouse, row, 1)\n row += 1\n\n # buttons for each point of interest\n self.buttons = {}\n for (num, city) in enumerate(Cities):\n (lonlat, name) = city\n btn = QPushButton(name)\n grid.addWidget(btn, row, 1)\n btn.clicked.connect(self.handle_button)\n self.buttons[btn] = city\n row += 1\n\n return row\n\n def make_gui_level_mouse(self):\n \"\"\"Build the control that shows the level and mouse position.\n\n Returns reference to containing layout.\n \"\"\"\n\n hbox = QHBoxLayout()\n self.map_level = DisplayText(title='', label='Level:', tooltip=None)\n self.mouse_position = DisplayText(title='', label='Lon/Lat:',\n text_width=100, tooltip=None)\n hbox.addWidget(self.map_level)\n hbox.addWidget(self.mouse_position)\n\n return hbox\n\n ######\n # Exception handlers\n ######\n\n def handle_button(self, event):\n \"\"\"Handle button event.\"\"\"\n\n # get the button that was pressed\n sender_btn = self.sender()\n (posn, name) = self.buttons[sender_btn]\n log(f\"Got button event, posn={posn}, name='{name}'\")\n\n self.pyslipqt.GotoPosition(posn)\n\n if self.map_layer:\n # if there was a previous layer, delete it\n self.pyslipqt.DeleteLayer(self.map_layer)\n map_data = [posn]\n point_colour = '#0000ff40'\n self.map_layer = self.pyslipqt.AddPointLayer(map_data, map_rel=True,\n placement='cc',\n color=point_colour,\n radius=11,\n visible=True,\n name='map_layer')\n\n if self.view_layer:\n self.pyslipqt.DeleteLayer(self.view_layer)\n view_data = [(((0,0),(0,-10),(0,0),(0,10),\n (0,0),(-10,0),(0,0),(10,0)),{'colour':'#ff0000ff'},)]\n# poly_colour = '#ff0000ff'\n self.view_layer = self.pyslipqt.AddPolygonLayer(view_data, map_rel=False,\n placement='cc',\n# colour=poly_colour,\n closed=False,\n visible=True,\n width=2,\n name='view_layer')\n\n def handle_position_event(self, event):\n \"\"\"Handle a pySlip POSITION event.\"\"\"\n\n posn_str = ''\n if event.mposn:\n (lon, lat) = event.mposn\n posn_str = ('%.*f / %.*f'\n % (LonLatPrecision, lon, LonLatPrecision, lat))\n\n self.mouse_position.set_text(posn_str)\n\n def handle_level_change(self, event):\n \"\"\"Handle a pySlip LEVEL event.\"\"\"\n\n self.map_level.set_text('%d' % event.level)\n\n\n################################################################################\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# use user tile directory, if supplied\ntile_dir = None\nif len(sys.argv) > 1:\n tile_dir = sys.argv[1]\n\napp = QApplication(sys.argv)\nex = AppFrame()\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.8292682766914368,
"alphanum_fraction": 0.8292682766914368,
"avg_line_length": 40,
"blob_id": "cd9c0f1aa72a5592b51897311753cc2fa057d1dc",
"content_id": "102b751b3da0e9d8f1a3d5fed86be2a922234562",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 41,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 1,
"path": "/pySlipQt/doc/README.rst",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "Documentation here is built with Sphinx.\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 16.399999618530273,
"blob_id": "34aca618c495330b7b7f0dd55e45cf6dc8e23bc0",
"content_id": "7f0532804f33a0bce04865f697b9fa996e92f9de",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 5,
"path": "/pySlipQt/examples/Makefile",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "test: clean\n\tpython test_image_placement.py gmt > xyzzy.log 2>&1\n\nclean:\n\trm -Rf *.log\n"
},
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.7487179636955261,
"avg_line_length": 31.5,
"blob_id": "a5168644a3e44bfe05ae274c8da8673944c8aee0",
"content_id": "afad6047af0475dda919da20734fae04d4dbc86c",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 195,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 6,
"path": "/pySlipQt/README.rst",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "What is this?\n=============\n\nThis directory contains the implementation of pySlipQt, a custom\nPyQt5 widget that can show a tiled, draggable map with programmable\noverlays of points, images, etc.\n"
},
{
"alpha_fraction": 0.5691236853599548,
"alphanum_fraction": 0.5700727701187134,
"avg_line_length": 33.122222900390625,
"blob_id": "49bf861bfbb04989bde002edcb54380d753d6377",
"content_id": "cf3933c30da60423472aa7dc57947d3ec1fcc32d",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3187,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 90,
"path": "/pySlipQt/examples/tkinter_error.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nA small function to put an error message on the screen with Tkinter.\r\n\r\nUsed by GUI programs started from a desktop icon.\r\n\"\"\"\r\n\r\nimport textwrap\r\n\r\ntry:\r\n from tkinter import *\r\nexcept ImportError:\r\n print(\"You must install 'tkinter'\")\r\n print(\"Ubuntu: apt-get install python-tk\")\r\n print(\"Windows: ???\")\r\n sys.exit(1)\r\n\r\n\r\ndef tkinter_error(msg, title=None):\r\n \"\"\"Show an error message in a Tkinter dialog.\r\n\r\n msg text message to display (may contain newlines, etc)\r\n title the window title (defaults to 'ERROR')\r\n\r\n The whole point of this is to get *some* output from a python GUI\r\n program when run from an icon double-click. We use tkinter since it's\r\n part of standard python and we may be trying to say something like:\r\n\r\n +-------------------------+\r\n | You must install PyQt |\r\n +-------------------------+\r\n\r\n Under Linux and OSX we can run the program from the commandline and we would\r\n see printed output. Under Windows that's hard to do, hence this code.\r\n\r\n NOTE: For some reason, Ubuntu python doesn't have tkinter installed as\r\n part of the base install. Do \"sudo apt-get install python-tk\".\r\n \"\"\"\r\n\r\n ######\r\n # Define the Application class\r\n ######\r\n\r\n class Application(Frame):\r\n def createWidgets(self):\r\n self.LABEL = Label(self, text=self.text, font=(\"Courier\", 14))\r\n self.LABEL[\"fg\"] = \"black\"\r\n self.LABEL[\"bg\"] = \"yellow\"\r\n self.LABEL[\"justify\"] = \"left\"\r\n self.LABEL.pack()\r\n\r\n def __init__(self, text, master=None):\r\n self.text = text\r\n Frame.__init__(self, master)\r\n self.pack()\r\n self.createWidgets()\r\n self.tkraise()\r\n\r\n\r\n # set the title string\r\n if title is None:\r\n title = 'ERROR'\r\n\r\n # get the message text\r\n msg = '\\n' + msg.strip() + '\\n'\r\n\r\n msg = msg.replace('\\r', '')\r\n msg = msg.replace('\\n', ' \\n ')\r\n\r\n app = Application(msg)\r\n app.master.title(title)\r\n app.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n # just a simple \"smoke test\" of the error notification\r\n long_msg = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit, '\r\n 'sed do eiusmod tempor incididunt ut labore et dolore magna '\r\n 'aliqua. Ut enim ad minim veniam, quis nostrud exercitation '\r\n 'ullamco laboris nisi ut aliquip ex ea commodo consequat. '\r\n 'Duis aute irure dolor in reprehenderit in voluptate velit '\r\n 'esse cillum dolore eu fugiat nulla pariatur. Excepteur sint '\r\n 'occaecat cupidatat non proident, sunt in culpa qui officia '\r\n 'deserunt mollit anim id est laborum.'\r\n )\r\n\r\n tkinter_error('A short message with initial TAB:\\n\\tHello, world!\\n\\n'\r\n 'Some Unicode (你好, สวัสดี, こんにちは)\\n\\n'\r\n 'A large text paragraph. You must wrap and indent the text yourself:\\n'\r\n + textwrap.fill(long_msg, initial_indent=' ', subsequent_indent=' '),\r\n title='Test Error Message')\r\n"
},
{
"alpha_fraction": 0.500239908695221,
"alphanum_fraction": 0.5290306806564331,
"avg_line_length": 27.94444465637207,
"blob_id": "71591ac5338d13141380a78cb0ac50a4f767566d",
"content_id": "bb3bfb25e6e2659aea8d8b96de345f87f8e9cdb4",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4168,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 144,
"path": "/pySlipQt/examples/test_maprel_text.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt map-relative text.\n\nUsage: test_maprel_text.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test map-relative text placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (133.87, -23.7) # Alice Springs\n\nTextMapData = [(151.20, -33.85, 'Sydney cc', {'placement': 'cc'}),\n (144.95, -37.84, 'Melbourne ne', {'placement': 'ne'}),\n (153.08, -27.48, 'Brisbane ce', {'placement': 'ce'}),\n (115.86, -31.96, 'Perth se', {'placement': 'se'}),\n (138.30, -35.52, 'Adelaide cs', {'placement': 'cs'}),\n (130.98, -12.61, 'Darwin sw', {'placement': 'sw'}),\n (147.31, -42.96, 'Hobart cw', {'placement': 'cw'}),\n (149.20, -35.31, 'Canberra nw', {'placement': 'nw',\n 'colour': 'red',\n 'textcolour': 'blue',\n 'fontsize': 10}),\n (133.90, -23.70, 'Alice Springs cn', {'placement': 'cn'})]\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # add test layers\n # add test test layer\n self.text_layer = self.pyslipqt.AddTextLayer(TextMapData,\n map_rel=True,\n name='<text_map_layer>',\n offset_x=5, offset_y=1)\n\n self.show()\n\n # finally, set initial view position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n################################################################################\n\nimport sys\nimport getopt\nimport traceback\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the 
app\nlog(DemoName)\ntile_dir = 'test_maprel_text'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.6541244387626648,
"alphanum_fraction": 0.6577423810958862,
"avg_line_length": 26.098039627075195,
"blob_id": "07ebc6f1d6c5adac340b270626635df2d1cf8bab",
"content_id": "ce5b49f613ef2e65f2d50eb1d405df34512ef8cc",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1382,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 51,
"path": "/pySlipQt/examples/test_layer_control.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest the LayerControl custom widget used by pySlipQt.\n\"\"\"\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout\n\nfrom layer_control import LayerControl\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\nclass LayerControlExample(QWidget):\n \"\"\"Application to demonstrate the pySlipQt 'LayerControl' widget.\"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.lc_group = LayerControl(self, title='Group title longer', selectable=True, tooltip=\"tooltip\")\n\n hbox = QHBoxLayout()\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(self.lc_group)\n self.setLayout(hbox)\n\n self.setWindowTitle('LayerControl widget')\n self.show()\n\n # connect the widget to '.changed' event handler\n self.lc_group.change_add.connect(self.layer_add)\n self.lc_group.change_show.connect(self.layer_show)\n self.lc_group.change_select.connect(self.layer_select)\n\n def layer_add(self, add):\n print(f'Layer ADD={add}')\n log(f'Layer ADD={add}')\n\n def layer_show(self, show):\n print(f'Layer SHOW={show}')\n log(f'Layer SHOW={show}')\n\n def layer_select(self, select):\n print(f'Layer SELECT={select}')\n log(f'Layer SELECT={select}')\n\n\napp = QApplication(sys.argv)\nex = LayerControlExample()\nsys.exit(app.exec())\n"
},
{
"alpha_fraction": 0.5523326396942139,
"alphanum_fraction": 0.5610455274581909,
"avg_line_length": 29.631755828857422,
"blob_id": "6f01c17f25f50705100e93cb1263e9b4d406a41b",
"content_id": "13619df1a2c479c4fcd3536dfcf615b5ac3a9bc6",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9067,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 296,
"path": "/pySlipQt/examples/test_text_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nProgram to test text map-relative and view-relative placement.\nSelect which to show and experiment with placement parameters.\n\nUsage: test_text_placement.py [-h|--help] [-d] [(-t|--tiles) (GMT|OSM)]\n\"\"\"\n\n\nimport os\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,\n QGridLayout, QVBoxLayout, QHBoxLayout)\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\nfrom text_placement import TextPlacementControl\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = 'Test text placement %s (pySlipQt %s)' % (DemoVersion, pySlipQt.__version__)\n\nDemoHeight = 800\nDemoWidth = 1000\n\n# initial values\n#InitialViewLevel = 4\nInitialViewLevel = 0\nInitialViewPosition = (145.0, -20.0)\n\n# tiles info\nTileDirectory = 'test_tiles'\nMinTileLevel = 0\n\n# the number of decimal places in a lon/lat display\nLonLatPrecision = 2\n\n# startup size of the application\nDefaultAppSize = (1000, 700)\n\n################################################################################\n# The main application window.\n################################################################################\n\nclass TestTextPlacement(QMainWindow):\n\n def __init__(self, tile_dir=TileDirectory):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # variables for layer IDs\n self.text_map_layer = None\n self.text_view_layer = None\n\n # build the GUI\n grid = QGridLayout()\n grid.setColumnStretch(0, 1)\n grid.setContentsMargins(2, 2, 2, 2)\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n # build the 'controls' part of GUI\n num_rows = self.make_gui_controls(grid)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt, 0, 0, num_rows + 1, 1)\n grid.setRowStretch(num_rows, 1)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position\n# self.map_level.set_text('%d' % InitViewLevel)\n\n # tie events from controls to handlers\n self.map_text.remove.connect(self.remove_text_map)\n self.map_text.change.connect(self.change_text_map)\n\n self.view_text.remove.connect(self.remove_text_view)\n self.view_text.change.connect(self.change_text_view)\n\n self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.handle_level_change)\n self.pyslipqt.events.EVT_PYSLIPQT_POSITION.connect(self.handle_position_event)\n\n # set initial view position\n self.map_level.set_text('0')\n\n self.show()\n\n def make_gui_controls(self, grid):\n \"\"\"Build the controls in the right side of the grid.\"\"\"\n\n # the 'grid_row' variable is row to add into\n grid_row = 0\n\n # put level and position into grid at top right\n self.map_level = DisplayText(title='', label='Level:',\n tooltip=None)\n grid.addWidget(self.map_level, grid_row, 1, 1, 1)\n self.mouse_position = DisplayText(title='',\n label='Lon/Lat:', text_width=100,\n tooltip='Shows the mouse longitude and latitude on the map',)\n grid.addWidget(self.mouse_position, grid_row, 2, 1, 1)\n grid_row += 1\n\n # now add the two text control widgets to right part of grid\n self.map_text = TextPlacementControl('Map-relative Text')\n 
self.map_text.setToolTip('Position text on the map')\n grid.addWidget(self.map_text, grid_row, 1, 1, 2)\n grid_row += 1\n\n self.view_text = TextPlacementControl('View-relative Text')\n self.view_text.setToolTip('Position text on the view')\n grid.addWidget(self.view_text, grid_row, 1, 1, 2)\n grid_row += 1\n\n return grid_row\n\n def final_setup(self):\n \"\"\"Perform final setup.\n\n We do this in a OneShot() function for those operations that\n must not be done while the GUI is \"fluid\".\n \"\"\"\n\n pass\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n\n ######\n # event handlers\n ######\n\n##### map-relative text layer\n\n def change_text_map(self, text, textcolour, placement, radius, colour,\n x, y, off_x, off_y):\n \"\"\"Display updated text.\"\"\"\n\n # remove any previous layer\n if self.text_map_layer:\n self.remove_text_map()\n\n # create the new layer\n attribs = {'radius': radius,\n 'colour': colour,\n 'textcolour': textcolour,\n 'offset_x': off_x,\n 'offset_y': off_y}\n if placement != 'none':\n attribs['placement'] = placement\n text_data = [(x, y, text, attribs)]\n self.text_map_layer = self.pyslipqt.AddTextLayer(text_data,\n map_rel=True,\n visible=True,\n name='<map_text>')\n\n def remove_text_map(self):\n \"\"\"Delete the text map-relative layer.\"\"\"\n\n if self.text_map_layer:\n self.pyslipqt.DeleteLayer(self.text_map_layer)\n self.text_map_layer = None\n\n##### view-relative text layer\n\n def change_text_view(self, text, textcolour, placement, radius, colour,\n x, y, off_x, off_y):\n \"\"\"Display updated text.\"\"\"\n\n if self.text_view_layer:\n self.remove_text_view()\n\n print(f\"change_text_view: placement='{placement}'\")\n\n # create a new text layer\n attribs = {'radius': radius,\n 'colour': colour,\n 'textcolour': textcolour,\n 'offset_x': off_x,\n 'offset_y': off_y}\n if placement != 'none':\n attribs['placement'] = placement\n print(f\"change_text_view: attribs={attribs}\")\n text_data = [(x, y, text, attribs)]\n self.text_view_layer = self.pyslipqt.AddTextLayer(text_data,\n map_rel=False,\n visible=True,\n name='<view_text>')\n\n def remove_text_view(self):\n \"\"\"Delete the text view-relative layer.\"\"\"\n\n if self.text_view_layer:\n self.pyslipqt.DeleteLayer(self.text_view_layer)\n self.text_view_layer = None\n\n ######\n # Exception handlers\n ######\n\n def handle_position_event(self, event):\n \"\"\"Handle a pySlipQt POSITION event.\"\"\"\n\n posn_str = ''\n if event.mposn:\n (lon, lat) = event.mposn\n posn_str = ('%.*f / %.*f' % (LonLatPrecision, lon,\n LonLatPrecision, lat))\n\n self.mouse_position.set_text(posn_str)\n\n def handle_level_change(self, event):\n \"\"\"Handle a pySlipQt LEVEL event.\"\"\"\n\n self.map_level.set_text('%d' % event.level)\n\n###############################################################################\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n log(msg)\n print(msg)\n# tkinter_error.tkinter_error(msg)\n sys.exit(1)\n\ndef usage(msg=None):\n if msg:\n print(('*'*80 + '\\n%s\\n' + '*'*80) % msg)\n print(__doc__)\n\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# analyse the command line args\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'dht:', ['debug', 'help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_dir = 'test_tiles'\ntile_source 
= 'GMT'\ndebug = False\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ['-d', '--debug']:\n debug = True\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\nimport pySlipQt.gmt_local as Tiles\n## set up the appropriate tile source\n#if tile_source == 'gmt':\n# print('importing pySlipQt.gmt_local')\n# import pySlipQt.gmt_local as Tiles\n#elif tile_source == 'osm':\n# print('importing pySlipQt.open_street_map')\n# import pySlipQt.open_street_map as Tiles\n#else:\n# usage('Bad tile source: %s' % tile_source)\n# sys.exit(3)\n\n# start the app\nlog(DemoName)\napp = QApplication(args)\nex = TestTextPlacement(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.553354024887085,
"alphanum_fraction": 0.5660061836242676,
"avg_line_length": 27.489795684814453,
"blob_id": "9f1ddbb7d51c35363d744d8498a54a03cf92cf34",
"content_id": "4b56612cb486fdb18f3b6a570127fd92c5c20e7b",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4189,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 147,
"path": "/pySlipQt/examples/test_viewrel_image.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt view-relative images.\n\nUsage: test_maprel_image.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\nfrom image_placement import ImagePlacementControl\n\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test view-relative image placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (133.87, -23.7) # Alice Springs\n\n# test data\narrow_cw = 'graphics/arrow_left.png'\narrow_nw = 'graphics/arrow_leftup.png'\narrow_cn = 'graphics/arrow_up.png'\narrow_ne = 'graphics/arrow_rightup.png'\narrow_ce = 'graphics/arrow_right.png'\narrow_se = 'graphics/arrow_rightdown.png'\narrow_cs = 'graphics/arrow_down.png'\narrow_sw = 'graphics/arrow_leftdown.png'\n\nImageViewData = [(0, 0, arrow_cw, {'placement': 'cw'}),\n (0, 0, arrow_nw, {'placement': 'nw'}),\n (0, 0, arrow_cn, {'placement': 'cn'}),\n (0, 0, arrow_ne, {'placement': 'ne'}),\n (0, 0, arrow_ce, {'placement': 'ce'}),\n (0, 0, arrow_se, {'placement': 'se'}),\n (0, 0, arrow_cs, {'placement': 'cs'}),\n (0, 0, arrow_sw, {'placement': 'sw'}),\n ]\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position and add test layer(s)\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n self.text_layer = self.pyslipqt.AddImageLayer(ImageViewData,\n map_rel=False,\n name='<image_view_layer>',\n offset_x=0, offset_y=0)\n\n self.show()\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif 
tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start wxPython app\nlog(DemoName)\ntile_dir = 'test_viewrel_image'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n\n"
},
{
"alpha_fraction": 0.560149073600769,
"alphanum_fraction": 0.5639920830726624,
"avg_line_length": 30.569852828979492,
"blob_id": "8b6d19d7d14ff854a744d5b8fdd978b243aa4360",
"content_id": "e962d115bed7280f9461cc8c8fcee010023250f7",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8587,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 272,
"path": "/pySlipQt/tiles.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA base Tiles object for pySlipQt local tiles.\n\nAll tile sources should inherit from this base class.\nFor example, see gmt_local.py (local tiles) and osm_tiles.py\n(internet tiles).\n\"\"\"\n\nimport os\nimport math\nfrom PyQt5.QtGui import QPixmap\nimport pySlipQt.pycacheback as pycacheback\nimport pySlipQt.log as log\n\ntry:\n log = log.Log('pyslipqt.log')\nexcept AttributeError:\n # already have a log file, ignore\n pass\n\n\n# set how old disk-cache tiles can be before we re-request them from the internet\n# this is the number of days old a tile is before we re-request\n# if 'None', never re-request tiles after first satisfied request\nRefreshTilesAfterDays = 60\n\n\n################################################################################\n# Define a cache for tiles. This is an in-memory cache backed to disk.\n################################################################################\n\nclass Cache(pycacheback.pyCacheBack):\n \"\"\"Cache for local or internet tiles.\n\n Instance variables we use from pyCacheBack:\n self._tiles_dir path to the on-disk cache directory\n \"\"\"\n\n PicExtension = 'png'\n TilePath = '{Z}/{X}/{Y}.%s' % PicExtension\n\n\n def tile_date(self, key):\n \"\"\"Return the creation date of a tile given its key.\"\"\"\n\n tile_path = self.tile_path(key)\n return os.path.getctime(tile_path)\n\n def tile_path(self, key):\n \"\"\"Return path to a tile file given its key.\"\"\"\n\n (level, x, y) = key\n file_path = os.path.join(self._tiles_dir,\n self.TilePath.format(Z=level, X=x, Y=y))\n return file_path\n\n def _get_from_back(self, key):\n \"\"\"Retrieve value for 'key' from backing storage.\n\n key tuple (level, x, y)\n where level is the level of the tile\n x, y is the tile coordinates (integer)\n\n Raises KeyError if tile not found.\n \"\"\"\n\n # look for item in disk cache\n file_path = self.tile_path(key)\n if not os.path.exists(file_path):\n # tile not there, raise KeyError\n raise KeyError(\"Item with key '%s' not found in on-disk cache\"\n % str(key)) from None\n\n # we have the tile file - read into memory & return\n return QPixmap(file_path)\n\n def _put_to_back(self, key, image):\n \"\"\"Put a image into on-disk cache.\n\n key a tuple: (level, x, y)\n where level level for image\n x integer tile coordinate\n y integer tile coordinate\n image the wx.Image to save\n \"\"\"\n\n (level, x, y) = key\n tile_path = os.path.join(self._tiles_dir,\n self.TilePath.format(Z=level, X=x, Y=y))\n dir_path = os.path.dirname(tile_path)\n try:\n os.makedirs(dir_path)\n except OSError:\n # we assume it's a \"directory exists' error, which we ignore\n pass\n\n image.save(tile_path, Cache.PicExtension)\n\n###############################################################################\n# Base class for a tile source - handles access to a source of tiles.\n###############################################################################\n\nclass BaseTiles(object):\n \"\"\"A base tile object to source local tiles for pySlip.\"\"\"\n\n # maximum number of in-memory cached tiles\n MaxLRU = 1000\n\n def __init__(self, levels, tile_width, tile_height,\n tiles_dir, max_lru=MaxLRU):\n \"\"\"Initialise a Tiles instance.\n\n levels a list of level numbers that are to be served\n tile_width width of each tile in pixels\n tile_height height of each tile in pixels\n tiles_dir path to on-disk tile cache directory\n max_lru maximum number of cached in-memory tiles\n \"\"\"\n\n # save params\n self.levels = levels\n self.tile_size_x = tile_width\n 
self.tile_size_y = tile_height\n self.tiles_dir = tiles_dir\n self.max_lru = max_lru\n\n # set min and max tile levels and current level\n self.min_level = min(self.levels)\n self.max_level = max(self.levels)\n self.level = self.min_level\n\n# TODO: implement map wrap-around\n# self.wrap_x = False\n# self.wrap_y = False\n\n # setup the tile cache\n self.cache = Cache(tiles_dir=tiles_dir, max_lru=max_lru)\n\n #####\n # Now finish setting up\n #####\n\n # tiles extent for tile data (left, right, top, bottom)\n self.extent = (-180.0, 180.0, -85.0511, 85.0511)\n\n # check tile cache - we expect there to already be a directory\n if not os.path.isdir(tiles_dir):\n if os.path.isfile(tiles_dir):\n msg = (\"%s doesn't appear to be a tile cache directory\"\n % tiles_dir)\n log.critical(msg)\n raise Exception(msg) from None\n\n msg = \"The tiles directory %s doesn't exist.\" % tiles_dir\n log.critical(msg)\n raise Exception(msg) from None\n\n# possible recursion here?\n# self.UseLevel(min(self.levels))\n\n def UseLevel(self, level):\n \"\"\"Prepare to serve tiles from the required level.\n\n level the required level\n\n Return True if level change occurred, else False if not possible.\n \"\"\"\n\n # first, CAN we zoom to this level?\n if level not in self.levels:\n return False\n\n # get tile info\n info = self.GetInfo(level)\n if info is None:\n return False\n\n # OK, save new level\n self.level = level\n (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info\n\n return True\n\n def GetTile(self, x, y):\n \"\"\"Get bitmap for tile at tile coords (x, y) and current level.\n\n x X coord of tile required (tile coordinates)\n y Y coord of tile required (tile coordinates)\n\n Returns bitmap object for the tile image.\n Tile coordinates are measured from map top-left.\n \"\"\"\n\n# # if we are wrapping X or Y, get wrapped tile coords\n# if self.wrap_x:\n# x = (x + self.num_tiles_x*self.tile_size_x) % self.num_tiles_x\n# if self.wrap_y:\n# y = (y + self.num_tiles_y*self.tile_size_y) % self.num_tiles_y\n\n # retrieve the tile\n try:\n # get tile from cache\n return self.cache[(self.level, x, y)]\n except KeyError as e:\n raise KeyError(\"Can't find tile for key '%s'\"\n % str((self.level, x, y))) from None\n\n def GetInfo(self, level):\n \"\"\"Get tile info for a particular level.\n\n level the level to get tile info for\n\n Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y) or None if 'level'\n doesn't exist.\n\n Note that ppd_? may be meaningless for some tiles, so its\n value will be None.\n \"\"\"\n\n # is required level available?\n if level not in self.levels:\n return None\n\n # otherwise get the information\n self.num_tiles_x = int(math.pow(2, level))\n self.num_tiles_y = int(math.pow(2, level))\n\n return (self.num_tiles_x, self.num_tiles_y, None, None)\n\n def GetExtent(self):\n \"\"\"Get geo limits of the map tiles.\n \n Returns a tuple: (min_geo_x, max_geo_x, min_geo_y, max_geo_y)\n \"\"\"\n\n return self.extent\n\n def tile_on_disk(self, level, x, y):\n \"\"\"Return True if tile at (level, x, y) is on-disk.\"\"\"\n\n raise Exception('You must override BaseTiles.tile_on_disk(level, x, y))')\n\n def setCallback(self, callback):\n \"\"\"Set the \"tile available\" callback function.\n\n Only used with internet tiles. 
See \"tiles_net.py\".\n \"\"\"\n\n pass\n #raise Exception('You must override BaseTiles.setCallback(callback))')\n\n def Geo2Tile(self, xgeo, ygeo):\n \"\"\"Convert geo to tile fractional coordinates for level in use.\n\n xgeo geo longitude in degrees\n ygeo geo latitude in degrees\n\n Note that we assume the point *is* on the map!\n \"\"\"\n\n raise Exception('You must override BaseTiles.Geo2Tile(xgeo, ygeo)')\n\n def Tile2Geo(self, xtile, ytile):\n \"\"\"Convert tile fractional coordinates to geo for level in use.\n\n xtile tile fractional X coordinate\n ytile tile fractional Y coordinate\n\n Note that we assume the point *is* on the map!\n \"\"\"\n\n raise Exception('You must override BaseTiles.Tile2Geo(xtile, ytile)')\n"
},
{
"alpha_fraction": 0.5502139329910278,
"alphanum_fraction": 0.5650754570960999,
"avg_line_length": 36.31932830810547,
"blob_id": "fc64f704e63942ec7eea72f4843d469d0c1ba4d2",
"content_id": "191a01f04863addc7a62d7a61c8e0e2e77790a4e",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8882,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 238,
"path": "/pySlipQt/examples/image_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe custom control for test_image_placement.py program.\n\"\"\"\n\nimport os\nimport sys\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import (QWidget, QGridLayout, QHBoxLayout, QGroupBox,\n QPushButton, QLabel, QComboBox, QLineEdit,\n QSizePolicy, QFileDialog, QColorDialog)\nfrom PyQt5.QtGui import QColor\n\n\n##################################\n# Custom ImagePlacementControl widget.\n# \n# Constructor:\n# \n# ipc = ImagePlacementControl('test title')\n# \n# Events:\n# \n# .change the contents were changed\n# .remove the image should be removed\n#\n# The '.change' event has attached attributes holding the values from the\n# widget, all checked so they are 'sane'.\n##################################\n\nclass ImagePlacementControl(QWidget):\n\n # various sizes\n LineEditWidth = 40\n ButtonWidth = 40\n ComboboxWidth = 70\n\n # signals raised by this widget\n change = pyqtSignal(str, str, int, QColor, int, int, int, int)\n remove = pyqtSignal()\n\n # some stylesheets\n LabelStyle = 'QLabel { background-color : #f0f0f0; border: 1px solid gray; border-radius: 3px; }'\n GroupStyle = ('QGroupBox { background-color: rgb(230, 230, 230); }'\n 'QGroupBox::title { subcontrol-origin: margin; '\n ' background-color: rgb(215, 215, 215); '\n ' border-radius: 3px; '\n ' padding: 2 2px; '\n ' color: black; }')\n ButtonStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: white;'\n '}')\n ButtonColourStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: %s;'\n '}')\n\n def __init__(self, title):\n \"\"\"Initialise a ImagePlacementControl instance.\n\n title title to give the custom widget\n \"\"\"\n\n super().__init__()\n\n # some state\n self.image_path = None # path to the image file\n\n # create subwidgets used in this custom widget\n self.filename = QLabel('')\n self.filename.setStyleSheet(ImagePlacementControl.LabelStyle)\n self.filename.setToolTip('Click here to change the image file')\n\n self.placement = QComboBox()\n for p in ['none', 'nw', 'cn', 'ne', 'ce', 'se', 'cs', 'sw', 'cw', 'cc']:\n self.placement.addItem(p)\n\n self.point_radius = QComboBox()\n for p in range(21):\n self.point_radius.addItem(str(p))\n self.point_radius.setCurrentIndex(3)\n self.point_radius.setFixedWidth(ImagePlacementControl.ComboboxWidth)\n\n self.point_colour = QPushButton('')\n self.point_colour.setFixedWidth(ImagePlacementControl.ButtonWidth)\n self.point_colour.setToolTip('Click here to change the point colour')\n self.point_colour.setStyleSheet(ImagePlacementControl.ButtonStyle)\n\n self.posn_x = QComboBox()\n for p in range(0, 121, 10):\n self.posn_x.addItem(str(p - 60))\n self.posn_x.setCurrentIndex(6)\n self.posn_x.setFixedWidth(ImagePlacementControl.ComboboxWidth)\n\n self.posn_y = QComboBox()\n for p in range(0, 121, 10):\n self.posn_y.addItem(str(p - 60))\n self.posn_y.setCurrentIndex(6)\n self.posn_y.setFixedWidth(ImagePlacementControl.ComboboxWidth)\n\n self.offset_x = QComboBox()\n for p in range(0, 121, 10):\n self.offset_x.addItem(str(p - 60))\n self.offset_x.setCurrentIndex(6)\n self.offset_x.setFixedWidth(ImagePlacementControl.ComboboxWidth)\n\n self.offset_y = QComboBox()\n for p in range(0, 121, 10):\n self.offset_y.addItem(str(p - 60))\n self.offset_y.setCurrentIndex(6)\n 
self.offset_y.setFixedWidth(ImagePlacementControl.ComboboxWidth)\n\n btn_remove = QPushButton('Remove')\n btn_remove.resize(btn_remove.sizeHint())\n btn_update = QPushButton('Update')\n btn_update.resize(btn_update.sizeHint())\n\n # start the layout\n option_box = QGroupBox(title)\n option_box.setStyleSheet(ImagePlacementControl.GroupStyle)\n\n box_layout = QGridLayout()\n box_layout.setContentsMargins(2, 2, 2, 2)\n box_layout.setHorizontalSpacing(1)\n box_layout.setColumnStretch(0, 1)\n\n # start layout\n row = 1\n label = QLabel('filename: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.filename, row, 1, 1, 3)\n\n row += 1\n label = QLabel('placement: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.placement, row, 1)\n\n row += 1\n label = QLabel('point radius: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.point_radius, row, 1)\n label = QLabel('colour: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.point_colour, row, 3)\n\n row += 1\n label = QLabel('X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.posn_x, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.posn_y, row, 3)\n\n row += 1\n label = QLabel('offset X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.offset_x, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.offset_y, row, 3)\n\n row += 1\n box_layout.addWidget(btn_remove, row, 1)\n box_layout.addWidget(btn_update, row, 3)\n\n option_box.setLayout(box_layout)\n\n layout = QHBoxLayout()\n layout.setContentsMargins(1, 1, 1, 1)\n layout.addWidget(option_box)\n\n self.setLayout(layout)\n\n # set size hints\n self.setMinimumSize(300, 200)\n size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setSizePolicy(size_policy)\n\n # connect internal widget events to handlers\n self.filename.mouseReleaseEvent = self.changeGraphicsFile\n self.point_colour.clicked.connect(self.changePointColour)\n btn_remove.clicked.connect(self.removeImage)\n btn_update.clicked.connect(self.updateData)\n\n def changeGraphicsFile(self, event):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_types = \"PNG (*.png);;JPG (*.jpg)\"\n (filename, _) = QFileDialog.getOpenFileName(self,\"Open image file\", \"\",\n file_types,\n options=options)\n if filename:\n # just dislay the filename in the text field\n self.image_path = filename\n filename = os.path.basename(filename)\n self.filename.setText(filename)\n\n def changePointColour(self, event):\n color = QColorDialog.getColor()\n if color.isValid():\n colour = color.name()\n # set colour button background\n self.point_colour.setStyleSheet(ImagePlacementControl.ButtonColourStyle % colour)\n \n def removeImage(self, event):\n self.remove.emit()\n\n def updateData(self, event):\n # get data from the widgets\n placement = str(self.placement.currentText())\n if placement == 'none':\n placement = None\n radius = int(self.point_radius.currentText())\n colour = self.point_colour.palette().color(1)\n x = int(self.posn_x.currentText())\n y = int(self.posn_y.currentText())\n offset_x = 
int(self.offset_x.currentText())\n offset_y = int(self.offset_y.currentText())\n \n self.change.emit(self.image_path, placement, radius, colour, x, y, offset_x, offset_y)\n"
},
{
"alpha_fraction": 0.5113006234169006,
"alphanum_fraction": 0.5415778160095215,
"avg_line_length": 29.454545974731445,
"blob_id": "bb6cc7ca3b47135698766cbb8888bfebf459b510",
"content_id": "fdd24a7a97988c738db4e724b17707ead4bd25e4",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4690,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 154,
"path": "/pySlipQt/examples/test_maprel_polygon.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"Test PySlipQt map-relative polygons.\n\nUsage: test_maprel_poly.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\nimport pySlipQt.pySlipQt as pySlipQt\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\nfrom image_placement import ImagePlacementControl\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test map-relative polygon placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (152.0, -8.0)\n\n# create polygon data\nOpenPoly = ((145,5),(135,5),(135,-5),(145,-5))\nClosedPoly = ((170,5),(160,5),(160,-5),(170,-5))\nFilledPoly = ((170,-20),(160,-20),(160,-10),(170,-10))\nClosedFilledPoly = ((145,-20),(135,-20),(135,-10),(145,-10))\n\nPolyMapData = [[OpenPoly, {'width': 2}],\n [ClosedPoly, {'width': 10, 'color': '#00ff0040',\n 'closed': True}],\n [FilledPoly, {'colour': 'blue',\n 'filled': True,\n 'fillcolour': '#00ff0022'}],\n [ClosedFilledPoly, {'colour': 'black',\n 'closed': True,\n 'filled': True,\n 'fillcolour': 'yellow'}]]\n\nTextMapData = [(135, 5, 'open (polygons always closed in pSlipQt)', {'placement': 'ce', 'radius': 0}),\n (170, 5, 'closed', {'placement': 'cw', 'radius': 0}),\n (170, -10, 'open but filled (translucent) (polygons always closed in pSlipQt)',\n {'placement': 'cw', 'radius': 0}),\n (135, -10, 'closed & filled (solid)',\n {'placement': 'ce', 'radius': 0}),\n ]\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # add test layers\n self.poly_layer = self.pyslipqt.AddPolygonLayer(PolyMapData,\n map_rel=True,\n name='<poly_map_layer>')\n self.text_layer = self.pyslipqt.AddTextLayer(TextMapData, map_rel=True,\n name='<text_map_layer>')\n\n self.show()\n\n # finally, set initial view position\n self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 
'GMT'\nfor (opt, param) in opts:\n    if opt in ['-h', '--help']:\n        usage()\n        sys.exit(0)\n    elif opt in ('-t', '--tiles'):\n        tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n    import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n    import pySlipQt.open_street_map as Tiles\nelse:\n    usage('Bad tile source: %s' % tile_source)\n    sys.exit(3)\n\n# start the PyQt5 app\nlog(DemoName)\ntile_dir = 'test_maprel_polygon.tiles'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.556861937046051,
"alphanum_fraction": 0.5740928649902344,
"avg_line_length": 30.819355010986328,
"blob_id": "8fb40fae8f04988a589477b15986539b7b6cb7ac",
"content_id": "ffceff946a3c63d061e9695fa68488523207c7dd",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4933,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 155,
"path": "/pySlipQt/examples/test_osm_tiles.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest the OSM tiles code.\n\nRequires a PyQt5 application to be created before use.\nIf we can create a bitmap without PyQt5, we could remove this dependency.\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport pickle\nimport unittest\nimport shutil\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QLabel,\n QSpinBox, QVBoxLayout, QVBoxLayout, QAction,\n QHBoxLayout, QVBoxLayout, QGridLayout,\n QErrorMessage)\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.open_street_map as tiles\n\n\n######\n# Various demo constants\n######\n\n# where the OSM tiles are cached on disk\nTilesDir = 'test_osm_tiles'\n\nDemoName = 'OSM Tiles Cache Test'\nDemoVersion = '0.1'\n\nDemoWidth = 300\nDemoHeight = 250\n\n\nclass AppFrame(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(300, 300, DemoWidth, DemoHeight)\n self.setWindowTitle('%s %s' % (DemoName, DemoVersion))\n self.show()\n\n unittest.main()\n\n def onClose(self, event):\n self.Destroy()\n\nclass TestOSMTiles(unittest.TestCase):\n\n # for OSM tiles\n TileWidth = 256\n TileHeight = 256\n\n def testSimple(self):\n \"\"\"Simple tests.\"\"\"\n\n # read all tiles in all rows of all levels\n cache = tiles.Tiles(tiles_dir=TilesDir)\n for level in cache.levels:\n cache.UseLevel(level)\n info = cache.GetInfo(level)\n if info:\n (width_px, height_px, ppd_x, ppd_y) = info\n num_tiles_width = int(width_px / self.TileWidth)\n num_tiles_height = int(height_px / self.TileHeight)\n y = 0\n for x in range(num_tiles_width):\n# for y in range(num_tiles_height):\n bmp = cache.GetTile(x, y)\n msg = \"Can't find tile (%d,%d,%d)!?\" % (level, x, y)\n self.assertFalse(bmp is None, msg)\n else:\n print('level %d not available' % level)\n\n def XtestErrors(self):\n \"\"\"Test possible errors.\"\"\"\n\n # check that using level outside map levels returns None\n cache = tiles.Tiles(tiles_dir=TilesDir)\n level = cache.levels[-1] + 1 # get level # that DOESN'T exist\n info = cache.UseLevel(level)\n self.assertTrue(info is None,\n 'Using bad level (%d) got info=%s' % (level, str(info)))\n\n # check that reading tile outside map returns None\n cache = tiles.Tiles(tiles_dir=TilesDir)\n level = cache.levels[0]\n info = cache.UseLevel(level)\n (width_px, height_px, ppd_x, ppd_y) = info\n num_tiles_width = int(width_px / self.TileWidth)\n num_tiles_height = int(height_px / self.TileHeight)\n self.assertFalse(info is None,\n 'Using good level (%d) got info=%s' % (level, str(info)))\n# OSM returns an empty tile if you request outside map limits\n# bmp = cache.GetTile(num_tiles_width, num_tiles_height)\n# self.assertTrue(bmp is None,\n# 'Using bad coords (%d,%d) got bmp=%s'\n# % (num_tiles_width, num_tiles_height, str(bmp)))\n info = cache.UseLevel(1)\n bmp = cache.GetTile(0, 0)\n bmp.SaveFile('xyzzy00.jpg', wx.BITMAP_TYPE_JPEG)\n bmp = cache.GetTile(0, 1)\n bmp.SaveFile('xyzzy01.jpg', wx.BITMAP_TYPE_JPEG)\n bmp = cache.GetTile(1, 0)\n bmp.SaveFile('xyzzy10.jpg', wx.BITMAP_TYPE_JPEG)\n bmp = cache.GetTile(1, 1)\n bmp.SaveFile('xyzzy11.jpg', wx.BITMAP_TYPE_JPEG)\n\n def XtestConvert(self):\n \"\"\"Test geo2map conversions.\n\n This can't be automatic, it's a 'by hand' thing.\n So it's generally turned off.\n \"\"\"\n\n import time\n\n cache = tiles.Tiles(tiles_dir=TilesDir)\n\n # get tile covering Greenwich observatory\n #xgeo = -0.0005 # Greenwich observatory\n #ygeo = 51.4768534\n xgeo = 7.605916 # Deutsches Eck\n ygeo = 
50.364444\n for level in [0, 1, 2, 3, 4]:\n info = cache.UseLevel(level)\n (xtile, ytile) = cache.Geo2Tile(xgeo, ygeo)\n bmp = cache.GetTile(int(xtile), int(ytile))\n\n pt_px_x = int((xtile - int(xtile)) * cache.tile_size_x)\n pt_px_y = int((ytile - int(ytile)) * cache.tile_size_y)\n\n dc = wx.MemoryDC()\n dc.SelectObject(bmp)\n text = \"o\"\n (tw, th) = dc.GetTextExtent(text)\n dc.DrawText(text, pt_px_x-tw/2, pt_px_y-th/2)\n dc.SelectObject(wx.NullBitmap)\n\n bmp.SaveFile('xyzzy_%d.jpg' % level, wx.BITMAP_TYPE_JPEG)\n # we have to delay for internet response\n time.sleep(30)\n\n\nlog(DemoName)\napp = QApplication(sys.argv)\nex = AppFrame()\nsys.exit(app.exec_())\n\n"
},
{
"alpha_fraction": 0.5064316391944885,
"alphanum_fraction": 0.5390661954879761,
"avg_line_length": 29.420289993286133,
"blob_id": "4019e7c97741de930c54ac4c6b13dce429e89e41",
"content_id": "0053363452470b07fa843f4289c17de77c88cf1f",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4198,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 138,
"path": "/pySlipQt/examples/test_viewrel_text.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest PySlipQt view-relative text.\n\nUsage: test_maprel_image.py [-h] [-t (OSM|GMT)]\n\"\"\"\n\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\n\n\n######\n# Various demo constants\n######\n\nDemoVersion = '1.0'\nDemoName = f'Test view-relative text placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\nMinTileLevel = 0\nInitViewLevel = 2\nInitViewPosition = (133.87, -23.7) # Alice Springs\n\nTextViewData = [( 0, 0, 'cc', {'placement':'cc','fontsize':50,'textcolour':'#ff000020'}),\n ( 0, 10, 'cn', {'placement':'cn','fontsize':45,'textcolour':'#00ff0020'}),\n (-10, 10, 'ne', {'placement':'ne','fontsize':40,'textcolour':'#0000ff20'}),\n (-10, 0, 'ce', {'placement':'ce','fontsize':35,'textcolour':'#ff000080'}),\n (-10, -10, 'se', {'placement':'se','fontsize':30,'textcolour':'#00ff0080'}),\n ( 0, -10, 'cs', {'placement':'cs','fontsize':25,'textcolour':'#0000ff80'}),\n ( 10, -10, 'sw', {'placement':'sw','fontsize':20,'textcolour':'#ff0000ff'}),\n ( 10, 0, 'cw', {'placement':'cw','fontsize':15,'textcolour':'#00ff00ff'}),\n ( 10, 10, 'nw', {'placement':'nw','fontsize':10,'textcolour':'#0000ffff'}),\n ]\n\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestFrame(QMainWindow):\n\n def __init__(self, tile_dir):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # build the GUI\n hbox = QHBoxLayout()\n\n qwidget = QWidget(self)\n qwidget.setLayout(hbox)\n self.setCentralWidget(qwidget)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n hbox.addWidget(self.pyslipqt)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position\n# self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)\n\n # add test test layer\n self.text_layer = self.pyslipqt.AddTextLayer(TextViewData,\n map_rel=False,\n name='<text_view_layer>',\n offset_x=20, offset_y=20,\n fontsize=20, colour='red')\n\n self.show()\n\n################################################################################\n\n# print some usage information\ndef usage(msg=None):\n if msg:\n print(msg+'\\n')\n print(__doc__) # module docstring used\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad 
tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the app\nlog(DemoName)\ntile_dir = 'test_viewrel_text'\napp = QApplication(args)\nex = TestFrame(tile_dir)\nsys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5263556838035583,
"alphanum_fraction": 0.5401971936225891,
"avg_line_length": 30.20709991455078,
"blob_id": "483f3963473e631c818ed44f8185228f58c8a71f",
"content_id": "eb793a82868823556b99a780eded0052c985642f",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5274,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 169,
"path": "/pySlipQt/examples/layer_control.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCustom LayerControl widget.\n\nThis is used to control each type of layer, whether map- or view-relative.\nThe layout is:\n\n + Title text ------------------------------+\n | +--+ |\n | | | Add layer |\n | +--+ |\n | |\n | +--+ +--+ |\n | | | Show | | Select |\n | +--+ +--+ |\n | |\n +------------------------------------------+\n\nConstructor:\n\n lc = LayerControl(parent, title, selectable=False, tooltip=None):\n\nMethods:\n\n lc.set_show(state) set 'show' checkbox to 'state'\n lc.set_select(state) set 'select' checkbox to 'state'\n\nEvents:\n\n .change_add the \"add layer\" checkbox was toggled\n .change_show the \"show\" checkbox was toggled\n .change_select the \"select\" checkbox was toggled\n\"\"\"\n\nimport platform\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QCheckBox, QGroupBox\nfrom PyQt5.QtWidgets import QHBoxLayout, QGridLayout\n\nclass LayerControl(QWidget):\n\n # set platform dependant values\n if platform.system() == 'Linux':\n pass\n elif platform.system() == 'Darwin':\n pass\n elif platform.system() == 'Windows':\n pass\n else:\n raise Exception('Unrecognized platform: %s' % platform.system())\n\n # signals raised by this widget\n change_add = pyqtSignal(bool) # signal raised when user toggles \"add\" checkbox\n change_show = pyqtSignal(bool) # signal raised when user toggles \"show\" checkbox\n change_select = pyqtSignal(bool) # signal raised when user toggles \"select\" checkbox\n\n # some stylesheets\n TextStyle = ('QLabel { background-color: white; '\n 'border:1px solid rgb(128, 128, 128); '\n 'border-radius: 3px; }')\n LabelStyle = ('QLabel { background-color: white; '\n 'border:1px solid rgb(128, 128, 128); '\n 'border-radius: 3px; }')\n# GroupStyle = 'QGroupBox { background-color: rgb(230, 230, 230); }'\n\n# LabelStyle = 'QLabel { background-color : #f0f0f0; border: 1px solid gray; border-radius: 3px; }'\n GroupStyle = (#'QGroupBox { background-color: rgb(230, 230, 230); };'\n 'QGroupBox::title { subcontrol-origin: margin; '\n# ' background-color: rgb(215, 215, 215); '\n ' border-radius: 3px; '\n ' padding: 2 2px; '\n ' color: black; }')\n\n\n def __init__(self, parent, title, selectable=False, tooltip=None):\n \"\"\"Initialise a LayerControl instance.\n\n parent reference to parent object\n title text to ahow in static box outline\n selectable True if 'selectable' checkbox is to be displayed\n tooltip tooltip text, if any\n \"\"\"\n\n QWidget.__init__(self)\n\n # create all widgets\n self.cb_show = QCheckBox('Show')\n self.cb_show.setChecked(True)\n self.cb_select = QCheckBox('Select')\n\n # start layout\n group = QGroupBox(title)\n group.setCheckable(True)\n group.setChecked(False)\n group.setStyleSheet(LayerControl.GroupStyle)\n\n grid = QGridLayout()\n grid.setContentsMargins(2, 2, 2, 2)\n\n grid.addWidget(group, 0, 0)\n\n hbox = QHBoxLayout()\n hbox.setContentsMargins(1, 1, 1, 1)\n group.setLayout(hbox)\n\n hbox.addStretch(1)\n hbox.addWidget(self.cb_show)\n hbox.addWidget(self.cb_select)\n hbox.addStretch(3)\n\n self.setLayout(grid)\n\n # if tooltip given, set it up\n if tooltip:\n self.setToolTip(tooltip)\n\n # connect internal widget events to handlers\n group.toggled.connect(self.changed_add)\n self.cb_show.stateChanged.connect(self.changed_show)\n if selectable:\n self.cb_select.stateChanged.connect(self.changed_select)\n\n self.show()\n\n def changed_add(self, state):\n \"\"\"Main checkbox changed.\n \n state the state of the group check: True == ticked\n\n Emit a signal with 
the state.\n \"\"\"\n\n self.change_add.emit(state)\n\n def changed_show(self, state):\n \"\"\"'Show' checkbox changed.\n \n state the state of the 'Show' check: True == ticked\n\n Emit a signal with the state.\n \"\"\"\n\n self.change_show.emit(state)\n\n def changed_select(self, state):\n \"\"\"'Select' checkbox changed.\n \n state the state of the 'Select' check: True == ticked\n\n Emit a signal with the state.\n \"\"\"\n\n self.change_select.emit(state)\n\n def set_show(self, state):\n \"\"\"Set the 'show' checkbox state.\n\n state new state of the checkbox, True or False\n \"\"\"\n\n self.cb_show.setChecked(state)\n\n def set_select(self, state):\n \"\"\"Set the 'select' checkbox state.\n\n state new state of the checkbox, True or False\n \"\"\"\n\n self.cb_select.setChecked(state)\n"
},
{
"alpha_fraction": 0.5376920104026794,
"alphanum_fraction": 0.5575825572013855,
"avg_line_length": 31.99417495727539,
"blob_id": "b3f2f563dcc8c01a0535a7d1002656af0aa408c8",
"content_id": "d08d53347834db34c6052f804033772dc5ed9d56",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16993,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 515,
"path": "/pySlipQt/examples/test_polyline_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nProgram to test polyline map-relative and view-relative placement.\nSelect what to show and experiment with placement parameters.\n\nUsage: test_polyline_placement.py [-h|--help] [-d] [(-t|--tiles) (GMT|OSM)]\n\"\"\"\n\n\nimport sys\nimport getopt\nimport traceback\n\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,\n QComboBox, QPushButton, QCheckBox, QLabel,\n QGroupBox, QGridLayout, QHBoxLayout,\n QSizePolicy, QColorDialog)\n\n# initialize the logging system\nimport pySlipQt.log as log\nlog = log.Log('pyslipqt.log')\n\nimport pySlipQt.pySlipQt as pySlipQt\nfrom display_text import DisplayText\nfrom layer_control import LayerControl\n\n######\n# Various demo constants\n######\n\n# demo name/version\nDemoVersion = '1.0'\nDemoName = f'Test polyline placement {DemoVersion} (pySlipQt {pySlipQt.__version__})'\n\nDemoHeight = 800\nDemoWidth = 1000\n\n# initial values\nInitialViewLevel = 4\nInitialViewPosition = (145.0, -20.0)\n\n# tiles info\nTileDirectory = 'test_polygon_placement_tiles'\nMinTileLevel = 0\n\n# the number of decimal places in a lon/lat display\nLonLatPrecision = 2\n\n# startup size of the application\nDefaultAppSize = (1000, 700)\n\n# general defaults\nDefaultWidth = 5\nDefaultColour = 'red'\n\n# initial values in map-relative LayerControl\nDefaultPlacement = 'ne'\nDefaultX = 145.0\nDefaultY = -20.0\nDefaultOffsetX = 0\nDefaultOffsetY = 0\n\n# initial values in view-relative LayerControl\nDefaultViewPlacement = 'ne'\nDefaultViewX = 0\nDefaultViewY = 0\nDefaultViewOffsetX = 0\nDefaultViewOffsetY = 0\n\n# polyline map- and view-relative data\nPolyPoints = [(140.0,-17.5), (144.0,-19.0), (142.5,-15.0), (147.5,-15.0),\n (146.0,-19.0), (150.0,-17.5), (150.0,-22.5), (146.0,-21.0),\n (147.5,-25.0), (142.5,-25.0), (144.0,-21.0), (140.0,-22.5)]\n\nPolyViewPoints = [(-100,-50), (-20,-20), (-50,-100), (50,-100),\n (20,-20), (100,-50), (100,50), (20,20),\n (50,100), (-50,100), (-20,20), (-100,50)]\n\n##################################\n# Custom LayerControl widget.\n#\n# Constructor:\n#\n# ppc = LayerControl('test title')\n#\n# Events:\n#\n# .change the contents were changed\n# .remove the image should be removed\n#\n# The '.change' event has attached attributes holding the values from the\n# widget, all checked so they are 'sane'.\n##################################\n\nclass LayerControl(QWidget):\n \"\"\"\n Custom LayerControl widget.\n\n Constructor:\n\n ipc = LayerControl('test title')\n\n Events:\n\n .change the contents were changed\n .remove the image should be removed\n\n The '.change' event has attached attributes holding the values from the\n widget, all checked so they are 'sane'.\n \"\"\"\n\n # various sizes\n ButtonWidth = 40\n ButtonHeight = 40\n ComboboxWidth = 70\n\n # signals raised by this widget\n change = pyqtSignal(str, int, QColor, int, int)\n remove = pyqtSignal()\n\n # some stylesheets\n LabelStyle = 'QLabel { background-color : #f0f0f0; border: 1px solid gray; border-radius: 3px; }'\n GroupStyle = ('QGroupBox { background-color: rgb(230, 230, 230); }'\n 'QGroupBox::title { subcontrol-origin: margin; '\n 'background-color: rgb(215, 215, 215); '\n 'border-radius: 3px; '\n 'padding: 2 2px; '\n 'color: black; }')\n ButtonStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: white;'\n '}')\n ButtonColourStyle = ('QPushButton {'\n 'margin: 
1px;'\n                         'border-color: #0c457e;'\n                         'border-style: outset;'\n                         'border-radius: 3px;'\n                         'border-width: 1px;'\n                         'color: black;'\n                         'background-color: %s;'\n                         '}')\n\n    def __init__(self, title,\n                 placement=DefaultPlacement, width=DefaultWidth,\n                 colour=DefaultColour, offset_x=0, offset_y=0):\n        \"\"\"Initialise a LayerControl instance.\n\n        title      text to show in static box outline around control\n        placement  placement string for object\n        width      width in pixels of the drawn polygon\n        colour     sets the colour of the polygon outline\n        offset_x   X offset of object\n        offset_y   Y offset of object\n        \"\"\"\n\n        super().__init__()\n\n        # save parameters\n        self.v_placement = placement\n        self.v_width = width\n        self.v_colour = colour\n        self.v_offset_x = offset_x\n        self.v_offset_y = offset_y\n\n        # create subwidgets used in this custom widget\n        self.placement = QComboBox()\n        for p in ['none', 'nw', 'cn', 'ne', 'ce', 'se', 'cs', 'sw', 'cw', 'cc']:\n            self.placement.addItem(p)\n        self.placement.setCurrentIndex(9)\n\n        self.line_width = QComboBox()\n        for p in range(21):\n            self.line_width.addItem(str(p))\n        self.line_width.setCurrentIndex(3)\n        self.line_width.setFixedWidth(LayerControl.ComboboxWidth)\n\n        self.line_colour = QPushButton('')\n        self.line_colour.setFixedWidth(LayerControl.ButtonWidth)\n        self.line_colour.setStyleSheet(LayerControl.ButtonStyle)\n        self.line_colour.setToolTip('Click here to change the line colour')\n\n        self.x_offset = QComboBox()\n        for p in range(0, 121, 10):\n            self.x_offset.addItem(str(p - 60))\n        self.x_offset.setCurrentIndex(6)\n        self.x_offset.setFixedWidth(LayerControl.ComboboxWidth)\n\n        self.y_offset = QComboBox()\n        for p in range(0, 121, 10):\n            self.y_offset.addItem(str(p - 60))\n        self.y_offset.setCurrentIndex(6)\n        self.y_offset.setFixedWidth(LayerControl.ComboboxWidth)\n\n        btn_remove = QPushButton('Remove')\n        btn_remove.resize(btn_remove.sizeHint())\n\n        btn_update = QPushButton('Update')\n        btn_update.resize(btn_update.sizeHint())\n\n        # start the layout\n        option_box = QGroupBox(title)\n        option_box.setStyleSheet(LayerControl.GroupStyle)\n\n        box_layout = QGridLayout()\n        box_layout.setContentsMargins(2, 2, 2, 2)\n        box_layout.setHorizontalSpacing(1)\n        box_layout.setColumnStretch(0, 1)\n\n        # start layout\n        row = 1\n        label = QLabel('placement: ')\n        label.setAlignment(Qt.AlignRight)\n        box_layout.addWidget(label, row, 0)\n        box_layout.addWidget(self.placement, row, 1)\n        label = QLabel('width: ')\n        label.setAlignment(Qt.AlignRight)\n        box_layout.addWidget(label, row, 2)\n        box_layout.addWidget(self.line_width, row, 3)\n\n        row += 1\n        label = QLabel('colour: ')\n        label.setAlignment(Qt.AlignRight)\n        box_layout.addWidget(label, row, 0)\n        box_layout.addWidget(self.line_colour, row, 1)\n\n        row += 1\n        label = QLabel('offset X: ')\n        label.setAlignment(Qt.AlignRight)\n        box_layout.addWidget(label, row, 0)\n        box_layout.addWidget(self.x_offset, row, 1)\n        label = QLabel('Y: ')\n        label.setAlignment(Qt.AlignRight)\n        box_layout.addWidget(label, row, 2)\n        box_layout.addWidget(self.y_offset, row, 3)\n\n        row += 1\n        box_layout.addWidget(btn_remove, row, 1)\n        box_layout.addWidget(btn_update, row, 3)\n\n        option_box.setLayout(box_layout)\n\n        layout = QHBoxLayout()\n        layout.setContentsMargins(1, 1, 1, 1)\n        layout.addWidget(option_box)\n\n        self.setLayout(layout)\n\n        # set size hints\n        self.setMinimumSize(300, 200)\n        size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n        self.setSizePolicy(size_policy)\n\n        # connect internal widget events to handlers\n        self.line_colour.clicked.connect(self.changeLineColour)\n        
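# 'Remove' asks the parent to delete the test layer; 'Update' reads the widget values and emits the .change signal\n        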
btn_remove.clicked.connect(self.removeImage)\n btn_update.clicked.connect(self.updateData)\n\n def changeLineColour(self, event):\n color = QColorDialog.getColor()\n if color.isValid():\n colour = color.name()\n # set colour button background\n self.line_colour.setStyleSheet(LayerControl.ButtonColourStyle % colour);\n \n def removeImage(self, event):\n self.remove.emit()\n\n def updateData(self, event):\n # get data from the widgets\n placement = str(self.placement.currentText())\n if placement == 'none':\n placement = None\n line_width = int(self.line_width.currentText())\n line_colour = self.line_colour.palette().color(1)\n x_offset = int(self.x_offset.currentText())\n y_offset = int(self.y_offset.currentText())\n\n print(f'updateData: placement={placement}, line_width={line_width}, x_offset={x_offset}, y_offset={y_offset}')\n \n self.change.emit(placement, line_width, line_colour, x_offset, y_offset)\n\n################################################################################\n# The main application frame\n################################################################################\n\nclass TestPolyPlacement(QMainWindow):\n\n def __init__(self, tile_dir=TileDirectory):\n super().__init__()\n\n self.tile_directory = tile_dir\n self.tile_source = Tiles.Tiles()\n\n # variables for layer IDs\n self.poly_map_layer = None\n self.poly_view_layer = None\n\n # build the GUI\n grid = QGridLayout()\n grid.setColumnStretch(0, 1)\n grid.setContentsMargins(2, 2, 2, 2)\n\n qwidget = QWidget(self)\n qwidget.setLayout(grid)\n self.setCentralWidget(qwidget)\n\n # build the 'controls' part of GUI\n num_rows = self.make_gui(grid)\n\n self.pyslipqt = pySlipQt.PySlipQt(self, tile_src=self.tile_source,\n start_level=MinTileLevel)\n grid.addWidget(self.pyslipqt, 0, 0, num_rows + 1, 1)\n grid.setRowStretch(num_rows, 1)\n\n # set the size of the demo window, etc\n self.setGeometry(100, 100, DemoWidth, DemoHeight)\n self.setWindowTitle(DemoName)\n\n # set initial view position\n# self.map_level.set_text('%d' % InitViewLevel)\n\n # tie events from controls to handlers\n self.map_poly.remove.connect(self.remove_poly_map)\n self.map_poly.change.connect(self.change_poly_map)\n\n self.view_poly.remove.connect(self.remove_poly_view)\n self.view_poly.change.connect(self.change_poly_view)\n\n self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.handle_level_change)\n self.pyslipqt.events.EVT_PYSLIPQT_POSITION.connect(self.handle_position_event)\n\n self.map_level.set_text('0')\n\n self.show()\n\n#####\n# Build the GUI\n#####\n\n def make_gui(self, grid):\n \"\"\"Create application GUI.\"\"\"\n\n \"\"\"Build the controls in the right side of the grid.\"\"\"\n\n # the 'grid_row' variable is row to add into\n grid_row = 0\n\n # put level and position into grid at top right\n self.map_level = DisplayText(title='', label='Level:',\n tooltip=None)\n grid.addWidget(self.map_level, grid_row, 1, 1, 1)\n self.mouse_position = DisplayText(title='',\n label='Lon/Lat:', text_width=100,\n tooltip='Shows the mouse longitude and latitude on the map',)\n grid.addWidget(self.mouse_position, grid_row, 2, 1, 1)\n grid_row += 1\n\n # now add the two point control widgets to right part of grid\n self.map_poly = LayerControl('Map-relative Polygon')\n grid.addWidget(self.map_poly, grid_row, 1, 1, 2)\n grid_row += 1\n\n self.view_poly = LayerControl('View-relative Polygon')\n grid.addWidget(self.view_poly, grid_row, 1, 1, 2)\n grid_row += 1\n\n return grid_row\n\n ######\n # event handlers\n ######\n\n##### map-relative polygon 
layer\n\n def change_poly_map(self, placement, line_width, line_colour, x_off, y_off):\n \"\"\"Display updated polygon.\"\"\"\n\n print(f'change_poly_map: placement={placement}, line_width={line_width}, line_colour={line_colour}, x_off={x_off}, y_off={y_off}')\n\n if self.poly_map_layer:\n self.pyslipqt.DeleteLayer(self.poly_map_layer)\n\n poly_data = [(PolyPoints, {'placement': placement,\n 'width': line_width,\n 'colour': line_colour,\n 'offset_x': x_off,\n 'offset_y': y_off})]\n self.poly_map_layer = self.pyslipqt.AddPolylineLayer(poly_data, map_rel=True,\n visible=True,\n name='<poly_map_layer>')\n\n def remove_poly_map(self):\n \"\"\"Delete the polygon map-relative layer.\"\"\"\n\n if self.poly_map_layer:\n self.pyslipqt.DeleteLayer(self.poly_map_layer)\n self.poly_map_layer = None\n\n##### view-relative polygon layer\n\n def change_poly_view(self, placement, line_width, line_colour, x_off, y_off):\n \"\"\"Display updated view-relative polygon layer.\"\"\"\n\n if self.poly_view_layer:\n self.pyslipqt.DeleteLayer(self.poly_view_layer)\n\n # create a new polygon layer\n poly_data = [(PolyViewPoints, {'placement': placement,\n 'width': line_width,\n 'colour': line_colour,\n 'offset_x': x_off,\n 'offset_y': y_off})]\n self.poly_view_layer = self.pyslipqt.AddPolylineLayer(poly_data,\n map_rel=False,\n visible=True,\n name='<poly_view_layer>')\n\n def remove_poly_view(self):\n \"\"\"Delete the polygon view-relative layer.\"\"\"\n\n if self.poly_view_layer:\n self.pyslipqt.DeleteLayer(self.poly_view_layer)\n self.poly_view_layer = None\n\n def final_setup(self, level, position):\n \"\"\"Perform final setup.\n\n level zoom level required\n position position to be in centre of view\n \"\"\"\n\n self.pyslipqt.GotoLevelAndPosition(level, position)\n\n ######\n # Exception handlers\n ######\n\n def handle_position_event(self, event):\n \"\"\"Handle a pySlipQt POSITION event.\"\"\"\n\n posn_str = ''\n if event.mposn:\n (lon, lat) = event.mposn\n posn_str = ('%.*f / %.*f' % (LonLatPrecision, lon,\n LonLatPrecision, lat))\n\n self.mouse_position.set_text(posn_str)\n\n def handle_level_change(self, event):\n \"\"\"Handle a pySlipQt LEVEL event.\"\"\"\n\n self.map_level.set_text('%d' % event.level)\n\n###############################################################################\n\n# our own handler for uncaught exceptions\ndef excepthook(type, value, tb):\n msg = '\\n' + '=' * 80\n msg += '\\nUncaught exception:\\n'\n msg += ''.join(traceback.format_exception(type, value, tb))\n msg += '=' * 80 + '\\n'\n print(msg)\n log(msg)\n sys.exit(1)\n\n# plug our handler into the python system\nsys.excepthook = excepthook\n\ndef usage(msg=None):\n if msg:\n print(('*'*80 + '\\n%s\\n' + '*'*80) % msg)\n print(__doc__)\n\n# decide which tiles to use, default is GMT\nargv = sys.argv[1:]\n\ntry:\n (opts, args) = getopt.getopt(argv, 'dht:', ['debug', 'help', 'tiles='])\nexcept getopt.error:\n usage()\n sys.exit(1)\n\ntile_source = 'GMT'\ndebug = False\nfor (opt, param) in opts:\n if opt in ['-h', '--help']:\n usage()\n sys.exit(0)\n elif opt in ['-d', '--debug']:\n debug = True\n elif opt in ('-t', '--tiles'):\n tile_source = param\ntile_source = tile_source.lower()\n\n# set up the appropriate tile source\nif tile_source == 'gmt':\n import pySlipQt.gmt_local as Tiles\nelif tile_source == 'osm':\n import pySlipQt.open_street_map as Tiles\nelse:\n usage('Bad tile source: %s' % tile_source)\n sys.exit(3)\n\n# start the PyQt5 app\nlog(DemoName)\ntile_dir = 'test_polygon_placement'\napp = QApplication(args)\nex = 
TestPolyPlacement(tile_dir)\nsys.exit(app.exec_())\n\n"
},
{
"alpha_fraction": 0.6617143154144287,
"alphanum_fraction": 0.6617143154144287,
"avg_line_length": 46.297298431396484,
"blob_id": "749db6d64ae82a1c68e5a3f1a0952bc02d98dcdf",
"content_id": "ca105bf07a2626ffcb716ebe2d70952d947e7f47",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1750,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 37,
"path": "/pySlipQt/examples/README.rst",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "Programs in this directory\n==========================\n\nThe programs here are used to test pySlipQt and to demonstrate some of the\ncapabilities of the widget:\n\n======================= =======\nProgram Details\n======================= =======\npyslipqt_demo.py demonstrates some capabilities of pySlipQt\ntest_image_placement.py allows playing with image placement\ntest_point_placement.py allows playing with point placement\ntest_poly_placement.py allows playing with polygon placement\ntest_text_placement.py allows playing with text placement\ntest_gotoposition.py test the \"goto position\" code\ntest_assumptions.py test some assumptions made in pySlip\ntest_gmt_local_tiles.py simplistic test of GMT tiles\ntest_osm_tiles.py simplistic test of OSM tiles\ntest_maprel_image.py simple test of map-relative image placement\ntest_maprel_poly.py simple test of map-relative polygon placement\ntest_maprel_text.py simple test of map-relative text placement\ntest_multi_widget.py simple multi-widget test - look for interaction\ntest_viewrel_image.py simple test of view-relative image placement\ntest_viewrel_point.py simple test of view-relative point placement\ntest_viewrel_poly.py simple test of view-relative polygon placement\ntest_viewrel_text.py simple test of view-relative text placement\n======================= =======\n\nOther things here:\n\n======================= =======\nDirectory What it is\n======================= =======\ngmt_tiles.tar.gz pre-generated GMT tiles\ngraphics/ directory of graphics files used by the programs here\nmake_gmt_tiles.py a very old program used to generate the GMT tiles\n======================= =======\n"
},
{
"alpha_fraction": 0.5576527714729309,
"alphanum_fraction": 0.5666230320930481,
"avg_line_length": 28.888267517089844,
"blob_id": "b1b27eaf970708c3b4306ebfe5b67733a8fcebac",
"content_id": "718a524800e7e026902705f156f893d343e08de0",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5351,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 179,
"path": "/pySlipQt/gmt_local.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA tile source that serves pre-generated GMT tiles from the local filesystem.\n\"\"\"\n\nimport os\nimport pickle\nimport pySlipQt.tiles as tiles\nimport pySlipQt.log as log\n\ntry:\n log = log.Log('pyslipqt.log')\nexcept AttributeError:\n # means log already set up\n pass\n\n\n###############################################################################\n# Change values below here to configure the GMT local tile source.\n###############################################################################\n\n# attributes used for tileset introspection\n# names must be unique amongst tile modules\nTilesetName = 'GMT local tiles'\nTilesetShortName = 'GMT tiles'\nTilesetVersion = '1.0'\n\n# the pool of tile servers used\nTileServers = None\n\n# the path on the server to a tile\n# {} params are Z=level, X=column, Y=row, origin at map top-left\nTileURLPath = None\n\n# tile levels to be used\nTileLevels = range(5)\n\n# maximum pending requests for each tile server\n# unused with local tiles\nMaxServerRequests = None\n\n# set maximum number of in-memory tiles for each level\nMaxLRU = 10000\n\n# path to the INFO file for GMT tiles\nTileInfoFilename = \"tile.info\"\n\n# default path to the tiles directory\nTilesDir = os.path.abspath(os.path.expanduser('~/gmt_local_tiles'))\n\n################################################################################\n# Class for GMT local tiles. Builds on tiles.BaseTiles.\n################################################################################\n\nclass Tiles(tiles.BaseTiles):\n \"\"\"An object to source GMT tiles for the widget.\"\"\"\n\n # size of these tiles\n TileWidth = 256\n TileHeight = 256\n\n def __init__(self, tiles_dir=TilesDir):\n \"\"\"Override the base class for GMT tiles.\n\n Basically, just fill in the BaseTiles class with GMT values from above\n and provide the Geo2Tile() and Tile2Geo() methods.\n \"\"\"\n\n super().__init__(TileLevels,\n Tiles.TileWidth, Tiles.TileHeight,\n tiles_dir=tiles_dir, max_lru=MaxLRU)\n\n if not os.path.isfile(os.path.join(tiles_dir, TileInfoFilename)):\n msg = f\"The GMT tiles directory '{tiles_dir}' doesn't appear to be setup?\"\n log.critical(msg)\n raise RuntimeError(msg)\n \n# TODO: implement map wrap-around\n# # we *can* wrap tiles in X direction, but not Y\n# self.wrap_x = False\n# self.wrap_y = False\n\n # override the tiles.py extent here, the GMT tileset is different\n self.extent = (-65.0, 295.0, -66.66, 66.66)\n self.deg_span_x = 295.0 + 65.0\n self.deg_span_y = 66.66 + 66.66\n\n self.levels = TileLevels\n\n # get tile information into instance\n self.level = min(TileLevels)\n (self.num_tiles_x, self.num_tiles_y,\n self.ppd_x, self.ppd_y) = self.GetInfo(self.level)\n\n def GetInfo(self, level):\n \"\"\"Get tile info for a particular level.\n Override the tiles.py method.\n\n level the level to get tile info for\n\n Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y) or None if 'levels'\n doesn't exist.\n \"\"\"\n\n # is required level available?\n if level not in self.levels:\n return None\n\n # see if we can open the tile info file.\n info_file = os.path.join(self.tiles_dir, '%d' % level, TileInfoFilename)\n try:\n with open(info_file, 'rb') as fd:\n info = pickle.load(fd)\n except IOError:\n info = None\n\n return info\n\n def Geo2Tile(self, geo):\n \"\"\"Convert geo to tile fractional coordinates for level in use.\n\n geo a tuple of geo coordinates (xgeo, ygeo)\n\n Returns (xtile, ytile).\n\n This is an easy transformation as geo coordinates are Cartesian\n for this tileset.\n 
\"\"\"\n\n # unpack the 'geo' tuple\n (xgeo, ygeo) = geo\n\n # get extent information\n (min_xgeo, max_xgeo, min_ygeo, max_ygeo) = self.extent\n\n # get number of degress from top-left corner\n x = xgeo - min_xgeo\n y = max_ygeo - ygeo\n\n tiles_x = x * self.ppd_x / self.tile_size_x\n tiles_y = y * self.ppd_y / self.tile_size_y\n\n return (tiles_x, tiles_y)\n\n def Tile2Geo(self, tile):\n \"\"\"Convert tile fractional coordinates to geo for level in use.\n\n tile a tuple (xtile,ytile) of tile fractional coordinates\n\n Note that we assume the point *is* on the map!\n\n This is an easy transformation as geo coordinates are Cartesian for\n this tileset.\n \"\"\"\n\n (xtile, ytile) = tile\n\n # get extent information\n (min_xgeo, max_xgeo, min_ygeo, max_ygeo) = self.extent\n\n # compute tile size in degrees\n tdeg_x = self.tile_size_x / self.ppd_x\n tdeg_y = self.tile_size_y / self.ppd_y\n\n # calculate the geo coordinates\n xgeo = xtile*tdeg_x + min_xgeo\n ygeo = max_ygeo - ytile*tdeg_y\n\n# if self.wrap_x:\n# while xgeo < min_xgeo:\n# xgeo += self.deg_span_x\n# while xgeo > max_xgeo:\n# xgeo -= self.deg_span_x\n# if self.wrap_x:\n# while ygeo > max_ygeo:\n# ygeo -= self.deg_span_y\n# while ygeo < min_ygeo:\n# ygeo += self.deg_span_y\n\n return (xgeo, ygeo)\n\n"
},
{
"alpha_fraction": 0.6068660020828247,
"alphanum_fraction": 0.6068660020828247,
"avg_line_length": 17.8125,
"blob_id": "ea80de34ecbaba48cf4940bf395b0f65204c7dd7",
"content_id": "d5a088f3c9c8614ee5601fb874160d35d01ae919",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 903,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 48,
"path": "/pySlipQt/examples/utils.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSmall utility functions.\n\"\"\"\n\n\nimport traceback\n# if we don't have log.py, don't crash\ntry:\n# from . import log\n import log\n log = log.Log('pyslipqt.log')\nexcept AttributeError:\n # means log already set up\n pass\nexcept ImportError as e:\n # if we don't have log.py, don't crash\n # fake all log(), log.debug(), ... calls\n def logit(*args, **kwargs):\n pass\n log = logit\n log.debug = logit\n log.info = logit\n log.warn = logit\n log.error = logit\n log.critical = logit\n\n\ndef str_trace(msg=None):\n \"\"\"Get a traceback string.\n\n This is useful if we need at any point in code to find out how\n we got to that point.\n \"\"\"\n\n result = []\n\n if msg:\n result.append(msg+'\\n')\n\n result.extend(traceback.format_stack())\n\n return ''.join(result)\n\n\ndef log_trace(msg=None):\n \"\"\"Log a traceback string.\"\"\"\n\n log.debug(str_trace(msg))\n"
},
{
"alpha_fraction": 0.5851179361343384,
"alphanum_fraction": 0.5975525379180908,
"avg_line_length": 36.66914367675781,
"blob_id": "9d356fc8a3180a44bae75e078a01393704f29889",
"content_id": "6a9186b0b23a63f14ad4813beadd4014b0870b3e",
"detected_licenses": [
"CC-BY-SA-3.0",
"CC-BY-3.0",
"CC-BY-SA-4.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10133,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 269,
"path": "/pySlipQt/examples/text_placement.py",
"repo_name": "MAPSWorks/pySlipQt",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe custom control for test_text_placement.py program.\n\"\"\"\n\nimport os\nimport sys\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import (QWidget, QGridLayout, QHBoxLayout, QGroupBox,\n QPushButton, QLabel, QComboBox, QLineEdit,\n QSizePolicy, QFileDialog, QColorDialog)\nfrom PyQt5.QtGui import QColor\n\n\n##################################\n# Custom TextPlacementControl widget.\n#\n# Constructor:\n#\n# tpc = TextPlacementControl('test title')\n#\n# Events:\n#\n# .change the contents were changed\n# .remove the image should be removed\n#\n# The '.change' event has attached attributes holding the values from the\n# widget, all checked so they are 'sane'.\n##################################\n\nclass TextPlacementControl(QWidget):\n \"\"\"\n Custom TextPlacementControl widget.\n\n Constructor:\n\n ipc = TextPlacementControl('test title')\n\n Events:\n\n .change the contents were changed\n .remove the image should be removed\n\n The '.change' event has attached attributes holding the values from the\n widget, all checked so they are 'sane'.\n \"\"\"\n\n # various sizes\n LineEditWidth = 40\n ButtonWidth = 40\n ComboboxWidth = 70\n TestText = 'Test text'\n\n # initial colour values\n DefaultPointColour = 'red'\n DefaultTextColour = 'blue'\n\n # signals raised by this widget\n change = pyqtSignal(str, QColor, str, int, QColor, int, int, int, int)\n remove = pyqtSignal()\n\n # some stylesheets\n LabelStyle = 'QLabel { background-color : #f0f0f0; border: 1px solid gray; border-radius: 3px; }'\n GroupStyle = ('QGroupBox { background-color: rgb(230, 230, 230); }'\n 'QGroupBox::title { subcontrol-origin: margin; '\n ' background-color: rgb(215, 215, 215); '\n ' border-radius: 3px; '\n ' padding: 2 2px; '\n ' color: black; }')\n ButtonColourStyle = ('QPushButton {'\n 'margin: 1px;'\n 'border-color: #0c457e;'\n 'border-style: outset;'\n 'border-radius: 3px;'\n 'border-width: 1px;'\n 'color: black;'\n 'background-color: %s;'\n '}')\n\n def __init__(self, title):\n \"\"\"Initialise a TextPlacementControl instance.\n\n title title to give the custom widget\n \"\"\"\n\n super().__init__()\n\n # create subwidgets used in this custom widget\n self.text = QLineEdit()\n self.text.setText(TextPlacementControl.TestText)\n self.text.setToolTip('You can edit this text')\n\n self.text_colour = QPushButton('')\n self.text_colour.setFixedWidth(TextPlacementControl.ButtonWidth)\n self.text_colour.setToolTip('Click here to change the text colour')\n\n self.point_radius = QComboBox()\n for p in range(21):\n self.point_radius.addItem(str(p))\n self.point_radius.setCurrentIndex(3)\n self.point_radius.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.point_radius.setToolTip('Click here to change the point radius')\n\n self.point_colour = QPushButton('')\n self.point_colour.setFixedWidth(TextPlacementControl.ButtonWidth)\n self.point_colour.setToolTip('Click here to change the point colour')\n\n self.placement = QComboBox()\n for p in ['none', 'nw', 'cn', 'ne', 'ce', 'se', 'cs', 'sw', 'cw', 'cc']:\n self.placement.addItem(p)\n self.placement.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.placement.setToolTip('Click here to change the placement')\n\n self.x_posn = QComboBox()\n for p in range(0, 121, 10):\n self.x_posn.addItem(str(p - 60))\n self.x_posn.setCurrentIndex(6)\n self.x_posn.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.x_posn.setToolTip('Click here to change the X position')\n\n self.y_posn = QComboBox()\n for p in range(0, 121, 10):\n 
self.y_posn.addItem(str(p - 60))\n self.y_posn.setCurrentIndex(6)\n self.y_posn.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.y_posn.setToolTip('Click here to change the Y position')\n\n self.x_offset = QComboBox()\n for p in range(0, 121, 10):\n self.x_offset.addItem(str(p - 60))\n self.x_offset.setCurrentIndex(6)\n self.x_offset.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.x_offset.setToolTip('Click here to change the X offset')\n\n self.y_offset = QComboBox()\n for p in range(0, 121, 10):\n self.y_offset.addItem(str(p - 60))\n self.y_offset.setCurrentIndex(6)\n self.y_offset.setFixedWidth(TextPlacementControl.ComboboxWidth)\n self.y_offset.setToolTip('Click here to change the Y offset')\n\n btn_remove = QPushButton('Remove')\n btn_remove.resize(btn_remove.sizeHint())\n btn_remove.setToolTip('Click here to remove the test layer')\n\n btn_update = QPushButton('Update')\n btn_update.resize(btn_update.sizeHint())\n btn_update.setToolTip('Click here to update the test layer values and show it')\n\n # start the layout\n option_box = QGroupBox(title)\n option_box.setStyleSheet(TextPlacementControl.GroupStyle)\n\n box_layout = QGridLayout()\n box_layout.setContentsMargins(5, 5, 5, 5)\n box_layout.setHorizontalSpacing(1)\n box_layout.setColumnStretch(0, 1)\n\n # start layout\n row = 1\n label = QLabel('text: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.text, row, 1)\n label = QLabel('colour: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.text_colour, row, 3)\n\n row += 1\n label = QLabel('point radius: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.point_radius, row, 1)\n label = QLabel('colour: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.point_colour, row, 3)\n\n row += 1\n label = QLabel('placement: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.placement, row, 1)\n\n row += 1\n label = QLabel('X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.x_posn, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.y_posn, row, 3)\n\n row += 1\n label = QLabel('offset X: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 0)\n box_layout.addWidget(self.x_offset, row, 1)\n label = QLabel('Y: ')\n label.setAlignment(Qt.AlignRight)\n box_layout.addWidget(label, row, 2)\n box_layout.addWidget(self.y_offset, row, 3)\n\n row += 1\n box_layout.addWidget(btn_remove, row, 1)\n box_layout.addWidget(btn_update, row, 3)\n\n option_box.setLayout(box_layout)\n\n layout = QHBoxLayout()\n layout.setContentsMargins(1, 1, 1, 1)\n layout.addWidget(option_box)\n\n self.setLayout(layout)\n\n # set size hints\n self.setMinimumSize(300, 200)\n size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setSizePolicy(size_policy)\n\n # connect internal widget events to handlers\n self.point_colour.clicked.connect(self.changePointColour)\n self.text_colour.clicked.connect(self.changeTextColour)\n btn_remove.clicked.connect(self.removeLayer)\n btn_update.clicked.connect(self.updateData)\n\n # finally, put default colours into the colour selector buttons\n self.text_colour.setStyleSheet(TextPlacementControl.ButtonColourStyle\n % 
TextPlacementControl.DefaultTextColour)\n self.point_colour.setStyleSheet(TextPlacementControl.ButtonColourStyle\n % TextPlacementControl.DefaultPointColour)\n\n def changePointColour(self, event):\n color = QColorDialog.getColor()\n if color.isValid():\n colour = color.name()\n # set colour button background\n self.point_colour.setStyleSheet(TextPlacementControl.ButtonColourStyle\n % colour)\n \n def changeTextColour(self, event):\n color = QColorDialog.getColor()\n if color.isValid():\n colour = color.name()\n # set colour button background\n self.text_colour.setStyleSheet(TextPlacementControl.ButtonColourStyle\n % colour)\n \n def removeLayer(self, event):\n self.remove.emit()\n\n def updateData(self, event):\n # get data from the widgets\n text = self.text.text()\n textcolour = self.text_colour.palette().color(1)\n placement = str(self.placement.currentText())\n if placement == 'none':\n placement = None\n radius = int(self.point_radius.currentText())\n colour = self.point_colour.palette().color(1)\n x_posn = int(self.x_posn.currentText())\n y_posn = int(self.y_posn.currentText())\n x_offset = int(self.x_offset.currentText())\n y_offset = int(self.y_offset.currentText())\n\n print(f'updateData: text={text}, placement={placement}, radius={radius}, x_posn={x_posn}, y_posn={y_posn}, x_offset={x_offset}, y_offset={y_offset}')\n \n self.change.emit(text, textcolour, placement, radius, colour, x_posn, y_posn, x_offset, y_offset)\n"
}
] | 42 |
George19395/PET-Exercises | https://github.com/George19395/PET-Exercises | b884e5d2285208a1ce40fd232249dabb7246a7f4 | e8697b3ec9db2fb3c7e377f195c77f1556b92403 | 2e455d7786b2f7fa1eb7f6de37d5325d9998dadd | refs/heads/master | 2021-03-26T04:01:18.255942 | 2020-03-27T11:24:55 | 2020-03-27T11:24:55 | 247,671,155 | 1 | 0 | null | 2020-03-16T10:17:34 | 2020-01-13T08:52:56 | 2020-02-03T00:08:13 | null | [
{
"alpha_fraction": 0.5307878851890564,
"alphanum_fraction": 0.5452070832252502,
"avg_line_length": 36.86351013183594,
"blob_id": "9f6ab2a6858c080bd6dc0b7b331fa02473efef1b",
"content_id": "d47f42b35e19d16a313f8b113a8ed004ecee5c37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13593,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 359,
"path": "/Lab01Basics/Lab01Code.py",
"repo_name": "George19395/PET-Exercises",
"src_encoding": "UTF-8",
"text": "#####################################################\n# GA17 Privacy Enhancing Technologies -- Lab 01\n#\n# Basics of Petlib, encryption, signatures and\n# an end-to-end encryption system.\n#\n# Run the tests through:\n# $ py.test-2.7 -v Lab01Tests.py\n\n###########################\n# Group Members: TODO\n###########################\n\n\n#####################################################\n# TASK 1 -- Ensure petlib is installed on the System\n# and also pytest. Ensure the Lab Code can\n# be imported.\n\nimport petlib\n\n#####################################################\n# TASK 2 -- Symmetric encryption using AES-GCM\n# (Galois Counter Mode)\n#\n# Implement a encryption and decryption function\n# that simply performs AES_GCM symmetric encryption\n# and decryption using the functions in petlib.cipher.\n\nfrom os import urandom\nfrom petlib.cipher import Cipher\n\ndef encrypt_message(K, message):\n \"\"\" Encrypt a message under a key K \"\"\"\n\n plaintext = message.encode(\"utf8\")\n aes= Cipher.aes_128_gcm() ## intialise aes gcm cipher\n iv = urandom(16) ## Generate random IV of length 16\n\n\n ciphertext,tag = aes.quick_gcm_enc(K,iv,plaintext) ## produce cipher and tag using the encryption function provided\n ## YOUR CODE HERE\n\n return (iv, ciphertext, tag)\n\ndef decrypt_message(K, iv, ciphertext, tag):\n \"\"\" Decrypt a cipher text under a key K\n\n In case the decryption fails, throw an exception.\n \"\"\"\n ## YOUR CODE HERE\n aes= Cipher.aes_128_gcm() #Intialise Advanced encryption standart\n plain = aes.quick_gcm_dec(K,iv,ciphertext,tag) # produce the plaintext using decryption function given using the arguments of the function\n\n return plain.encode(\"utf8\")\n\n#####################################################\n# TASK 3 -- Understand Elliptic Curve Arithmetic\n# - Test if a point is on a curve.\n# - Implement Point addition.\n# - Implement Point doubling.\n# - Implement Scalar multiplication (double & add).\n# - Implement Scalar multiplication (Montgomery ladder).\n#\n# MUST NOT USE ANY OF THE petlib.ec FUNCIONS. Only petlib.bn!\n\nfrom petlib.bn import Bn\n\n\ndef is_point_on_curve(a, b, p, x, y):\n \"\"\"\n Check that a point (x, y) is on the curve defined by a,b and prime p.\n Reminder: an Elliptic Curve on a prime field p is defined as:\n\n y^2 = x^3 + ax + b (mod p)\n (Weierstrass form)\n\n Return True if point (x,y) is on curve, otherwise False.\n By convention a (None, None) point represents \"infinity\".\n \"\"\"\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) \\\n or (x == None and y == None)\n\n if x is None and y is None:\n return True\n\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve\n\n\ndef point_add(a, b, p, x0, y0, x1, y1):\n \"\"\"Define the \"addition\" operation for 2 EC Points.\n\n Reminder: (xr, yr) = (xq, yq) + (xp, yp)\n is defined as:\n lam = (yq - yp) * (xq - xp)^-1 (mod p)\n xr = lam^2 - xp - xq (mod p)\n yr = lam * (xp - xr) - yp (mod p)\n\n Return the point resulting from the addition. 
Raises an Exception if the points are equal.\n    \"\"\"\n\n    # ADD YOUR CODE BELOW\n    xr, yr = None, None\n\n    if not (is_point_on_curve(a, b, p, x0, y0) and is_point_on_curve(a, b, p, x1, y1)):  ## if either point is not on the curve, raise an exception\n        raise Exception(\"EC Points are not on curve\")\n\n    if x0 is None and y0 is None:  ## if one point is infinity then return the other point\n        xr, yr = x1, y1\n    elif x1 is None and y1 is None:\n        xr, yr = x0, y0\n    elif x0 == x1 and y0 == y1:  ## if the points are equal then raise an exception, as this case is handled by the next function\n        raise Exception(\"EC Points must not be equal\")\n    elif x0 == x1 and y0 == y1.mod_mul(-1, p):  ## if point 2 is the inverse of point 1 then return infinity\n        xr, yr = None, None\n    else:\n\n        lam = ((y1-y0) * (x1-x0).mod_inverse(p)) % p  ## otherwise we calculate the coordinates of the new point\n        xr = ((lam*lam) - x0 - x1) % p\n        yr = (lam * (x0 - xr) - y0) % p\n\n    return xr, yr\n\ndef point_double(a, b, p, x, y):\n    \"\"\"Define \"doubling\" an EC point.\n    A special case, when a point needs to be added to itself.\n\n    Reminder:\n        lam = (3 * xp ^ 2 + a) * (2 * yp) ^ -1 (mod p)\n        xr  = lam ^ 2 - 2 * xp\n        yr  = lam * (xp - xr) - yp (mod p)\n\n    Returns the point representing the double of the input (x, y).\n    \"\"\"\n    xr, yr = None, None\n    # ADD YOUR CODE BELOW\n    if x is None and y is None:\n        xr, yr = None, None\n    else:\n        lam = ((3*(x*x) + a) * (2*y).mod_inverse(p)) % p  ## the special case of point doubling, on which we raised an exception in the previous\n        xr = ((lam**2) - 2*x) % p                         ## function, is handled here with a slight variation of the algorithm\n        yr = ((lam * (x - xr)) - y) % p\n\n    return xr, yr\n\ndef point_scalar_multiplication_double_and_add(a, b, p, x, y, scalar):\n    \"\"\"\n    Implement Point multiplication with a scalar:\n        r * (x, y) = (x, y) + ... + (x, y)    (r times)\n\n    Reminder of Double and Multiply algorithm: r * P\n        Q = infinity\n        for i = 0 to num_bits(P)-1\n            if bit i of r == 1 then\n                Q = Q + P\n            P = 2 * P\n        return Q\n\n    \"\"\"\n    Q = (None, None)\n    P = (x, y)\n    for i in range(scalar.num_bits()):  ## scalar multiplication uses our two previous functions,\n#        pass  ## ADD YOUR CODE HERE   ## adding a point to itself n times\n        if scalar.is_bit_set(i) == 1:\n            Q = point_add(a, b, p, Q[0], Q[1], P[0], P[1])  ## if bit i is set to 1 we add P to Q\n        P = point_double(a, b, p, P[0], P[1])  ## then we just double point P\n\n    return Q\n\ndef point_scalar_multiplication_montgomerry_ladder(a, b, p, x, y, scalar):\n    \"\"\"\n    Implement Point multiplication with a scalar:\n        r * (x, y) = (x, y) + ... 
+ (x, y) (r times)\n\n Reminder of Double and Multiply algorithm: r * P\n R0 = infinity\n R1 = P\n for i in num_bits(P)-1 to zero:\n if di = 0:\n R1 = R0 + R1\n R0 = 2R0\n else\n R0 = R0 + R1\n R1 = 2 R1\n return R0\n\n \"\"\"\n R0 = (None, None)\n R1 = (x, y)\n\n for i in reversed(range(0,scalar.num_bits())): ### Montgomery ladder variant of scalar multiplication, implementing the algorithm provided above\n if scalar.is_bit_set(i) == 0:\n R1 = point_add(a,b,p,R0[0],R0[1],R1[0],R1[1])\n R0 = point_double(a,b,p,R0[0],R0[1])\n else:\n R0 = point_add(a,b,p,R0[0],R0[1],R1[0],R1[1])\n R1 = point_double(a,b,p,R1[0],R1[1])\n return R0\n\n\n#####################################################\n# TASK 4 -- Standard ECDSA signatures\n#\n# - Implement a key / param generation\n# - Implement ECDSA signature using petlib.ecdsa\n# - Implement ECDSA signature verification\n# using petlib.ecdsa\n\nfrom hashlib import sha256\nfrom petlib.ec import EcGroup, EcPt\nfrom petlib.ecdsa import do_ecdsa_sign, do_ecdsa_verify\nimport petlib.ec\n\ndef ecdsa_key_gen():\n \"\"\" Returns an EC group, a random private key for signing\n and the corresponding public key for verification\"\"\"\n G = EcGroup()\n priv_sign = G.order().random()\n pub_verify = priv_sign * G.generator()\n return (G, priv_sign, pub_verify)\n\n\ndef ecdsa_sign(G, priv_sign, message):\n \"\"\" Sign the SHA256 digest of the message using ECDSA and return a signature \"\"\"\n plaintext = message.encode(\"utf8\")\n\n ## YOUR CODE HERE\n digest = sha256(plaintext).digest() ## implementing the signature scheme:\n ## hash the message and get the digest (the hash value as a binary string)\n sig = do_ecdsa_sign(G,priv_sign,digest) ## sign the message\n return sig\n\ndef ecdsa_verify(G, pub_verify, message, sig):\n \"\"\" Verify the ECDSA signature on the message \"\"\"\n plaintext = message.encode(\"utf8\")\n\n ## YOUR CODE HERE\n digest = sha256(plaintext).digest() # produce the SHA256 digest as a binary string\n\n res = do_ecdsa_verify(G,pub_verify,sig,digest) # verify by applying the verification function\n return res\n\n#####################################################\n# TASK 5 -- Diffie-Hellman Key Exchange and Derivation\n# - use Bob's public key to derive a shared key.\n# - Use Bob's public key to encrypt a message.\n# - Use Bob's private key to decrypt the message.\n#\n# NOTE:\n\ndef dh_get_key():\n \"\"\" Generate a DH key pair \"\"\"\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)\n\n\ndef dh_encrypt(pub, message, aliceSig = None):\n \"\"\" Assume you know the public key of someone else (Bob),\n and wish to Encrypt a message for them.\n - Generate a fresh DH key for this message.\n - Derive a fresh shared key.\n - Use the shared key to AES_GCM encrypt the message.\n - Optionally: sign the message with Alice's key.\n \"\"\"\n\n ## YOUR CODE HERE\n gene, priv_key, pub_key = dh_get_key() # Alice generates a fresh key pair\n\n K = pub.pt_mul(priv_key) ## produce the shared key using Bob's public key and Alice's private key\n # (multiply using the point-multiplication function from the petlib library)\n\n len_str_k = str(K)[:16] # first 16 bytes of the key string, since the encryption scheme requires a 16-byte key\n\n iv,cipher,tag = encrypt_message(len_str_k,message) ## use function from exercise 2 to encrypt\n ciphertext = pub_key, iv, cipher, tag\n return ciphertext ## return ciphertext as a tuple of 4 including Alice's public key\n\ndef dh_decrypt(priv, c, aliceVer = None):\n \"\"\" Decrypt a received 
message encrypted using your public key,\n of which the private key is provided. Optionally verify\n the message came from Alice using her verification key.\"\"\"\n\n ## YOUR CODE HERE\n K = c[0].pt_mul(priv) ## Bob uses Alice's public key and his own private key to produce the shared key\n len_str_k = str(K)[:16] ## get the first 16 bytes of the key string, as the decryption function requires\n plaintext = decrypt_message(len_str_k,c[1],c[2],c[3]) ## produce the plaintext using the function from task 2\n return plaintext\n\n\n## NOTE: populate those (or more) tests\n# ensure they run using the \"py.test filename\" command.\n# What is your test coverage? Where is it missing cases?\n# $ py.test-2.7 --cov-report html --cov Lab01Code Lab01Code.py\nG,Pr,P = dh_get_key()\ndef test_encrypt(): ## used the tests from task 2 as a guideline to create tests for task 5, which check if the\n message = u\"Hello World\" ## implementation of task 5 is correct\n\n pub_key,iv,cipher,tag = dh_encrypt(P,message,None)\n assert len(iv) == 16\n assert len(cipher) == len(message)\n assert len(tag) == 16\n\ndef test_decrypt():\n message = u\"Hello World\"\n ciphertext = dh_encrypt(P,message)\n assert dh_decrypt(Pr,ciphertext) == message\n\ndef test_fails():\n from pytest import raises\n\n from os import urandom\n message = u\"Hello World!\"\n ciphertext = dh_encrypt(P,message)\n Pub,iv,cipher,tag = ciphertext\n\n cipher1 = urandom(len(cipher))\n ciphertext1 = Pub,iv,cipher1,tag\n with raises(Exception) as excinfo:\n dh_decrypt(Pr, ciphertext1)\n assert 'decryption failed' in str(excinfo.value)\n\n tag1 = urandom(len(tag))\n ciphertext2 = Pub,iv,cipher,tag1\n with raises(Exception) as excinfo:\n dh_decrypt(Pr,ciphertext2)\n assert 'decryption failed' in str(excinfo.value)\n\n iv1 = urandom(len(iv))\n ciphertext3 = Pub,iv1,cipher,tag\n with raises(Exception) as excinfo:\n dh_decrypt(Pr,ciphertext3)\n assert 'decryption failed' in str(excinfo.value)\n\n\n#####################################################\n# TASK 6 -- Time EC scalar multiplication\n# Open Task.\n#\n# - Time your implementations of scalar multiplication\n# (use time.clock() for measurements) for different\n# scalar sizes\n# - Print reports on timing dependencies on secrets.\n# - Fix one implementation to not leak information.\n\ndef time_scalar_mul():\n pass\n"
},
{
"alpha_fraction": 0.7828162312507629,
"alphanum_fraction": 0.7923627495765686,
"avg_line_length": 82.80000305175781,
"blob_id": "c2a1ad4c4e18e1a435fa18dff1a17c5621ba7643",
"content_id": "641c5eeb14d33d02c0318171716aac7dd1eb9cd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 270,
"num_lines": 5,
"path": "/README.md",
"repo_name": "George19395/PET-Exercises",
"src_encoding": "UTF-8",
"text": "# PET-Exercises\nExercises in Privacy Enhancing Technologies (UCL Information Security MSc; Course COMPGA17)\n\n# How to install and run the exercises\nIn order to run the labs you will need an `ubuntu` (linux) virtual machine. Ensure you have a working installation of Python 2.7, and `pip install` packages `pytest` and `petlib`. Git clone this repository and follow the instructions in each of the exercise directories.\n"
}
] | 2 |
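A note on the double-and-add loop in the lab file above: it scans the scalar's bits from least to most significant, adding the running point P into the accumulator Q whenever a bit is set, and doubling P on every iteration. A minimal, dependency-free sketch of the same bit-scanning structure, applied to plain integer multiplication rather than EC points (the function name and values are illustrative, not from the lab):

def double_and_add(r, x):
    # q accumulates the result; p holds x doubled i times at step i
    q, p = 0, x
    for i in range(r.bit_length()):
        if (r >> i) & 1:   # bit i of r is set -> add the current doubled value
            q = q + p
        p = p + p          # doubling, mirroring point_double in the lab code
    return q

assert double_and_add(13, 7) == 13 * 7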
myoung859/Tank_Attack_580.200 | https://github.com/myoung859/Tank_Attack_580.200 | da0223d3b6b3c1681dff1e129a8ee89344e3f6f3 | fd6ea93fda39e8f8f5f2a7156bcefe31ad51d77a | 54b28d7ea5eaa19a958b8c505d339f3134d9b1d2 | refs/heads/master | 2020-03-17T01:49:06.013747 | 2018-05-16T07:43:22 | 2018-05-16T07:43:22 | 133,167,536 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4628162086009979,
"alphanum_fraction": 0.48613226413726807,
"avg_line_length": 38.75776290893555,
"blob_id": "f223753091a68c7c113bceb2b0b7f0d65e6100d0",
"content_id": "f7dcf6caf17f18267ee91765c0d995407955cc71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6562,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 161,
"path": "/TankAttack.py",
"repo_name": "myoung859/Tank_Attack_580.200",
"src_encoding": "UTF-8",
"text": "import pygame\r\nimport random as rd\r\nfrom helpers import Tank\r\nfrom helpers import Shell\r\nimport helpers as TA\r\n\r\n#Initial parameter setup\r\nfiler=open('options.csv', 'r',newline = '')\r\nx_dim = int(filer.readline())\r\ny_dim = int(filer.readline())\r\ngravity = float(filer.readline())\r\ndrag = float(filer.readline())\r\nwind_max = float(filer.readline())\r\nfiler.close()\r\n\r\npygame.init()\r\npygame.display.set_caption(\"Tank Attack\")\r\n\r\nprint(\"Welcome to Tank Attack!\")\r\n\r\ndef show(p1, p2, screen):\r\n#Sets up tanks on screen\r\n screen.fill([0,0,156])\r\n Font = pygame.font.SysFont(None, 14)\r\n pygame.draw.rect(screen, [0,56,0],(0,y_dim-50,x_dim,y_dim),0)\r\n screen.blit(p1.showtank(), (p1.position(),y_dim-85))\r\n text = Font.render('P1', True, (255, 0, 0), None)\r\n screen.blit(text, (p1.position()+15,y_dim-50))\r\n text2 = Font.render('P2', True, (0, 255, 0), None)\r\n screen.blit(p2.showtank(), (p2.position(),y_dim-85))\r\n screen.blit(text2, (p2.position()+15,y_dim-50))\r\n return\r\n\r\n#Repeatedly prompts the user until they type 'o' or 'p'\r\nwhile(True):\r\n start = input(\"To begin, type (P)lay. To change parameters type (O)ptions.\")\r\n\t#if options, redo the parameters\r\n if start[0].lower() == 'o':\r\n TA.options_prompt('options.csv',x_dim,y_dim,gravity,drag, wind_max)\r\n filer=open('options.csv', 'r',newline = '')\r\n x_dim = int(filer.readline())\r\n y_dim = int(filer.readline())\r\n gravity = float(filer.readline())\r\n drag = float(filer.readline())\r\n wind_max = float(filer.readline())\r\n filer.close()\r\n\r\n\r\n if start[0].lower() == 'p':\r\n field = [int(x_dim) , int(y_dim)]\r\n ip1 = rd.randint(50,int(x_dim) - 50)\r\n ip2 = rd.randint(50,int(x_dim) - 50)\r\n#Adds in the players\r\n p1 = Tank(ip1, x_dim, y_dim, 1, 'p1tank.png')\r\n p2 = Tank(ip2, x_dim, y_dim, 2, 'p2tank.png')\r\n \r\n pygame.init()\r\n b=rd.random()\r\n windy=b*wind_max\r\n \r\n p = 1\r\n screen = pygame.display.set_mode(field)\r\n show(p1,p2, screen)\r\n pygame.display.flip()\r\n col = False\r\n \r\n a=rd.random()\r\n b=rd.random()\r\n windy=b*wind_max\r\n if a<0.5:\r\n v_wind=windy\r\n print('The wind is blowing %.2f mph to the right.'%windy)\r\n else:\r\n v_wind=windy*-1\r\n print('The wind is blowing %.2f mph to the left.'%windy)\r\n \r\n while col == False:\r\n\t#Checks for window closing, then updates display\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n break\r\n screen = pygame.display.set_mode(field)\r\n show(p1,p2, screen)\r\n pygame.display.flip()\r\n\t\t\t#Prompts the user until they select a mode\r\n opt = 'IFYOUREADTHISGIVEUSANA'\r\n while (not (opt[0].lower() in ['f','m','q'])):\r\n print(\"---Player \" + str(p) +\"---\")\r\n print(\"If you want to fire a shell from your tank, input (F)ire.\")\r\n print(\"If you want to move your tank up to 50 meters, input (M)ove.\")\r\n opt = str(input())\r\n\r\n#Sets up shell spawning\r\n if (opt[0].lower() == 'f'):\r\n v_0 = float(input(\"Input the initial velocity: \"))\r\n angle = float(input(\"Input the angle of your shot (degrees): \"))\r\n pygame.display.flip()\r\n\t\t\t\t#Fires shell, then checks after each iteration fot outofbounds/hit\r\n if p == 1:\r\n shot = Shell(v_0, angle, p1)\r\n while shot.y_pos < 450 and shot.x_pos > 0 and shot.y_pos > -1*(y_dim-50) and shot.x_pos < shot.Tank.x_max and col==False:\r\n shot.Fire(drag, v_wind, gravity, 0.05)\r\n yposition = shot.y_pos\r\n if shot.y_pos < 0:\r\n yposition = 
shot.y_pos*-1\r\n screen = pygame.display.set_mode(field)\r\n show(p1,p2, screen)\r\n fire = pygame.draw.rect(screen,shot.color,[shot.x_pos,yposition,10,10],0)\r\n col = pygame.Rect.colliderect(fire, p2.rect)\r\n if col == True:\r\n screen.blit(pygame.image.load('dead.png'), (p2.position(),y_dim-85))\r\n pygame.display.flip()\r\n\r\n elif p == 2: #...and does the same if it's player 2's turn\r\n shot = Shell(v_0, angle, p2)\r\n col = False\r\n while shot.y_pos < 450 and shot.x_pos > 0 and shot.y_pos > -1*(y_dim-50) and shot.x_pos < shot.Tank.x_max and col==False:\r\n shot.Fire(drag, v_wind, gravity, 0.05)\r\n yposition = shot.y_pos\r\n if shot.y_pos < 0:\r\n yposition = shot.y_pos*-1\r\n screen = pygame.display.set_mode(field)\r\n show(p1,p2, screen)\r\n fire = pygame.draw.rect(screen,shot.color,[shot.x_pos,yposition,10,10],0)\r\n col = pygame.Rect.colliderect(fire, p1.rect)\r\n if col == True:\r\n screen.blit(pygame.image.load('dead.png'), (p1.position(),y_dim-85))\r\n pygame.display.flip()\r\n\r\n if col == True:\r\n print(\"Congratulations, Player \" + str(p) +\".\")\r\n print(\"You totally annihilated the other player.\")\r\n print(\"I hope you're happy with yourself.\")\r\n break\r\n\r\n elif (opt[0].lower() == 'm'):\r\n if p == 1:\r\n p1.move() #defined in helpers.py\r\n elif p == 2:\r\n p2.move()\r\n\r\n screen = pygame.display.set_mode(field)\r\n show(p1,p2, screen)\r\n pygame.display.flip()\r\n\t\t\t\t\r\n\t\t\t#Switches player and recalculates wind\t\r\n if p == 1:\r\n p = 2\r\n elif p == 2:\r\n p = 1\r\n a=rd.random()\r\n b=rd.random()\r\n windy=b*wind_max\r\n if a<0.5:\r\n v_wind=windy\r\n print('The wind is blowing %.2f mph to the right.'%windy)\r\n else:\r\n v_wind=windy*-1\r\n print('The wind is blowing %.2f mph to the left.'%windy)\r\n"
},
{
"alpha_fraction": 0.5413740277290344,
"alphanum_fraction": 0.560916006565094,
"avg_line_length": 37.33734893798828,
"blob_id": "dc0d2be8eab9cb991671e1cb0bc2dc116c498c79",
"content_id": "58f5ea41577edae0b3bb95b1239ca463d7e02b92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3275,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 83,
"path": "/helpers.py",
"repo_name": "myoung859/Tank_Attack_580.200",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 13 17:35:38 2018\r\n@author: Mike\r\n\"\"\"\r\nimport pygame\r\nfrom math import radians,sin,cos\r\nimport csv\r\nimport random\r\n\r\ndef options_prompt(filename, x_dim, y_dim, gravity, drag,wind_max):\r\n\r\n filew = open(filename, 'w',newline = '')\r\n output = csv.writer(filew)\r\n output.writerow([int(input(\"Please input the horizontal window size (Current value is \"+ str(x_dim) +\"): \"))])\r\n output.writerow([int(input(\"Please input the vertical window size (Current value is \"+ str(y_dim) +\"): \"))])\r\n output.writerow([float(input(\"Please input the gravity strength (Current value is \"+ str(gravity) +\"): \"))])\r\n output.writerow([float(input(\"Please input the drag constant (Current value is \"+ str(drag) +\"): \"))])\r\n output.writerow([float(input(\"Please input the maximum wind speed (Current value is \"+ str(wind_max) +\"): \"))])\r\n\r\nclass Tank(pygame.sprite.Sprite):\r\n def __init__(self, pos_x, x_dim, y_dim, player, img):\r\n super().__init__()\r\n self.image = pygame.image.load(img)\r\n self.rect = self.image.get_rect()\r\n self.ymax = y_dim\r\n self.positx= pos_x\r\n self.rect.center = (self.positx + 15, y_dim-63) #bottom of tank is on ground\r\n self.posx = pos_x+15\r\n if player == 1:\r\n self.color = [255,0,0]\r\n elif player == 2:\r\n self.color = [0,255,0] \r\n self.posy = y_dim-63\r\n self.player = player\r\n self.x_max = x_dim\r\n \r\n def move(self):\r\n dist = 516\r\n while (dist > 50 or dist < -50):\r\n dist = int(input(\"Please enter the distance (positive-RIGHT or negative-LEFT) to move, up to 50 meters: \"))\r\n self.positx = self.positx + int(2.5*dist) #Inspired by https://bit.ly/2KkNOp8\r\n if (self.positx <= 20):\r\n self.positx = 0\r\n print(\"You can't get out of this one.\")\r\n if (self.positx >= self.x_max - 20):\r\n self.positx = self.x_max\r\n print(\"You can't get out of this one.\")\r\n return self.positx\r\n\r\n def showtank(self):\r\n pic = self.image\r\n return pic \r\n\r\n def position(self):\r\n return self.positx\r\n\r\n def color(self):\r\n return self.color\r\n \r\n def fire(self):\r\n None\r\nclass Shell(pygame.sprite.Sprite):\r\n def __init__(self, v_0, angle, Tank):\r\n super().__init__()\r\n self.image = pygame.image.load('bullet.png')\r\n self.color = [255,0,255]\r\n self.rect = self.image.get_rect()\r\n self.Tank = Tank\r\n self.rect.center = (self.Tank.rect.centerx, self.Tank.rect.centery - 6) \r\n self.player = getattr(self.Tank, 'player')\r\n self.v_x = cos(radians(angle)) * v_0\r\n self.v_y = sin(radians(angle)) * v_0\r\n self.mass = 10\r\n self.x_pos=self.Tank.posx\r\n self.y_pos=self.Tank.posy\r\n \r\n def Fire(self,drag,v_wind, gravity,dt):\r\n #Calculates real-time change in velocity, then moves the shell that much\r\n self.v_x = self.v_x - ((drag*(self.v_x + v_wind)/self.mass)*dt)\r\n self.v_y = self.v_y - ((drag*(self.v_y)/self.mass)*dt) - (gravity * dt)\r\n self.x_pos=self.x_pos+dt*self.v_x\r\n self.y_pos=self.y_pos-dt*self.v_y\r\n \r\n"
},
{
"alpha_fraction": 0.7297297120094299,
"alphanum_fraction": 0.8108108043670654,
"avg_line_length": 36,
"blob_id": "d10268342f15dc0754a99457cda7a2226c01c8a1",
"content_id": "53789694e90c94ee906799ec50d9364d7d0438c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 2,
"path": "/README.md",
"repo_name": "myoung859/Tank_Attack_580.200",
"src_encoding": "UTF-8",
"text": "# Tank_Attack_580.200\nCreates a simplified Tank Attack clone using pygame\n"
}
] | 3 |
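Shell.Fire in helpers.py above advances the projectile with an explicit Euler step: horizontal drag acts on the velocity relative to the wind, while vertical drag and gravity act on the vertical velocity. A standalone sketch of the same update with y pointing upward (TankAttack.py subtracts dt*v_y instead, because pygame screen coordinates grow downward); all parameter values below are illustrative:

def euler_trajectory(v_x, v_y, drag, v_wind, gravity, mass=10.0, dt=0.05, steps=200):
    # Integrate position using the same per-step velocity updates as Shell.Fire.
    x, y = 0.0, 0.0
    for _ in range(steps):
        v_x = v_x - (drag * (v_x + v_wind) / mass) * dt
        v_y = v_y - (drag * v_y / mass) * dt - gravity * dt
        x = x + dt * v_x
        y = y + dt * v_y
    return x, y

print(euler_trajectory(40.0, 40.0, drag=0.5, v_wind=2.0, gravity=9.81))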
puzzlewolf/barb-button | https://github.com/puzzlewolf/barb-button | 288cf5744649052761a93ee1ced450494369cd4f | a942751452bbdaa65b8039dc31a1990b8020c39c | a02027db94962cda69d397f49f3108ae7c8ec410 | refs/heads/master | 2021-03-24T23:23:13.337195 | 2020-03-15T23:46:34 | 2020-03-15T23:46:34 | 247,571,390 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6823529601097107,
"alphanum_fraction": 0.6823529601097107,
"avg_line_length": 55.33333206176758,
"blob_id": "7014aed22fa882cf4632ac010f15c38cc56ddbbe",
"content_id": "7aa9d1be8da7ebd93a02c75b8ff8f7b89b1ad67b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 3,
"path": "/barb.sh",
"repo_name": "puzzlewolf/barb-button",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\nJSON=$(youtube-dl -j --flat-playlist 'https://www.youtube.com/user/VideoGameSeppuku/videos')\necho $JSON | jq 'select(.title | match(\"SMM\")) | .title' \n"
},
{
"alpha_fraction": 0.5264054536819458,
"alphanum_fraction": 0.5383304953575134,
"avg_line_length": 33.52941131591797,
"blob_id": "64c7596244dd18a5014bd23c3be4aa443892b01e",
"content_id": "0ac9c3237e0980683505a0c6d7f0e71f55a2772e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 587,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 17,
"path": "/barb.py",
"repo_name": "puzzlewolf/barb-button",
"src_encoding": "UTF-8",
"text": "import youtube_dl\nimport json\n\nydl = youtube_dl.YoutubeDL({\n 'playliststart' : 1,\n 'playlistend' : 10,\n 'matchtitle' : \"Super|SMM|Mario\",\n 'quiet' : True,\n })\n\nbarb = ydl.extract_info(\n 'https://www.youtube.com/user/VideoGameSeppuku/videos', download=False)\ncarl = ydl.extract_info(\n 'https://www.youtube.com/user/CarlSagan42/videos', download=False)\n\nprint(barb['entries'][0]['webpage_url'])\n#print(json.dumps(result, sort_keys=True, indent=4))\n"
}
] | 2 |
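barb.py above relies on youtube-dl's 'matchtitle' regex option, and barb.sh pipes youtube-dl's JSON output through jq's match(); the same title filter in plain Python over flat-playlist entries would look roughly like this (the entry dicts are made-up stand-ins for youtube-dl output):

import re

entries = [{"title": "SMM Kaizo endurance"}, {"title": "Unrelated vlog"},
           {"title": "Super Mario Maker troll level"}]
pattern = re.compile(r"Super|SMM|Mario")
matches = [e["title"] for e in entries if pattern.search(e["title"])]
assert matches == ["SMM Kaizo endurance", "Super Mario Maker troll level"]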
kelleyparker/Python_Files | https://github.com/kelleyparker/Python_Files | a1f210c764c672b68bea298135d9b68fd6c84f25 | edffb855f15b84a205b60db665d4d3dba7832da6 | 665ccd885608f418c25d88befc831948d15483ca | refs/heads/master | 2015-08-11T18:04:33.924454 | 2014-05-22T03:17:03 | 2014-05-22T03:17:03 | 20,045,916 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5629921555519104,
"alphanum_fraction": 0.5826771855354309,
"avg_line_length": 13.05555534362793,
"blob_id": "b3f16f59e646b39ae248925f03fc8c99ef7b020b",
"content_id": "cee70f98d45c97868e52f986c696c4c99285c202",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 18,
"path": "/Python Visual Studio/PythonApplication1/PythonApplication1/3 - pythagorean theorem.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "# Type in a and b in the pythagorean theorem, ints only\n#\n# formula is\n#\n# a**2 + b**2 = c**2\n#\nimport math\n\na=int(input(\"What is the A?\"))\nif type(a) is str:\nb=int(input(\"What is the B?\"))\n\n\n\nc = a**2 + b**2\nc = math.sqrt(c)\nprint(c)\n#c = math.sqrt(c)\n\n"
},
{
"alpha_fraction": 0.4833333194255829,
"alphanum_fraction": 0.4833333194255829,
"avg_line_length": 9.166666984558105,
"blob_id": "961698ef9a7168361034fb1a76786996df9973c5",
"content_id": "dadc393e8ea845de43c46806108896bc03348798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 6,
"path": "/Python Visual Studio/PythonApplication1/PythonApplication1/1 - Simple addition - Copy.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "a = int(input('a'))\nb = int(input('b'))\n\nc = a + b\n\nprint(c)"
},
{
"alpha_fraction": 0.5732483863830566,
"alphanum_fraction": 0.5732483863830566,
"avg_line_length": 18.625,
"blob_id": "6dcd66b320c422518222ffd2d3f6bd37ef6571cd",
"content_id": "08151ee0b4386437e660e9926f0d9f61fc0133d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 8,
"path": "/IDLE/1- Simple array.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "# kpName = [\"Kelley \",\"Ryan \",\"Parker \"]\n\n\n\ndef FMLname(first,middle,last):\n print(first + \"\" + middle + \"\" + last)\n\nFMLname(\"Kelley \",\"Ryan \",\"Parker \")\n"
},
{
"alpha_fraction": 0.6405228972434998,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 16,
"blob_id": "a61a7bea7d38c92badcf696c1a56a14ee3734f70",
"content_id": "0fd2684c295441bc1c25d587bdcf365d54c49f21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 9,
"path": "/IDLE/2 - fahrenheint to celsius.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "import math\n\nx=input(\"What is the temperature, in Celsius, that you'd like to convert to Fahrenheit?\")\nc=int(x)\n\nf = c * (9/5) + 32\nf=round(f)\n\nprint(f)\n"
},
{
"alpha_fraction": 0.6504064798355103,
"alphanum_fraction": 0.6504064798355103,
"avg_line_length": 30,
"blob_id": "01ac3dfdf2a8f8990bf4220e1cfd0a94199b7dde",
"content_id": "ffaf25ad0346dbc0e50e30ff3afdc1e000393a37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 4,
"path": "/Python Visual Studio/PythonApplication1/PythonApplication1/2 - name in array.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "def printFullName(first,middle,last):\n print(first + \" \" + middle + \" \" + last)\n\nprintFullName(\"Kelley\",\"Ryan\",\"Parker\")"
},
{
"alpha_fraction": 0.6490066051483154,
"alphanum_fraction": 0.6754966974258423,
"avg_line_length": 15.777777671813965,
"blob_id": "d1ff008387d3a9090470df42eb708b42da7be314",
"content_id": "043908e2cdc3ef115b9ef2484e6e7ab8c4c83c21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 9,
"path": "/IDLE/3 - celsius to f.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "import math\n\nx=input(\"What is the temperature, in Fahrenheit, that you'd like to convert to Celsius?\")\nf=int(x)\n\nc = (f-32)*(5/9)\nc=round(c)\n\nprint(c)\n"
},
{
"alpha_fraction": 0.5738396644592285,
"alphanum_fraction": 0.607594907283783,
"avg_line_length": 20.545454025268555,
"blob_id": "12cb29aa79ba4038ad4e30cc40efb2b39f0ce5ed",
"content_id": "cb7677627375f4a22dcdeeddc5e98114289bafea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 11,
"path": "/IDLE/4 - list numbers in list with mmx reference.py",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "print(\"Do you want to see a list of mavericks from Mega Man X?\");\n\nresponse = str(input(\"Y/N\"))\n\nmavericks = [1,2,3,4,5,6,7,8]\n\nif response == \"Y\" or 'y':\n for i in mavericks:\n print(mavericks[i])\nelse:\n print(\"ok goodbye\")\n"
},
{
"alpha_fraction": 0.7264957427978516,
"alphanum_fraction": 0.7264957427978516,
"avg_line_length": 38,
"blob_id": "aff7f61f4f24d42bd3d33ced5d91bdc95c283082",
"content_id": "c99dc59d239d09bdec0583a10cd1b80df168c881",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 3,
"path": "/README.md",
"repo_name": "kelleyparker/Python_Files",
"src_encoding": "UTF-8",
"text": "Python_Files\n============\nThese are python files I've created to boost my skills in the Python programming language.\n"
}
] | 8 |
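The conversion scripts above implement F = C * 9/5 + 32 and C = (F - 32) * 5/9, and the Pythagorean script computes sqrt(a**2 + b**2). A compact sketch of the same formulas as reusable Python 3 functions, with a round-trip check (the function names are illustrative):

import math

def c_to_f(c):
    return c * (9 / 5) + 32       # same formula as the Celsius-to-Fahrenheit script

def f_to_c(f):
    return (f - 32) * (5 / 9)     # same formula as the Fahrenheit-to-Celsius script

def hypotenuse(a, b):
    return math.hypot(a, b)       # one-call form of the Pythagorean script

assert c_to_f(100) == 212 and f_to_c(212) == 100
assert hypotenuse(3, 4) == 5.0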
tmyyss/email | https://github.com/tmyyss/email | 00777f3741b57adc103159c7433b30a8ce86ab29 | 1f5ae91ef4474ea111e4d3f9cb85f0d15cc021b7 | 9ca32b95615af1cc87d875db2f28a8dc6de861bf | refs/heads/master | 2021-01-22T09:20:23.573545 | 2015-04-17T08:04:40 | 2015-04-17T08:04:40 | 34,104,139 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 12,
"blob_id": "6086b849bffafa9e88319c73c751e8f1b2a34ce1",
"content_id": "e5a7abefc86720cb5ee39430b1c98fe2ba2d1339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 4,
"path": "/README.md",
"repo_name": "tmyyss/email",
"src_encoding": "UTF-8",
"text": "# email\n简单的邮箱群发系统\n功能:\n能实现两种模式的邮件发送,一种为普通模式,一种为专业模式。\n"
},
{
"alpha_fraction": 0.6790890097618103,
"alphanum_fraction": 0.6873705983161926,
"avg_line_length": 25.648147583007812,
"blob_id": "e1e2ee8bd5aec4752d20cae9e55933617549cdd7",
"content_id": "12dbbbd07299e489d64119a26831e3afb37a6bcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1697,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 54,
"path": "/bulk_mail.py",
"repo_name": "tmyyss/email",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n#coding:utf-8\n\nimport smtplib \nimport argparse \nfrom email.mime.text import MIMEText \nfrom email.header import Header\n \nhost = 'gstsh.com' \nSENDER= '[email protected]' \nCONTENT=u\"你好,测试!\"\nSUBJECT=\"python email test\"\n \ndef send_mail(msg,mailto,subject):\n\tmsg=MIMEText('<html><h1>%s</h1></html>'%msg,'html','utf-8')\n\tmsg['Subject']=Header(subject,'utf-8')\n\tmsg['From']=SENDER\n\tsmtp=smtplib.SMTP()\n\tprint(u\"连接服务器....\")\n\tsmtp.connect(host)\n\tprint(u\"连接服务器成功,准备发送消息...\")\n\tmsg['TO']=mailto\n\tprint(u\"正在发送给%s\"%mailto)\n\tsmtp.sendmail(SENDER,mailto,msg.as_string())\n\tprint(u\"发送给%s成功\"%mailto)\n\t#smtp.sendmail(sender,mailto,msg.as_string())\n\tsmtp.quit()\n\ndef parse_args():\n\tparser=argparse.ArgumentParser(description=u'邮件群发系统参数使用说明')\n\tparser.add_argument('-m',dest='mode',action='store',choices=['n','p'],default='p',help=u\"群发模式,n为普通模式,即一般的群发,p为专业模式, 让每一个收到邮件的人都以为是你只给他/她发了邮件\")\n\tparser.add_argument('-f',dest='filename',action='store',required=True,help=u'存储邮箱地址的文件')\n\tparser.add_argument('-t',dest='threads_name',action='store',type=int,default=20,help=u'开启多线程,后接线程数目,默认为20,最大不超过50')\n\targs=parser.parse_args()\n\t\n\tfilename=args.filename\n\tf=open(filename,'r')\n\taddrs=f.read()\n\tf.close()\n\t\n\taddrs=addrs.split('\\n')[:-1]\n\tprint addrs\n\tif args.mode=='n':\n\t\tsend_mail(CONTENT,addrs,SUBJECT)\n\telif args.mode=='p':\n\t\tfor addr in addrs:\n\t\t\tsend_mail(CONTENT,addr,SUBJECT)\n\t\n\t\t\n\n\nif __name__==\"__main__\":\n\targs=parse_args()\n\tprint dir(args)\n\n \n"
}
] | 2 |
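send_mail in bulk_mail.py above builds an HTML MIMEText with a UTF-8 Header before opening any SMTP connection, so the message construction can be exercised offline by printing the wire format. A minimal sketch (the addresses below are placeholders, not the repository's):

from email.mime.text import MIMEText
from email.header import Header

msg = MIMEText('<html><h1>%s</h1></html>' % 'Hello, this is a test!', 'html', 'utf-8')
msg['Subject'] = Header('python email test', 'utf-8')
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
print(msg.as_string())   # inspect the RFC 2822 output without contacting an SMTP server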
yask123/cfi-team24 | https://github.com/yask123/cfi-team24 | b6d956362e23798d9aeabae878feec91b21e28cc | a9d606593449480beef65a1d2e30a9dbde52784e | be2f86d82b0d0d4fa8653bd17ae03825bf43816b | refs/heads/master | 2020-12-11T07:59:22.720180 | 2015-09-27T14:38:51 | 2015-09-27T14:38:51 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6526047587394714,
"alphanum_fraction": 0.6568516492843628,
"avg_line_length": 31.703702926635742,
"blob_id": "2f4d280c24e2f621e0522470843a941331292080",
"content_id": "df5fdb45a59cc3359b47552ac494e08f7a1a3ab1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3532,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 108,
"path": "/main.py",
"repo_name": "yask123/cfi-team24",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, render_template, render_template\nfrom flask import make_response, current_app, abort, jsonify\nfrom datetime import timedelta\nfrom functools import update_wrapper\nimport urllib\nimport json\nimport requests\n\napp = Flask(__name__)\n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n#------------------------------------------------------------------------------\n\n\ndef getOrder(origin,waypoints):\n origin = origin\n waypoints = '|'.join(waypoints)\n url = 'https://maps.googleapis.com/maps/api/directions/json?origin='+origin+'&destination='+origin+'&waypoints=optimize:true|'+waypoints+'&key=AIzaSyDmFfFmAAJ_9wnCsaz6oOGvyUeMis9BmkI'\n print url\n data = requests.get(url).json()\n return data['routes'][0]['waypoint_order']\n\ndef getDirectionURLs(names,waypoints):\n result = []\n for i in xrange(len(waypoints)-1):\n source = waypoints[i]\n destination = waypoints[i+1]\n name = names[i+1]\n url = 'https://www.google.co.in/maps/dir/'+source+'/'+destination\n # print url\n result.append([name,url])\n return result\n\[email protected]('/')\ndef map():\n return render_template('map.html', name='map')\n\[email protected]('/map', methods=['GET'])\n@crossdomain(origin='*')\ndef calculate():\n if request.method == 'GET':\n print 'f'\n origin_ = request.args['source']\n origin = urllib.quote_plus(origin_)\n waypoints_ = request.args.getlist('waypoints[]')\n waypoints = [ urllib.quote_plus(i) for i in waypoints_]\n print waypoints\n\n # origin = 'Adelaide,SA'\n # waypoints = ['Barossa+Valley,SA','Clare,SA','Connawarra,SA','McLaren+Vale,SA']\n order = getOrder(origin=origin,waypoints=waypoints)\n ordered_waypoints = list(waypoints)\n names = list(waypoints_)\n for i,j in enumerate(order):\n ordered_waypoints[i] = waypoints[j]\n names[i] = waypoints_[j]\n\n ordered_names = [origin_] + names + [origin_]\n ordered_waypoints = [origin] + ordered_waypoints + [origin]\n # print ordered_waypoints\n result = getDirectionURLs(names=ordered_names,waypoints=ordered_waypoints)\n else:\n print 'no POST baby'\n return render_template('result.html', name='map', result=result)\n return json.dumps(result)\n\nif __name__ == '__main__':\n app.run(port=3000, debug=True)\n"
},
{
"alpha_fraction": 0.7910863757133484,
"alphanum_fraction": 0.7910863757133484,
"avg_line_length": 50.28571319580078,
"blob_id": "e26a02bf6b759272763098d5ca7ca9eefc384fad",
"content_id": "3f5be41fc4952e507570ec7a0c366b759658a59b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 359,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 7,
"path": "/README.md",
"repo_name": "yask123/cfi-team24",
"src_encoding": "UTF-8",
"text": "#Route Optimization for multiple stops\n\nA web-app which will provide navigation for multiple stops using the optimal route.\n\n### Why google maps navigation?\n\nBecause it's the best in class, with automatic route rerouting, traffic management, shows broken roads, etc. Why re-invent the wheel? The links open in the Google maps application, so no issues there.\n"
}
] | 2 |
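getOrder above returns routes[0]['waypoint_order'] from the Directions API: a permutation of the input waypoint indices, which calculate() then applies element by element. The same reordering written as a single comprehension (the waypoint names and the permutation are illustrative, not real API output):

waypoints = ['Barossa+Valley,SA', 'Clare,SA', 'Connawarra,SA', 'McLaren+Vale,SA']
waypoint_order = [2, 0, 3, 1]   # hypothetical optimize:true result
ordered = [waypoints[j] for j in waypoint_order]
assert ordered == ['Connawarra,SA', 'Barossa+Valley,SA', 'McLaren+Vale,SA', 'Clare,SA']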
DAWZayas-Projects/BELLON-CABALLERO-OSCAR | https://github.com/DAWZayas-Projects/BELLON-CABALLERO-OSCAR | c93ee7db71f3496838271fe52e6396b6a82bed66 | d2ca89e5bd9bd9ec29f27cfccc75638e0b30b18e | 0b4af32d32614f000c5a8f6e3ca44a6d08315391 | refs/heads/master | 2021-01-18T23:22:17.777362 | 2016-06-12T10:32:22 | 2016-06-12T10:32:22 | 48,105,390 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.539432168006897,
"alphanum_fraction": 0.5461919903755188,
"avg_line_length": 31.632352828979492,
"blob_id": "a087419d45c438d8da7f55dd1844f983d7df6aac",
"content_id": "0e216d5f7983fba4e7a4c430be694a4561a80d09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2219,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 68,
"path": "/templates/base.html",
"repo_name": "DAWZayas-Projects/BELLON-CABALLERO-OSCAR",
"src_encoding": "UTF-8",
"text": "{% load staticfiles %}\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n\n <!-- Latest compiled and minified CSS -->\n <link rel=\"stylesheet\"\n href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css\"\n integrity=\"sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7\"\n crossorigin=\"anonymous\" />\n\n <!-- Optional theme -->\n <link rel=\"stylesheet\"\n href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap-theme.min.css\"\n integrity=\"sha384-fLW2N01lMqjakBkx3l/M9EahuwpSfeNvV63J5ezn3uZzapT0u7EYsXMjQV+0En5r\"\n crossorigin=\"anonymous\">\n\n <!-- Font awesome -->\n <link rel=\"stylesheet\"\n href=\"https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css\">\n\n <link rel=\"stylesheet\" href='{% static \"css/base.css\" %}' />\n\n <title>{% block head %} My blog {% endblock %} </title>\n </head>\n <body>\n {% include \"messages.html\" %}\n <nav class=\"navbar navbar-default\">\n <div class=\"container-fluid\">\n <div class=\"navbar-header\">\n <a class=\"navbar-brand\" href=\"{% url 'posts:list' %}\">\n Django Blog\n </a>\n </div>\n <div class=\"navbar-form navbar-right\">\n <div class=\"navbar-collapse collapse\">\n {% if user.is_authenticated %}\n <span>Logged in as {{ request.user.get_full_name }}</span>\n \n {% if user.socialaccount_set.all.0.get_avatar_url %}\n <img class=\"imgPerfil\" src=\"{{ user.socialaccount_set.all.0.get_avatar_url }}\" />\n {% else %}\n <img class=\"imgPerfil\" src=\"{% static 'images/default.png' %}\" />\n {% endif %}\n <a id=\"logout\" href=\"/accounts/logout\" class=\"btn btn-warning\">Logout</a>\n {% else %}\n <a id=\"google_login\" href=\"/accounts/google/login\" class=\"btn btn-success\">\n Sign in with Google\n </a>\n {% endif %}\n </div>\n </div>\n </div>\n \n </nav>\n \n <div class='container'>\n {% block content %}\n\n {% endblock %}\n </div>\n </body>\n <!-- Latest compiled and minified JavaScript -->\n <script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js\"\n integrity=\"sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS\"\n crossorigin=\"anonymous\"></script>\n</html>\n"
},
{
"alpha_fraction": 0.6494770050048828,
"alphanum_fraction": 0.658477246761322,
"avg_line_length": 30.623077392578125,
"blob_id": "e4994be08293f819e8655bd8a77d35804100960f",
"content_id": "34d0e2444ef968d8fbfa75f26550ce3fe13cdf6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4111,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 130,
"path": "/posts/views.py",
"repo_name": "DAWZayas-Projects/BELLON-CABALLERO-OSCAR",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom urllib import quote_plus\nfrom django.utils import timezone\n\n# Create your views here.\n\nfrom .models import Post\nfrom .forms import PostForm\n\n\ndef post_list(request):\n queryset_list = Post.objects.filter(draft=False).filter(publish__lte=timezone.now())\n #queryset = Post.objects.all().order_by(\"-timestamp\")\n\n query = request.GET.get(\"q\")\n if query:\n queryset_list = queryset_list.filter(title__icontains=query)\n\n paginator = Paginator(queryset_list, 2) # Show 25 contacts per page\n\n page = request.GET.get('page')\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"queryset\": queryset,\n \"queryset_list_len\": len(queryset_list),\n \"title\": \"All posts\"\n }\n return render(request, 'post_list.html', context)\n \n@login_required\ndef my_posts(request):\n \n username = request.user.id\n queryset_list = Post.objects.filter(user=username)\n \n query = request.GET.get(\"q\")\n if query:\n queryset_list = queryset_list.filter(title__icontains=query)\n \n paginator = Paginator(queryset_list, 2) # Show 25 contacts per page\n \n page = request.GET.get('page')\n queryset = queryset_list\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n \n context = {\n \"queryset\": queryset,\n \"queryset_list_len\": len(queryset_list),\n \"title\": \"Your posts\"\n }\n return render(request, 'post_list.html', context)\n\ndef post_detail(request, slug=None):\n instance = get_object_or_404(Post, slug=slug)\n absolute_url = request.build_absolute_uri\n share_url = quote_plus(instance.content)\n context = {\n \"instance\": instance,\n \"title\": instance.title,\n \"absolute_url\": absolute_url,\n \"share_url\": share_url\n }\n return render(request, 'post_detail.html', context)\n\n@login_required\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, \"Successfully created new post\")\n return HttpResponseRedirect(instance.get_absolute_url())\n submit_button = 'Create'\n context = {\n 'form': form,\n 'submit_button': submit_button,\n }\n\n return render(request, 'post_form.html', context)\n\ndef post_update(request, slug=None):\n\n instance = get_object_or_404(Post, slug=slug)\n\n if instance.user != request.user:\n raise Http404\n\n form = PostForm(request.POST or None, request.FILES or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"Successfully updated post: \" + instance.title)\n return HttpResponseRedirect(instance.get_absolute_url())\n submit_button = 'Update'\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n 'form': form,\n 'submit_button': submit_button,\n }\n return render(request, 'post_form.html', context)\n\ndef post_delete(request, slug=None):\n instance = get_object_or_404(Post, slug=slug)\n\n if instance.user != request.user:\n raise Http404\n\n instance.delete()\n return redirect(\"posts:list\")\n"
},
{
"alpha_fraction": 0.5641025900840759,
"alphanum_fraction": 0.7179487347602844,
"avg_line_length": 18.5,
"blob_id": "8520695ab763ede8da88a1d75dc7677563d5d189",
"content_id": "8be4491e5a72fd6b4f8ee11a558fff7a2fcfbfd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "DAWZayas-Projects/BELLON-CABALLERO-OSCAR",
"src_encoding": "UTF-8",
"text": "Django==1.9.4\ndjango-bootstrap-form==3.2\nPillow==3.1.1\ndjango-allauth==0.25.2\n"
},
{
"alpha_fraction": 0.6164037585258484,
"alphanum_fraction": 0.7602523565292358,
"avg_line_length": 28.90566062927246,
"blob_id": "257b219b0a7cf0ef7628c3be1dc2a509319fe400",
"content_id": "f0b3974689c5692914a7c1b05c3a196009071483",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 53,
"path": "/README.md",
"repo_name": "DAWZayas-Projects/BELLON-CABALLERO-OSCAR",
"src_encoding": "UTF-8",
"text": "#**Proyecto Final DAW: Python + Django**\n###*Oscar Bellón Caballero*\n\nEn este proyecto se va a llevar a cabo una página web (todavía por determinar) basada en Python, apoyándose en su framework más conocido [Django](https://www.djangoproject.com/).\n\nTambién se intentarán añadir varios [paquetes](https://www.djangopackages.com/) de Django, para extender su funcionalidad.\n\n\n###Python\n- ¿Qué es Python?\n- ¿Por qué Python?\n- [Introducción al lenguaje](http://goo.gl/zN1MGC)\n\n###Django\n- ¿Qué es Django?\n- ¿Por qué Django? ¿Alternativas?\n- Posibilidades de Django\n\n###Python + Django\n- Desarrollo de la aplicación\n\n\n###-----\n\n###Presentación\n\n[Prezi](http://prezi.com/grqsa66w10zh/?utm_campaign=share&utm_medium=copy)\n\n[Documentación](Documentacion/Documentacion proyecto final.pdf)\n\n###-----\n\n###Commits importantes\n\n[Creación del modelo Post básico](../../tree/5118438baa8a5b43ffd0c0ffc89592e2a9758107)\n\n[Primeras plantillas funcionando](../../tree/d9fb50d51ed10b2352ad2b1f99598fdad570c68f)\n\n[Primera queryset a plantilla](../../tree/164ccac38a86fd7a0593999d790e6e0bf20cdb9d)\n\n[Post_detail dinámico y link](../../tree/f4520717b16ae9427a9c471f75df2959186d964b)\n\n[Crear, editar, borrar y redirigir](../../tree/aa3f4460000a819dee37e5b85fe1cecc4235e8a2)\n\n[Plantillas, herencia](../../tree/160fc60330128d74fd0a265939447a60892af9c8)\n\n[Paginación y Bootstrap](../../tree/2c698a431c51febbb6f3ad673de24e3c1b97ad01)\n\n[Slug e imágenes](../../tree/7192dc966daab3fc754ccb4d256981695a3d301e)\n\n[Usuarios relacionados al post](../../tree/b2eab269e540f3b62b6e5daab86cd5aa688a2969)\n\n[Blog terminado](../../tree/master)\n"
}
] | 4 |
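post_list and my_posts in posts/views.py above share one Paginator guard: try the requested page, fall back to page 1 on a non-integer, and to the last page when out of range. The pattern extracted into a self-contained helper, assuming Django is installed (Paginator accepts plain lists as well as querysets; the helper name is illustrative):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def get_page(objects, page_number, per_page=2):
    paginator = Paginator(objects, per_page)
    try:
        return paginator.page(page_number)
    except PageNotAnInteger:
        return paginator.page(1)                     # non-integer -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)   # out of range -> last page

page = get_page(list(range(10)), 3)
assert list(page.object_list) == [4, 5]              # page 3 of 5, two items per page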
lukaskubis/darkskylib | https://github.com/lukaskubis/darkskylib | 89b536e324c19e4746b59371ba93f1d04de5231d | 93aa2d6fb8b25d309267a9351c3e351b1ddf0795 | fcf9fa5f8f1d37c869a8d532ca00fc3f7fada60e | refs/heads/master | 2022-04-03T02:41:20.666651 | 2019-03-19T07:51:04 | 2019-03-19T07:51:04 | 67,506,799 | 122 | 35 | MIT | 2016-09-06T12:37:55 | 2020-01-23T16:59:55 | 2019-12-26T09:02:10 | Python | [
{
"alpha_fraction": 0.6003298759460449,
"alphanum_fraction": 0.6157229542732239,
"avg_line_length": 28.819671630859375,
"blob_id": "de19162b85280093ba4ba7ec82ec298c9d8cdf63",
"content_id": "b1ef009050dd476b52c17ab8939993f4043c2706",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1819,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 61,
"path": "/test/__init__.py",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "import os\nimport pickle\nimport unittest\n\nimport darksky\nimport requests\n\n\nclass TestPickle(unittest.TestCase):\n \"\"\" Forecast pickling \"\"\"\n @classmethod\n def setUpClass(cls):\n def mock_request_get(*args, **kwargs):\n response = type('Response', (object,), {})\n response.headers = {}\n response.status_code = 200\n\n with open('./test/response.json', 'r') as fixture:\n response.text = fixture.read()\n\n return response\n\n cls.request_get = requests.get\n requests.get = mock_request_get\n\n @classmethod\n def tearDownClass(cls):\n os.system('find . -name \"*.pickle\" -exec rm {} \\;')\n requests.get = cls.request_get\n\n def test_pickle(self):\n location = -77.843906, 166.686520 # McMurdo station, antarctica\n\n # This doesn't actually hit the API since we mocked out the request lib\n forecast = darksky.forecast('test_key', *location)\n\n # Make sure we got the right data, via our mock\n self.assertEqual(forecast.currently.temperature, -23.58)\n\n # Ensure pickling by actually pickling\n with open('./forecast.pickle', 'wb') as outfile:\n pickle.dump(forecast, outfile)\n\n # Check that the file exists\n self.assertTrue(os.path.exists('./forecast.pickle'))\n\n def test_unpickle(self):\n # Check that the previous test, which writes out the pickle, succeeded\n self.assertTrue(os.path.exists('./forecast.pickle'))\n\n # Load the pickle file\n with open('./forecast.pickle', 'rb') as infile:\n forecast = pickle.load(infile)\n\n # Make sure it loaded right\n self.assertTrue(forecast)\n self.assertEqual(forecast.currently.temperature, -23.58)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.5767022371292114,
"alphanum_fraction": 0.6498769521713257,
"avg_line_length": 31.593582153320312,
"blob_id": "0093a821fb4ce7a585757bb11877762efa98ae64",
"content_id": "d485e6cf54863847c9c087be97af6025f45e831a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 6104,
"license_type": "permissive",
"max_line_length": 334,
"num_lines": 187,
"path": "/README.rst",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "darkskylib\n==========\n\nThis library for the `Dark Sky\nAPI <https://darksky.net/dev/docs>`__ provides access to detailed\nweather information from around the globe.\n\nQuick start\n-----------\n\nBefore you start using this library, you need to get your API key\n`here <https://darksky.net/dev/register>`__.\n\n\nAPI Calls\n~~~~~~~~~\n\nFunction ``forecast`` handles all request parameters and returns a\n``Forecast`` object.\n\n.. code:: python\n\n >>> from darksky import forecast\n >>> boston = forecast(key, 42.3601, -71.0589)\n >>>\n\nThe first 3 positional arguments are identical to the 3 required\nparameters for API call. The optional query parameters need to be\nprovided as keyword arguments.\n\nUsing ``time`` argument will get you a **time machine call**.\nUsing ``timeout`` argument will set default `request timeout <http://docs.python-requests.org/en/master/api/#requests.request>`__ .\n\n.. code:: python\n\n >>> BOSTON = key, 42.3601, -71.0589\n >>> from datetime import datetime as dt\n >>> t = dt(2013, 5, 6, 12).isoformat()\n >>> boston = forecast(*BOSTON, time=t)\n >>> boston.time\n 1367866800\n\nData Points and Data Blocks\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe values as well as ``DataPoint`` and ``DataBlock`` objects are\naccessed using instance attributes or dictionary keys. You can access\ncurrent values directly, without going through ``currently`` data point.\n\n.. code:: python\n\n >>> boston['currently']['temperature']\n 60.72\n >>> boston.temperature\n 60.72\n\n**Data blocks** are indexable and iterable by their ``data`` values.\n\n.. code:: python\n\n >>> len(boston.hourly)\n 24\n >>>\n >>> boston.hourly[1].temperature\n 59.49\n >>>\n >>> # list temperatures for next 10 hours\n ... [hour.temperature for hour in boston.hourly[:10]]\n [60.83, 59.49, 58.93, 57.95, 56.01, 53.95, 51.21, 49.21, 47.95, 46.31]\n\nNonexistent attributes will raise ``AttributeError`` and dictionary keys\n``KeyError`` the way you'd expect.\n\nRaw data\n~~~~~~~~\n\nTo get the raw data dictionary, you can either access it through\ninstance attributes or navigate to it through dictionary keys, the same\nway you would navigate the actual dictionary.\n\n.. code:: python\n\n >>> boston.hourly[2]\n {'ozone': 290.06, 'temperature': 58.93, 'pressure': 1017.8, 'windBearing': 274, 'dewPoint': 52.58, 'cloudCover': 0.29, 'apparentTemperature': 58.93, 'windSpeed': 7.96, 'summary': 'Partly Cloudy', 'icon': 'partly-cloudy-night', 'humidity': 0.79, 'precipProbability': 0, 'precipIntensity': 0, 'visibility': 8.67, 'time': 1476410400}\n >>>\n >>> boston['hourly']['data'][2]\n {'ozone': 290.06, 'temperature': 58.93, 'pressure': 1017.8, 'windBearing': 274, 'dewPoint': 52.58, 'cloudCover': 0.29, 'apparentTemperature': 58.93, 'windSpeed': 7.96, 'summary': 'Partly Cloudy', 'icon': 'partly-cloudy-night', 'humidity': 0.79, 'precipProbability': 0, 'precipIntensity': 0, 'visibility': 8.67, 'time': 1476410400}\n\nFlags and Alerts\n~~~~~~~~~~~~~~~~\n\nAll dashes ``-`` in attribute names of **Flags** objects are replaced by\nunderscores ``_``. This doesn't affect the dictionary keys.\n\n.. code:: python\n\n >>> # instead of 'boston.flags.isd-stations'\n ... 
boston.flags.isd_stations\n ['383340-99999', '383390-99999', '383410-99999', '384620-99999', '384710-99999']\n >>>\n >>> boston.flags['isd-stations']\n ['383340-99999', '383390-99999', '383410-99999', '384620-99999', '384710-99999']\n\nEven though **Alerts** are represented by a list, the data accessibility\nthrough instance attributes is preserved for alerts in the list.\n\n.. code:: python\n\n >>> boston.alerts[0].title\n 'Freeze Watch for Norfolk, MA'\n\nUpdating data\n~~~~~~~~~~~~~\n\nUse the ``refresh()`` method to update data of a ``Forecast`` object. The\n``refresh()`` method takes optional queries (including ``time``, making\nit a **Time machine** object) as keyword arguments. Calling\n``refresh()`` without any arguments will set all queries to default\nvalues. Use the ``timeout`` argument to set the request timeout.\n\n.. code:: python\n\n >>> boston.refresh()\n >>> (boston.time, boston.temperature, len(boston.hourly))\n (1476403500, 60.72, 49)\n >>>\n >>> boston.refresh(units='si', extend='hourly')\n >>> (boston.time, boston.temperature, len(boston.hourly))\n (1476404205, 15.81, 169)\n >>>\n >>> boston.refresh(units='us')\n >>> (boston.time, boston.temperature, len(boston.hourly))\n (1476404489, 60.57, 49)\n\nFor Developers\n~~~~~~~~~~~~~~\n\nResponse headers are stored in a dictionary under the ``response_headers``\nattribute.\n\n.. code:: python\n\n >>> boston.response_headers['X-response-Time']\n '146.035ms'\n\nExample script\n--------------\n\n.. code:: python\n\n from darksky import forecast\n from datetime import date, timedelta\n\n BOSTON = 42.3601, -71.0589\n\n weekday = date.today()\n with forecast('API_KEY', *BOSTON) as boston:\n print(boston.daily.summary, end='\\n---\\n')\n for day in boston.daily:\n day = dict(day = date.strftime(weekday, '%a'),\n sum = day.summary,\n tempMin = day.temperatureMin,\n tempMax = day.temperatureMax\n )\n print('{day}: {sum} Temp range: {tempMin} - {tempMax}°F'.format(**day))\n weekday += timedelta(days=1)\n\nOutput:\n\n::\n\n Light rain on Friday and Saturday, with temperatures bottoming out at 48°F on Tuesday.\n ---\n Sun: Partly cloudy in the morning. Temp range: 44.86 - 57.26°F\n Mon: Mostly cloudy in the morning. Temp range: 44.26 - 55.28°F\n Tue: Clear throughout the day. Temp range: 36.85 - 47.9°F\n Wed: Partly cloudy starting in the afternoon, continuing until evening. Temp range: 33.23 - 47.93°F\n Thu: Light rain overnight. Temp range: 35.75 - 49.71°F\n Fri: Light rain in the morning and afternoon. Temp range: 45.47 - 57.11°F\n Sat: Drizzle in the morning. Temp range: 43.3 - 62.08°F\n Sun: Clear throughout the day. Temp range: 39.81 - 60.84°F\n\nLicense\n-------\n\nThe code is available under terms of `MIT\nLicense <https://raw.githubusercontent.com/lukaskubis/darkskylib/master/LICENSE>`__\n"
},
{
"alpha_fraction": 0.7193877696990967,
"alphanum_fraction": 0.7193877696990967,
"avg_line_length": 27,
"blob_id": "9e6e5c2a0822c0bb160610ea15abba8104727c43",
"content_id": "c54d018b98b4858e584a3fb3e013c7705ce34059",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 196,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 7,
"path": "/darksky/__init__.py",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "# __init__.py\n\nfrom .forecast import Forecast\n\n\ndef forecast(key, latitude, longitude, time=None, timeout=None, **queries):\n return Forecast(key, latitude, longitude, time, timeout, **queries)\n"
},
{
"alpha_fraction": 0.5583038926124573,
"alphanum_fraction": 0.5731448531150818,
"avg_line_length": 34.375,
"blob_id": "afe3b2fc06793d90dbb2beb08d0383598344d7ef",
"content_id": "316f6d668778ff5a4db1d338327231de973e5ace",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1415,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 40,
"path": "/setup.py",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "import os\nfrom setuptools import setup\n\n# use pandoc to convert\nwith open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst')) as f:\n README = f.read()\n\nsetup(name='darkskylib',\n version='0.3.91',\n description='The Dark Sky API wrapper',\n long_description=README,\n url='https://github.com/lukaskubis/darkskylib',\n author='Lukas Kubis',\n author_email='[email protected]',\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Home Automation',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: OS Independent',\n ],\n keywords='darksky dark-sky dark sky forecast home weather home-weather weather-station',\n packages=['darksky'],\n install_requires=[\n 'future',\n 'requests',\n ],\n test_suite='test',\n zip_safe=True\n )\n"
},
{
"alpha_fraction": 0.6019575595855713,
"alphanum_fraction": 0.603588879108429,
"avg_line_length": 31.263158798217773,
"blob_id": "e2e22c7c8490867b4ce8b6eeeb59364f37fa53f0",
"content_id": "f366e128688b32bcc110db3e63776e37c02f8aea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1839,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 57,
"path": "/darksky/forecast.py",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "# forecast.py\nfrom __future__ import print_function\nfrom builtins import super\n\nimport json\nimport sys\nimport requests\n\nfrom .data import DataPoint\n\n_API_URL = 'https://api.darksky.net/forecast'\n\n\nclass Forecast(DataPoint):\n def __init__(self, key, latitude, longitude, time=None, timeout=None, **queries):\n self._parameters = dict(key=key, latitude=latitude, longitude=longitude, time=time)\n self.refresh(timeout, **queries)\n\n def __setattr__(self, key, value):\n if key in ('_queries', '_parameters', '_data'):\n return object.__setattr__(self, key, value)\n return super().__setattr__(key, value)\n\n def __getattr__(self, key):\n currently = object.__getattribute__(self, 'currently')\n _data = object.__getattribute__(currently, '_data')\n if key in _data.keys():\n return _data[key]\n return object.__getattribute__(self, key)\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n del self\n\n @property\n def url(self):\n time = self._parameters['time']\n timestr = ',{}'.format(time) if time else ''\n uri_format = '{url}/{key}/{latitude},{longitude}{timestr}'\n return uri_format.format(url=_API_URL, timestr=timestr, **self._parameters)\n\n def refresh(self, timeout=None, **queries):\n self._queries = queries\n self.timeout = timeout\n request_params = {\n 'params': self._queries,\n 'headers': {'Accept-Encoding': 'gzip'},\n 'timeout': timeout\n }\n\n response = requests.get(self.url, **request_params)\n self.response_headers = response.headers\n if response.status_code is not 200:\n raise requests.exceptions.HTTPError('Bad response')\n return super().__init__(json.loads(response.text))\n"
},
{
"alpha_fraction": 0.5569155216217041,
"alphanum_fraction": 0.5569155216217041,
"avg_line_length": 25.354839324951172,
"blob_id": "6b0835a8d77249d92ff50363ec644ebbecf25fa6",
"content_id": "1e26725def4bb456567241853a1668f5cbbe1bf5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1634,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 62,
"path": "/darksky/data.py",
"repo_name": "lukaskubis/darkskylib",
"src_encoding": "UTF-8",
"text": "# data.py\n\n\nclass DataPoint(object):\n def __init__(self, data):\n self._data = data\n\n if isinstance(self._data, dict):\n for name, val in self._data.items():\n setattr(self, name, val)\n\n if isinstance(self._data, list):\n setattr(self, 'data', self._data)\n\n def __setattr__(self, name, val):\n def setval(new_val=None):\n return object.__setattr__(self, name, new_val if new_val else val)\n\n # regular value\n if not isinstance(val, (list, dict)) or name == '_data':\n return setval()\n\n # set specific data handlers\n if name in ('alerts', 'flags'):\n return setval(eval(name.capitalize())(val))\n\n # data\n if isinstance(val, list):\n val = [DataPoint(v) if isinstance(v, dict) else v for v in val]\n return setval(val)\n\n # set general data handlers\n setval(DataBlock(val) if 'data' in val.keys() else DataPoint(val))\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __len__(self):\n return len(self._data)\n\n\nclass DataBlock(DataPoint):\n def __iter__(self):\n return self.data.__iter__()\n\n def __getitem__(self, index):\n # keys in darksky API datablocks are always str\n if isinstance(index, str):\n return self._data[index]\n return self.data.__getitem__(index)\n\n def __len__(self):\n return self.data.__len__()\n\n\nclass Flags(DataPoint):\n def __setattr__(self, name, value):\n return object.__setattr__(self, name.replace('-', '_'), value)\n\n\nclass Alerts(DataBlock):\n pass\n"
}
] | 6 |
zetkin/checkmarx | https://github.com/zetkin/checkmarx | a6e73317963daa815d22fd2ba796f2b41f134cf2 | afe675554cbe057fe3ba9c5646651f8641f531bc | 5d99b265c9093958a46a36c648ca2b0d3e3db97d | refs/heads/master | 2023-02-19T07:43:10.716940 | 2021-10-16T09:22:46 | 2021-10-16T09:22:46 | 240,683,627 | 0 | 0 | null | 2020-02-15T10:12:53 | 2021-10-16T09:22:50 | 2023-02-14T22:04:39 | Python | [
{
"alpha_fraction": 0.5874729752540588,
"alphanum_fraction": 0.5907127261161804,
"avg_line_length": 28.870967864990234,
"blob_id": "3dfe252a870fab25efb0b1b1b0115ab6977c3952",
"content_id": "f20c3ebdd24cf3b3e1ea2d65e9ec0173baae7413",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 31,
"path": "/checkmarx/src/checkmarx/types.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\n\nclass Point(namedtuple(\"Point\", [\"x\", \"y\"])):\n def __truediv__(self, other):\n other = _convert_other(other)\n return Point(self.x / other.x, self.y / other.y)\n\n def __mul__(self, other):\n other = _convert_other(other)\n return Point(self.x * other.x, self.y * other.y)\n\n def __sub__(self, other):\n other = _convert_other(other)\n return Point(self.x - other.x, self.y - other.y)\n\n def __add__(self, other):\n other = _convert_other(other)\n return Point(self.x + other.x, self.y + other.y)\n\n\ndef _convert_other(obj):\n if isinstance(obj, (tuple, list)) and len(obj) == 2:\n return Point(obj[0], obj[1])\n if isinstance(obj, (int, float)):\n return Point(obj, obj)\n return obj\n\n\nPolygon = namedtuple(\"Polygon\", [\"topleft\", \"topright\", \"bottomright\", \"bottomleft\"])\nQR = namedtuple(\"QR\", [\"data\", \"polygon\"])\n"
},
{
"alpha_fraction": 0.6811594367027283,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 8.857142448425293,
"blob_id": "0931c98d3451bed08486aecc589057a5311ac761",
"content_id": "fa955efa75eafd2f19a5bd6a73c341a5e9b255e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 7,
"path": "/checkmarx/src/checkmarx/exceptions.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCheckMarx Exceptions\n\"\"\"\n\n\nclass QRNotFound(Exception):\n pass\n"
},
{
"alpha_fraction": 0.6044158339500427,
"alphanum_fraction": 0.650413990020752,
"avg_line_length": 27.605262756347656,
"blob_id": "22a34a370e62fdbc05f350d960e5802e447142d2",
"content_id": "c72cf8a44ff39119517ef4c1094b90e629b8e655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1087,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 38,
"path": "/checkmarx/test/test_main.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "import os\n\nimport pytest\nfrom starlette.status import HTTP_400_BAD_REQUEST\nfrom starlette.testclient import TestClient\n\nfrom checkmarx import main\n\n\[email protected](scope=\"module\")\ndef client():\n return TestClient(main.APP)\n\n\ndef submit_image(client, relpath):\n \"\"\"Submit an image to the ``/scan`` endpoint and return the response.\"\"\"\n dirname = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))\n test_img = os.path.join(dirname, relpath)\n with open(test_img, \"rb\") as f:\n return client.post(\"/scan\", files={\"image\": f})\n\n\ndef test_post_real_questionnaire(client):\n # TODO: Update with appropriate config when implemented.\n response = submit_image(client, \"static/img/2019-11-09 12.37.44.jpg\")\n assert response.json() == {\n \"result\": [\n \"Header\",\n \"17 Feb 20:00-21:30\",\n \"24 Feb 16:00-18:00\",\n \"25 Feb 12:00-13:30\",\n ]\n }\n\n\ndef test_post_unreal_questionnaire(client):\n response = submit_image(client, \"static/img/logo.png\")\n assert response.status_code == HTTP_400_BAD_REQUEST\n"
},
{
"alpha_fraction": 0.6465753316879272,
"alphanum_fraction": 0.6849315166473389,
"avg_line_length": 19.22222137451172,
"blob_id": "ea8f729d4f77fb081841f7d50f909cff6f23265a",
"content_id": "1bdb30113dc6f89dd04831d8b8876f9b7658c68d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 18,
"path": "/sandbox/thresholding.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\nfrom time import sleep\nimport cv2 as cv\n\nwarped = cv.imread(\"forms/feminism.jpg\")\ngray = cv.cvtColor(warped, cv.COLOR_BGR2GRAY)\nclipped = cv.adaptiveThreshold(\n gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 31, 12\n)\n\n\ndef wait():\n while cv.waitKey(0) != 27:\n sleep(0.1)\n cv.destroyAllWindows()\n\n\ncv.imshow(\"Original\", clipped)\nwait()\n"
},
{
"alpha_fraction": 0.7565217614173889,
"alphanum_fraction": 0.7565217614173889,
"avg_line_length": 18.16666603088379,
"blob_id": "c3cce2dec6a85513600bd8e911b236b6adff127a",
"content_id": "007893bf35aaac1e005b28d89ff95c9cd8735713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 6,
"path": "/frontengels/Dockerfile",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "FROM node:slim\nWORKDIR /usr/src/app\nCOPY package.json package-lock.json ./\nRUN npm update\nRUN npm install\nCOPY . .\n"
},
{
"alpha_fraction": 0.6809815764427185,
"alphanum_fraction": 0.7055214643478394,
"avg_line_length": 24.736841201782227,
"blob_id": "5973e80e79d2f1cf67e7ba485af56ef57f56c53c",
"content_id": "df077e51f120f00daabca36ae204a044047c8bbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 19,
"path": "/README.md",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "Document Scanner Stack\n======================\n\nServices:\n * `checkmarx`: Find checked boxes in a document\n * `metadata-server`: Serve example metadata on a document\n * `frontengels`: React app front-end for `checkmarx`\n\n\nUsage:\n\n```\ndocker-compose up -d --build\n```\n\nThen go to ~~`localhost:3000`~~ (front-end currently not working)\n`localhost:5000/docs` and upload an image or submit an HTTP request to\n`localhost:5000/scan` with a form parameter `image` containing the image to\nprocess.\n"
},
{
"alpha_fraction": 0.6274921298027039,
"alphanum_fraction": 0.6684155464172363,
"avg_line_length": 22.219512939453125,
"blob_id": "ce49fff1ee0a9ef7cedb5838ea628011a0f123e8",
"content_id": "3b6d79ae2cc61d47d7e3b8d3c6525a47b68734e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 41,
"path": "/sandbox/qr-scanner-demo.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\nfrom argparse import ArgumentParser\nfrom time import sleep\n\nimport cv2 as cv\nfrom pyzbar import pyzbar\n\nparser = ArgumentParser()\nparser.add_argument(\"--image\", required=True, help=\"input image\")\nparser.add_argument(\"--debug\", action=\"store_true\", help=\"perform extra debug steps\")\nargs = parser.parse_args()\n\n\norig = cv.imread(args.image)\n\n\ndef show(img):\n cv.imshow(\"a\", img)\n while cv.waitKey(0) != 27:\n sleep(0.1)\n cv.destroyAllWindows()\n\n\n# PyZbar\nqr_codes = pyzbar.decode(orig) # Benchmark\nimage = orig.copy()\ncolors = [(255,0,0), (0,255,0), (120,155,120), (0,0,255)]\nfor obj in qr_codes:\n for p, c in zip(obj.polygon, colors):\n cv.circle(image, (p.x, p.y), 1, c, 9)\nshow(image)\n\n\n# OpenCV\nimage = orig.copy()\ndecoder = cv.QRCodeDetector()\ndata, bbox, rectifiedImage = decoder.detectAndDecode(orig)\nfor obj in bbox:\n for p in obj:\n cv.circle(image, tuple(p), 1, (0,255,0), 9)\nshow(image)\nshow(rectifiedImage)\n"
},
{
"alpha_fraction": 0.5522922873497009,
"alphanum_fraction": 0.560171902179718,
"avg_line_length": 21.516128540039062,
"blob_id": "7ea7aeed4dddc07bbad23a5982e70915c045e6a8",
"content_id": "d229b652214371d0a93a2dad2fb572485992fd10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1396,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 62,
"path": "/checkmarx/scripts/annotate.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "import argparse\nimport json\n\nimport cv2 as cv\n\n\ndef show_image(path):\n img = cv.imread(path, cv.IMREAD_UNCHANGED)\n cv.imshow(path, img)\n\n\ndef read_clicks(name, n):\n clicks = []\n\n def fn(event, x, y, flags, param):\n if event == cv.EVENT_LBUTTONUP:\n pos = (x, y)\n clicks.append(pos)\n print(f\"Click {len(clicks)}:\", pos)\n\n cv.setMouseCallback(name, fn)\n\n while len(clicks) < n:\n key = cv.waitKey(1)\n if key in (113, 27):\n exit()\n\n return clicks\n\n\ndef annotation_path(img_path):\n return \".\".join(img_path.split(\".\")[:-1]) + \".json\"\n\n\ndef write_annotation(path, annotation):\n with open(path, \"wt\") as f:\n json.dump(annotation, f, indent=2)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"images\", nargs=\"+\", help=\"Images to annotate\")\n args = parser.parse_args()\n\n for path in args.images:\n print(f\"Annotating '{path}'...\")\n show_image(path)\n print(\"Paper coords\")\n paper_coords = read_clicks(path, 4)\n print(\"QR coords\")\n qr_coords = read_clicks(path, 4)\n cv.destroyAllWindows()\n annotation = {\n \"path\": path,\n \"paper_coords\": paper_coords,\n \"qr_coords\": qr_coords,\n }\n write_annotation(annotation_path(path), annotation)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.581818163394928,
"alphanum_fraction": 0.581818163394928,
"avg_line_length": 23.0625,
"blob_id": "4ec224a877fcd17e26ec75171f8432693c5dd272",
"content_id": "8e60c32473a42fc9b1b2776720e558f96e57cf5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 16,
"path": "/checkmarx/src/checkmarx/utils.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\n\n\ndef exe(cmd, args):\n \"\"\"Execute a command.\"\"\"\n cwd = os.path.dirname(os.path.abspath(__file__))\n try:\n return (\n subprocess.check_output([cmd, *args], stderr=subprocess.PIPE)\n .decode(\"ascii\")\n .strip()\n )\n except (FileNotFoundError, subprocess.CalledProcessError):\n raise\n return None\n"
},
{
"alpha_fraction": 0.6537656784057617,
"alphanum_fraction": 0.6537656784057617,
"avg_line_length": 24.157894134521484,
"blob_id": "b684806539cb5420f64b1107459ce3834b5dffa3",
"content_id": "15e79a39dc49dcd17b8c32c59f48eaa2514e59e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 38,
"path": "/checkmarx/src/checkmarx/config.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDocument Configs\n================\n\nA document config will contain all information pertaining to a\ncheckbox-style questionnaire.\n\"\"\"\nfrom typing import List\n\nimport pydantic\n\nfrom checkmarx.types import Point\n\n\nclass DocumentConfig(pydantic.BaseModel):\n \"\"\"\n All sizes have the format (width, height) and units of mm.\n\n Attributes:\n page_size: Size of the document.\n checkbox_size: Size of a checkbox in the document.\n qr_size: Size of the QR code.\n qr_offset: Offset from the top-left corner of the document to the\n top-left corner of the QR code.\n fields: Questions in the questionnaire.\n \"\"\"\n\n page_size: Point\n checkbox_size: Point\n qr_size: Point\n qr_offset: Point\n checkbox_titles: List[List[str]]\n\n @pydantic.validator(\"*\", pre=True)\n def convert_to_named_tuple(cls, value, field):\n if field.type_ is Point:\n return Point(*value)\n return value\n"
},
{
"alpha_fraction": 0.6351351141929626,
"alphanum_fraction": 0.6756756901741028,
"avg_line_length": 23.66666603088379,
"blob_id": "ea47bea60f1a330dae33dc08875fc6589278ad57",
"content_id": "142314725477cf91a29914bfaee832717e5dd343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 6,
"path": "/metadata-server/Dockerfile",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "FROM python:3.9-slim\n\nCOPY requirements.txt ./\nRUN pip install -r requirements.txt\nCOPY . .\nENTRYPOINT [\"uvicorn\", \"--host\", \"0.0.0.0\", \"main:APP\"]\n"
},
{
"alpha_fraction": 0.6685463786125183,
"alphanum_fraction": 0.6741854548454285,
"avg_line_length": 27.5,
"blob_id": "52a8d5854ba5258a1d55bace2191e660696b30cf",
"content_id": "c3676fcf3181a523050e8bfbd6dc65d36b952a47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1596,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 56,
"path": "/checkmarx/src/checkmarx/main.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "import argparse\nimport io\nimport os\nimport tempfile\nfrom pprint import pprint\nfrom typing import List\n\nimport fastapi\nimport pydantic\nfrom PIL import Image, UnidentifiedImageError\nfrom starlette.status import HTTP_400_BAD_REQUEST\n\nfrom checkmarx import scanner\nfrom checkmarx.exceptions import QRNotFound\n\n\nAPP = fastapi.FastAPI()\n\n\nclass ScanResponse(pydantic.BaseModel):\n checked_boxes: List[str]\n\n\[email protected](\"/scan\", response_model=ScanResponse)\nasync def scan(image: fastapi.UploadFile = fastapi.File(...)):\n \"\"\"Scan a document.\"\"\"\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, os.path.basename(image.filename)) + \".png\"\n\n # TODO: This is slow! We only need to convert to PNG because the qr\n # scanner does not execute correctly for JPEG...\n Image.open(io.BytesIO(await image.read())).save(path)\n\n try:\n result = scanner.main(path, debug=False)\n except UnidentifiedImageError as e:\n msg = \"Unable to open image file: \" + str(e)\n raise fastapi.HTTPException(HTTP_400_BAD_REQUEST,msg) from e\n except QRNotFound as e:\n msg = \"Did not find QR code\"\n raise fastapi.HTTPException(HTTP_400_BAD_REQUEST,msg) from e\n return {\"checked_boxes\": result}\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"image\")\n parser.add_argument(\n \"--debug\", action=\"store_true\", help=\"perform extra debug steps\"\n )\n args = parser.parse_args()\n pprint(scanner.main(args.image, args.debug))\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6203761100769043,
"alphanum_fraction": 0.6371151208877563,
"avg_line_length": 32.365516662597656,
"blob_id": "487521280db75d812b06eadbb612ff3892664a0b",
"content_id": "d394fc3256758441222d1e27a469f59f52c9c70b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4839,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 145,
"path": "/sandbox/egg-heads-scanner.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\nfrom argparse import ArgumentParser\nfrom time import sleep\n\nimport cv2 as cv\nimport numpy as np\n\n\nparser = ArgumentParser()\nparser.add_argument(\"--image\", required=True, help=\"input image\")\nparser.add_argument(\"--debug\", action=\"store_true\", help=\"perform extra debug steps\")\nargs = parser.parse_args()\n\n\ndef wait():\n while cv.waitKey(0) != 27:\n sleep(0.1)\n cv.destroyAllWindows()\n\n\ndef draw_contour(img, contour):\n \"\"\"Debug function for showing a contour on an image.\"\"\"\n copy = img.copy()\n cv.drawContours(copy, [contour], -1, (0, 255, 0), 2)\n cv.imshow(\"Contour\", copy)\n wait()\n\n\ndef edge_detect(img):\n \"\"\"Return a single channel image depicting edged as high-intensity areas.\"\"\"\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n return cv.Canny(gray, 75, 200)\n\n\ndef locate_document_contour(img):\n \"\"\"Find a document-like contour in an edged image.\"\"\"\n cnts, _ = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv.contourArea, reverse=True)[:5] # 5 largest contours\n\n # loop over the contours\n for c in cnts:\n # approximate the contour\n peri = cv.arcLength(c, True)\n approx = cv.approxPolyDP(c, 0.02 * peri, True)\n\n # if our approximated contour has four points, then we\n # can assume that we have found our screen\n\n # TODO: We should prioritise 4 but also accept up to 8. We should construct\n # ... an algorithm based on the lenght and point-count. We can also assume\n # ... that the predominant color will be white.\n if len(approx) == 4:\n return approx\n\n\ndef order_points(pts):\n # initialzie a list of coordinates that will be ordered\n # such that the first entry in the list is the top-left,\n # the second entry is the top-right, the third is the\n # bottom-right, and the fourth is the bottom-left\n import ipdb ; ipdb.set_trace()\n rect = np.zeros((4, 2), dtype = \"float32\")\n\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = pts.sum(axis = 1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = pts[np.argmax(s)]\n\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(pts, axis = 1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n\n # return the ordered coordinates\n return rect\n\n\ndef four_point_transform(image, pts):\n # obtain a consistent order of the points and unpack them\n # individually\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\")\n\n # compute the perspective transform matrix and then apply it\n M = cv.getPerspectiveTransform(rect, dst)\n warped = cv.warpPerspective(image, M, (maxWidth, maxHeight))\n\n # return the warped image\n return warped\n\n\nif __name__ == \"__main__\":\n image = cv.imread(args.image)\n orig = image.copy()\n edged = edge_detect(image)\n\n if args.debug:\n cv.imshow(\"A\", edged)\n wait()\n\n contour = locate_document_contour(edged)\n if contour is None:\n raise Exception(\"ERR\")\n\n if args.debug:\n draw_contour(image, contour)\n\n warped = four_point_transform(orig, contour.reshape(4, 2))\n\n # convert the warped image to grayscale, then threshold it\n warped = cv.cvtColor(warped, cv.COLOR_BGR2GRAY)\n _, warped = cv.threshold(warped, 200, 0, cv.THRESH_TRUNC)\n\n # show the original and scanned images\n cv.imshow(\"Original\", orig)\n cv.imshow(\"Scanned\", warped)\n wait()\n"
},
{
"alpha_fraction": 0.6256827712059021,
"alphanum_fraction": 0.6404204964637756,
"avg_line_length": 32.11603927612305,
"blob_id": "1e9d9ab6076dbdae0d83e63b80d422e2c0a98a98",
"content_id": "c659953a6a53528430d2d981730fb635b5950ee3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9703,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 293,
"path": "/checkmarx/src/checkmarx/scanner.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "import re\n\nimport cv2 as cv\nimport numpy as np\nimport requests\nfrom PIL import Image\n\nfrom checkmarx import utils\nfrom checkmarx.config import DocumentConfig\nfrom checkmarx.exceptions import QRNotFound\nfrom checkmarx.types import Point, Polygon, QR\n\nCORNER_PATTERN = \"\".join(r\" (\\((?P<x%s>\\d+),(?P<y%s>\\d+)\\))\" % (i, i) for i in range(4))\nQUIRC_RE = re.compile(\n r\"corners:(?P<corners>%s).+Payload: (?P<data>.+)\" % CORNER_PATTERN,\n flags=re.DOTALL,\n)\n\n\ndef get_single_qr(img_path):\n output = utils.exe(\"./qrtest\", (\"-v\", \"-d\", img_path))\n match = QUIRC_RE.search(output)\n # TODO: Raise error if multiple found\n if match:\n corners = [Point(*map(int, match.group(f\"x{i}\", f\"y{i}\"))) for i in range(4)]\n\n # TODO: Replace with real URL when ready\n data = match.group(\"data\")\n data = \"http://metadata-server:8000/feminism-handout\"\n\n return QR(data, Polygon(*corners))\n raise QRNotFound\n\n\ndef fetch_config(url):\n \"\"\"Given a QR decoded url, return the doc config.\"\"\"\n return DocumentConfig.parse_obj(requests.get(url).json())\n\n\ndef draw_contour(img, contour):\n \"\"\"Debug function for showing a contour on an image.\"\"\"\n copy = img.copy()\n cv.drawContours(copy, contour, -1, (0, 255, 0), 2)\n imshow(copy)\n\n\ndef four_point_transform(image, pts):\n tl, tr, br, bl = pts\n rect = pts.astype(\"float32\")\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array(\n [[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]],\n dtype=\"float32\",\n )\n\n # compute the perspective transform matrix and then apply it\n M = cv.getPerspectiveTransform(rect, dst)\n warped = cv.warpPerspective(image, M, (maxWidth, maxHeight))\n\n # return the warped image\n return warped\n\n\ndef locate_document(\n qr_coords: Polygon,\n page_size: Point,\n qr_size: Point,\n qr_offset: Point,\n):\n \"\"\"Based on finding a single QR code in the image, return estimated\n document coordinates.\n \"\"\"\n i_hat = np.array(qr_coords.topright - qr_coords.topleft) / qr_size.x\n j_hat = np.array(qr_coords.bottomleft - qr_coords.topleft) / qr_size.y\n # `A` is the transformation matrix for change of basis\n A = np.c_[i_hat, j_hat]\n\n topleft_mm = -np.array(qr_offset)\n topright_mm = topleft_mm + [page_size.x, 0]\n bottomleft_mm = topleft_mm + [0, page_size.y]\n bottomright_mm = topleft_mm + page_size\n\n topleft = A.dot(topleft_mm) + qr_coords.topleft\n topright = A.dot(topright_mm) + qr_coords.topleft\n bottomleft = A.dot(bottomleft_mm) + qr_coords.topleft\n bottomright = A.dot(bottomright_mm) + qr_coords.topleft\n\n return (topleft, topright, bottomright, bottomleft)\n\n\ndef calculate_filter_blocksize(doc_width):\n \"\"\"Dynamically approximate a good blocksize for adaptive thresholding\n based on the size of a document.\n\n Args:\n doc_width: document width in pixels\n \"\"\"\n blocksize = int(doc_width / 80)\n return max(blocksize + blocksize % 2 - 1, 1) # Odd and at least 1\n\n\ndef centrepoint(contour):\n \"\"\"Given an arbitrary contour, return the centrepoint (x, y).\"\"\"\n return contour.mean(axis=(0, 1))\n\n\ndef shrink_countour(contour, shrinkage, shape):\n \"\"\"Shrink a contour by a given amount.\n\n Method:\n 1. Subtract the centre x/y from the coordinates.\n 2. Multiply by shrinkage.\n 3. 
Add centre x/y.\n \"\"\"\n M = cv.moments(contour)\n cx = M[\"m10\"] / M[\"m00\"]\n cy = M[\"m01\"] / M[\"m00\"]\n centre = np.array((cx, cy)).reshape(1, 1, -1)\n new_contour = (contour - centre) * shrinkage + centre\n # new_contour[:, 0, 0] = new_contour[:, 0, 0].clip(0, shape[0])\n # new_contour[:, 0, 1] = new_contour[:, 0, 1].clip(0, shape[1])\n return new_contour\n\n\ndef clip_image(img):\n \"\"\"Convert an image to grayscale, then threshold it.\"\"\"\n blocksize = calculate_filter_blocksize(img.shape[1])\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n return cv.adaptiveThreshold(\n gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, blocksize, 12\n )\n\n\ndef has_area(contour, area, threshold):\n \"\"\"Return whether a contour has an area within a given threshold.\"\"\"\n max_area = area * (1 + threshold)\n min_area = area * (1 - threshold)\n return min_area < cv.contourArea(contour) < max_area\n\n\ndef four_sided(contour):\n \"\"\"Verrrry approximate function for whether a contour is four-sided.\"\"\"\n perimeter = cv.arcLength(contour, closed=True)\n return 2 < len(cv.approxPolyDP(contour, 0.02 * perimeter, closed=True)) < 6\n\n\ndef within_aspect_ratio(contour, aspect_ratio, threshold):\n \"\"\"Return whether a contour has an aspect ratio within a given threshold.\"\"\"\n _x, _y, w, h = cv.boundingRect(contour)\n return aspect_ratio * (1 - threshold) < w / h < aspect_ratio * (1 + threshold)\n\n\ndef group_columns(boxes, threshold_px):\n \"\"\"Group boxes by their vertical alignment within a given threshold.\"\"\"\n pass\n\n\ndef minimum_rows(boxes, minimum):\n \"\"\"Filter away columns of boxes which have fewer than `minimum` rows.\"\"\"\n pass\n\n\ndef locate_rectangles(img, area, aspect_ratio, threshold=0.40):\n \"\"\"Locate all rectangles in an image that have an area which falls\n within +/- :arg:`threshold` percent of :arg:`area`.\"\"\"\n contours, _ = cv.findContours(img.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n\n for f in (\n lambda x: has_area(x, area, threshold),\n four_sided,\n lambda x: within_aspect_ratio(x, aspect_ratio, threshold),\n ):\n contours = filter(f, contours)\n\n return contours\n\n\ndef get_checkboxes(img, page_size, checkbox_size):\n \"\"\"Return a list of checkboxes, ordered by vertical location in ``img``\"\"\"\n # TODO: Perform appropriate column grouping using n-columns from checkbox_titles\n # Get checkbox area in pixels^2\n sf = img.shape[0] / page_size[1] # px/mm\n area_px = np.prod(checkbox_size) * sf ** 2\n # TODO: Also filter by aspect ratio\n aspect_ratio = checkbox_size[0] / checkbox_size[1]\n boxes = locate_rectangles(img, area_px, aspect_ratio)\n boxes = sorted(boxes, key=lambda x: centrepoint(x)[1]) # Sort by highest on page\n return [\n [shrink_countour(b, 0.9, img.shape[::-1]).round().astype(np.int64)]\n for b in boxes\n ]\n\n\ndef percentage_colored(img, contour):\n \"\"\"Return the percentage of contour within a binary image which is colored.\"\"\"\n mask = np.zeros(img.shape, dtype=\"uint8\")\n cv.drawContours(mask, [contour], -1, 1, -1) # Draw filled contour on mask\n area = (mask > 0).sum()\n extracted = cv.bitwise_and(img, mask)\n # TODO: We could multiply by a gaussian kernel here to give greater weight\n # to the central pixels\n return 1 - extracted.sum() / area\n\n\ndef checked_contours(img, contours, threshold):\n \"\"\"Find rectangles which have been checked.\n\n Args:\n img: Image. 
This should be clipped to {0,1} values.\n contours: Contours to extract.\n threshold: Percentage of colored pixels to determine whether a check\n box is colored. E.g. 0.07 -> if more than 7% of the check box is\n colored, the box is considered checked.\n \"\"\"\n color = [\n [percentage_colored(img, c) for c in contour_columns]\n for contour_columns in contours\n ]\n return [[c > threshold for c in color_columns] for color_columns in color]\n\n\ndef imshow(img):\n import matplotlib.pyplot as plt\n\n plt.imshow(img)\n plt.show()\n\n\ndef main(image_path, debug):\n image_pil = Image.open(image_path)\n image = np.array(image_pil)\n\n qr_obj = get_single_qr(image_path)\n\n if debug:\n print(\"QR location:\", qr_obj.polygon)\n draw_contour(image, np.array(qr_obj.polygon).astype(\"int32\").reshape(4, 1, 2))\n\n config = fetch_config(qr_obj.data)\n\n contour = locate_document(\n qr_obj.polygon,\n config.page_size,\n config.qr_size,\n config.qr_offset,\n )\n contour_np = np.array(contour).astype(\"int32\")\n if debug:\n print(\"Document location:\", contour)\n draw_contour(image, contour_np.reshape(4, 1, 2))\n\n image = four_point_transform(image, contour_np)\n if debug:\n imshow(image)\n\n clipped = clip_image(image)\n if debug:\n imshow(clipped)\n\n checkboxes = get_checkboxes(clipped, config.page_size, config.checkbox_size)\n if debug:\n print(f\"Found {len(checkboxes)} check boxes\")\n for contour in checkboxes:\n draw_contour(image, contour)\n\n checked = checked_contours(clipped, checkboxes, threshold=0.01)\n if checked:\n titles = np.array(config.checkbox_titles)\n checked = np.array(checked)\n result = list(titles[: checked.shape[0], :][checked])\n else:\n result = []\n\n return result\n"
},
{
"alpha_fraction": 0.5836791396141052,
"alphanum_fraction": 0.6141079068183899,
"avg_line_length": 24.785715103149414,
"blob_id": "c5b0251dfce854ac20997adbfa9a00bb2615c423",
"content_id": "95e6d9cf0c17a88df319be155abe39970d89c166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 28,
"path": "/checkmarx/setup.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDoc Scanner Setup\n=================\n\"\"\"\nfrom setuptools import find_packages, setup\n\nEXTRAS_REQUIRE = {\n \"lint\": [\"black==20.8b1\", \"isort==5.8.0\", \"pylint==2.7.4\"],\n \"test\": [\"pytest==6.2.3\", \"pytest-cov==2.11.1\"],\n}\n\nwith open(\"requirements.txt\") as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"checkmarx\",\n version=\"0.1.0\",\n description=\"Document OMR\",\n license=\"Proprietary\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n python_requires=\"~=3.9\",\n install_requires=requirements,\n tests_require=EXTRAS_REQUIRE[\"test\"],\n extras_require=EXTRAS_REQUIRE,\n zip_safe=True,\n entry_points={\"console_scripts\": [\"checkmarx = checkmarx.main:main\"]},\n)\n\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.622390866279602,
"avg_line_length": 17.785715103149414,
"blob_id": "d5c174f49a6f3a111c43ca6abe3e40bee8a1f528",
"content_id": "be8093c9444b92fa9062576ec32e9598b57afdd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 28,
"path": "/checkmarx/src/checkmarx/scanner_test.py",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np\n\nimport scanner\nfrom checkmarx.types import Point\n\n\ndef test_rotation():\n a = Point(0, 1)\n b = Point(2, 0)\n response = scanner.rotation(a, b)\n expected = 2.0 / np.sqrt(5.0)\n np.testing.assert_equal(expected, response)\n\n\ndef test_get_angle():\n topleft = Point(1.0, 1.0)\n topright = Point(2.0, 0.0)\n response = scanner.get_angle(topleft, topright)\n np.testing.assert_close(response, 45)\n\n\ndef main():\n test_rotation()\n test_get_angle()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6715542674064636,
"alphanum_fraction": 0.6862170100212097,
"avg_line_length": 21.733333587646484,
"blob_id": "f94910837ffcf5e1e0a6c3af81a7ff41d9f44ed8",
"content_id": "ef5cf8f296fe6495261c48199c84b4a997bab381",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 15,
"path": "/checkmarx/Dockerfile",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "FROM python:3.9-slim\n\nWORKDIR /usr/src/app\n\n# build-essential is required for gcc and relevant headers\nRUN apt update -qq && apt install -yq --no-install-recommends \\\n build-essential \\\n libglib2.0-0 \\\n libjpeg-dev \\\n libpng-dev\n\nCOPY requirements.txt ./\nRUN pip install -r requirements.txt\nCOPY . .\nRUN pip install -e .\n"
},
{
"alpha_fraction": 0.5254237055778503,
"alphanum_fraction": 0.7175140976905823,
"avg_line_length": 16.700000762939453,
"blob_id": "6cb9cd8111531589b15bca7d7d30ef9b5c2d3aed",
"content_id": "6b0ec8d2100c699bbc69288a263d7955e60ba129",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 10,
"path": "/checkmarx/requirements.txt",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "fastapi==0.65.2\ngunicorn==20.1.0\nnumpy~=1.20.2\nopencv-python-headless==4.5.1.48\nPillow==8.3.2\npydantic==1.8.2\npython-multipart==0.0.5\nrequests\nstarlette==0.13.6\nuvicorn==0.13.4\n"
},
{
"alpha_fraction": 0.7031086087226868,
"alphanum_fraction": 0.7170799970626831,
"avg_line_length": 30.811111450195312,
"blob_id": "78d2486654d3b3eed8fc5c7bcae5303b5f253b67",
"content_id": "fcef1a121f6909f71bc70561bf3455e1a3105863",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2863,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 90,
"path": "/checkmarx/README.md",
"repo_name": "zetkin/checkmarx",
"src_encoding": "UTF-8",
"text": "\n\nA prototype document scanner for finding checkboxes in questionnaires and\ndetermining which of them have been marxed.\n\nA QR code will be used to define characteristics of the questionnaire (likely\nvia an online resource rather than directly encoded in the QR code), such as:\n * Document size in mm (e.g. `(210, 297)` for A4)\n * Checkbox sizes in mm\n * QR code size in mm\n * QR code position offset in mm\n * Checkmark titles\n\nThis information will be used to locate the checkboxes and determine which have\nbeen marxed and their corresponding fields.\n\n\nRequirements\n------------\n\nThis project relies heavily on a good QR code detector. The polygon output of\nthe QR code is used to infer the document's coordinates, which in turn is used\nto infer checkbox sizes (in pixels, the sizes in mm must be defined).\n\nAt the moment [`quirc`](https://github.com/dlbeer/quirc/) is used which is\nwritten in C. A pre-compiled program has been committed to this repository\nwhich works in the docker setup defined. If a local setup is required, pull\nthe `quirc` repo and compile the `qrtest` program then either copy it or link\nto it from this directory.\n\n\nUsage\n-----\n\nSimply: `checkmarx [-h] --image IMAGE [--debug]`\n\nIf the `--debug` flag is used, extra information will be visualised during\nthe processing / inference stages.\n\n\nImplementation Details\n----------------------\n\n### Processing Flow\n\nThe entire processing flow occurs as follows:\n 1. Find a QR code using a system call to `quirc`\n 2. Fetch the document config from the QR code message\n 3. Infer the document shape (in pixels) based on the size of the QR polygon,\n and the details from the document config\n 4. Extract and threshold the document from the whole image to produce a single\n channel binary image\n 5. Collect all checkboxes in the document by searching for contours which\n match the stated size of the checkboxes from the config (these are sorted\n by vertical position)\n 6. Determine which boxes are marxed based on whether they have over a certain\n percentage of black pixels\n 7. Return an array of marxed boxes, sorted in descending order\n 8. Lose chains\n\n\n### QR Code Data\n\nThe QR code should encode a URL which can be used to fetch a JSON object\ncontaining all document information:\n\n```json\n{\n \"page_size\": [210, 297],\n \"checkbox_size\": [12, 10],\n \"qr_size\": [24, 24],\n \"qr_offset\": = [14, 14],\n \"checkbox_titles\": [\n [\"Is this a questionnaire?\"],\n [\"The seminar does a good job integrating.\"],\n [\"I made new professional contacts.\"],\n [\"One final question.\"]\n ]\n}\n```\n\n\nFurther Resources\n-----------------\n\nhttps://github.com/dlbeer/quirc/\n\nhttps://docs.opencv.org/3.4/da/d6e/tutorial_py_geometric_transformations.html\n\nhttps://docs.opencv.org/3.4/d7/d4d/tutorial_py_thresholding.html\n"
}
] | 19 |
jhz-shaokejia/topic_segmenter | https://github.com/jhz-shaokejia/topic_segmenter | a204fba907a26b4079a09180a1b23298d5daa8c1 | a6823672ce887121069a1b9acc98458745368b16 | d903c53d58d45dff87d42986c60f5aa5cc9a1823 | refs/heads/master | 2021-01-20T03:39:54.754774 | 2017-04-27T07:36:00 | 2017-04-27T07:36:00 | 89,570,177 | 0 | 0 | null | 2017-04-27T07:43:06 | 2017-04-27T03:48:41 | 2016-12-04T06:21:20 | null | [
{
"alpha_fraction": 0.6127674579620361,
"alphanum_fraction": 0.6131182312965393,
"avg_line_length": 34.1728401184082,
"blob_id": "2c1a3816b708b42c8678d26b26ac9e9638655122",
"content_id": "0c8b6aef36e46b7e1259c08faca6a24394a79769",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2851,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 81,
"path": "/visualization/TopicQueue.py",
"repo_name": "jhz-shaokejia/topic_segmenter",
"src_encoding": "UTF-8",
"text": "\nfrom Queue import PriorityQueue\nfrom collections import namedtuple\n\n\n\nEntry = namedtuple('Entry', ['priority', 'wordset', 'id'])\n\n\nclass TopicCover():\n \"\"\"docstring for TopicQueue\"\"\"\n\n def __init__(self, messages):\n \"\"\" Generates a TopicCover class for a set of messages\n Inputs:\n messages: the message dictionary: { id: text, ... }\n \"\"\"\n self.pq = PriorityQueue()\n self.cover = []\n self.words_in_cover = set()\n\n # add message dictionary and process all messages (add to priority queue)\n self.message_corpus = messages\n # TODO: process messages prior to ingestion\n for msg_id in self.message_corpus.iterkeys():\n self.add_entry(msg_id)\n\n\n @staticmethod\n def get_priority(wordset):\n \"\"\" Returns the priority of a given wordset of a message \"\"\"\n return -len(wordset)\n\n\n def add_entry(self, message_id):\n \"\"\" Add a message to the topic queue \"\"\"\n message_words = set(self.message_corpus[message_id].split())\n entry = Entry(priority=self.get_priority(message_words), wordset=message_words, id=message_id)\n self.pq.put( entry )\n\n\n def update_entry(self, entry):\n \"\"\" Updates the priority and wodset of an entry based on the actual cover \"\"\"\n new_words = entry.wordset.difference(self.words_in_cover)\n return Entry(priority=self.get_priority(new_words), wordset=new_words, id=entry.id)\n\n\n def stop_updating(self, entry):\n \"\"\" Checks if the priority queue has entries with potentially lower priority \"\"\"\n try:\n return self.pq.queue[0].priority >= entry.priority\n except IndexError:\n # In case the priority queue is empty return True\n return True\n\n\n def increment_cover(self):\n \"\"\" Update cover with one more message \"\"\"\n while True:\n # get the best entry and update it to ge the new priority and wordset\n popped_entry = self.pq.get()\n new_entry = self.update_entry(popped_entry)\n if self.stop_updating(new_entry):\n break\n else:\n # if it is not the best entry, put back in the priority queue\n self.pq.put( new_entry )\n\n # add to cover, and record words\n self.cover.append( self.message_corpus[new_entry.id] )\n self.words_in_cover.update( new_entry.wordset )\n\n\n def get_cover(self, min_messages):\n \"\"\" Returns the N-message cover, computed with a greedy algorithm \"\"\"\n if len(self.message_corpus) < min_messages:\n raise ValueError('Cannot obtain a cover bigger than the message corpus!')\n\n if len(self.cover) < min_messages:\n while len(self.cover) <= min_messages:\n self.increment_cover()\n return self.cover[:min_messages]\n\n"
},
{
"alpha_fraction": 0.5970923900604248,
"alphanum_fraction": 0.6003807783126831,
"avg_line_length": 31.094444274902344,
"blob_id": "f9d276a15edc8329c103d277d8227cbe14848c1c",
"content_id": "b18cee91075454b6ce33b7a291c6a4da38d8cc95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5778,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 180,
"path": "/visualization/corpus.py",
"repo_name": "jhz-shaokejia/topic_segmenter",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport sys\nimport warnings\nfrom functools import wraps\nfrom collections import defaultdict\n\nimport re\nimport cPickle as pk\nimport pandas as pd\n\nfrom nltk.corpus import stopwords as NLTKstopwords\nfrom gensim import corpora\n\n\n\ndef load_corpus(pickle_file):\n \"\"\" Returns the Corpus instance \"\"\"\n with open(pickle_file, 'rb') as f:\n return pk.load(f)\n\n\n\nclass Corpus(object):\n \"\"\" Corpus object, to be used for Models such as TFIDF and word2vec / GloVe \"\"\"\n\n def __init__(self, stopwords='NLTK', punctuation='.,!`', user_string='<u\\w*>', output=None):\n \"\"\" Note - Pass an iterable to stopwords to override NLTKs stopwords \"\"\"\n super(Corpus, self).__init__()\n self.STOPWORDS = stopwords\n self.PUNCTUATION = punctuation\n self.USER_STRING = user_string\n\n # corpus storage\n self.pickle_path = output\n\n # Initialize to None\n self.documents = None\n self.dictionary = None\n self.corpus = None\n\n\n def check_init(func):\n \"\"\" Wraps function with a initialized documents checker \"\"\"\n @wraps(func)\n def _func(*args):\n if self.documents is not None:\n warnings.warn('Corpus must be processed first')\n # return None\n else:\n return func(*args)\n\n return _func\n\n\n ### PROPERTIES -----------------------------------\n\n @property\n def STOPWORDS(self):\n \"\"\" Set of stopwords. These words are not incorporated into the corpus dictionary \"\"\"\n return self.__STOPWORDS\n\n @STOPWORDS.setter\n def STOPWORDS(self, stopwords):\n if stopwords != 'NLTK':\n self.__STOPWORDS = set(NLTKstopwords.words('english')) # from NLTK\n else:\n # Override NLTKs stopword list\n self.__STOPWORDS = set(stopwords)\n\n @property\n def USER_STRING(self):\n \"\"\" Set of punctuation symbols to be removed. These symbols are removed from the documents \"\"\"\n return self.__USER_STRING\n\n @USER_STRING.getter\n def USER_STRING(self):\n return self.__USER_STRING.pattern # resolves to the pattern compiled, not the compilation\n\n @USER_STRING.setter\n def USER_STRING(self, user_string):\n if isinstance(user_string, str): # check if it is not precompiled\n self.__USER_STRING = re.compile(user_string)\n else:\n self.__USER_STRING = user_string\n\n\n ### LOADERS -----------------------------------\n\n def from_topic_table(self, topics_table):\n \"\"\" Generate the set of documents from the topic_table list \"\"\"\n grouped = topics_table[['topic', 'text']].groupby('topic')\n docs = grouped.agg(lambda x: ' '.join( map(lambda x: x.encode('ascii', 'replace'), x) ))\n\n self.documents = docs.values.flatten()\n\n\n def from_topic_table_csv(self, path_to_table):\n \"\"\" Reads the csv file and then calls `from_topics_table` \"\"\"\n topics_table = pd.read_csv(path_to_table, encoding='UTF-8', index_col=0)\n self.from_topic_table(topics_table)\n\n\n ### PROCESSORS -----------------------------------\n\n def remove_punct(self, doc):\n \"\"\" Removes each of the symbols specified in the PUNCTUTATION property \"\"\"\n return reduce( lambda d, s: d.replace(s, ''), [doc,] + list(self.PUNCTUATION) )\n\n\n def remove_usernames(self, doc):\n \"\"\" Removes the usernames from a document, according to the specified in the PUNCTUTATION property \"\"\"\n return self.__USER_STRING.sub('', doc)\n\n\n def get_full_remover(self):\n \"\"\" Returns the current punctuation + username remover \"\"\"\n def remover(document):\n \"\"\" Removes punctuation marks from documents \"\"\"\n return reduce( lambda d, f: f(d), [document, self.remove_punct, self.remove_usernames] )\n return remover\n\n\n 
@check_init\n def process(self):\n \"\"\" Processes the entire corpus \"\"\"\n docs = reduce( lambda x, f: map(f, x), [self.documents, self.remove_punct, self.remove_usernames] )\n\n # Remove stopwords and tokenize\n texts = map( lambda doc: [word for word in doc.lower().split() if word not in STOPWORDS], docs )\n\n # Generate a dictionary with the term frequency\n frequency = defaultdict(int)\n for text in texts:\n for token in text:\n frequency[token] += 1\n\n # remove words that appear only once\n texts = map( lambda text: filter(lambda token: frequency[token] > 1, text), texts )\n\n # Generate dictionary of terms\n self.dictionary = corpora.Dictionary(texts)\n # dictionary.save('/tmp/deerwester.dict') # store the dictionary, for future reference\n\n # Finally, generate the corpus\n self.corpus = map( self.dictionary.doc2bow, texts )\n\n\n def store_corpus(self, pickle_path=None, verbose=False):\n \"\"\" Stores the corpus as a pickle file \"\"\"\n if pickle_path is not None:\n output = pickle_path\n else:\n output = self.pickle_path\n\n # Check existence of the output folder and create if necessary\n out_folder = '/'.join(output.split('/')[:-1])\n if not os.path.exists(out_folder):\n print(' - The specified folder was not found... folder will be created: \\033[36m{}\\033[0m'.format(out_folder))\n os.makedirs(out_folder)\n\n # Save corpus\n with open(output, 'wb') as f:\n pk.dump(self, f)\n\n if verbose:\n print(' -- Saved pickle file to: {}'.fortmat(output))\n\n\n\ndef main():\n the_corpus = Corpus()\n the_corpus.from_topic_table_csv(sys.argv[1])\n\n # Store if the output path was specified\n if len(sys.argv) > 2:\n the_corpus.store_corpus(sys.argv[2])\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5561811923980713,
"alphanum_fraction": 0.5636037588119507,
"avg_line_length": 35.175926208496094,
"blob_id": "6a8f2332ecd13362fc9d1aa0776dc96f2dabb427",
"content_id": "5d9620b3e83f60f11690449bf5815f0d2ba037cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3907,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 108,
"path": "/run_segmenter.py",
"repo_name": "jhz-shaokejia/topic_segmenter",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport math\nimport pandas as pd\n\nfrom text.Message import Message\nfrom grammar.MessageTokenizer import MessageTokenizer\nfrom segmenter.ConversationSegmenter import ConversationSegmenter\nfrom text.JSONParser import JSONParser\n\nclass SegmenterRunner:\n def __init__(self, json_file_name, output_folder=None):\n self.json_file_name = json_file_name\n self.output_folder = output_folder\n self.topics_table = None\n\n\n def run(self):\n parser = JSONParser(self.json_file_name)\n self.messages = parser.getMessages()\n self.tokenizer = MessageTokenizer()\n windowSize = 3\n cosineSimilarityThreshold = 0.8\n segmenter = ConversationSegmenter(\n self.messages, windowSize, cosineSimilarityThreshold, self.tokenizer)\n self.topics = segmenter.segment()\n\n self.build_table()\n\n if self.output_folder is not None:\n self.report_table()\n else:\n self.report()\n\n\n def build_table(self):\n \"\"\" Builds a table for each of topics \"\"\"\n TOTAL_TOPICS = len(self.topics)\n\n for i, topic in enumerate(self.topics):\n\n ## TODO: get-topic-name\n topic_name = 'topic-{num:0{l}}'.format(num=i, l=int(math.floor(math.log10(TOTAL_TOPICS))) + 1)\n\n _messages = topic.getMessages()\n _reasons = topic.getReasons()\n\n topic_table = pd.DataFrame({'ID': map(lambda m: m.getID(), _messages),\n 'text': map(lambda m: m.getText(), _messages),\n 'reason': _reasons })\n topic_table['topic'] = topic_name\n\n # append to list of topic-tables\n if self.topics_table is not None:\n # merge topic into table\n self.topics_table = self.topics_table.append(topic_table, ignore_index=True)\n else:\n self.topics_table = topic_table\n\n\n def report_table(self):\n # Check existence of the output folder and create if necessary\n if not os.path.exists(self.output_folder):\n print(' - The specified folder was not found... folder will be created: \\033[36m{}\\033[0m'.format(self.output_folder))\n os.makedirs(self.output_folder)\n\n # Parse output path/file name and save table to topics_CHANNEL.csv\n filename = self.json_file_name.split('/')[-1].replace('.json', '')\n folderpath = self.output_folder[:-1] if self.output_folder.endswith('/') else self.output_folder\n out_path = '{path}/topics_{name}.csv'.format(path=folderpath, name=filename)\n self.topics_table.to_csv(out_path, encoding='utf-8')\n\n # Report output table\n print(' --> Output Topic table: \\033[32m {} \\033[0m'.format(out_path))\n\n\n def report(self):\n idGroups = []\n print(\"============================= detailed ========================\")\n for topic in self.topics:\n print(\"== Topic ==\")\n idGroup = []\n for (message, reason) in zip(topic.getMessages(), topic.getReasons()):\n idGroup.append(message.getID())\n print(\"\\n\\t------ id: \\t\" + str(message.getID()) + \"\\t\" + reason)\n print(\"\" + message.getText())\n print(\"\\n\")\n idGroups.append(idGroup)\n\n print(\"===============================\")\n\n print(\"============================= short ========================\")\n for topic in self.topics:\n print(\"== Topic ==\")\n for message in topic.getMessages():\n print(str(message.getID()) + \":\\t\" + message.getText())\n print(\"\\n\")\n\n print(idGroups)\n\n\n\ndef main(json_input, output_folder=None):\n SegmenterRunner(json_input, output_folder).run()\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:]) # optionally might include a output_folder specification\n"
},
{
"alpha_fraction": 0.7587064504623413,
"alphanum_fraction": 0.7587064504623413,
"avg_line_length": 31.040000915527344,
"blob_id": "46dfc9333151f032575fa760f88804877fdaf629",
"content_id": "87f40d29660eb34fd36ad882ddf97e51c9ada3b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 25,
"path": "/config/install_config.md",
"repo_name": "jhz-shaokejia/topic_segmenter",
"src_encoding": "UTF-8",
"text": "# Topic Segmenter Installation\n\n\n## Environment\n\nUsage of a virtualenvironment is recommended (either through conda or virtualenv). \n\nAll the package specification can be found in the `config/` folder.\n\n**With conda** create a exact copy of the environment by running `conda env create -f config/nlp-environment.yml` \n\n**With virtualenv (or without virtual environments at all)** just use pip `pip install -r config/nlp-requirements.txt`\n\n\n## NLTK dependencies\n\nThe following NLTK resources need to be downloaded in order to run the topic-segmenter (open `python` run `import nltk` and then run `nltk.download()` for the NLTK resource downloader window to appear):\n\nFrom the *All packages*\n\n* `averaged_perceptron_tagger`\n* `punkt`\n* `universal_tagset`\n\n* (in some occasions, additionally) `wordnet`\n\n\n\n"
}
] | 4 |
Nootencorp/techdegree-project-1 | https://github.com/Nootencorp/techdegree-project-1 | db07e57b50d08711a0a39c2e947a03f34d7fe144 | 4e92775cadb8490baab3da98bc08b5ff409127b9 | 0c5a5643dd819f9785925f8c642aad996978335c | refs/heads/master | 2020-04-03T00:24:20.110947 | 2018-10-30T20:09:02 | 2018-10-30T20:09:02 | 154,875,751 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5340078473091125,
"alphanum_fraction": 0.5446880459785461,
"avg_line_length": 33.21154022216797,
"blob_id": "9ca2aeee34783f013bb275733ffdea211e010921",
"content_id": "1c887d2b522a9ce16fae71048ecf8c50108bcb45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1779,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 52,
"path": "/guessing_game.py",
"repo_name": "Nootencorp/techdegree-project-1",
"src_encoding": "UTF-8",
"text": "# Dear Treehouse Grader,\n#\tThank you for taking the time to review my project. As you can see, I am shooting for the \"Exceeds\" score.\n# However, if my project only \"Meets\" requirements, that is okay. In this case, please do not reject my project.\n# Thanks again. Happy grading!\n# -Jeremy\n\nimport random\nimport sys\n\n\nattempt_history = []\n\n\n\ndef start_game():\n print(\"\\nWelcome to the Number Guessing Game!\\n\")\n number = random.randint(1,10)\n guess = ()\n attempts = 0\n\n while guess != number:\n try:\n guess = input(\"Pick a number between 1 and 10: \")\n guess = int(guess)\n\n if guess > 10:\n raise ValueError\n elif guess < 1:\n raise ValueError\n attempts += 1\n if guess > number:\n print(\"It's lower!\")\n elif guess < number:\n print(\"It's higher!\")\n elif guess == number:\n print(\"You got it! It took you {} tries.\".format(attempts))\n play_again = input(\"\\nWould you like to play again? [y]es/[n]o: \")\n if play_again.lower() == \"y\":\n attempt_history.append(attempts)\n # asceding sort method from stackoverflow.com\n # https://stackoverflow.com/questions/9758959/sort-a-list-of-numerical-strings-in-ascending-order\n attempt_history.sort(key=int)\n print(\"\\nTHE HIGHSCORE IS {}\".format(attempt_history[0]))\n start_game()\n else:\n sys.exit(\"\\nClosing game, thank you for playing!!!\")\n except ValueError:\n print(\"\\nOh no, that is not a valid input! Please try again.\")\n\n\nif __name__ == '__main__':\n start_game()\n"
}
] | 1 |
spking/p-skitakall | https://github.com/spking/p-skitakall | aecaeeda5f1b921f2cca08deefd55e3f1940d131 | 709c6157de7632bcb977cc01cb6bb033eb1e2a82 | b684f03e61e0821f9d837a742cfe678940bc546f | refs/heads/master | 2022-12-08T12:21:59.523039 | 2020-09-01T14:09:15 | 2020-09-01T14:09:15 | 234,331,543 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5322580933570862,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 11,
"blob_id": "32819bc982c3baba3b34375da8aedfc2c8afd9a1",
"content_id": "35d751c61b3beea0c2bb2a5674f31ccaccb13621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 5,
"path": "/main.py",
"repo_name": "spking/p-skitakall",
"src_encoding": "UTF-8",
"text": "# Sölvi Scheving Pálsson #\n# 04/02/2020 #\n\nimport botprofile\nimport classes\n\n\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7172011733055115,
"avg_line_length": 25.384614944458008,
"blob_id": "364e0089946b34658fd325b27fbd755143cc0c32",
"content_id": "d1813f61cff8c0de86f19a838281004671dfaf99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 13,
"path": "/PM/ProjectPlan.md",
"repo_name": "spking/p-skitakall",
"src_encoding": "UTF-8",
"text": "__List of project goals__\n\n- [ ] Finish class python file\n- [ ] Finish main python file\n- [ ] Finalize functionality within Python Shell\n- [ ] Start work on GUI with tkinter or pygame\n- [ ] Finalize functionality in executable form\n- [ ] Develop local multiplayer\n- [ ] Develop online multiplayer\n\n__Credits__\n\nCreator: Sölvi \"ban1c\" Scheving\n"
},
{
"alpha_fraction": 0.7470641136169434,
"alphanum_fraction": 0.7520325183868408,
"avg_line_length": 68.1875,
"blob_id": "3d07bbae85ba7fbcc2c01d88a42d41a3e811cd4a",
"content_id": "5be466c6b58cf36a6f4725fcd8215091568609a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2214,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 32,
"path": "/README.md",
"repo_name": "spking/p-skitakall",
"src_encoding": "UTF-8",
"text": "__Description__\n\nClassic Shit-man game.\n\n__Game Rules__\n\n52 card deck; 2+ players. Each player starts with 9 cards, three of them are face down and inaccessible, three are laid face up \non top of the three face down cards and are also inaccessible. The remaining three cards are on hand and are accessible.\n\nThe player with the lowest card that isn't a Speciality Card starts by placing a card on hand. If two or more players both have the lowest card, they follow\nthe hierarchy of sorts. The hierarchy of sorts are, in descending order: Hearts, Spades, Diamonds and Clubs. The low card that has the highest sort starts.\nThe player to the left must then place a card that is equal to or higher on top of the placed deck and pull a card from the remaining deck,\nand the player to the left does the same, and the next player and so forth. If the player doesn't have a card equal to or higher the top of the placed deck\nthe player can take a \"risk\", by playing the top card from the remaining deck, without seeing what it is, if the risk card is equal to or higher then \nthe top of the placed deck the game proceeds as usual, if it is lower the player picks up the placed deck. Once the remaining deck is finished each player finishes their hand.\nOnce their hand is finished the 3 cards facing up become available and can be placed. Once the three cards facing up are finished the three cards\nfacing down become available. The player who finishes their cards wins. \n\nPlacing conditions:\nIf the same number card is placed in all sorts in a row the placed deck is \"blown up\" and gets thrown into the dead pile.\nIf a player has the same number card on hand in a different sort when it isn't their go; that player can \"shoot-in\" that card and skip the players waiting behind, this effect can not be used with the final six cards on hand.\n\n__Speciality Cards__\n\nSpeciality Cards can be placed on any card.\n\n| Card | Effect |\n|----------|:-------------:|\n| 2 | Resets the deck |\n| 5 | The next card must be lower then 5 |\n| 9 | Is transparent, the card beneath is still in effect |\n| 10| Blows up the deck, the placed deck is thrown out and becomes a dead pile. The player who played the ten places a card.|\n"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.5047619342803955,
"avg_line_length": 14,
"blob_id": "fb10ea9ec532fde998deea90a263dcca96fe61ab",
"content_id": "39a6321fc6cf11d5c56c9e00a713cc181b4059f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 7,
"path": "/botprofile.py",
"repo_name": "spking/p-skitakall",
"src_encoding": "UTF-8",
"text": "# Solvi Scheving Pálsson#\n# Bot logic file #\n# 03/02/2019 #\n\nclass Bot:\n def __init__(self, d):\n self.deck = d\n"
},
{
"alpha_fraction": 0.5447761416435242,
"alphanum_fraction": 0.56965172290802,
"avg_line_length": 17.272727966308594,
"blob_id": "f671944d5bb896574249b7b6b0fbfd1d2132c723",
"content_id": "aa54dd25bf4cf4042cb249170a2b4d354cd398e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 22,
"path": "/classes.py",
"repo_name": "spking/p-skitakall",
"src_encoding": "UTF-8",
"text": "# Skítakall - Classes Skrá #\n# Sölvi Scheving Pálsson #\n# 2. Febrúar 2020 #\nimport random\nimport time\nfrom termcolor import cprint\n\n\nclass Deck:\n def __init__(self, t, n):\n self.tegund = t\n self.numer = n\n\n def __str__(self):\n return self.tegund + \",\" + str(self.numer)\n\n\ndef createdeck(t, listi):\n for x in range(1, 14):\n t1 = Deck(t, x)\n listi.append(t1)\n return listi\n"
}
] | 5 |
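The starting-player rule in the game rules above (lowest non-Speciality card starts; ties broken by the suit hierarchy Hearts > Spades > Diamonds > Clubs) maps naturally onto a sort key. A minimal sketch, independent of the repo's `Deck` class; the `(number, suit)` tuples are hypothetical stand-ins:

```python
# Pick the starting card: lowest number wins, ties go to the higher suit.
SUIT_RANK = {"Hearts": 0, "Spades": 1, "Diamonds": 2, "Clubs": 3}
SPECIALITY = {2, 5, 9, 10}  # speciality cards cannot start, per the rules

def starting_card(cards):
    """cards: iterable of (number, suit) tuples, e.g. (3, 'Hearts')."""
    playable = [c for c in cards if c[0] not in SPECIALITY]
    return min(playable, key=lambda card: (card[0], SUIT_RANK[card[1]]))

print(starting_card([(3, "Clubs"), (3, "Hearts"), (7, "Spades")]))  # (3, 'Hearts')
```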
ankit96/flockathon-utility | https://github.com/ankit96/flockathon-utility | f2f6a45ff3d5696afa8120a85c4f9ac28e6bbb91 | c71499588b5cff4f96a7f3f4ca687b7f8da8c28e | 5260bdd8b6ccd8e54c1d616d166b462a509d56c1 | refs/heads/master | 2022-07-25T13:26:37.933335 | 2019-10-21T17:17:25 | 2019-10-21T17:17:25 | 216,624,518 | 1 | 0 | null | 2019-10-21T17:19:55 | 2019-10-21T17:20:47 | 2022-07-06T20:20:52 | Python | [
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.7432712316513062,
"avg_line_length": 39.16666793823242,
"blob_id": "9ea9090ac48bbe4d126b85f301ca554492fc8e92",
"content_id": "d87e43cb6148b144714d9daa3a94b74710a31fbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 483,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 12,
"path": "/README.md",
"repo_name": "ankit96/flockathon-utility",
"src_encoding": "UTF-8",
"text": "# Flockathon-utility\n--------------\nIt is a Utility code for flockathon \n\n\n##### What it does\n- Users populate the specific youtube channel with their own playlist.The objective of this code is to delete all playlist at midnight\n\n\n##### Why a new Repo then ?\n- This code needs another active dyno and heroku provides only one dynos for one app.So created 2 apps which are sharing a common postgresDB. \n Also due to use of heroku cli had to create this 2 different git repositories\n\n"
},
{
"alpha_fraction": 0.7558139562606812,
"alphanum_fraction": 0.7744185924530029,
"avg_line_length": 21.578947067260742,
"blob_id": "836db03763f356868cc4f33ccedd59f950e22ccb",
"content_id": "554b387796e2aebd42dda7e3817c75b17d8b2c98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 19,
"path": "/clock.py",
"repo_name": "ankit96/flockathon-utility",
"src_encoding": "UTF-8",
"text": "from apscheduler.schedulers.blocking import BlockingScheduler\nimport os\nimport subprocess\nimport psycopg2\nimport urlparse\nfrom deleteplaylist import deleteall\nimport logging\nlogging.basicConfig()\nsched = BlockingScheduler()\n\[email protected]_job('cron', day_of_week='mon-sun', hour=18, minute=31)\n#@sched.scheduled_job('interval', hours=7 , minutes=52)\ndef scheduled_job():\n\t\n\tdeleteall()\n \n#scheduled_job()dsd\n\nsched.start()\n\n"
}
] | 2 |
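The README above says playlists should be wiped at midnight, while `clock.py` currently carries a test schedule (`hour=18, minute=31`). A minimal sketch of the midnight variant, assuming the same `deleteall` helper from `deleteplaylist`:

```python
# Schedule the cleanup for 00:00 every day with APScheduler's cron trigger.
from apscheduler.schedulers.blocking import BlockingScheduler
from deleteplaylist import deleteall  # helper shipped with this repo

sched = BlockingScheduler()

@sched.scheduled_job('cron', hour=0, minute=0)  # midnight, scheduler-local time
def nightly_cleanup():
    deleteall()

sched.start()
```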
sjquant/sanic-redis | https://github.com/sjquant/sanic-redis | 593f3a71faa3cad5f29d03c303953db8487c4d4a | adc9ff527910f6dcd5abc173bba3aa6b127175d2 | 7e40281aea19223a0128316627eb778838afa10b | refs/heads/master | 2020-07-28T07:00:15.483054 | 2019-09-14T02:42:33 | 2019-09-14T02:42:33 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5362318754196167,
"alphanum_fraction": 0.5532822012901306,
"avg_line_length": 17.619047164916992,
"blob_id": "ae35f6ecc82ad4285fc446fd5594c07f38456558",
"content_id": "5bde40bbbd58f8345c3b3c0da213643d93e4954b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1173,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 63,
"path": "/README.md",
"repo_name": "sjquant/sanic-redis",
"src_encoding": "UTF-8",
"text": "sanic-redis\n==============\nRedis support for sanic.\n\nBuilt on top of [aioredis](https://github.com/aio-libs/aioredis).\n\nInstallation\n------------\n\nYou can install this package as usual with pip:\n\n pip install sanic-redis\n\nExample\n\n```python\nfrom sanic import Sanic\nfrom sanic.response import text\nfrom sanic_redis import SanicRedis\n\napp = Sanic(__name__)\napp.config.update(\n {\n 'REDIS': {\n 'address': ('127.0.0.1', 6379),\n # 'db': 0,\n # 'password': 'password',\n # 'ssl': None,\n # 'encoding': None,\n # 'minsize': 1,\n # 'maxsize': 10\n }\n }\n)\n\n\nredis = SanicRedis(app)\n \n \[email protected]('/test1')\nasync def test1(request):\n with await redis.conn as r:\n await r.set('key', 'value1')\n result = await r.get('key')\n return text(result)\n\n\[email protected]('/test2')\nasync def test2(request):\n with await request.app.redis as r:\n await r.set('key', 'value2')\n result = await r.get('key')\n return text(result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n```\n\nResources\n---------\n\n- [PyPI](https://pypi.python.org/pypi/sanic-redis)\n"
},
{
"alpha_fraction": 0.5641025900840759,
"alphanum_fraction": 0.6025640964508057,
"avg_line_length": 14.600000381469727,
"blob_id": "ae1896b5e18bd3a61bc9a78329a156faa676325c",
"content_id": "b65765ed9fb6b77fd106849db8e0b029ba932a80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 5,
"path": "/sanic_redis/__init__.py",
"repo_name": "sjquant/sanic-redis",
"src_encoding": "UTF-8",
"text": "from .core import SanicRedis\n\n\n__all__ = ['SanicRedis']\n__version__ = '0.1.0'\n"
},
{
"alpha_fraction": 0.6046175956726074,
"alphanum_fraction": 0.6161616444587708,
"avg_line_length": 30.5,
"blob_id": "5da8b329d2d692b334b9db53fc4b3e6209152f23",
"content_id": "ab0ff1a60cbcee55679d794fd56b8cdc35c41721",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 22,
"path": "/setup.py",
"repo_name": "sjquant/sanic-redis",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\n\nsetup(\n name='sanic-redis',\n version='0.1.1',\n description='Adds redis support to sanic .',\n long_description='sanic-redis is a sanic framework extension which adds support for the redis.',\n url='https://github.com/strahe/sanic-redis',\n author='strahe',\n license='MIT',\n packages=['sanic_redis'],\n install_requires=('sanic', 'aioredis'),\n zip_safe=False,\n keywords=['sanic', 'redis', 'aioredis'],\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet :: WWW/HTTP :: Session',\n ],\n)\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 14.55555534362793,
"blob_id": "6361fe455f7e11eb8bf56b9dece2eb7467b845c2",
"content_id": "ed902f96fdcd6ba51eb769652d95c6a1cadd7fab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 140,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "sjquant/sanic-redis",
"src_encoding": "UTF-8",
"text": "aiofiles==0.3.1\naioredis==1.0.0\nasync-timeout==2.0.0\nhiredis==0.2.0\nhttptools==0.0.9\nsanic==0.5.4\nujson==1.35\nuvloop==0.8.0\nwebsockets==3.3\n"
},
{
"alpha_fraction": 0.5215146541595459,
"alphanum_fraction": 0.5215146541595459,
"avg_line_length": 30.405405044555664,
"blob_id": "eaf83152e869d575ee54fe38c0999d09e5b3a25d",
"content_id": "2ee148b63b36d6ccda422bd8f4b09fe65627a811",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 37,
"path": "/sanic_redis/core.py",
"repo_name": "sjquant/sanic-redis",
"src_encoding": "UTF-8",
"text": "from sanic import Sanic\nfrom aioredis import create_redis_pool\n\n\nclass SanicRedis:\n def __init__(self, app: Sanic=None, redis_config: dict=None):\n self.app = app\n self.config = redis_config\n self.conn = None\n\n if app:\n self.init_app(app=app)\n\n def init_app(self, app: Sanic, redis_config: dict=None):\n self.app = app\n self.config = redis_config\n\n @app.listener('before_server_start')\n async def aio_redis_configure(_app, loop):\n _c = dict(loop=loop)\n if self.config:\n config = self.config\n else:\n config = _app.config.get('REDIS')\n for key in ['address', 'db', 'password', 'ssl', 'encoding', 'minsize',\n 'maxsize', 'timeout']:\n if key in config:\n _c.update({key: config.get(key)})\n _redis = await create_redis_pool(**_c)\n\n _app.redis = _redis\n self.conn = _redis\n\n @app.listener('after_server_stop')\n async def close_redis(_app, _loop):\n _app.redis.close()\n await _app.redis.wait_closed()\n"
}
] | 5 |
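One path `core.py` supports that the README does not show: passing the Redis settings straight to the constructor via `redis_config` instead of going through `app.config['REDIS']`. A minimal sketch, reusing the keys from the README example:

```python
# Configure SanicRedis directly with the redis_config parameter.
from sanic import Sanic
from sanic.response import text
from sanic_redis import SanicRedis

app = Sanic(__name__)
redis = SanicRedis(app, redis_config={'address': ('127.0.0.1', 6379), 'db': 0})

@app.route('/ping')
async def ping(request):
    with await redis.conn as r:
        await r.set('key', 'pong')
        return text(await r.get('key'))

if __name__ == '__main__':
    app.run(debug=True)
```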
Manonmani-PL/Manga-faceNet | https://github.com/Manonmani-PL/Manga-faceNet | d1b61f529e77a5406266cb416e578bf91e7b6410 | 436da98014822062bec10d5fdea589c0a39fc7fe | 9df3becfd3af937f3e79503fbdce446694d40e9c | refs/heads/master | 2022-11-12T19:17:35.757002 | 2020-06-20T15:50:04 | 2020-06-20T15:50:04 | 273,722,841 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.638424813747406,
"alphanum_fraction": 0.7088305354118347,
"avg_line_length": 54.86666488647461,
"blob_id": "ad7f0dc214a0aceae2252a24bcab5db47cf91a1b",
"content_id": "d3011bbe34ef0fe786f8893ba8a97f42e4a40657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 15,
"path": "/README.md",
"repo_name": "Manonmani-PL/Manga-faceNet",
"src_encoding": "UTF-8",
"text": "# 1.Manga FaceNet\n The purpose of the project is used to identify face in manga pages, so that it will be helpful to search the particular character by recognizing manga face\n This code is just implementation of [Manga FaceNet](https://www.cs.ccu.edu.tw/~wtchu/papers/2017ICMR-chu2.pdf)\n \n ## Architecture\n * Apply selective search algorithm to manga pages and then\n * Feeding that candidate regions to Manga FaceNet(CNN)\n * Result will be predicted as Face or Not\n\n## Accuracy and Loss of Manga FaceNet\n \n \"Epoch 111/111\\n\",\n \"500/500 [==============================] - 204s 409ms/step - loss: 0.0078 - accuracy: 0.9974 - val_loss: 0.0012 - val_accuracy: 0.9964\\n\"\n\n1. This FaceNet model is trained with 13999 images of face region and 10000 images of not face region, validated with 3000 of face and not face region\n"
},
{
"alpha_fraction": 0.46485260128974915,
"alphanum_fraction": 0.4812925159931183,
"avg_line_length": 29.807018280029297,
"blob_id": "c14efc49af2c29aea6dbf381c0e724851ec876cb",
"content_id": "22e8ca540df8c5c75e3ae56f93a1c553e3af3dbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1764,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 57,
"path": "/code/not_face_xmlparse.py",
"repo_name": "Manonmani-PL/Manga-faceNet",
"src_encoding": "UTF-8",
"text": "import xml.etree.ElementTree as ET\nimport cv2\nimport numpy\nimport os.path\nfrom os import walk\nfile_count = 6608\nchapter_name = \"HarukaRefrain\"\ntree = ET.parse('data_set/xml/'+chapter_name+'.xml')\nroot = tree.getroot()\n\nfor pages in root.findall('pages'):\n\n for page in pages:\n page_number = page.get('index')\n file_name = str(page_number)\n len_fname = len(file_name)\n\n if len_fname == 1:\n image_name = str('00') + file_name + \".jpg\"\n elif len_fname == 3:\n image_name = file_name + \".jpg\"\n else:\n image_name = str('0') + file_name + \".jpg\"\n\n image_path = \"data_set/images/\" + chapter_name + \"/\" + image_name\n count = 0\n for body in page:\n if body.tag == \"body\":\n count = count +1\n xmin = body.get('xmin')\n ymin = body.get('ymin')\n xmax = body.get('xmax')\n ymax = body.get('ymax')\n xmn = int(xmin)\n xmx = int(xmax)\n ymn = int(ymin)\n ymx = int(ymax)\n img1 = cv2.imread(image_path, 0)\n\n cropped = img1[ymn:ymx, xmn:xmx]\n\n #rectangle = cv2.rectangle(img1, (xmn, ymn), (xmx, ymx), (255, 0, 0), 2)\n\n file_name = image_name.replace(\".jpg\", \"\")\n #filename is image number\n #save_crop to file_crop\n file_crop = file_name+\"-\"+str(count)+\".jpg\"\n\n file_count = file_count + 1\n save_path = \"data_set/not_face/\"+str(file_count)+\".jpg\"\n\n\n cv2.imwrite(save_path, cropped)\n\n #cv2.imshow(\"show\",rectangle)\n #k = cv2.waitKey(0)\n #exit()\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5463784337043762,
"alphanum_fraction": 0.5624538064002991,
"avg_line_length": 28.736263275146484,
"blob_id": "5ecc82115e750d6a48cdcf539c4d4bad5601b369",
"content_id": "9f8c7c26d3f05f12ede8cf446c17d9017ba80da3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5412,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 182,
"path": "/code/selective_search_algorithm.py",
"repo_name": "Manonmani-PL/Manga-faceNet",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Force matplotlib to not use any Xwindows backend.\n\nimport matplotlib\n# matplotlib.use('Agg')\n\n# import glob\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy\nimport os.path\nfrom PIL import Image\nimport selectivesearch\nimport cv2\n#imported all the necessary libraries\n# MANO\n\n# from selectivesearch import *\n\n\nSELECTIVESEARCH_SCALE = 100 # 255.0*3 # 1 ~ 255 ?\nSELECTIVESEARCH_SIGMA = 2.2 # Gaussian filter\nSELECTIVESEARCH_MIN_SIZE = 10\nDELETE_SIMILR_INCLUDE = True\nimage_path = ''\nif image_path != \" \":\n\n def main(image_path):\n # pan =0\n\n input_file_name = os.path.basename(image_path)\n dir_page = os.path.basename(\n os.path.dirname(image_path))\n #preprocessing the image\n image_array = pre_process(image_path)\n #selective search algo\n candidates = selective(image_path)\n\n # draw rectangles on the original image\n # iamge = cv2.imread(image_path)\n # image = cv2.rectangle(image, start_point, end_point, color, thickness)\n fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))\n\n ax.imshow(image_array)\n # print(candidates)\n fi = 0\n #result of ss algo, these are all the coordinates of the objects found\n for (x, y, w, h) in candidates:\n # mano\n fi = fi + 1\n #converting image into numpy array\n image123 = cv2.imread(image_path)\n #cropping the objects which is given by ssearch\n #cropping to feed into neural network\n crop = image123[y:y + h, x:x + w]\n file_dir = input_file_name.strip('.jpg')\n\n # save_crop = \"data_set/result/\" + dir_page + \"/\" + file_dir + \"/\" + str(fi) + \".jpg\"\n # dir_sc = \"data_set/result/\" + dir_page\n # dir_fc = os.path.dirname(save_crop)\n # #creating folder if it is not present and saving the cropped images\n # if not os.path.exists(dir_sc):\n # os.mkdir(dir_sc)\n # if not os.path.exists(dir_fc):\n # os.mkdir(dir_fc)\n # cv2.imwrite(save_crop, crop)\n # # cv2.waitKey(0)\n\n #drawing bounding box\n rect = mpatches.Rectangle(\n (x, y), w, h, fill=False, edgecolor='red', linewidth=1)\n #adding that region\n if x > 0 and y > 0 and y > h and x > w:\n ax.add_patch(rect)\n\n delete_similr = \"__delete_similr\" if DELETE_SIMILR_INCLUDE else ''\n #saving the cropped file\n out_file = \"data_set/result/\" + dir_page + \"/\" + input_file_name\n out_all_dir = os.path.dirname(out_file)\n #creating directory if thet is not available\n if not os.path.exists(out_all_dir):\n os.mkdir(out_all_dir)\n\n plt.savefig(out_file)\n#preprocess\ndef pre_process(image_path):\n resize1 = (256, 256)\n image = cv2.imread(image_path)\n image = cv2.resize(image, resize1)\n image_array = numpy.asarray(image)\n\n return image_array\n\n\ndef selective(image_path):\n # loading lena image\n\n image_array = pre_process(image_path)\n #applying all the diversification strategies | basically ssearch\n img_lbl, regions = selectivesearch.selective_search(\n image_array,\n scale=SELECTIVESEARCH_SCALE,\n sigma=SELECTIVESEARCH_SIGMA,\n min_size=SELECTIVESEARCH_MIN_SIZE\n )\n\n candidates = set()\n\n pan = 0\n for r in regions:\n\n x, y, w, h = r['rect']\n\n # excluding same rectangle (with different segments)\n if r['rect'] in candidates:\n # if x > 0 and y > 0 and y > h and x > w:\n # small regions\n continue\n # excluding regions smaller than 2000 pixels\n\n if r['size'] < 30 * 30:\n continue\n # distorted rects\n\n x, y, w, h = r['rect']\n\n if h > 0 and w > 0:\n if w / h > 3 or h / w > 3:\n # small regions folder in e drive with name x greater 3\n continue\n candidates.add(r['rect'])\n\n 
return post_process(candidates)\n\n\ndef post_process(candidates):\n pan = 0\n if not DELETE_SIMILR_INCLUDE:\n return candidates\n\n # print(len(candidates))\n\n filterd_candidates = candidates.copy()\n\n for c in candidates:\n # print(c)\n\n x, y, w, h = c\n\n for _x, _y, _w, _h in candidates:\n if x == _x and y == _y and w == _w and h == _h:\n continue\n\n if abs(x - _x) < 10 and \\\n abs(y - _y) < 10 and \\\n w * h - _w * _h < 30 * 30 and \\\n w * h - _w * _h > 0:\n # print(\"delete\")\n filterd_candidates.discard((_x, _y, _w, _h))\n\n # print(len(filterd_candidates))\n #filterd candidates are the ROI\n return filterd_candidates\n\n\ndef delete_min_size(candidates):\n filterd_candidates = candidates.copy()\n\n#path for the images ie dataset\nif __name__ == \"__main__\":\n list_path = []\n for i in range(1, 55):\n if i < 10:\n image_path = \"data_set/images/KimiHaBokuNoTaiyouDa/0\" + str(0) + str(i) + \".jpg\"\n else:\n image_path = \"data_set/images/KimiHaBokuNoTaiyouDa/\" + str(0) + str(i) + \".jpg\"\n\n main(image_path)\n list_path.append(image_path)\n # image_path = \"data_set/images/YasasiiAkuma/011.jpg\"\n # main(image_path)\n"
},
{
"alpha_fraction": 0.461497038602829,
"alphanum_fraction": 0.47711363434791565,
"avg_line_length": 28.822580337524414,
"blob_id": "82069e8427b56b8538da1816f68590bf2c4ad561",
"content_id": "ac53993f29fa943da2637dde9de0a0b2e7fb40f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1857,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 62,
"path": "/code/xml_parse.py",
"repo_name": "Manonmani-PL/Manga-faceNet",
"src_encoding": "UTF-8",
"text": "import xml.etree.ElementTree as ET\nimport cv2\nimport numpy\nimport os.path\nfrom os import walk\nfile_count = 6095\nchapter_name = \"Arisa\"\ntree = ET.parse('data_set/xml/'+chapter_name+'.xml')\nroot = tree.getroot()\n\nfor pages in root.findall('pages'):\n\n for page in pages:\n # for face in page:\n # print(face.attrib)\n # print(face.get('xmin'))\n # exit()\n page_number = page.get('index')\n file_name = str(page_number)\n len_fname = len(file_name)\n\n if len_fname == 1:\n image_name = str('00')+file_name+\".jpg\"\n elif len_fname == 3:\n image_name = file_name+\".jpg\"\n else:\n image_name = str('0')+file_name+\".jpg\"\n\n image_path = \"data_set/images/\"+chapter_name+\"/\"+image_name\n\n count = 0\n for face in page:\n if face.tag == \"face\":\n count = count +1\n xmin = face.get('xmin')\n ymin = face.get('ymin')\n xmax = face.get('xmax')\n ymax = face.get('ymax')\n xmn = int(xmin)\n xmx = int(xmax)\n ymn = int(ymin)\n ymx = int(ymax)\n img1 = cv2.imread(image_path, 0)\n\n cropped = img1[ymn:ymx, xmn:xmx]\n\n #rectangle = cv2.rectangle(img1, (xmn, ymn), (xmx, ymx), (255, 0, 0), 2)\n\n file_name = image_name.replace(\".jpg\", \"\")\n #filename is image number\n #save_crop to file_crop\n file_crop = file_name+\"-\"+str(count)+\".jpg\"\n\n file_count = file_count + 1\n save_path = \"data_set/face/\"+str(file_count)+\".jpg\"\n\n\n cv2.imwrite(save_path, cropped)\n\n #cv2.imshow(\"show\",rectangle)\n #k = cv2.waitKey(0)\n #exit()\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6030116081237793,
"alphanum_fraction": 0.6228610277175903,
"avg_line_length": 38.486488342285156,
"blob_id": "8daea74ca062a621c5fad713f4c292cdd8a36579",
"content_id": "932c65ac789d73f596a1ad5555d0c9762f7e689a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1461,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 37,
"path": "/code/haar_manga_face.py",
"repo_name": "Manonmani-PL/Manga-faceNet",
"src_encoding": "UTF-8",
"text": "import cv2\nimport sys\nimport os.path\n#filename is the picture\ndef detect(filename, cascade_file = \"/home/user/PycharmProjects/manga_face/lbpcascade_animeface.xml\"):\n#checking whether that xml exist\n if not os.path.isfile(cascade_file):\n raise RuntimeError(\"%s: not found\" % cascade_file)\n\n cascade = cv2.CascadeClassifier(cascade_file)\n #reading the image\n image = cv2.imread(filename, cv2.IMREAD_COLOR)\n #converting the image to gray scale image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #equalizing the histogram to improve the contrast\n gray = cv2.equalizeHist(gray)\n #detecting the face\n faces = cascade.detectMultiScale(gray,\n # detector options\n scaleFactor = 1.1,\n minNeighbors = 5,\n minSize = (24, 24))\n #taking the top bottom left right from the list of face\n for (x, y, w, h) in faces:\n #drawing bounding box\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n #popping a window of detecting faces\n cv2.imshow(\"AnimeFaceDetect\", image)\n cv2.waitKey(0)\n #saving the same\n cv2.imwrite(\"/home/user/PycharmProjects/manga_face/jing.jpg\", image)\n#check for passing arguments\nif len(sys.argv) != 2:\n sys.stderr.write(\"usage: detect.py <filename>\\n\")\n sys.exit(-1)\n#passing image to the function\ndetect(sys.argv[1])\n"
}
] | 5 |
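The pipeline in the README above (selective search proposals fed to a face/not-face CNN) reduces to a short loop. A minimal sketch reusing the `selectivesearch` parameters and, in spirit, the region filters from `selective_search_algorithm.py`; `facenet_predict` is a hypothetical stand-in for the trained classifier:

```python
# Propose regions with selective search, filter implausible boxes, classify crops.
import cv2
import numpy as np
import selectivesearch

def face_candidates(image_path, facenet_predict):
    img = cv2.resize(cv2.imread(image_path), (256, 256))
    _, regions = selectivesearch.selective_search(
        np.asarray(img), scale=100, sigma=2.2, min_size=10)  # repo's settings
    faces = []
    for (x, y, w, h) in {r['rect'] for r in regions}:
        # size and aspect-ratio filters modeled on the repo's selective()
        if w == 0 or h == 0 or w * h < 30 * 30 or w / h > 3 or h / w > 3:
            continue
        crop = img[y:y + h, x:x + w]
        if facenet_predict(crop):  # hypothetical: True when the CNN says "face"
            faces.append((x, y, w, h))
    return faces
```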
fandemonium/rumen_mapping | https://github.com/fandemonium/rumen_mapping | d8c35e856c0dd9c1ee7ac80cf08f7a8a1a7bbea7 | 1f043823679a97a4d0722853a1d1a3a9a202f77a | 07f32e8aa0e59f708293713fb9ce8ab18d9cee8c | refs/heads/master | 2020-05-09T19:41:41.702864 | 2019-04-21T18:42:56 | 2019-04-21T18:42:56 | 181,385,708 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5729537606239319,
"alphanum_fraction": 0.6049821972846985,
"avg_line_length": 30.22222137451172,
"blob_id": "5c23ccc5816ba14b678abe2191120fd943c01309",
"content_id": "eb983a4e1ccb2a1c004cc8e776e7cca6daf55660",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 9,
"path": "/scripts/ena_download.py",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "import sys\n\nwith open(sys.argv[1]) as f:\n\tnext(f)\n\tfor lines in f:\n\t\tline = lines.strip().split(\"\\t\")\n\t\tena_id = line[0].replace(\"000000\", \"\")\n\t\tsub_dir = ena_id[:2].lower()\n\t\tprint \"curl -O http://ftp.ebi.ac.uk/pub/databases/ena/wgs/public/\" + sub_dir + \"/\" + ena_id +\".fasta.gz\"\n"
},
{
"alpha_fraction": 0.6699850559234619,
"alphanum_fraction": 0.6814335584640503,
"avg_line_length": 33.620689392089844,
"blob_id": "20030477f9a289ac408e3ca5b11c00c0c51017be",
"content_id": "d2758a4976bd5a095640f4c5d6213ffd1b696ada",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2009,
"license_type": "permissive",
"max_line_length": 155,
"num_lines": 58,
"path": "/scripts/eval_diff_coverage_cutoff.R",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "suppressMessages(library(ggplot2))\nsuppressMessages(library(DESeq2))\nsuppressMessages(library(phyloseq))\nsuppressMessages(library(reshape2))\n\nargs <- commandArgs(TRUE)\n\ncoverage <- readRDS(args[1])\n#coverage <- readRDS( \"merged_coverage_w_sample_info.RDS\")\n\n# create the gene information table\ncov.gene <- coverage[, c(\"group_id\", \"protein_id\", \"rmg_genome\", \"contig\", \"gene_start\", \"gene_end\")]\n# dereplicate\ncov.gene <- cov.gene[! duplicated(cov.gene), ]\n# make the protein_id the row.names\nrow.names(cov.gene) <- cov.gene$group_id\n\n# create the simplified sample information\ncov.si <- coverage[, c(\"sample_name\", \"PicoGreen_Conc\", \"Metadata1\", \"Clustering\", \"sample_id\", \"time\")]\ncov.si <- cov.si[! duplicated(cov.si), ]\n# make the sample_name the row.names\nrow.names(cov.si) <- cov.si$sample_name\n\n\n# subset for different cut offs\nfor (i in seq(0.4, 0.9, 0.1)){\n\tprint(i)\n\tcov <- subset(coverage, coverage >= i)\n\t# prepare for phyloseq object\n\t# create the wide count table\n\tcov.wide <- dcast(cov[, c(\"sample_name\", \"group_id\", \"read_count\")], group_id ~ sample_name, value.var = \"read_count\")\n\tcov.wide[is.na(cov.wide)] <- 0\n\t# make group_id the row.names\n\trow.names(cov.wide) <- cov.wide$group_id\n\t# remove the group_id column\n\tcov.wide$group_id <- NULL\n\n\t# make the phyloseq object\n\tcov.phy <- phyloseq(otu_table(as.matrix(cov.wide), taxa_are_rows=T), tax_table(as.matrix(cov.gene)))\n\tsample_data(cov.phy) <- cov.si\n\tprint(\"plot mds!!!\")\n\tpdf(paste0(\"plots/cov_\", i, \"_mds.pdf\"))\n\tp1 <- plot_ordination(cov.phy, ordinate(cov.phy, \"MDS\"), color = \"Clustering\", shape=\"Metadata1\", label=\"sample_name\") + geom_point(size = 3) + theme_bw()\n\tprint(p1)\n\tdev.off()\n\n\t# deseq2\n\tcov.diagdds <- phyloseq_to_deseq2(cov.phy, ~ Metadata1)\n\t# use parametric to estimate dispersion\n\tfor (j in c(\"local\", \"parametric\")){\n\t\tprint(j)\n\t\tcov.deseq <- DESeq(cov.diagdds, test=\"Wald\", fitType=j)\n\t\tpdf(paste0(\"plots/cov_\", i, \"_dispersion_\", j, \".pdf\"))\n\t\tp2 <- plotDispEsts(cov.deseq, main=j)\n\t\tprint(p2)\n\t\tdev.off()\n\t}\n}\n\n"
},
{
"alpha_fraction": 0.6337896585464478,
"alphanum_fraction": 0.6919726133346558,
"avg_line_length": 40.20512771606445,
"blob_id": "94c1921c57627b855fbe16c480e6133389bdf57c",
"content_id": "771204e084f1da4ff5b0dd66a8d4ed89ae31ee89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3214,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 78,
"path": "/coverage_ana_history.R",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(DESeq2)\nlibrary(phyloseq)\nlibrary(reshape2)\n\n# read in the parsed coverage file\ncoverage <- read.delim(\"all_coverages_parsed.txt\", header=F)\n# add in header\nnames(coverage) <- c(\"sample_name\", \"contig\", \"gene_start\", \"gene_end\", \"rmg_genome\", \"protein_id\", \"read_count\", \"mapped_len\", \"gene_length\", \"coverage\")\n\n# read in sample metadata\nsi <- read.delim(\"../shotgun_metadata.txt\")\n## ad hoc adding more information\nsi$sample_id <- data.frame(do.call('rbind', strsplit(as.character(si$Sample_Name), \"_\", fixed=T)))[, 2]\nsi$time <- data.frame(do.call('rbind', strsplit(as.character(si$Sample_Name), \"_\", fixed=T)))[, 1]\n\n# save merged coverage for later use\ncoverage <- merge(coverage, si[, c(2, 3, 11, 14:16)], by.x=\"sample_name\", by.y=\"Sample_Name\")\ncoverage$group_id <- paste0(coverage$rmg_genome, \":\", coverage$protein_id, \":\", coverage$gene_start, \":\", coverage$gene_end)\nsaveRDS(coverage, \"merged_coverage_w_sample_info.RDS\")\n\n# create the gene information table\ncov.gene <- coverage[, c(\"group_id\", \"protein_id\", \"rmg_genome\", \"contig\", \"gene_start\", \"gene_end\")]\n# dereplicate\ncov.gene <- cov.gene[! duplicated(cov.gene), ]\n# make the protein_id the row.names\nrow.names(cov.gene) <- cov.gene$group_id\n\n# create the simplified sample information\ncov.si <- coverage[, c(\"sample_name\", \"PicoGreen_Conc\", \"Metadata1\", \"Clustering\", \"sample_id\", \"time\")]\ncov.si <- cov.si[! duplicated(cov.si), ]\n# make the sample_name the row.names\nrow.names(cov.si) <- cov.si$sample_name\n\n# run Rscript to loop through different cut off values:\n# Rscript ~/Documents/repos/rumen_mapping/scripts/eval_diff_coverage_cutoff.R merged_coverage_w_sample_info.RDS\n\n# 0.7 looks good\n# parametric for fitType\n \ncov <- subset(coverage, coverage >= 0.7)\n# prepare for phyloseq object\n# create the wide count table\ncov.wide <- dcast(cov[, c(\"sample_name\", \"group_id\", \"read_count\")], group_id ~ sample_name, value.var = \"read_count\")\ncov.wide[is.na(cov.wide)] <- 0\n# make group_id the row.names\nrow.names(cov.wide) <- cov.wide$group_id\n# remove the group_id column\ncov.wide$group_id <- NULL\n\n# make the phyloseq object\ncov.phy <- phyloseq(otu_table(as.matrix(cov.wide), taxa_are_rows=T), tax_table(as.matrix(cov.gene)))\nsample_data(cov.phy) <- cov.si\n\n# mds with treatment and clustering\n## clustering doesn't separate anything\nplot_ordination(cov.phy, ordinate(cov.phy, \"MDS\"), color = \"sample_id\", shape=\"Metadata1\", label=\"Sample_Name\") + geom_point(size = 3) + theme_bw()\n\n\n# deseq2\ncov.diagdds <- phyloseq_to_deseq2(cov.phy, ~ Metadata1)\n# use parametric to estimate dispersion\ncov.deseq <- DESeq(cov.diagdds, test=\"Wald\", fitType=\"parametric\")\n# get result\ncov.res <- data.frame(results(cov.deseq))\nsig <- subset(cov.res, padj < 0.05)\nsig$group_id <- row.names(sig)\n####\n## > sig\n## baseMean log2FoldChange lfcSE stat pvalue\n##k87_11225348_4 2.613768 20.06864 2.956836 6.787199 1.143318e-11\n##k87_16477912_4 2.613768 20.06864 2.956836 6.787199 1.143318e-11\n## padj group_id\n##k87_11225348_4 5.794905e-08 k87_11225348_4\n##k87_16477912_4 5.794905e-08 k87_16477912_4\n####\nsave.image(\"coverage_ana.RData\")\n#savehistory(\"coverage_ana_history.R\")\n"
},
{
"alpha_fraction": 0.5894398093223572,
"alphanum_fraction": 0.6562870144844055,
"avg_line_length": 36.878204345703125,
"blob_id": "a23a00b9bc12f4e781d22bab971b4e493a13f0fd",
"content_id": "e972c9206c052c44e324b15f68406f60810b3588",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5909,
"license_type": "permissive",
"max_line_length": 185,
"num_lines": 156,
"path": "/README.md",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "# shallow sequenced metagenome analysis\n## the project has a emphasis on rumen but could be applied to various environment. Most importantly, finding the most relevant genome data base. \n## If all fails, there is always the RefSeq and blast ...\n\n### reference data set: \n+ from paper: https://www.nature.com/articles/s41467-018-03317-6#Sec17\n \n `Assembly of 913 microbial genomes from metagenomic sequencing of the cow rumen`\n\n+ assembled genome and proteome files can be downloaded via:\n ```\n cd /PATH/TO/WHERE/EVERYTHING/IS\n mkdir rumen && cd rumen\n wget http://datashare.is.ed.ac.uk/download/DS_10283_2772.zip\n ```\n + NOTE: need to do multiple unzip and untar.\n\n+ the datashare site above only contains wgs contigs, and protein sequences. The identified and binned rumen unculatured genomes (RUG) are in ENA (as well as the raw sequences etc...). \n ```\n # ENA study accession number: PRJEB21624\n # use ENA protal api generater: will print a table of what the study contains\n curl -X GET --header 'Accept: text/plain' 'https://www.ebi.ac.uk/ena/portal/api/links/study?accession=PRJEB21624' \n ####result_id\tdescription\tentry_cnt\n ####assembly\tAssembly\t913\n ####read_experiment\tExperiment\t45\n ####read_run\tRead\t45\n ####wgs_set\tGenome assembly contig set\t913\n # then set the results for RUG IDs (may need to surpress cert error `-k`): wgs_acc RUG id\n curl -k -X GET --header 'Accept: text/plain' 'https://www.ebi.ac.uk/ena/portal/api/links/study?accession=PRJEB21624&result=wgs_set' > PRJEB21624_wgs_set.txt\n # the first column is the RUG wgs id (almost... needs to get rid of the trailing 0's)\n cd rumen\n mkdir RUG_genomes \n python ~/repos/rumen_mapping/scripts/ena_download.py PRJEB21624_wgs_set.txt > get_rug_genomes.sh\n cd RUG_genomes\n bash ../get_rug_genomes.sh\n # should 913 of them. and the headers contain RMG contig/protein ids. \n ```\n \n NOTE: The RMG contigs do not contain all RUG contigs/scafolds... Need to do mapping to RUG contigs\n \n \n### analysis procedures:\n\n+ use bwa.\n + create idex for the reference genome (need to concat all and checked. no duplicated headers):\n ```\n cd rumen/genomes\n cat *.fa > all_rmgs.fa\n bwa index -p ../bwa_index/rmg_genomes all_rmgs.fa\n cd ../RUG_genomes\n zcat *.gz > all_rugs.fa\n bwa index -p ../bwa_index/rug_genomes all_rugs.fa\n ```\n \n NOTE: don't use pipe to combine cat and bwa... 
it runs into issues.\n \n + location of the shallow sequenced metagenomes\n ```\n cd /PATH/TO/WHERE/EVERYTHING/IS\n cd 181214_fastqs\n ```\n \n + quality filter fastqs: (maxee 0.5)\n ```\n mkdir filtered_fq && cd fastq \n for i in *.gz; do vsearch --threads 4 --fastq_filter $i --fastq_maxee 0.5 --fastq_maxns 0 --fastqout ../filtered_fq/${i//.fastq.gz/.maxee0.5.fq}; done\n ```\n \n + mapping (to rug genomes)\n ```\n mkdir bwa_bams\n cd filtered_fq\n for i in *.fq; do bwa mem -t 4 /mnt/scratch/yangfan1/rumen/bwa_index/rug_genomes $i | samtools sort -@4 -o ../bwa_bams/${i//.fq/.sorted.bam} -; done\n ```\n\n + get mapped reads only:\n ```\n cd ../bwa_bams\n mkdir ../mapped_bam\n for i in *.bam; do samtools view -b -F 4 -q 10 $i > ../mapped_bam/${i//sorted/mapped}; done\n ```\n \n + get reference header:\n ```\n cd /PATH/TO/WHERE/EVERYTHING/IS\n cd rumen/genomes\n grep \">\" RMG_*.fa > ../RMG_genome_to_contigs.txt\n cd ../proteomes\n grep \">\" RMG_*.faa > ../RMG_genome_to_proteome.txt\n cd ../RUG_genomes\n grep \">\" all_rugs.fa > ../RUG_to_contigs.txt \n ```\n\n + create bed file from the supplmentary file `41467_2018_3317_MOESM11_ESM.txt`:\n ```\n python ~/repos/rumen_mapping/scripts/rug_parser.py RUG_to_contigs.txt\n # NOTE: to get the bed file from RUGs_geomes, and the supplementary table is very ad hoc. lots of incosistencies. \n ```\n \n bed file in a format like this (need to be 6 columns): id start end name some_value(eg. gc_content) strand\n note: the last 3 columns are optional. \n ```\n k87_58769312 514 1122 RMG_1025:k87_58769312_1 0.268 +\n k87_58769312 1307 2620 RMG_1025:k87_58769312_2 0.215 +\n k87_58269671 207 698 RMG_1025:k87_58269671_1 0.250 -\n ```\n or\n ```\n ENA|OMVS01000105|OMVS01000105.1\t222\t281\tRUG001:scaffold_10074_2\n ENA|OMVS01000105|OMVS01000105.1\t79\t164\tRUG001:scaffold_10074_3\n ENA|OMVS01000105|OMVS01000105.1\t105\t132\tRUG001:scaffold_10074_4\n ENA|OMVS01000105|OMVS01000105.1\t142\t163\tRUG001:scaffold_10074_4\n ```\n \n \n \n + bedtools to find intersect and coverage:\n ```\n cd /PATH/TO/WHERE/EVERYTHING/IS\n cd 181214_fastqs\n mkdir intersects\n cd mapped_bam\n # find intersect:\n for i in *.bam; do bedtools intersect -a ../../rumen/rug_genes.bed -b $i -bed -wa -wb > ../intersects/${i//mapped.bam/intersect.bed}; done\n # find coverage\n mkdir ../coverages\n for i in *.bam; do bedtools coverage -a ../../rumen/rug_genes.bed -b $i | grep -vw \"0.0000000\" > ../coverages/${i//mapped.bam/coverage.txt}; done\n # coverage hist may be useful too.\n mkdir ../coverages_hist\n for i in *.bam; do bedtools coverage -a ../../rumen/rug_genes.bed -b $i -hist | grep -vw \"1.0000000\" > ../coverages_hist/${i//mapped.bam/coverage_hist.txt}; done\n ```\n \n + combine all files for downstream analysis \n\t```\n\tcd /PATH/TO/WHERE/EVERYTHING/IS\n cd 181214_fastqs/intersects\n\tgrep \"\" * > ../all_intersects.txt\n\tcd 181214_fastqs/coverages\n\tgrep \"\" * > ../all_coverages.txt\n\tcd 181214_fastqs/coverages_hist\n\tgrep \"\" * > ../all_coverages_hist.txt\n\t```\n\n\tNote: because so few reads mapped, median depth of the mapped region won't help much... \n\t\n\n \n+ BACKUP OPTION: if use blastx \n \n + get nr:\n ```\n cd /PATH/TO/WHERE/EVERYTHING/IS\n mkdir nr_20190414 && cd nr_20190414/\n wget ftp://ftp.ncbi.nlm.nih.gov/blast/db/nr.*.gz\n for i in *.gz; do tar -zxvf $i; done\n ```\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6136986017227173,
"avg_line_length": 32.181819915771484,
"blob_id": "3c183bc7f584c1a43a467ba73d4a82d6ee223bda",
"content_id": "008d03904490cfe0418ae804e44d56541a4f02cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 11,
"path": "/scripts/rug_contig_bedfile.py",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "import sys\nimport gzip\nimport re\nfrom Bio import SeqIO\n\nwith gzip.open(sys.argv[1]) as f:\n\tfor record in SeqIO.parse(f, \"fasta\"):\n\t\tbwa_id = record.id\n\t\trug_id = \"RUG\" + re.split(\"RUG\", record.description)[1].split(\" \")[0]\n\t\tcontig = record.description.strip().split(\" \")[-1]\n\t\tprint bwa_id + \"\\t\" + \"1\" + \"\\t\" + str(len(record.seq)) + \"\\t\" + rug_id + \":\" + contig\n"
},
{
"alpha_fraction": 0.5472440719604492,
"alphanum_fraction": 0.5669291615486145,
"avg_line_length": 27.22222137451172,
"blob_id": "567a2ca7f0f2f1feb4f14278408a69610b541736",
"content_id": "9ff41b6c3ce143cfc986aa65378b3365cc86c034",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 9,
"path": "/scripts/rug_parser.py",
"repo_name": "fandemonium/rumen_mapping",
"src_encoding": "UTF-8",
"text": "import sys\nimport re\n\nfor lines in open(sys.argv[1], 'rU'):\n\tline = lines.strip()\n\tbwa_name = re.split(\">| \", line)[1]\n\trug_id = \"RUG\" + re.split(\"RUG\", line)[1].split(\" \")[0]\n\tcontig = line.split(\" \")[-1]\n\tprint bwa_name + \"\\t\" + rug_id + \"\\t\" + contig\n"
}
] | 6 |
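The combine step above (`grep "" * > ../all_coverages.txt`) leaves a `filename:` prefix on every bedtools row, while `coverage_ana_history.R` reads an already-parsed table. A minimal sketch of that intermediate parsing step, assuming the 4-column RUG bed shown in the README (so 8 bedtools output fields); the column mapping is inferred from the R script's header and is illustrative only, not the repo's exact parser:

```python
# Turn grep's "file:record" lines into the tab-separated layout the R script
# expects: sample, contig, gene_start, gene_end, genome, protein_id,
# read_count, mapped_len, gene_length, coverage.
import sys

for raw in open(sys.argv[1]):  # e.g. all_coverages.txt
    fname, record = raw.rstrip("\n").split(":", 1)
    contig, start, end, name, count, covered, length, frac = record.split("\t")
    genome, protein = name.split(":", 1)  # name looks like RUG001:scaffold_10074_2
    sample = fname.replace(".coverage.txt", "")
    print("\t".join([sample, contig, start, end, genome, protein,
                     count, covered, length, frac]))
```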
leflemluc/MRIs | https://github.com/leflemluc/MRIs | 50fb7adbdfec7f377eac5f2c77cff0b110465e5c | a66cbdf899e9cdce002f53689f55aaa7c13985a2 | e38b038286ebc8f5ee9f851482dc33d119e8bd6f | refs/heads/master | 2020-04-01T19:37:48.873881 | 2019-09-13T03:58:11 | 2019-09-13T03:58:11 | 153,563,624 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.578568696975708,
"alphanum_fraction": 0.5975009202957153,
"avg_line_length": 28.311111450195312,
"blob_id": "5cad38e79387eb50bc8027037daadda13f32da0c",
"content_id": "080f1fce7062d88d33c5ac4a8d6aa0a8f25d9de3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2641,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 90,
"path": "/create_tfrecords/visualization.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import nibabel as nib\nimport numpy as np\nimport pylab as py\nimport matplotlib\n#matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as manimation\nimport sys\ndpi = 1000\n\n\n\ndef show_slices(slices):\n \"\"\" Function to display row of image slices \"\"\"\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n\ndef display(PATH):\n epi_img = nib.load(PATH)\n epi_img_data = epi_img.get_fdata()\n cut_x = epi_img_data.shape[0] // 2\n cut_y = epi_img_data.shape[1] // 2\n cut_z = epi_img_data.shape[2] // 2\n slice_0 = epi_img_data[cut_x, :, :]\n slice_1 = epi_img_data[:, cut_y, :]\n slice_2 = epi_img_data[:, :, cut_z]\n show_slices([slice_0, slice_1, slice_2])\n plt.suptitle(\"Center slices for EPI image\")\n\n\ndef display_movie_x(PATH, save_path, title):\n FFMpegWriter = manimation.writers['ffmpeg']\n metadata = dict(title=title, artist='Matplotlib',\n comment='Movie support!')\n writer = FFMpegWriter(fps=5, metadata=metadata)\n\n epi_img = nib.load(PATH)\n epi_img_data = epi_img.get_fdata()\n\n fig, axes = plt.subplots(1, 1)\n\n with writer.saving(fig, save_path, 100):\n for i in range(0, 256, 3):\n print(i)\n slice = epi_img_data[i, :, :]\n axes.imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n writer.grab_frame()\n\n\n\ndef display_movie_y(PATH, save_path, title):\n FFMpegWriter = manimation.writers['ffmpeg']\n metadata = dict(title=title, artist='Matplotlib',\n comment='Movie support!')\n writer = FFMpegWriter(fps=5, metadata=metadata)\n\n epi_img = nib.load(PATH)\n epi_img_data = epi_img.get_fdata()\n\n fig, axes = plt.subplots(1, 1)\n\n with writer.saving(fig, save_path, 100):\n for i in range(0, 256, 3):\n print(i)\n slice = epi_img_data[:, i, :]\n axes.imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n writer.grab_frame()\n\ndef display_movie_z(PATH, save_path, title):\n FFMpegWriter = manimation.writers['ffmpeg']\n metadata = dict(title=title, artist='Matplotlib',\n comment='Movie support!')\n writer = FFMpegWriter(fps=5, metadata=metadata)\n\n epi_img = nib.load(PATH)\n epi_img_data = epi_img.get_fdata()\n\n fig, axes = plt.subplots(1, 1)\n\n with writer.saving(fig, save_path, 100):\n for i in range(0, 256, 3):\n print(i)\n slice = epi_img_data[:, :, i]\n axes.imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n writer.grab_frame()\n\n\n\nif __name__=='__main__':\n\n\n\n"
},
{
"alpha_fraction": 0.5942978858947754,
"alphanum_fraction": 0.6047845482826233,
"avg_line_length": 38.11538314819336,
"blob_id": "343bfc1dd4db089ae759a5344bc13800832df065",
"content_id": "cb661c72069ffa677c46f98f03f1b4c45e0e4505",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6103,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 156,
"path": "/create_tfrecords/create_tfrecords.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import os\nimport time \nimport pandas as pd \nimport tensorflow as tf \nimport numpy as np\nimport nibabel as nib\nimport sys \nfrom joblib import Parallel, delayed\nimport multiprocessing\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef normalize_n_create_tfrecords(file_names, minbycube, maxbycube, eps = 0.0001):\n count_fail=0\n examples = []\n #writer = tf.io.TFRecordWriter(save_filename)\n for index, row in file_names.iterrows():\n try:\n filename=row['filename']\n label=int(row['label'])\n name=str.encode(row['ind'])\n \n img = nib.load('data/'+filename).get_fdata()\n normalized_im=(img-minbycube)/(maxbycube+eps-minbycube)\n normalized_im = normalized_im.ravel()\n \n \n feature = {'label': _int64_feature(label),\n 'image': _float_feature(normalized_im),\n 'name': _bytes_feature(name)}\n \n # Create an example protocol buffer - Protocol buffers are a cross-platform, \n # cross-language library for efficient serialization of structured data.\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n examples.append(example)\n # Serialize to string and write on the file\n #writer.write(example.SerializeToString())\n except Exception as e: \n print(e)\n count_fail+=1\n #writer.close()\n #return index, count_fail\n return examples\n\ndef write_tfrecords(save_filename, examples_per_processor):\n writer = tf.io.TFRecordWriter(save_filename)\n index = 0\n nb_written=0\n for examples in examples_per_processor:\n print(\"Writing examples of processor number \" + str(index))\n index=1\n for example in examples: \n writer.write(example.SerializeToString())\n nb_written+=1\n writer.close()\n return nb_written\n\ndef write_tfrecords_sequentially(save_filename, file_names, minbycube, maxbycube, eps = 0.0001):\n count_fail=0\n start_50 = time.time()\n writer = tf.io.TFRecordWriter(save_filename)\n for index, row in file_names.iterrows():\n if (index+1)%50 == 0:\n print(index)\n print(\"50 files in \" + str(time.time() - start_50))\n start_50 = time.time()\n try:\n filename=row['filename']\n label=int(row['label'])\n name=str.encode(row['ind'])\n \n img = nib.load('data/'+filename).get_fdata()\n normalized_im=(img-minbycube)/(maxbycube+eps-minbycube)\n normalized_im = normalized_im.ravel()\n \n \n feature = {'label': _int64_feature(label),\n 'image': _float_feature(normalized_im),\n 'name': _bytes_feature(name)}\n \n # Create an example protocol buffer - Protocol buffers are a cross-platform, \n # cross-language library for efficient serialization of structured data.\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n # Serialize to string and write on the file\n writer.write(example.SerializeToString())\n except Exception as e: \n print(e)\n count_fail+=1\n writer.close()\n return index, count_fail\n \nif __name__==\"__main__\":\n print(\"Starting\")\n \n data_fold = sys.argv[1]\n train_or_test = sys.argv[2]\n print(\"Taking care of writing down \" + train_or_test + \" set \" + str(data_fold))\n \n dataset = pd.read_csv('./datafolds/datafold_'+str(data_fold)+'/'+train_or_test+'_set.csv')\n \n print(\"Loading min and max from:\")\n if train_or_test==\"train\":\n numpy_name = \"TRAIN\"\n else:\n numpy_name = \"TEST\"\n 
print('./datafolds/datafold_'+str(data_fold)+'/'+numpy_name+\"_max_by_cube_matrix.npy\")\n print(\"and\")\n print('./datafolds/datafold_'+str(data_fold)+'/'+numpy_name+\"_min_by_cube_matrix.npy\")\n maxbycube = np.load('./datafolds/datafold_'+str(data_fold)+'/'+numpy_name+\"_max_by_cube_matrix.npy\")\n minbycube = np.load('./datafolds/datafold_'+str(data_fold)+'/'+numpy_name+\"_min_by_cube_matrix.npy\")\n \n print(\"Now its time to write the tf records: \")\n start = time.time()\n nb_written, fail = write_tfrecords_sequentially('./'+train_or_test+'_256_3d.tfrecords', dataset, minbycube, maxbycube)\n print(\"fails : \" + str(fail))\n print(\"It took \" + str(time.time() - start) + \" to write the \" + str(nb_written) + \" tfrecords.\")\n \n \n \n \"\"\"\n print(\"Initializing parallelism\")\n if train_or_test==\"train\":\n numpy_name = \"TRAIN\"\n numprocs = int(multiprocessing.cpu_count() * 3.5)\n\n else:\n numpy_name = \"TEST\"\n numprocs = multiprocessing.cpu_count()\n \n print(\"Num processors used : \" + str(numprocs))\n \n parasize = int(len(dataset)/numprocs)\n slices = [(i*parasize, (i+1)*parasize) for i in range(numprocs)]\n slices[-1] = ((numprocs-1)*parasize, len(dataset)-1)\n \n print(\"Starting aggregating tf records\")\n start = time.time()\n tfrecords_results = Parallel(n_jobs=numprocs)(delayed(normalize_n_create_tfrecords)(dataset.iloc[Slice[0]: Slice[1]], minbycube, maxbycube, eps = 0.0001) for Slice in slices)\n #print(tfrecords_results)\n index_proc = 0\n for examples in tfrecords_results:\n print(\"There are \" + str(len(examples)) + \" examples in proc \" + str(index_proc))\n print(\"It took \" + str(time.time() - start) + \" to create all tfrecords\")\n print(\"Now its time to write the tf records: \")\n start = time.time()\n nb_written = write_tfrecords('./'+train_or_test+'_256_3d.tfrecords', tfrecords_results)\n print(\"It took \" + str(time.time() - start) + \" to write the \" + str(nb_written) + \" tfrecords.\")\n \"\"\"\n\n"
},
{
"alpha_fraction": 0.5930343270301819,
"alphanum_fraction": 0.6090171933174133,
"avg_line_length": 39.69902801513672,
"blob_id": "6c8f5b2bf7d36b397eb8e5d0fa65e3ccf225c640",
"content_id": "08e6965ed2289c546f38a2f6f7eb816ea5fe741d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4192,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 103,
"path": "/create_tfrecords/normalize_data.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import os\nimport time \nimport pandas as pd \nimport tensorflow as tf \nimport numpy as np\nimport sys \nfrom joblib import Parallel, delayed\nimport multiprocessing\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ngrid_cube_size = 8\nn_cube_by_dim = 256 //grid_cube_size\nn_of_cubes = (256 // 16)**3\n\ndef normalize(dataset):\n N = len(dataset)\n count_fail=0\n\n min_array = np.ones(256**3)*np.inf\n max_array = np.zeros(256**3)\n \n for index, row in dataset.iterrows():\n\n try:\n filename=row['filename']\n x=np.array(nib.load('./data/'+filename).get_fdata()).ravel()\n min_array = np.minimum(x, min_array)\n max_array = np.maximum(x, max_array)\n \n except Exception as e: \n count_fail+=1\n \n return min_array, max_array, count_fail\n\n\n\nif __name__ == \"__main__\":\n \n data_fold = sys.argv[1]\n train_or_test = sys.argv[2]\n dataset = pd.read_csv('./datafolds/datafold_'+str(data_fold)+'/'+train_or_test+'_set.csv')\n numprocs = multiprocessing.cpu_count() * 2\n \n print(\"Working on \" + train_or_test + \" dataset \"+ str(data_fold))\n print(\"Initalizing multiprocessing\")\n start = time.time()\n parasize = int(len(dataset)/numprocs)\n slices = [(i*parasize, (i+1)*parasize) for i in range(numprocs)]\n slices[-1] = ((numprocs-1)*parasize, len(dataset)-1)\n \n start = time.time()\n print(\"Creating the max and min by pixel arrays for \" + train_or_test)\n \n \n normlize_results = Parallel(n_jobs=numprocs)(delayed(normalize)(dataset.iloc[Slice[0]: Slice[1]]) for Slice in slices)\n max_x = np.maximum.reduce([normlize_results[i][1] for i in range(len(normlize_results))])\n min_x = np.minimum.reduce([normlize_results[i][0] for i in range(len(normlize_results))])\n \n #min_array, max_array, count_fail = normalize(dataset)\n \n print(\"Computing max and min vector in for \" + train_or_test + \" set \" + str(data_fold)+' in '+ str(time.time() - start))\n #print(\"...with \" + str(count_fail) + \" fails\")\n \n \n print(\"creating the max and min by cube arrays for \" + train_or_test + \" set \" + str(data_fold))\n start = time.time()\n max_x = np.reshape(max_x, (256,256,256))\n min_x = np.reshape(min_x, (256,256,256))\n \n minbycube = np.ones([n_cube_by_dim, n_cube_by_dim, n_cube_by_dim])*np.inf\n maxbycube = np.zeros([n_cube_by_dim, n_cube_by_dim, n_cube_by_dim])\n \n for i in range(n_cube_by_dim):\n for j in range(n_cube_by_dim):\n for k in range(n_cube_by_dim):\n minbycube[i,j,k] = np.minimum(minbycube[i,j,k], np.min(min_x[grid_cube_size*i:grid_cube_size*(i+1), grid_cube_size*j:grid_cube_size*(j+1), grid_cube_size*k:grid_cube_size*(k+1)]))\n maxbycube[i,j,k] = np.maximum(maxbycube[i,j,k], np.max(max_x[grid_cube_size*i:grid_cube_size*(i+1), grid_cube_size*j:grid_cube_size*(j+1), grid_cube_size*k:grid_cube_size*(k+1)]))\n\n maxbycube_matrix = np.ones_like(max_x)\n minbycube_matrix = np.ones_like(min_x)\n for i in range(n_cube_by_dim):\n for j in range(n_cube_by_dim):\n for k in range(n_cube_by_dim):\n maxbycube_matrix[grid_cube_size*i:grid_cube_size*(i+1), grid_cube_size*j:grid_cube_size*(j+1), grid_cube_size*k:grid_cube_size*(k+1)] *= maxbycube[i, j, k]\n minbycube_matrix[grid_cube_size*i:grid_cube_size*(i+1), grid_cube_size*j:grid_cube_size*(j+1), grid_cube_size*k:grid_cube_size*(k+1)] *= minbycube[i, j, k]\n \n print('...in '+ 
str(time.time() - start))\n \n print(\"write min and max in a numpy file\")\n if train_or_test == \"test\":\n NAME = \"TEST\"\n else:\n NAME = \"TRAIN\"\n np.save('./datafolds/datafold_'+str(data_fold)+'/' + NAME+'_min_by_cube_matrix', minbycube_matrix)\n np.save('./datafolds/datafold_'+str(data_fold)+'/' + NAME+'_max_by_cube_matrix', maxbycube_matrix)\n"
},
{
"alpha_fraction": 0.585196316242218,
"alphanum_fraction": 0.6025081872940063,
"avg_line_length": 45.132076263427734,
"blob_id": "cd28b4c6fd15b133ca55df001903e69f4c883896",
"content_id": "42879feb22b3de3c9d6c7117953910330eec0b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7336,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 159,
"path": "/training_3D/train.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import sys \nimport os \nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nfrom utils import _parse_function, _select_patch_and_reshape, MRI_SIZE, MODIFIED_SIZE, NUM_CHANNEL, OUTPUT_SIZE\nfrom model import inference, loss\n\nlogdir = 'logs_3D_CNN_full_156_patches_withL2loss_2_LR_1_B256/'\nchkpt = 'logs_3D_CNN_full_156_patches_withL2loss_2_LR_1_B256/model.ckpt'\n\n\nKEEP_RATE = 0.5\n\nPATH_TO_DATA = './Create_TFrecords/datafolds/'\n\nclass Trainer:\n \n\n def __init__(self, datafold, adam_rate=0.0001, batch_size = 256, n_epochs = 30, penalty_intensity = 0.05):\n \n path_folder = PATH_TO_DATA+ 'datafold_' + str(datafold) + '/'\n \n train_csv = pd.read_csv(path_folder+\"train_set.csv\")\n self.training_set_size = len(train_csv)\n self.train_tf_records_path = path_folder+'train_256_3d.tfrecords'\n \n test_csv = pd.read_csv(path_folder+\"test_set.csv\")\n self.test_set_size = len(test_csv)\n self.test_tf_records_path = path_folder+'test_256_3d.tfrecords'\n \n self.adam_rate = adam_rate\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.penalty_intensity = penalty_intensity\n print(\"adam_rate: \" + str(adam_rate))\n print(\"batch_size: \" + str(batch_size))\n print(\"n_epochs: \" + str(n_epochs))\n print(\"penalty_intensity: \" + str(penalty_intensity))\n \n self.logdir = path_folder + '/logs_3D_CNN_LR_'+str(adam_rate)+'_BS_'+str(batch_size)+'_L2_'+ str(penalty_intensity) + '/'\n self.tensorboard_n_checkpoint = self.logdir + 'tensorboard_n_checkpoint/'\n self.chkpt = self.tensorboard_n_checkpoint + 'model.ckpt'\n\n \n with tf.variable_scope('3D_CNN'):\n \n self.X = tf.placeholder(tf.float32, [None, MODIFIED_SIZE, MODIFIED_SIZE, MODIFIED_SIZE, NUM_CHANNEL], name='X')\n self.y = tf.placeholder(tf.float32, [None, OUTPUT_SIZE], name='y')\n self.keep_rate = tf.placeholder(tf.float32)\n score = inference(self.X, self.keep_rate, OUTPUT_SIZE)\n softmax = tf.nn.softmax(score)\n self.cost = loss(score, self.y, self.penalty_intensity)\n \n self.optimizer = tf.train.AdamOptimizer(self.adam_rate).minimize(self.cost, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))\n \n self.preds = tf.equal(tf.argmax(softmax, axis=1), tf.argmax(self.y, axis=1))\n self.accuracy = tf.reduce_mean(tf.cast(self.preds, tf.float32))\n\n self.cost_summary = tf.summary.scalar(name='Cost', tensor=self.cost)\n \n self.accuracy_summary = tf.summary.scalar(name='Accuracy', tensor=self.accuracy)\n\n self.summary = tf.summary.merge_all()\n\n def run(self):\n \n with tf.Session(config=tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)) as sess:\n \n sess.run(tf.global_variables_initializer())\n\n self.saver = tf.train.Saver()\n \n \n if os.path.exists(self.logdir):\n print(\"restoring pre-existant weights\")\n self.saver.restore(sess, tf.train.latest_checkpoint(self.tensorboard_n_checkpoint)) \n \n \n self.file_writer = tf.summary.FileWriter(self.tensorboard_n_checkpoint, tf.get_default_graph())\n \n self.filenames = tf.placeholder(tf.string, shape=[None])\n #self.dataset = tf.data.TFRecordDataset(self.filenames).map(_parse_function).shuffle(buffer_size=buffer_size_validation_set).batch(batch_size).repeat()\n\n self.dataset = tf.data.TFRecordDataset(self.filenames).map(_parse_function).batch(self.batch_size).repeat()\n self.iterator = self.dataset.make_initializable_iterator()\n self.next_element = self.iterator.get_next()\n self.training_filenames = [self.train_tf_records_path]\n sess.run(self.iterator.initializer, 
feed_dict={self.filenames: self.training_filenames})\n\n            self.val_filenames = tf.placeholder(tf.string, shape=[None])\n            #self.val_dataset = tf.data.TFRecordDataset(self.val_filenames).map(_parse_function).shuffle(buffer_size=buffer_size_test_set).batch(batch_size).repeat()\n            self.val_dataset = tf.data.TFRecordDataset(self.val_filenames).map(_parse_function).batch(self.batch_size).repeat()\n            self.val_iterator = self.val_dataset.make_initializable_iterator()\n            self.val_next_element = self.val_iterator.get_next()\n            self.validation_filenames = [self.test_tf_records_path]\n            sess.run(self.val_iterator.initializer, feed_dict={self.val_filenames: self.validation_filenames})\n\n            for epoch in range(self.n_epochs):\n                self.train(sess, epoch)\n                self.validate(sess)\n                self.saver.save(sess, self.chkpt)\n\n    def train(self, sess, epoch):\n        import time\n        n_batches = self.training_set_size//self.batch_size\n        avg_cost = 0\n        avg_accuracy = 0\n        times = 0\n        time_10batches = 0\n        for batch in range(n_batches):\n            t0 = time.time()\n            x_batch, y_batch, _name = sess.run(self.next_element)\n            #patch_x = _select_patch_and_reshape(x_batch, patch_size)\n            _, batch_cost, batch_accuracy, summ = sess.run([self.optimizer, self.cost, self.accuracy, self.summary], feed_dict={self.X: x_batch, self.y: y_batch, self.keep_rate: KEEP_RATE})\n            avg_cost += batch_cost\n            avg_accuracy += batch_accuracy\n            self.file_writer.add_summary(summ, epoch * n_batches + batch)\n            completion = batch / n_batches\n            print_str = '|' + int(completion * 20) * '#' + (19 - int(completion * 20)) * ' ' + '|'\n            print('\\rEpoch {0:>3} {1} {2:3.0f}% Cost {3:6.4f} Accuracy {4:6.4f}'.format('#' + str(epoch + 1), \n                print_str, completion * 100, avg_cost / (batch + 1), avg_accuracy / (batch + 1)), end='')\n            t1 = time.time()\n            batch_time = t1-t0\n            times += batch_time\n            time_10batches += batch_time\n\n            if (batch+1) % 10 == 0:  # report every 10th batch (the original truthiness test fired on 9 of every 10)\n                print(\" 10 batches took \" + str(time_10batches))\n                time_10batches = 0\n\n        print(end=' ')\n        print(\"Epoch took \" + str(times))\n\n    def validate(self, sess):\n        #TODO: access full size of dataset\n        n_batches = self.test_set_size//self.batch_size\n        avg_accuracy = 0\n        for batch in range(n_batches):\n            x_batch, y_batch, _name = sess.run(self.val_next_element)\n            # feed the full batch, as in train(); patch_size is never defined in this script\n            #patch_x = _select_patch_and_reshape(x_batch, patch_size)\n            avg_accuracy += sess.run([self.accuracy, ], feed_dict={self.X: x_batch, self.y: y_batch, self.keep_rate: 0.0})[0]\n\n        avg_accuracy /= n_batches\n        print('Validation Accuracy {0:6.4f}'.format(avg_accuracy))\n\n\nif __name__ == '__main__':\n    data_fold = sys.argv[1]\n    adam_rate = float(sys.argv[2])\n    batch_size = int(sys.argv[3])\n    n_epochs = int(sys.argv[4])\n    penalty_intensity = float(sys.argv[5])\n    Trainer(data_fold, adam_rate, batch_size, n_epochs, penalty_intensity).run()\n    #datafold, adam_rate=0.0001, batch_size = 256, n_epochs = 30, penalty_intensity = 0.05\n\n"
},
{
"alpha_fraction": 0.5460495352745056,
"alphanum_fraction": 0.5909303426742554,
"avg_line_length": 47.1136360168457,
"blob_id": "9dd04c68b5c9bffc1e1177f677c3d15cec382d42",
"content_id": "5cd6e2d42ce2e01b42886a48f4f6cd1486e32c7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2139,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 44,
"path": "/training_3D/model.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\n\ndef inference(x, keep_rate, classNum):\n with tf.name_scope(\"layer_a\"):\n conv1 = tf.layers.conv3d(inputs=x, filters=16, kernel_size=[3,3,3],strides=(2,2,2), padding='same', activation=tf.nn.relu)\n print(conv1)\n conv2 = tf.layers.conv3d(inputs=conv1, filters=32, kernel_size=[3,3,3],strides=(2,2,2), padding='same', activation=tf.nn.relu)\n print(conv2)\n pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)\n cnn3d_bn_1 = tf.layers.batch_normalization(inputs=pool3, training=True)\n\n with tf.name_scope(\"layer_c\"):\n # conv => 8*8*8\n conv4 = tf.layers.conv3d(inputs=cnn3d_bn_1, filters=64, kernel_size=[3,3,3],strides=(2,2,2), padding='same', activation=tf.nn.relu)\n print(conv4)\n conv5 = tf.layers.conv3d(inputs=conv4, filters=128, kernel_size=[3,3,3],strides=(2,2,2), padding='same', activation=tf.nn.relu)\n print(conv5)\n \n pool6 = tf.layers.max_pooling3d(inputs=conv5, pool_size=[2, 2, 2], strides=2)\n print(pool6)\n cnn3d_bn_2 = tf.layers.batch_normalization(inputs=pool6, training=True)\n print(cnn3d_bn_2)\n with tf.name_scope(\"fully_con\"):\n flattening = tf.reshape(cnn3d_bn_2, [-1, 2*2*2*128])\n print(flattening)\n dense = tf.layers.dense(inputs=flattening, units=1024, activation=tf.nn.relu)\n # (1-keep_rate) is the probability that the node will be kept\n dropout = tf.layers.dropout(inputs=dense, rate=keep_rate, training=True)\n print(dropout)\n\n with tf.name_scope(\"y_conv\"):\n y_conv = tf.layers.dense(inputs=dropout, units=classNum)\n print(y_conv)\n \n \n return y_conv \n \n \n \ndef loss(score, y, penalty_intensity=0.05):\n vars_ = tf.trainable_variables()\n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars_]) * penalty_intensity\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=score, labels=y)) + lossL2\n\n \n \n\n\n\n"
},
{
"alpha_fraction": 0.6332976222038269,
"alphanum_fraction": 0.6584582328796387,
"avg_line_length": 31.6842098236084,
"blob_id": "85ba5f817875169002cb8f3c7f2f4f4f5f33a336",
"content_id": "a295caa1ed9cf1cbf72d2706340d6134fa2c6e05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1868,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 57,
"path": "/create_tfrecords/download_from_bucket.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "from google.oauth2 import service_account\nfrom google.cloud import storage\nimport nibabel as nib\nimport os\nimport pandas as pd\nimport tensorflow as tf\nimport gcsfs\nimport random\nimport numpy as np\nimport time\n\nMRI_SIZE = 256\nWORKING_SIZE = 168\n\napi_key = \"columbia-dl-storage-99c51286dd68.json\"\ncredentials = service_account.Credentials.from_service_account_file(api_key)\nclient = storage.Client(credentials=credentials, project=\"columbia-dl-storage\")\n\nbucket_name = \"columbia-dl-storage-bucket\"\nbucket = client.get_bucket(bucket_name)\n\nprefix = \"data\"\nblobs = bucket.list_blobs(prefix=prefix)\n\ndef download(list_names):\n \n index = 0\n couting_fail = 1\n start_100 = time.time()\n for filename in file_names_list:\n index+=1\n if couting_fail%10 == 0:\n print(\"couting_fail \" + str(couting_fail))\n if index % 50 ==0:\n print(index);\n print('It took ' + str(time.time() - start_100))\n start_100 = time.time()\n filename = filename[5:]\n blob = bucket.blob(os.path.join(prefix, filename))\n blob.download_to_filename('data/'+filename)\n start_pixel = (MRI_SIZE - WORKING_SIZE) // 2\n nii_file = nib.load('data/'+filename).get_data()[start_pixel:start_pixel+WORKING_SIZE, start_pixel:start_pixel+WORKING_SIZE, start_pixel:start_pixel+WORKING_SIZE]\n np.save('data/'+filename[:-4], nii_file)\n os.remove('data/'+filename)\n return \n\nif __name__ == \"__main__\":\n fs = gcsfs.GCSFileSystem(token=api_key, project=\"columbia-dl-storage\")\n with fs.open('columbia-dl-storage-bucket/ADNI_t1_list_with_fsstatus_20190111.csv') as f:\n df = pd.read_csv(f)\n df.to_csv('labels.csv')\n \n file_names_list = []\n for blob in blobs:\n file_names_list.append(blob.name)\n file_names_list= file_names_list[1:]\n download(file_names_list)\n \n"
},
{
"alpha_fraction": 0.5933806300163269,
"alphanum_fraction": 0.631205677986145,
"avg_line_length": 25.4375,
"blob_id": "85a7bbd35a77a65a53e0216db3dc924d383c8b52",
"content_id": "d03e51b965f1892cdb934c417ad3eb892119c2cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 16,
"path": "/training_3D/test_confusion_matrix.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "from mlxtend.evaluate import confusion_matrix\nfrom mlxtend.plotting import plot_confusion_matrix\n\n\ny_target = [1, 1, 1, 0, 0, 2, 0, 3]\ny_predicted = [1, 0, 1, 0, 0, 2, 1, 3]\n\ncm = confusion_matrix(y_target=y_target, \n y_predicted=y_predicted, \n binary=False)\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plot_confusion_matrix(conf_mat=cm)\nplt.show()\nplt.savefig(\"test_save.png\")\n"
},
{
"alpha_fraction": 0.6029275059700012,
"alphanum_fraction": 0.625464677810669,
"avg_line_length": 39.556602478027344,
"blob_id": "2cb1cdfcb42a2e4e7e89f138262d01e1eda581a8",
"content_id": "b035fbfa806238c25b4b077f1bdac19902e3bd0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4304,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 106,
"path": "/create_tfrecords/create_filenames.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import pandas as pd \nimport os \nimport re\nNUM_CLASSES=5\nNUM_FOLDS= 10\n\npattern1 = re.compile(r'S\\d+')\npattern2 = re.compile(r'\\d+_S_\\d+')\n\ndef create_labels(df):\n print(\"Read labels\")\n labels=df[['Subject','T1.SERIESID','Group']].copy()\n\n labels=labels[(labels.Group == 'CN')|(labels.Group == 'AD')|(labels.Group == 'EMCI')|(labels.Group == 'LMCI')|(labels.Group == 'MCI')]\n\n labels['new_ind']=labels['Subject']+\"_\"+labels['T1.SERIESID'].astype(str)\n labels.loc[labels.Group == 'CN','Group']=0\n labels.loc[labels.Group == 'AD','Group']=1\n labels.loc[labels.Group == 'EMCI','Group']=2\n labels.loc[labels.Group == 'LMCI','Group']=3\n labels.loc[labels.Group == 'MCI','Group']=4\n labels = labels.set_index('new_ind')\n labels=labels.drop(columns=['Subject','T1.SERIESID'])\n return labels\n\ndef create_full_dataset(labels, data_path):\n print(\"Create full data set: for each MRI, attach a label to it\")\n file_names=pd.DataFrame()\n for filename in os.listdir(data_path):\n row=dict()\n row['filename']=filename\n try: \n series_id = re.search(pattern1, filename).group(0)[1:]\n subject = re.search(pattern2,filename).group(0)\n ind=subject+\"_\"+series_id\n row['ind']=ind\n row['label']=labels.loc[ind,'Group'] \n except:\n row['ind']= 'No label'\n row['label']= 'No label' \n\n file_names=file_names.append(row,ignore_index=True)\n\n file_names = file_names[file_names['ind']!='No label']\n print(\"Done\")\n return file_names\n\ndef augment_dataset(full_dataset):\n print('Augment the dataset')\n target_count = full_dataset.label.value_counts()\n print('Class 0:', target_count[0])\n print('Class 1:', target_count[1])\n print('Class 2:', target_count[2])\n print('Class 3:', target_count[3])\n print('Class 4:', target_count[4])\n target_count.plot(kind='bar', title='Count (target)')\n \n df_class_0 = full_dataset[full_dataset['label']==0]\n \n #Augment all underrepresented classes\n \n missing_element_1 = target_count[0] - target_count[1]\n df_class_1 = full_dataset[full_dataset['label']==1]\n df_class_1_complementary = df_class_1.sample(missing_element_1, replace=True, random_state=2)\n df_class_1_final = pd.concat([df_class_1, df_class_1_complementary], axis=0)\n \n missing_element_2 = target_count[0] - target_count[2]\n df_class_2 = full_dataset[full_dataset['label']==2]\n df_class_2_complementary = df_class_2.sample(missing_element_2, replace=True, random_state=4)\n df_class_2_final = pd.concat([df_class_2, df_class_2_complementary], axis=0)\n \n missing_element_3 = target_count[0] - target_count[3]\n df_class_3 = full_dataset[full_dataset['label']==3]\n df_class_3_complementary = df_class_3.sample(missing_element_3, replace=True, random_state=8)\n df_class_3_final = pd.concat([df_class_3, df_class_3_complementary], axis=0)\n \n missing_element_4 = target_count[0] - target_count[4]\n df_class_4 = full_dataset[full_dataset['label']==4]\n df_class_4_complementary = df_class_4.sample(missing_element_4, replace=True, random_state=16)\n df_class_4_final = pd.concat([df_class_4, df_class_4_complementary], axis=0)\n \n \n \n dataset_augmented = pd.concat([df_class_0, df_class_1_final, df_class_2_final,\n df_class_3_final, df_class_4_final], axis=0).sample(frac=1, random_state=64).reset_index(drop=True)\n return dataset_augmented\n\n\n\nif __name__ == \"__main__\":\n df = pd.read_csv(\"labels.csv\")\n labels = create_labels(df)\n full_dataset = create_full_dataset(labels, \"./data\")\n dataset_augmented=augment_dataset(full_dataset)\n SIZE = len(dataset_augmented)\n #create the 10 
# datasets:\n    slice_size = SIZE//NUM_FOLDS \n    \n    for i in range(NUM_FOLDS):\n        print(\"Create the dataset \" + str(i))\n        data_fold_path = \"./datafolds/datafold_\"+str(i)\n        # make sure the fold directory exists before writing the CSVs\n        if not os.path.exists(data_fold_path):\n            os.makedirs(data_fold_path)\n        test_set = dataset_augmented.iloc[slice_size * i : slice_size * (i+1)]\n        training_set = pd.concat([dataset_augmented.iloc[slice_size * (i+1):], \n                                  dataset_augmented.iloc[:slice_size * i]], axis=0)\n        training_set.to_csv(data_fold_path+\"/train_set.csv\")\n        test_set.to_csv(data_fold_path+\"/test_set.csv\")\n    \n"
},
{
"alpha_fraction": 0.5730049014091492,
"alphanum_fraction": 0.5927636623382568,
"avg_line_length": 37.58415985107422,
"blob_id": "e253b661fe49e322f0ef751ea5c80bac0edbc05a",
"content_id": "491a6d93229d0c7a04f24cc2513b3e527aa30630",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3897,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 101,
"path": "/lrp_2D/visualization_results.py",
"repo_name": "leflemluc/MRIs",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport matplotlib.image as mpimg\nimport tensorflow as tf\nfrom utils import _parse_function\nfrom matplotlib import pyplot as plt\n\nimport pandas as pd\ntesting_labels = pd.read_csv('data/testing_file_names.csv')\n\ntesting_labels[:3]\n\ndef get_ten_images():\n batch_size=10\n sess = tf.Session()\n filenames_x = tf.placeholder(tf.string, shape=[None])\n dataset_x = tf.data.TFRecordDataset(filenames_x).map(_parse_function).batch(batch_size)\n iterator_x = dataset_x.make_initializable_iterator()\n next_element_x = iterator_x.get_next()\n\n filenames_y = tf.placeholder(tf.string, shape=[None])\n dataset_y = tf.data.TFRecordDataset(filenames_y).map(_parse_function).batch(batch_size)\n iterator_y = dataset_y.make_initializable_iterator()\n next_element_y = iterator_y.get_next()\n\n filenames_z = tf.placeholder(tf.string, shape=[None])\n dataset_z = tf.data.TFRecordDataset(filenames_z).map(_parse_function).batch(batch_size)\n iterator_z = dataset_z.make_initializable_iterator()\n next_element_z = iterator_z.get_next()\n\n training_filenames_x = [\"../testing_flat_x_156_full_dataset.tfrecords\"]\n training_filenames_y = [\"../testing_flat_y_156_full_dataset.tfrecords\"]\n training_filenames_z = [\"../testing_flat_z_156_full_dataset.tfrecords\"]\n\n sess.run(iterator_x.initializer, feed_dict={filenames_x: training_filenames_x})\n sess.run(iterator_y.initializer, feed_dict={filenames_y: training_filenames_y})\n sess.run(iterator_z.initializer, feed_dict={filenames_z: training_filenames_z})\n images_x, labels_x, name_x = sess.run(next_element_x)\n images_y, labels_y, name_y = sess.run(next_element_y)\n images_z, labels_z, name_z = sess.run(next_element_z)\n \n return images_x, labels_x, name_x, images_y, labels_y, name_y, images_z, labels_z, name_z\n\n\ndef _parse_label(np_array):\n if np.argmax(np_array) == 0:\n label = 'CN'\n if np.argmax(np_array) == 1:\n label = 'AD'\n if np.argmax(np_array) == 2:\n label = 'EMCI'\n if np.argmax(np_array) == 3:\n label = 'CMCI'\n if np.argmax(np_array) == 4:\n label = 'MCI'\n return label\n\n\ndef create_images(N):\n for i in range(N):\n images_x, labels_x, name_x, images_y, labels_y, name_y, images_z, labels_z, name_z = get_ten_images()\n for j in range(10):\n print('New image below')\n label = _parse_label(labels_x[j])\n scale = 5\n fig, axes = plt.subplots(2,3)# squeeze=False)\n plt.axis('off')\n fig.set_size_inches(5*scale,5*scale)\n fig.subplots_adjust(hspace=-0.5)\n plt.tight_layout()\n MRI_X = images_x[j].reshape([156, 156])\n axes[0][0].imshow(MRI_X, cmap='gray')\n axes[0][0].set_title(label)\n MRI_Y = images_y[j].reshape([156, 156])\n axes[0][1].imshow(MRI_Y, cmap='gray')\n axes[0][1].set_title(label)\n MRI_Z = images_z[j].reshape([156, 156])\n axes[0][2].imshow(MRI_Z, cmap='gray')\n axes[0][2].set_title(label)\n \n lrp_x = mpimg.imread('results_x_neg/'+name_x[j].decode()+'_x.png')\n axes[1][0].imshow(lrp_x)\n axes[1][0].axis('off')\n axes[1][0].set_title(label)\n \n lrp_y = mpimg.imread('results_y_neg/'+name_x[j].decode()+'_y.png')\n axes[1][1].imshow(lrp_y)\n axes[1][1].axis('off')\n axes[1][1].set_title(label)\n \n lrp_z = mpimg.imread('results_z_neg/'+name_x[j].decode()+'_z.png')\n axes[1][2].imshow(lrp_z)\n axes[1][2].axis('off')\n axes[1][2].set_title(label)\n title = name_x[j].decode() + ' : ' + label\n fig.suptitle(title)\n plt.show()\n print('\\n\\n\\n')\n\n\ncreate_images(5) # produces 10 times number of images\n"
}
] | 9 |
gy0216/API | https://github.com/gy0216/API | bc13625a1e7876b81624391a8c643bb1154c8c3b | 277b321e899e8b3cb606b6e976be3dda2a28a7ef | b0ef0e77fcb4204fc2880bad415ca63091507b96 | refs/heads/master | 2021-09-09T22:46:22.830788 | 2018-03-20T04:07:10 | 2018-03-20T04:07:10 | 111,372,284 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6155303120613098,
"alphanum_fraction": 0.6246843338012695,
"avg_line_length": 38.11111068725586,
"blob_id": "23418b89749d03bd302e8462a004dc910bfc5599",
"content_id": "fa5777009c9def0776af32d29af713183abbbfb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3168,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 81,
"path": "/app.py",
"repo_name": "gy0216/API",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\nfrom flask_restful import Resource,Api\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom konltk.tokenizer import asp \nimport json\nfrom flask_jsonpify import jsonify\nfrom konltk.tokenizer import wordcount\nfrom flask_oauthlib.provider import OAuth2Provider\n\napp = Flask(__name__)\[email protected]('/success/autospacing/<text>')\ndef success_autospacing(text):\n q = asp.KltAsp()\n q.dic_init('/usr/local/asp_dic')\n str_ = q.asp(text.encode(\"utf8\"))\n it= json.dumps(str_, ensure_ascii=False, encoding ='utf-8') \n return it\n\[email protected]('/success/wordcounting/<text>')\ndef success_wordcounting(text):\n q = wordcount.WordCount()\n str_ = q.wordcount(text.encode('utf8'))\n it = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n return it\n\[email protected]('/success/wordcounting/sort/<text>')\ndef success_wordcounting_sort(text):\n q = wordcount.WordCount()\n str_ = q.wordcount(text.encode('utf8'))\n str_.sort(key=lambda t:t[1])\n it = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n return it \n\[email protected]('/success/wordcounting/sort_reverse/<text>')\ndef success_wordcounting_reverse(text):\n q = wordcount.WordCount()\n str_ = q.wordcount(text.encode('utf8'))\n str_.sort(key=lambda t:t[1])\n str_.reverse()\n it = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n return it \n\[email protected]('/nltk/autospacing',methods = ['POST','GET'])\ndef get_text_autospacing():\n if request.method=='POST':\n user = request.form['space']\n return redirect(url_for('success_autospacing', text = user))\n else :\n user = request.args.get('space')\n return redirect(url_for('success_autospacing',text= user))\[email protected]('/nltk/wordcounting', methods = ['POST','GET'])\ndef get_text_wordcounting():\n if request.method == 'POST':\n user = request.form['count']\n return redirect(url_for('success_wordcounting' , text = user))\n else :\n user = request.args.get('count')\n return redirect(url_for('success_wordcounting',text = user))\n\[email protected]('/nltk/wordcounting/sort', methods = ['POST','GET'])\ndef get_text_wordcounting_sort():\n if request.method == 'POST':\n user1 = request.form['count_sort']\n return redirect(url_for('success_wordcounting_sort' , text = user1))\n else :\n user1 = request.args.get('count_sort')\n return redirect(url_for('success_wordcounting_sort',text = user1))\n\[email protected]('/nltk/wordcounting/reverse', methods = ['POST','GET'])\ndef get_text_wordcounting_reverse():\n if request.method == 'POST':\n user1 = request.form['count_sort']\n return redirect(url_for('success_wordcounting_reverse' , text = user1))\n else :\n user1 = request.args.get('count_sort')\n return redirect(url_for('success_wordcounting_reverse',text = user1))\[email protected]('/')\ndef index():\n return render_template('index2.html')\nif __name__ == '__main__' :\n app.run(host='0.0.0.0',port=8888, debug=True)\n"
},
{
"alpha_fraction": 0.6219398975372314,
"alphanum_fraction": 0.6272079348564148,
"avg_line_length": 30.330097198486328,
"blob_id": "10fff0b46fa97df3eb13e881f12a4491f78d000e",
"content_id": "a3a1becc15b327808229dae3e8d0965a12393cc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3227,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 103,
"path": "/app-2.py",
"repo_name": "gy0216/API",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\nfrom flask_restful import Resource,Api\nfrom flask import Flask, redirect, url_for, request, render_template\nimport json\nimport konltk\nfrom konltk.tokenizer import asp\nfrom flask_jsonpify import jsonify\nfrom konltk.tokenizer import wordcount\nfrom flask_oauthlib.provider import OAuth2Provider\nfrom flask_restful import reqparse\nfrom flask import Flask \nfrom flask import make_response\nfrom flask import request\napp = Flask(__name__)\napi = Api(app)\n#_userText = request.json['text']\n\nclass autospace(Resource):\n\tdef post(self):\n\t\ttry:\n#\t\t\tresult = {\n#\t\t\t\t'text' : request.json['text']\n#\t\t\t}\n#\t\t\treturn jsonify({'text': result})\n#\t\t\tparser = reqparse.RequestParser()\n#\t\t\tparser.add_argument('text', type=str)\n#\t\t\targs = parser.parse_args()\n\n\t\t\t_userText = request.json['text'] \n\t\t\tprint _userText\n\t\t\tq = asp.KltAsp()\n\n\t\t\tq.dic_init('/usr/local/asp_dic')\n\t\t\tstr_ = q.asp(_userText.encode(\"utf8\"))\n\t\t\tit = json.dumps(str_, ensure_ascii=False, encoding ='utf-8')\n\t\t\tprint \"before response:\", str_\n\t\t\tprint \"it:\", it\n\t\t\treturn make_response(it) \n\t\texcept Exception as e:\n\t\t\treturn {'error': str(e)}\napi.add_resource(autospace, '/autospacing')\n\nclass wordcount(Resource):\n\tdef post(self):\n\t\t\t_userText = request.json['text']\n\t\t\tq=konltk.tokenizer.wordcount.WordCount()\n\t\t\tstr_=q.wordcount(_userText.encode(\"utf8\"))\n\t\t\tit = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n\t\t\treturn make_response(it)\napi.add_resource(wordcount, '/wordcounting')\n\nclass wordcount_sort(Resource):\n def post(self):\n _userText = request.json['text']\n q=konltk.tokenizer.wordcount.WordCount()\n str_=q.wordcount(_userText.encode(\"utf8\"))\n\t\t\tstr_.sort(key=lambda t:t[1])\n it = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n return make_response(it)\napi.add_resource(wordcount_sort, '/wordcount_sort')\n\nclass wordcount_reverse(Resource):\n def post(self):\n _userText = request.json['text']\n q=konltk.tokenizer.wordcount.WordCount()\n str_=q.wordcount(_userText.encode(\"utf8\"))\n str_.sort(key=lambda t:t[1])\n\t\t\tstr_.reverse()\n it = json.dumps(str_, ensure_ascii=False, encoding='utf-8')\n return make_response(it)\napi.add_resource(wordcount_reverse, '/wordcount_reverse')\n\n#@app.route('/nltk/autospacing',methods = ['POST','GET'])\n#def get_text_autospacing():\n#\tif request.method=='POST':\n#\t\ttry:\n#\t\t\tparser = reqparse.RequestParser()\n#\t\t\tparser.add_argument('text', type=str)\n#\t\t\targs = parser.parse_args()\n\n#\t\t\t_userText = args['text']\n\n#\t\t\tq = asp.KltAsp()\n#\t\t\tq.dic_init('/usr/local/asp_dic')\n#\t\t\tstr_ = q.asp(_userText.encode(\"utf8\"))\n\n#\t\t\treturn {'Text': str_}\n#\t\texcept Exception as e:\n#\t\t\treturn {'error': str(e)}\n#\t\tuser = request.form['space']\n#\t\treturn redirect(url_for('success_autospacing', text = user))\n#\telse :\n#\t\tuser = request.args.get('space')\n#\t\treturn redirect(url_for('success_autospacing',text= user))\n \n\n\n#@app.route('/')\n#def index():\n#\treturn render_template('index.html')\n\nif __name__ == '__main__' :\n\tapp.run(host='0.0.0.0',debug=True)\n"
}
] | 2 |
ParkLC/Projects | https://github.com/ParkLC/Projects | 105dddef73decb7f45e183647df47f21334b5816 | c70f43a059dac47a749c6c106f70a4c9a45d150d | 3703aab04ce9c97e337dcbd08cd804be42ae6985 | refs/heads/main | 2023-08-24T23:06:54.791416 | 2021-10-27T00:18:34 | 2021-10-27T00:18:34 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5480928421020508,
"alphanum_fraction": 0.5626413226127625,
"avg_line_length": 40.65306091308594,
"blob_id": "a1da960d47e62cd22f5ebafdcb99abef4669fbe4",
"content_id": "0b95022aebf5f510169fde411ce43815b3530618",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 26532,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 637,
"path": "/DeadWood/BoardLayersListener.java",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "import java.awt.*;\nimport javax.swing.*;\nimport javax.swing.ImageIcon;\nimport javax.imageio.ImageIO;\nimport java.awt.event.*;\nimport java.util.*;\nimport java.lang.*;\n\npublic class BoardLayersListener extends JFrame {\n\n // static Board<Room> board;\n static final long serialVersionUID = 0;\n static boolean roomsVisible = false;\n static boolean rolesVisible = false;\n static boolean moveSelections = false;\n static boolean upgradesVisible = false;\n static JButton[] roomButtonArr;\n static JButton[] roleButtonArr;\n static JButton[] upgradeButtonArrDollar = new JButton[5];\n static JButton[] upgradeButtonArrCredit = new JButton[5];\n static Role[] roleArr;\n @SuppressWarnings(\"unchecked\")\n Board<Room> board = Board.getInstance();\n \n // JLabels\n static JLabel[] upgradeLabels = new JLabel[8];\n static ArrayList<JLabel> scoreLabels;\n JLabel boardlabel;\n JLabel cardlabel;\n ArrayList<JLabel> playerlabels;\n static JLabel[] blankCards = new JLabel[12];\n\n // JButtons\n JButton bAct;\n JButton bRehearse;\n JButton bMove;\n JButton bUpgrade;\n JButton bTakeRole;\n JButton bEnd;\n\n // JLayered Pane\n JLayeredPane bPane;\n\n ImageIcon icon;\n\n Deadwood controller = Deadwood.getInstance();\n\n // Singleton\n private static BoardLayersListener instance = new BoardLayersListener();\n\n public static BoardLayersListener getInstance(){\n return instance;\n }\n\n private BoardLayersListener() {\n // Create window and board\n super(\"Deadwood\");\n setDefaultCloseOperation(EXIT_ON_CLOSE);\n bPane = getLayeredPane();\n boardlabel = new JLabel();\n icon = new ImageIcon(\"images/board.jpg\");\n boardlabel.setIcon(icon);\n boardlabel.setBounds(0, 0, icon.getIconWidth(), icon.getIconHeight());\n bPane.add(boardlabel, Integer.valueOf(1));\n setSize(icon.getIconWidth() + 200, icon.getIconHeight());\n\n // Create action buttons\n bAct = new JButton(\"ACT\");\n bAct.setName(\"act\");\n bAct.setBackground(Color.white);\n bAct.setBounds(icon.getIconWidth() + 10, 60, 150, 100);\n bAct.addMouseListener(new boardMouseListener());\n\n bRehearse = new JButton(\"REHEARSE\");\n bRehearse.setName(\"rehearse\");\n bRehearse.setBackground(Color.white);\n bRehearse.setBounds(icon.getIconWidth() + 170, 60, 150, 100);\n bRehearse.addMouseListener(new boardMouseListener());\n\n bMove = new JButton(\"MOVE\");\n bMove.setName(\"move\");\n bMove.setBackground(Color.white);\n bMove.setBounds(icon.getIconWidth() + 330, 60, 150, 100);\n bMove.addMouseListener(new boardMouseListener());\n\n bUpgrade = new JButton(\"UPGRADE\");\n bUpgrade.setName(\"upgrade\");\n bUpgrade.setBackground(Color.white);\n bUpgrade.setBounds(icon.getIconWidth() + 10, 170, 150, 100);\n bUpgrade.addMouseListener(new boardMouseListener());\n\n bTakeRole = new JButton(\"TAKE ROLE\");\n bTakeRole.setName(\"work\");\n bTakeRole.setBackground(Color.white);\n bTakeRole.setBounds(icon.getIconWidth() + 170, 170, 150, 100);\n bTakeRole.addMouseListener(new boardMouseListener());\n\n bEnd = new JButton(\"END\");\n bEnd.setName(\"end\");\n bEnd.setBackground(Color.white);\n bEnd.setBounds(icon.getIconWidth() + 330, 170, 150, 100);\n bEnd.addMouseListener(new boardMouseListener());\n\n // Place the action buttons in the top layer\n bPane.add(bAct, Integer.valueOf(2));\n bPane.add(bRehearse, Integer.valueOf(2));\n bPane.add(bMove, Integer.valueOf(2));\n bPane.add(bUpgrade, Integer.valueOf(2));\n bPane.add(bTakeRole, Integer.valueOf(2));\n bPane.add(bEnd, Integer.valueOf(2));\n\n // Current Player Label\n JLabel currPlayerLabel = new 
JLabel(\"Current Player: \");\n currPlayerLabel.setBounds(icon.getIconWidth() + 10, 800, 200, 200);\n bPane.add(currPlayerLabel, Integer.valueOf(2));\n }\n\n /* Method displays a popup window to the user\n */\n public void displayMessage(String message){\n JFrame frame = new JFrame(\"message\");\n JOptionPane.showMessageDialog(frame, message);\n }\n\n /* Method resets cards, players and shot counters\n * Called on a new day\n */\n public void resetGUI(){\n initBlankCards(controller.getRooms());\n initShotCounters(controller.getRooms());\n int offsetX = 0;\n int offsetY = 0;\n Player[] players = controller.getPlayerOrder();\n JLabel pLabel;\n ImageIcon pIcon;\n for(int i = 0; i < players.length; i++){\n movePlayer(players[i], 995 + offsetX, 275 + offsetY);\n offsetX += 50;\n if(i == 3){\n offsetX = 0;\n offsetY += 50;\n }\n }\n }\n\n /* Prompts the user for the amount of players in the game\n * Called at the start of the game\n */\n public int getPlayerAmount(){\n String count = \"\";\n JFrame playerPrompt = new JFrame();\n String[] choices = {\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"};\n count = (String) JOptionPane.showInputDialog(null, \"How Many Players?\", \"Player Selection\", JOptionPane.QUESTION_MESSAGE, null, choices, choices[0]);\n int result = Integer.parseInt(count);\n return result;\n }\n\n /* Method displays the current player's dice in the bottom right corner of the screen\n */\n public void displayCurrentPlayer(Player currentPlayer){\n JLabel pLabel = new JLabel();\n pLabel.setIcon(currentPlayer.getIcon());\n pLabel.setBounds(icon.getIconWidth() + 130, 877, currentPlayer.getIcon().getIconHeight(), currentPlayer.getIcon().getIconHeight());\n bPane.add(pLabel, Integer.valueOf(2));\n bPane.moveToFront(pLabel);\n }\n\n /* Method moves the players dice to a specifed x, y coordinate\n * Has to find correct JLabel to move based on player's name\n */\n public void movePlayer(Player player, int xCord, int yCord){\n for(int i = 0; i < playerlabels.size(); i++){\n if(player.getName() == playerlabels.get(i).getName()){\n playerlabels.get(i).setBounds(xCord, yCord, player.getIcon().getIconWidth(), player.getIcon().getIconHeight());\n }\n }\n bPane.repaint();\n }\n\n /* Method initalizes all players dice icons in the trailers at the start of every day\n */\n public void initPlayerPosition(Player[] players){\n JLabel pLabel;\n ImageIcon pIcon;\n int offsetX = 0;\n int offsetY = 0;\n playerlabels = new ArrayList<JLabel>();\n scoreLabels = new ArrayList<JLabel>();\n for(int i = 0; i < players.length; i++){\n pLabel = new JLabel();\n pIcon = players[i].getIcon();\n pLabel.setIcon(pIcon);\n pLabel.setBounds(995 + offsetX, 275 + offsetY, 46, 46);\n pLabel.setName(players[i].getName());\n playerlabels.add(pLabel);\n bPane.add(playerlabels.get(i), Integer.valueOf(10));\n pLabel.setVisible(true);\n\n offsetX += 50;\n\n if(i == 3){\n offsetX = 0;\n offsetY += 50;\n }\n scoreLabels.add(i, players[i].getPLabel());\n scoreLabels.get(i).setVisible(false);\n bPane.add(scoreLabels.get(i), Integer.valueOf(2));\n }\n }\n\n /* Method called when a room wraps\n * Players are put back in the blank areas of the rooms\n */\n public void resetPositions(Room room){\n ArrayList<Player> players = room.getPlayers();\n ArrayList<Player> p = room.getCard().getPlayers();\n players.addAll(p);\n\n int[] playerHolders = room.getPlayerHolderCoord();\n int x = 0;\n for(int i = 0; i < players.size() * 2; i+=2){\n movePlayer(players.get(x), playerHolders[i], playerHolders[i + 1]);\n x++;\n }\n \n }\n\n /* Method 
initalizes all the blank cards in each room at the start of every day\n */\n public void initBlankCards(Room[] rooms){\n ImageIcon cardImg;\n for(int i = 0; i < rooms.length; i++){\n if(!(rooms[i].getName().equals(\"Trailers\")) && !(rooms[i].getName().equals(\"Casting Office\"))){\n if(blankCards[i] == null){\n blankCards[i] = new JLabel();\n cardImg = new ImageIcon(\"images/cardback.jpg\");\n Image scaledImg = cardImg.getImage();\n scaledImg = scaledImg.getScaledInstance(205, 115, java.awt.Image.SCALE_SMOOTH);\n cardImg = new ImageIcon(scaledImg);\n blankCards[i].setIcon(cardImg);\n blankCards[i].setBounds(rooms[i].getCardX(), rooms[i].getCardY(), 205, 115);\n bPane.add(blankCards[i], Integer.valueOf(2));\n }\n blankCards[i].setVisible(true);\n }\n }\n }\n\n /* Method initalizes all the shot counters in each room at the start of every day\n */\n public void initShotCounters(Room[] rooms){\n ImageIcon shot = new ImageIcon(\"images/shot.png\");\n for(int i = 0; i < rooms.length; i++){\n if(!(rooms[i].getName().equals(\"Trailers\")) && !(rooms[i].getName().equals(\"Casting Office\"))){\n JLabel[] shotLabels = rooms[i].getShotLabels();\n ArrayList<Integer> shotCounterCoords = rooms[i].getShotCounterCoords();\n int x = 0;\n int y = 1;\n for(int j = 0; j < shotCounterCoords.size()/2; j++){\n if(shotLabels[j] == null){\n shotLabels[j] = new JLabel();\n shotLabels[j].setIcon(shot);\n shotLabels[j].setBounds(shotCounterCoords.get(x), shotCounterCoords.get(y), shot.getIconWidth(), shot.getIconHeight());\n bPane.add(shotLabels[j], Integer.valueOf(2));\n x+=2;\n y+=2;\n }\n shotLabels[j].setVisible(true);\n }\n }\n }\n }\n\n /* Method inializes the upgrade buttons when a player clicks the upgrade menu button\n */\n public void initUpgradeButtons(){\n upgradeButtonArrDollar[0] = new JButton(\"4\");\n upgradeButtonArrDollar[0].setName(\"$-2\");\n upgradeButtonArrDollar[0].addMouseListener(new boardMouseListener());\n upgradeButtonArrDollar[0].setBackground(Color.white);\n upgradeButtonArrDollar[1] = new JButton(\"10\");\n upgradeButtonArrDollar[1].setName(\"$-3\");\n upgradeButtonArrDollar[1].addMouseListener(new boardMouseListener());\n upgradeButtonArrDollar[1].setBackground(Color.white);\n upgradeButtonArrDollar[2] = new JButton(\"18\");\n upgradeButtonArrDollar[2].setName(\"$-4\");\n upgradeButtonArrDollar[2].addMouseListener(new boardMouseListener());\n upgradeButtonArrDollar[2].setBackground(Color.white);\n upgradeButtonArrDollar[3] = new JButton(\"28\");\n upgradeButtonArrDollar[3].setName(\"$-5\");\n upgradeButtonArrDollar[3].addMouseListener(new boardMouseListener());\n upgradeButtonArrDollar[3].setBackground(Color.white);\n upgradeButtonArrDollar[4] = new JButton(\"40\");\n upgradeButtonArrDollar[4].setName(\"$-6\");\n upgradeButtonArrDollar[4].addMouseListener(new boardMouseListener());\n upgradeButtonArrDollar[4].setBackground(Color.white);\n\n upgradeButtonArrCredit[0] = new JButton(\"5\");\n upgradeButtonArrCredit[0].setName(\"c-2\");\n upgradeButtonArrCredit[0].addMouseListener(new boardMouseListener());\n upgradeButtonArrCredit[0].setBackground(Color.white);\n upgradeButtonArrCredit[1] = new JButton(\"10\");\n upgradeButtonArrCredit[1].setName(\"c-3\");\n upgradeButtonArrCredit[1].addMouseListener(new boardMouseListener());\n upgradeButtonArrCredit[1].setBackground(Color.white);\n upgradeButtonArrCredit[2] = new JButton(\"15\");\n upgradeButtonArrCredit[2].setName(\"c-4\");\n upgradeButtonArrCredit[2].addMouseListener(new boardMouseListener());\n 
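// button names encode payment type and rank (e.g. \"$-4\", \"c-4\") so the click handler can build the \"upgrade-{type}-{rank}\" action strings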
upgradeButtonArrCredit[2].setBackground(Color.white);\n upgradeButtonArrCredit[3] = new JButton(\"20\");\n upgradeButtonArrCredit[3].setName(\"c-5\");\n upgradeButtonArrCredit[3].addMouseListener(new boardMouseListener());\n upgradeButtonArrCredit[3].setBackground(Color.white);\n upgradeButtonArrCredit[4] = new JButton(\"25\");\n upgradeButtonArrCredit[4].setName(\"c-6\");\n upgradeButtonArrCredit[4].addMouseListener(new boardMouseListener());\n upgradeButtonArrCredit[4].setBackground(Color.white);\n\n ImageIcon board = new ImageIcon(\"images/board.jpg\");\n int Yoffset = 0;\n for(int i = 0; i < 5; i++){\n upgradeLabels[i] = new JLabel(\"rank \" + (i+2));\n upgradeLabels[i].setName(\"\" + (i+2));\n ImageIcon icon = new ImageIcon(\"images/dice/w\" + (i+2) + \".png\");\n upgradeLabels[i].setIcon(icon);\n upgradeLabels[i].setBounds(board.getIconWidth() + 10, (board.getIconHeight()/2) + Yoffset, icon.getIconWidth(), icon.getIconHeight());\n upgradeButtonArrDollar[i].setBounds(board.getIconWidth() + 70, (board.getIconHeight()/2) + Yoffset, 50, 30);\n upgradeButtonArrCredit[i].setBounds(board.getIconWidth() + 150, (board.getIconHeight()/2) + Yoffset, 50, 30);\n\n bPane.add(upgradeLabels[i]);\n bPane.add(upgradeButtonArrCredit[i]);\n bPane.add(upgradeButtonArrDollar[i]);\n Yoffset+=60;\n }\n\n upgradeLabels[5] = new JLabel(\"RANK\");\n upgradeLabels[5].setBounds(board.getIconWidth() + 10, (board.getIconHeight()/2) - 30, 100, 20);\n\n upgradeLabels[6] = new JLabel(\"DOLLARS\");\n upgradeLabels[6].setBounds(board.getIconWidth() + 70, (board.getIconHeight()/2) - 30, 100, 20);\n\n upgradeLabels[7] = new JLabel(\"CREDITS\");\n upgradeLabels[7].setBounds(board.getIconWidth() + 150, (board.getIconHeight()/2) - 30, 100, 20);\n\n bPane.add(upgradeLabels[5]);\n bPane.add(upgradeLabels[6]);\n bPane.add(upgradeLabels[7]);\n disableUpgrades();\n }\n\n /* Method hides all upgrade buttons from screen\n */\n public void disableUpgrades(){\n for(int i = 0; i < 5; i++){\n upgradeLabels[i].setVisible(false);\n upgradeButtonArrCredit[i].setVisible(false);\n upgradeButtonArrDollar[i].setVisible(false);\n }\n upgradeLabels[5].setVisible(false);\n upgradeLabels[6].setVisible(false);\n upgradeLabels[7].setVisible(false);\n }\n\n /* Method reveals all upgrade buttons on screen\n */\n public void enableUpgrades(){\n for(int i = 0; i < 5; i++){\n upgradeLabels[i].setVisible(true);\n upgradeButtonArrCredit[i].setVisible(true);\n upgradeButtonArrDollar[i].setVisible(true);\n }\n upgradeLabels[5].setVisible(true);\n upgradeLabels[6].setVisible(true);\n upgradeLabels[7].setVisible(true);\n }\n\n /* Method places card on board\n * Called when player enters room for the first time\n */\n public void revealCard(Room room, Card card){\n JLabel cardLabel = card.getJLabel();\n ImageIcon cardImg = card.getImage();\n cardLabel.setIcon(cardImg);\n cardLabel.setBounds(room.getCardX(), room.getCardY(), cardImg.getIconWidth(), cardImg.getIconHeight());\n cardLabel.setVisible(true);\n bPane.add(cardLabel, Integer.valueOf(4));\n blankCards[room.getID()].setVisible(false);\n }\n\n /* Method clears the card from a room after the room wraps\n * Called from bank\n */\n public void clearCard(Card card){\n JLabel cardLabel = card.getJLabel();\n cardLabel.setVisible(false);\n }\n\n /* Method clears 1 shot counter from room when player successfully acts\n */\n public void removeShotCounter(Room room){\n int shotNum = room.getShots();\n JLabel[] shotLabels = room.getShotLabels();\n shotLabels[shotNum].setVisible(false);\n }\n\n /* Method updates the 
players dice icon to display the correct rank\n */\n public void setNewRank(Player player, int rank){\n ImageIcon icon = new ImageIcon(\"images/dice/\" + (\"\"+player.getName().charAt(0)).toLowerCase() + rank + \".png\");\n player.setIcon(icon);\n for(int i = 0; i < playerlabels.size(); i++){\n if(player.getName() == playerlabels.get(i).getName()){\n playerlabels.get(i).setIcon(icon);\n }\n }\n displayScores(controller.getPlayerOrder());\n displayCurrentPlayer(player);\n bPane.repaint();\n }\n\n /* Method displays the score after every turn\n */\n public void displayScores(Player[] players){\n int offSet = 0;\n for(int i = 0; i < players.length; i++){\n scoreLabels.get(i).setText(\"<html> Dollars: \" + players[i].getDollars() + \n \"<br> Credits: \" + players[i].getCredits() + \n \"<br> Rank: \" + players[i].getRank() + \n \"<br> Score: \" + players[i].getScore() + \"</html>\");\n ImageIcon pIcon = players[i].getIcon();\n scoreLabels.get(i).setIcon(pIcon);\n scoreLabels.get(i).setBounds(25 + offSet, 900, 190, 100);\n scoreLabels.get(i).setVisible(true);\n bPane.repaint();\n offSet += 125;\n }\n }\n\n /* Method displays only the buttons that are legal moves for the player\n * NOTE: Occasionly, bTakeRole is displayed when it shouldn't, if clicked, you will be stuck and will have to end the game :(\n */\n public void displayVisibleButtons(Player player){\n disableMenu();\n if(player.getCurrentRole() != null){\n bAct.setVisible(true);\n bRehearse.setVisible(true);\n }\n if(player.getCurrentRole() == null && !player.getMoveFlag()){\n bMove.setVisible(true);\n }\n if(player.getCurrentRole() == null && player.getCurrentRoom().hasWrapped() == \"unwrapped\" && controller.getAvailableRolesCount() != 0){\n bTakeRole.setVisible(true);\n }\n if(player.getCurrentRoom().getName() == \"Casting Office\"){\n bUpgrade.setVisible(true);\n }\n bEnd.setVisible(true);\n }\n\n /* Method hides all menu buttons\n */\n public void disableMenu(){\n bAct.setVisible(false);\n bEnd.setVisible(false);\n bMove.setVisible(false);\n bRehearse.setVisible(false);\n bTakeRole.setVisible(false);\n bUpgrade.setVisible(false);\n }\n\n /* Method shows all menu buttons\n */\n public void enableMenu(){\n bAct.setVisible(true);\n bEnd.setVisible(true);\n bMove.setVisible(true);\n bRehearse.setVisible(true);\n bTakeRole.setVisible(true);\n bUpgrade.setVisible(true);\n }\n\n /* boardMouseListener notifies Deadwood.java of any mouse clicks\n * Depending on the mouse click, Deadwood.java updates the models and GUI\n */\n class boardMouseListener implements MouseListener {\n String actionMode = \"\";\n public void mouseClicked(MouseEvent e){\n //Act\n if(e.getSource() == bAct && !moveSelections){\n controller.actionMode = \"Act\";\n }\n //Rehearse\n else if(e.getSource() == bRehearse && !moveSelections){\n controller.actionMode = \"Rehearse\";\n }\n //Move\n else if(e.getSource() == bMove || moveSelections){\n moveSelections = true;\n actionMode = \"Move\";\n Player player = controller.getCurrentPlayer();\n Room currentRoom = player.getCurrentRoom();\n int offset = 0;\n if(!roomsVisible){\n ArrayList<Room> neighbors = board.getNeighbors(currentRoom);\n roomButtonArr = new JButton[neighbors.size()];\n for(int i = 0; i < neighbors.size(); i++){\n roomButtonArr[i] = new JButton(neighbors.get(i).getName());\n roomButtonArr[i].setName(neighbors.get(i).getName());\n roomButtonArr[i].setBackground(Color.white);\n roomButtonArr[i].setBounds(icon.getIconWidth() + 170, 300 + offset, 150, 100);\n roomButtonArr[i].addMouseListener(new 
boardMouseListener());\n bPane.add(roomButtonArr[i], Integer.valueOf(2));\n offset += 150;\n roomsVisible = true;\n }\n }\n disableMenu();\n for(int j = 0; j < roomButtonArr.length; j++){\n if(((JButton)e.getSource()).getName() == roomButtonArr[j].getName()){\n moveSelections = false;\n roomsVisible = false;\n controller.actionMode = (\"move-\" + ((JButton)e.getSource()).getName());\n for(int x = 0; x < roomButtonArr.length; x++){\n bPane.remove(roomButtonArr[x]);\n }\n enableMenu();\n bAct.setVisible(false);\n bRehearse.setVisible(false);\n bUpgrade.setVisible(false);\n bMove.setVisible(false);\n if(((JButton)e.getSource()).getName().equals(\"Trailers\") || ((JButton)e.getSource()).getName().equals(\"Casting Office\")){\n bTakeRole.setVisible(false);\n }\n if(((JButton)e.getSource()).getName().equals(\"Casting Office\")){\n bUpgrade.setVisible(true);\n }\n break;\n }\n }\n }\n //Upgrade\n else if(e.getSource() == bUpgrade && !moveSelections){\n actionMode = \"Upgrade\";\n enableUpgrades();\n disableMenu();\n bEnd.setVisible(true);\n }\n //Take a role\n else if(e.getSource() == bTakeRole || rolesVisible){\n actionMode = \"Role\";\n Player player = controller.getCurrentPlayer();\n Room currentRoom = player.getCurrentRoom();\n int invisibleCount = 0;\n int offset = 0;\n if(!rolesVisible){\n bEnd.setVisible(true);\n Role[] roomRoles = currentRoom.getRoles();\n Role[] cardRoles = currentRoom.getCard().getRoles();\n roleArr = new Role[roomRoles.length + cardRoles.length];\n System.arraycopy(roomRoles, 0, roleArr, 0, roomRoles.length);\n System.arraycopy(cardRoles, 0, roleArr, roomRoles.length, cardRoles.length);\n roleButtonArr = new JButton[roleArr.length]; \n for(int i = 0; i < roleButtonArr.length; i++){\n roleButtonArr[i] = new JButton(roleArr[i].getName());\n roleButtonArr[i].setName(roleArr[i].getName());\n roleButtonArr[i].setBackground(Color.white);\n roleButtonArr[i].setBounds(icon.getIconWidth() + 170, 300 + offset, 200, 30);\n roleButtonArr[i].addMouseListener(new boardMouseListener());\n bPane.add(roleButtonArr[i], Integer.valueOf(2));\n offset += 50;\n rolesVisible = true;\n if(roleArr[i].getRank() > player.getRank()|| roleArr[i].getCurrentPlayer() != null){\n roleButtonArr[i].setVisible(false);\n invisibleCount++;\n }\n } \n }\n disableMenu();\n for(int j = 0; j < roleButtonArr.length; j++){\n if(invisibleCount == roleButtonArr.length){\n bEnd.setVisible(true);\n }\n if(((JButton)e.getSource()).getName() == roleButtonArr[j].getName()){\n rolesVisible = false;\n controller.actionMode = (\"work-\" + ((JButton)e.getSource()).getName());\n for(int x = 0; x < roleButtonArr.length; x++){\n bPane.remove(roleButtonArr[x]);\n }\n enableMenu();\n bAct.setVisible(false);\n bRehearse.setVisible(false);\n bUpgrade.setVisible(false);\n bMove.setVisible(false);\n bTakeRole.setVisible(false);\n break;\n }\n }\n }\n //End turn\n else if(e.getSource() == bEnd){\n actionMode = \"End\";\n controller.endTurn();\n disableUpgrades();\n if(controller.isGameOver()){\n controller.endGame();\n }\n }\n else{\n //upgrade button's listen here\n for(int x = 0; x < 5; x++){\n if(e.getSource() == upgradeButtonArrDollar[x]){\n actionMode = \"upgrade-\" + upgradeButtonArrDollar[x].getName();\n Deadwood.actionMode = actionMode;\n disableUpgrades();\n bUpgrade.setVisible(true);\n break;\n }else if(e.getSource() == upgradeButtonArrCredit[x]){\n actionMode = \"upgrade-\" + upgradeButtonArrCredit[x].getName();\n Deadwood.actionMode = actionMode;\n disableUpgrades();\n bUpgrade.setVisible(true);\n break;\n }\n }\n }\n 
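// after every click, refresh each player's score panel so rank/dollar/credit changes show up immediately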
displayScores(controller.getPlayerOrder());\n }\n public void mousePressed(MouseEvent e) {\n }\n public void mouseReleased(MouseEvent e) {\n }\n public void mouseEntered(MouseEvent e) {\n }\n public void mouseExited(MouseEvent e) {\n }\n }\n}"
},
{
"alpha_fraction": 0.6760743856430054,
"alphanum_fraction": 0.6812059283256531,
"avg_line_length": 33.64444351196289,
"blob_id": "4b4c883344e150a1f4322c6adb5b74517bd74dcd",
"content_id": "d96a0c6bc7a9912a08b63afc0e378b0133627408",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1559,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 45,
"path": "/DeadWood/README.md",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "The purpose of this project was to make a board game with the Java Swing package. The entire code based was created by me and my team mate JohnHenry Ward.\nThis project was finished in 2020.\n\n# Deadwood\n\n**For the GUI based Version**\n\nCompile syntax: javac Deadwood.java\n\nRun syntax: java Deadwood\n\n--------------------------------------------------------------------\n\n**For the text based version**\nCompile syntax: javac Deadwood.java\n\nRun syntax: java Deadwood x\n\nx is the number of players, 2-8 inclusive\n\n**Legal operations during gameplay**\n\n```who``` prints current player\n\n```where``` prints location of current player\n\n```role``` prints role of current player\n\n```room options``` prints all legal moves to adjacent rooms\n\n```role options``` pinrts all available roles\n\n```move-{room}``` moves the current player to room specified by replacing room name with {room}\n\n```work-{role}``` assigns player to the specified role by replacing role name with {role}\n\n```act``` when player is assigned to a role, act sees if the player is successful in acting\n\n```rehearse``` when player is assigned to a role, rehearse adds a practice chip to the player\n\n```upgrade-{type}-{rank}``` when player is in casting office, player can upgrade their role, specifing payment type by replacing type with $ for dollars and c for credits, and rank by typing a number 2-6\n\n```score``` prints the dollars, credits, rank and total score of the current player\n\nNote: any other input will print out the possible commands\n"
},
{
"alpha_fraction": 0.5224415063858032,
"alphanum_fraction": 0.5472549200057983,
"avg_line_length": 40.07084655761719,
"blob_id": "b86ed7cda5e927fa640be300456d756ad0f64a4d",
"content_id": "183577093c1423fc1efc7283734fff65fd30f27d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 30145,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 734,
"path": "/DeadWood/Deadwood.java",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "import java.util.*;\nimport java.io.*;\nimport javax.swing.*;\n\npublic class Deadwood{\n static Player currentPlayer;\n static int currentPlayerIndex;\n static int currentDay = 1;\n static int maxDays;\n static Player[] playerOrder;\n static int scenesRemaining;\n static int playerAmount;\n static Room[] rooms;\n static Card[] cards = new Card[40];\n static String[] colors = new String[]{\"BLUE\", \"GREEN\", \"RED\", \"YELLOW\", \"CYAN\", \"ORANGE\", \"PINK\", \"VIOLET\"};//used for identifying players\n @SuppressWarnings(\"unchecked\")\n static Board<Room> board = Board.getInstance();\n static int cardsFlipped = -1;\n static Scanner sc;\n static Bank bank = new Bank();\n static BoardLayersListener gui = BoardLayersListener.getInstance();\n static volatile String actionMode = \"\";\n\n /* Singleton */\n public static final Deadwood instance = new Deadwood();\n\n private Deadwood() {\n }\n\n public static Deadwood getInstance() {\n return instance;\n }\n /* End Singleton */\n\n /* Method called before the game begins\n * special rules are put in place\n * board object is created\n * room objects and paths to rooms are created\n * each players room is set to trailers and each player is added to trailers\n */\n public static void initializeBoard(){\n playerAmount = gui.getPlayerAmount();\n playerOrder = new Player[playerAmount];\n int e = 0;\n while(e < playerAmount){\n playerOrder[e] = new Player(colors[e], \"images/dice/\" + (\"\"+colors[e].charAt(0)).toLowerCase() + \"1.png\", new JLabel());\n e++;\n }\n currentPlayer = playerOrder[0];\n \n gui.initPlayerPosition(playerOrder);\n specialRules();\n createRooms();\n createPaths();\n \n //set each players room to the trailers\n for(int x = 0; x < playerAmount; x++){\n playerOrder[x].setCurrentRoom(rooms[0]);\n rooms[0].addPlayer(playerOrder[x]);\n }\n \n gui.initUpgradeButtons();\n gui.initBlankCards(rooms);\n gui.initShotCounters(rooms);\n gui.displayScores(playerOrder);\n gui.displayVisibleButtons(getCurrentPlayer());\n gui.setVisible(true);\n }\n\n /* The first thing that happens at the beginning of each day (and the beginning of the game)\n * Sets the Players in the Trailer\n * Resets shot counter for each Room\n * current day is incremented\n */\n public static void newDay(){\n createRooms();\n createPaths();\n for(int x = 0; x < playerAmount; x++){\n playerOrder[x].setCurrentRoom(rooms[0]);\n rooms[0].addPlayer(playerOrder[x]);\n playerOrder[x].setMoveFlag(false);\n gui.resetGUI();\n }\n currentDay++;\n\n gui.displayMessage(\"It's a new day! Day \" + currentDay + \". All players are back at the trailers. \" + currentPlayer.getName() + \", you're up!\");\n gui.displayVisibleButtons(currentPlayer);\n }\n\n /* Method called when the game is over\n * Calculate scores and declares winner\n */\n public static void endGame(){\n String message = \"The game is over! 
Here are the scores:\\n\";\n Player winner = playerOrder[0];\n int topScore = 0;\n for(int x = 0; x < playerOrder.length; x++){\n message += \"\\nPlayer: \" + playerOrder[x].getName() + \" Score: \" + calculateScore(playerOrder[x]);\n if(calculateScore(playerOrder[x]) > topScore){\n winner = playerOrder[x];\n topScore = calculateScore(playerOrder[x]);\n }\n }\n message += \"\\nThe winner is player: \" + winner.getName();\n message += \"\\nThank you for playing :)\";\n gui.displayMessage(message);\n System.exit(1);\n }\n\n //read from text file\n //this will be called the first time a player enters a room\n //avoids problem of creating 10 new objects at the start of every day, and instead creating them over the course of the game\n /* Method called when a player enters a room for the first time\n * used for initalizing card objects\n * card object data read in from a cards.txt\n * FORMAT: name,budget,numberOfRoles,roleName,roleRank,roleDescription (continues for numberOfRoles)\n */\n public static void flipCard(Room room){\n cardsFlipped++;\n try{\n if(cardsFlipped == 0){\n File cardFile = new File(\"cards.txt\");\n sc = new Scanner(cardFile);\n }\n String cardLine = sc.nextLine();\n String[] cardLineArray = cardLine.split(\",\");\n String name = cardLineArray[0];\n int budget = Integer.parseInt(cardLineArray[1]);\n int numRoles = Integer.parseInt(cardLineArray[2]);\n String imageUrl = cardLineArray[cardLineArray.length - 1];\n cards[cardsFlipped] = new Card();\n room.setCard(cards[cardsFlipped]);\n room.getCard().initalize(name, budget, imageUrl);\n String roleName;\n int roleRank;\n Role[] roles = new Role[numRoles];\n int i = 0;\n int startingX = 0;\n int incr = 0;\n if(numRoles == 3){\n startingX = 20;\n }\n else if(numRoles == 2){\n startingX = 53;\n }\n else if(numRoles == 1){\n startingX = 83;\n }\n for(int x = 0; x < numRoles; x++){\n roleName = cardLineArray[3 + (x*2)];\n roleRank = Integer.parseInt(cardLineArray[4 + (x*2)]);\n Role role = new Role(roleName, roleRank, startingX + incr, 47);\n roles[i] = role;\n i++;\n incr+=62;\n }\n switch(roles.length){\n case 1: room.getCard().setRoles(roles[0]);\n break;\n case 2: room.getCard().setRoles(roles[0], roles[1]);\n break;\n case 3: room.getCard().setRoles(roles[0], roles[1], roles[2]);\n break;\n default: System.out.println(\"\\nERROR on card initialization\\n\");\n System.exit(0);\n }\n \n }\n catch(FileNotFoundException e){\n System.out.println(\"File not found\");\n }\n gui.revealCard(room, room.getCard());\n }\n\n /* Method called at start of each day\n * creates room object, sets name, shot counter, has empty card\n */\n public static void createRooms(){\n rooms = new Room[12];\n \n rooms[0] = new Room(\"Trailers\", 0, null, 995, 275);\n\n rooms[1] = new Room(\"Casting Office\", 0, null, 9, 459);\n\n rooms[2] = new Room(\"Main Street\", 3, null, 2, 969, 28, 969, 28, new int[] {804, 23, 858, 23, 912, 23}, 969, 148);\n rooms[2].setRoles(new Role(\"Railroad worker\", 1, 637, 22), new Role(\"Falls off Roof\", 2, 720, 22), \n new Role(\"Woman in black Dress\", 2, 637, 105), new Role(\"Mayor McGinty\", 4, 720, 105));\n\n rooms[3] = new Room(\"Saloon\", 2, null, 3, 632, 280, 632, 280, new int[] {679, 216, 626, 216}, 632, 400);\n rooms[3].setRoles(new Role(\"Reluctant Farmer\", 1, 877, 352), new Role(\"Woman in Red Dress\", 2, 877, 276));\n\n rooms[4] = new Room(\"Bank\", 1, null, 4, 623, 475, 623, 476, new int[] {840, 549}, 623, 596);\n rooms[4].setRoles(new Role(\"Suspicious Gentleman\", 2, 911, 554), new Role(\"Flustered Teller\", 
3, 911, 470));\n\n rooms[5] = new Room(\"Church\", 2, null, 5, 623, 734, 624, 734, new int[] {682, 675, 623, 675}, 624, 854);\n rooms[5].setRoles(new Role(\"Dead Man\", 1, 857, 730), new Role(\"Crying Woman\", 2, 858, 730));\n\n rooms[6] = new Room(\"Hotel\", 3, null, 6, 969, 740, 969, 741, new int[] {1111, 683, 1058, 683, 1005, 683}, 969, 861);\n rooms[6].setRoles(new Role(\"Sleeping Drunkard\", 1, 1111, 469), new Role(\"Rare Player\", 1, 1044, 509), \n new Role(\"Falls from Balcony\", 2, 1111, 557), new Role(\"Australian Bartender\", 3, 1046, 596));\n\n rooms[7] = new Room(\"Secret Hideout\", 3, null, 7, 27, 732, 27, 732, new int[] {354, 764, 299, 764, 244, 764}, 27, 852);\n rooms[7].setRoles(new Role(\"Clumsy Pit Fighter\", 1, 435, 719), new Role(\"Thug with Knife\", 2, 521, 719), \n new Role(\"Dangerous Tom\", 3, 435, 808), new Role(\"Penny, who is Lost\", 4, 521, 808));\n \n rooms[8] = new Room(\"Ranch\", 2, null, 8, 252, 478, 252, 478, new int[] {525, 473, 472, 473}, 252, 598);\n rooms[8].setRoles(new Role(\"Shot in Leg\", 1, 412, 608), new Role(\"Saucy Fred\", 2, 488, 608), \n new Role(\"Man Under Horse\", 3, 488, 525));\n\n rooms[9] = new Room(\"General Store\", 2, null, 9, 370, 282, 370, 282, new int[] {313, 330, 313, 277}, 370, 402);\n rooms[9].setRoles(new Role(\"Man in Overalls\", 1, 236, 276), new Role(\"Mister Keach\", 3, 236, 358));\n\n rooms[10] = new Room(\"Jail\", 1, null, 10, 281, 27, 281, 27, new int[] {442, 156}, 281, 147);\n rooms[10].setRoles(new Role(\"Prisoner in Cell\", 2, 519, 25), new Role(\"Feller in Irons\", 3, 519, 105));\n\n rooms[11] = new Room(\"Train Station\", 3, null, 11, 21, 69, 21, 69, new int[] {141, 11, 89, 11, 36, 11}, 21, 189);\n rooms[11].setRoles(new Role(\"Crusty Prospector\", 1, 114, 227), new Role(\"Dragged by Train\", 1, 51, 268), \n new Role(\"Preacher with Bag\", 2, 114, 320), new Role(\"Cyrus the Gunfighter\", 4, 49, 356));\n }\n\n /* Method called to create paths to each room node\n * Paths could be changed to create unique boards\n */\n public static void createPaths(){\n board.addPath(rooms[0], rooms[2]);//trailer <-> main street\n board.addPath(rooms[0], rooms[3]);//trailer <-> saloon\n board.addPath(rooms[0], rooms[6]);//trailer <-> Hotel\n board.addPath(rooms[2], rooms[3]);//main street <-> saloon\n board.addPath(rooms[2], rooms[10]);//main street <-> jail\n board.addPath(rooms[3], rooms[9]);//saloon <-> general store\n board.addPath(rooms[3], rooms[4]);//saloon <-> bank\n board.addPath(rooms[4], rooms[8]);//bank <-> ranch\n board.addPath(rooms[4], rooms[5]);//bank <-> church\n board.addPath(rooms[4], rooms[6]);//bank <-> hotel\n board.addPath(rooms[5], rooms[6]);//church <-> hotel\n board.addPath(rooms[5], rooms[7]);//church <-> secret hideout\n board.addPath(rooms[7], rooms[8]);//secret hideout <-> ranch\n board.addPath(rooms[7], rooms[1]);//secret hideout <-> casting office\n board.addPath(rooms[8], rooms[1]);//ranch <-> casting office\n board.addPath(rooms[8], rooms[9]);//ranch <-> general store\n board.addPath(rooms[9], rooms[11]);//general store <-> train station\n board.addPath(rooms[9], rooms[10]);//general store <-> jail\n board.addPath(rooms[10], rooms[11]);//jail <-> train station\n board.addPath(rooms[11], rooms[1]);//train station <-> casting office\n }\n \n /* Method checks if a room is next to another room\n * Used to make sure a move is legal\n */\n public static boolean isNeighbor(Room source, Room destination){\n ArrayList<Room> neighbors = board.getNeighbors(source);\n boolean isNeighbor = false;\n\n for(int x = 
0; x < neighbors.size(); x++){\n            if(!isNeighbor && destination.getName() == (neighbors.get(x)).getName()){\n                isNeighbor = true;\n            }\n        }\n        return isNeighbor;\n    }\n\n    /* The final method that is called at the end of the game\n     * Score is calculated by (dollars + credits + (rank*5))\n     */\n    public static int calculateScore(Player player){\n        return player.getDollars() + player.getCredits() + (player.getRank() * 5);\n    }\n\n    /* Method called from Bank.java\n     * Used to clear players' roles and set them to null\n     * Also sets the room's wrapped status to true\n     */\n    public static void clearPlayerRoles(Room room){\n        ArrayList<Player> offCardPlayers = room.getPlayers();\n        ArrayList<Player> onCardPlayers = room.getCard().getPlayers();\n\n        Role[] offCardRoles = room.getRoles();\n        Role[] onCardRoles = room.getCard().getRoles();\n\n        //Remove roles from players\n        if(offCardPlayers != null){\n            for(int x = 0; x < offCardPlayers.size(); x++){\n                offCardPlayers.get(x).setCurrentRole(null);\n            }\n        }\n        if(onCardPlayers != null){\n            for(int x = 0; x < onCardPlayers.size(); x++){\n                onCardPlayers.get(x).setCurrentRole(null);\n            }\n        }\n\n        //Remove players from roles\n        for(int x = 0; x < offCardRoles.length; x++){\n            offCardRoles[x].setPlayer(null);\n        }\n        for(int x = 0; x < onCardRoles.length; x++){\n            onCardRoles[x].setPlayer(null);\n        }\n\n        room.updateWrapped(true);\n    }\n\n    /* Method called when a scene wraps\n     * Resets practice chips for each player to 0\n     */\n    public static void clearPracticeChips(ArrayList<Player> offCardPlayers, ArrayList<Player> onCardPlayers){\n        for(int x = 0; x < offCardPlayers.size(); x++){\n            offCardPlayers.get(x).resetPracticeChips();\n        }\n        for(int x = 0; x < onCardPlayers.size(); x++){\n            onCardPlayers.get(x).resetPracticeChips();\n        }\n    }\n\n    /* Gets current player, usually called from BoardLayersListener\n     */\n    public static Player getCurrentPlayer(){\n        return currentPlayer;\n    }\n\n    /* Gets array of all players, usually called from BoardLayersListener\n     */\n    public static Player[] getPlayerOrder(){\n        return playerOrder;\n    }\n\n    /* Gets array of all rooms, usually called from BoardLayersListener\n     */\n    public static Room[] getRooms(){\n        return rooms;\n    }\n\n    /* Returns how many roles a player can take at one time\n     * Takes into account player and role rank,\n     * if a role is already taken,\n     * if the room is wrapped\n     * Usually called from BoardLayersListener\n     */\n    public static int getAvailableRolesCount(){\n        Room room = currentPlayer.getCurrentRoom();\n        if(room.getName() == \"Trailers\" || room.getName() == \"Casting Office\"){\n            return 0;\n        }\n        Role[] roomRoles = room.getRoles();\n        Card card = room.getCard();\n        int roleCount = 0;\n        Role[] cardRoles = card.getRoles();\n\n        for(int i = 0; i < roomRoles.length; i++){\n            if(roomRoles[i].getRank() <= currentPlayer.getRank() && roomRoles[i].getPlayer() == null){\n                roleCount++;\n            }\n        }\n\n        for(int i = 0; i < cardRoles.length; i++){\n            if(cardRoles[i].getRank() <= currentPlayer.getRank() && cardRoles[i].getPlayer() == null){\n                roleCount++;\n            }\n        }\n        return roleCount;\n    }\n\n    /* Assigns a Role to a Player\n     * If a player takes an on card role, they are removed from the room for room wrap bonus payouts\n     * Returns true if the Role is available and successfully taken\n     * Returns false otherwise\n     */\n    public static boolean takeARole(String roleName){\n        Room currentRoom = currentPlayer.getCurrentRoom();\n        Card currentCard = currentRoom.getCard();\n        Role[] roomRoles = currentRoom.getRoles();\n        Role[] cardRoles = currentCard.getRoles();\n        Boolean roleTaken = false;\n        for(int x = 0; x 
< roomRoles.length; x++){\n            if(roomRoles[x].getName().equals(roleName) && roomRoles[x].isRoleAvailable() && currentPlayer.getRank() >= roomRoles[x].getRank() && currentRoom.hasWrapped() == \"unwrapped\"){\n                currentPlayer.setCurrentRole(roomRoles[x]);\n                currentPlayer.setRoleType(\"offCard\");\n                roomRoles[x].setPlayer(currentPlayer);\n                roomRoles[x].setRoleAvailable(false);\n                roleTaken = true;\n            }\n        }\n\n        for(int x = 0; x < cardRoles.length; x++){\n            if(cardRoles[x].getName().equals(roleName) && cardRoles[x].isRoleAvailable() && currentPlayer.getRank() >= cardRoles[x].getRank() && currentRoom.hasWrapped() == \"unwrapped\"){\n                currentPlayer.setCurrentRole(cardRoles[x]);\n                currentPlayer.setRoleType(\"onCard\");\n                cardRoles[x].setPlayer(currentPlayer);\n                cardRoles[x].setRoleAvailable(false);\n                currentCard.addPlayer(currentPlayer);\n                currentRoom.removePlayer(currentPlayer);\n                roleTaken = true;\n            }\n        }\n\n        if(roleTaken){\n            Role role = currentPlayer.getCurrentRole();\n            if(currentPlayer.getRoleType() == \"offCard\"){\n                gui.movePlayer(currentPlayer, role.getXCoord(), role.getYCoord());\n            }\n            else if(currentPlayer.getRoleType() == \"onCard\"){\n                gui.movePlayer(currentPlayer, currentRoom.getCardX() + role.getXCoord(), currentRoom.getCardY() + role.getYCoord());\n            }\n        }\n        return roleTaken;\n    }\n\n    /* Player must be assigned to a role to be able to act\n     * \"Die is rolled\" and the die output + the player's practice chips are compared to the scene budget\n     * Returns true for successfully acting\n     * Returns false otherwise\n     */\n    public static boolean attemptToAct(){\n        Role playerRole = currentPlayer.getCurrentRole();\n        Room playerRoom = currentPlayer.getCurrentRoom();\n        Card currentCard = playerRoom.getCard();\n        if(currentCard == null){\n            return false;\n        }\n        int budget = currentCard.getBudget();\n        String roleType = currentPlayer.getRoleType();\n\n        Boolean acted = false;\n\n        if(playerRole != null && playerRoom.hasWrapped() != \"wrapped\"){\n            int[] dieRoll = rollDie(1);\n            //Commented out but not deleted in case TA or Prof wants to see what is being rolled\n            //System.out.println(\"Die roll: \" + dieRoll[0] + \" practice chips: \" + currentPlayer.getPracticeChips());\n            //System.out.println(\"Budget: \" + currentPlayer.getCurrentRoom().getCard().getBudget());\n            if(dieRoll[0] + currentPlayer.getPracticeChips() >= budget){//Success\n                Bank.actingSuccess(currentPlayer, roleType);\n                acted = true;\n                gui.removeShotCounter(playerRoom);\n            }else if(roleType == \"offCard\"){//Fail\n                Bank.actingFail(currentPlayer);\n            }\n        }else{\n            System.out.println(\"You have yet to take a role!\");\n        }\n        gui.displayScores(playerOrder);\n        return acted;\n    }\n\n    /* Method called when a player has a role but doesn't want to act\n     * Returns true if they can rehearse\n     * Returns false otherwise\n     */\n    public static boolean rehearse(){\n        Role playerRole = currentPlayer.getCurrentRole();\n        Room playerRoom = currentPlayer.getCurrentRoom();\n        String message = \"\";\n\n        if(playerRole != null && playerRoom.hasWrapped() != \"wrapped\"){\n            int budget = ((currentPlayer.getCurrentRoom()).getCard()).getBudget();\n            if((currentPlayer.getPracticeChips()) >= budget){\n                message += \"The budget of the room is \" + budget + \" and you have \" + currentPlayer.getPracticeChips() + \" practice chips so you are guaranteed success if you act! 
So no more rehearsing!!\";\n                gui.displayMessage(message);\n                return true;\n            }else{\n                currentPlayer.addPracticeChip();\n                message += currentPlayer.getName() + \" has received a practice chip\\n\";\n                message += \"They now have \" + currentPlayer.getPracticeChips();\n                gui.displayScores(playerOrder);\n                gui.displayMessage(message);\n                return true;\n            }\n        }else{\n            message += \"You have yet to take a role!\";\n            gui.displayMessage(message);\n            return false;\n        }\n        \n    }\n\n    /* Ends a player's turn\n     * Method called when player clicks end button in BoardLayersListener\n     */\n    public static void endTurn(){\n        currentPlayer.setMoveFlag(false);\n        currentPlayerIndex++;\n        if(currentPlayerIndex == playerAmount){\n            currentPlayerIndex = 0;\n        }\n        currentPlayer = playerOrder[currentPlayerIndex];\n        gui.displayCurrentPlayer(currentPlayer);\n        gui.displayVisibleButtons(currentPlayer);\n        \n        if(unwrappedRooms().size() <= 1){\n            clearFinalRoom(unwrappedRooms().get(0));\n            newDay();\n        }\n        if(isGameOver()){\n            endGame();\n        }\n        \n    }\n\n    /* Method gets the arrayList of unwrapped rooms\n     * Used to check if the day should end\n     */\n    public static ArrayList<Room> unwrappedRooms(){\n        ArrayList<Room> unwrappedRooms = new ArrayList<Room>();\n        for(int i = 2; i < rooms.length; i++){\n            if(rooms[i].hasWrapped() == \"unwrapped\"){\n                unwrappedRooms.add(rooms[i]);\n            }\n        }\n        return unwrappedRooms;\n    }\n\n    /* Method called when the day is over but there is one room that needs its card cleared\n     * Called from endTurn()\n     */\n    public static void clearFinalRoom(Room room){\n        ArrayList<Player> offCardPlayers = room.getPlayers();\n        Role[] offCardRoles = room.getRoles();\n\n        if(offCardPlayers != null){\n            for(int x = 0; x < offCardPlayers.size(); x++){\n                offCardPlayers.get(x).setCurrentRole(null);\n            }\n        }\n\n        for(int x = 0; x < offCardRoles.length; x++){\n            offCardRoles[x].setPlayer(null);\n        }\n\n        if(room.getCard() != null){\n            ArrayList<Player> onCardPlayers = room.getCard().getPlayers();\n            Role[] onCardRoles = room.getCard().getRoles();\n\n            if(onCardPlayers != null){\n                for(int x = 0; x < onCardPlayers.size(); x++){\n                    onCardPlayers.get(x).setCurrentRole(null);\n                }\n            }\n\n            for(int x = 0; x < onCardRoles.length; x++){\n                onCardRoles[x].setPlayer(null);\n            }\n\n            gui.bPane.remove(room.getCard().getJLabel());\n        }\n        room.updateWrapped(true);\n    }\n\n    /* Player wants to move from their current room to a new room\n     * Room must be adjacent to the player's current room\n     * Moves the player if the move is legal\n     * Otherwise prints an explanation of why the move failed\n     */\n    public static void movePlayer(Player player, Room newRoom){\n        Room currentRoom = player.getCurrentRoom();\n\n        boolean isNeighbor = isNeighbor(currentRoom, newRoom);\n        if(isNeighbor && player.getCurrentRole() == null){\n            player.getCurrentRoom().removePlayer(player);\n            player.setCurrentRoom(newRoom);\n            newRoom.addPlayer(player);\n            for(int i = 0; i < playerOrder.length; i++){\n                if(currentPlayer.equals(playerOrder[i])){\n                    gui.movePlayer(currentPlayer, newRoom.getPlayerHolderCoord()[i*2], newRoom.getPlayerHolderCoord()[i*2 + 1]);\n                }\n            }\n        }else if(!isNeighbor){\n            System.out.println(\"Sorry! \" + currentRoom.getName() + \" is not next to \" + newRoom.getName());\n        }else if(!(player.getCurrentRole() == null)){\n            System.out.println(\"Sorry! You can't leave a room until it has wrapped! 
There are still \" + currentRoom.getShots() + \" shot(s) remaining!\");\n }\n }\n\n /* Die is rolled when a player attempts to act (dieAmount = 1) OR\n * Die is rolled when a scene wraps (dieAmount = scene budget)\n * Returns an array of ints with each index representing a die roll\n */\n public static int[] rollDie(int dieCount){\n int[] dieArray = new int[dieCount];\n for(int d = 0; d < dieCount; d++){\n Random rr = new Random();\n int roll = rr.nextInt(6) + 1;\n dieArray[d] = roll;\n }\n\n //sorted from lowest (index 0), to highest (index dieCount - 1)\n Arrays.sort(dieArray);\n\n return dieArray;\n }\n\n //method goes room by room summing up all shots remaing\n //used to check if the day should end\n public static int totalShotsRemaning(){\n int shotsRemaining = 0;\n for(int x = 0; x < 10; x++){\n shotsRemaining += rooms[x].getShots();\n }\n \n return shotsRemaining;\n }\n\n public static boolean isGameOver(){\n return currentDay > maxDays;\n }\n\n /* Method called before the game begins, and adds special rules depending on playerCount\n * 2-3 Players: 3 days\n * 4 Players: Normal rules\n * 5 Players: Players start with 2 credits\n * 6 Players: Players start with 4 credits\n * 7-8 Players: Players start at rank 2\n */\n public static void specialRules(){\n if(playerAmount <= 3){\n maxDays = 3;\n }else if(playerAmount == 4){\n maxDays = 4;\n }else if(playerAmount == 5){\n //players start with 2 credits\n for(int i = 0; i < playerAmount; i++){\n playerOrder[i].addCredits(2);\n }\n maxDays = 4;\n }else if(playerAmount == 6){\n //players start with 4 credits\n for(int i = 0; i < playerAmount; i++){\n playerOrder[i].addCredits(4);\n }\n maxDays = 4;\n }else{\n //players start with rank 2\n for(int i = 0; i < playerAmount; i++){\n playerOrder[i].setRank(2);\n gui.setNewRank(playerOrder[i], 2);\n }\n maxDays = 4;\n }\n }\n \n /* Deadwood.java should be called as such: java Deadwood*/\n public static void main(String args[]){\n initializeBoard();\n gui.displayCurrentPlayer(currentPlayer);\n\n //the game begins\n while(currentDay <= maxDays){\n \n Scanner in = new Scanner(System.in);\n String playerInput = actionMode;\n\n while(!(playerInput.equals(\"end\"))){\n while(playerInput.equals(\"\")){\n playerInput = actionMode;\n }\n if(playerInput.contains(\"work\")){\n try{\n String[] inputArray = playerInput.split(\"-\");\n String roleName = inputArray[1];\n if(takeARole(roleName)){\n actionMode = \"\";\n endTurn();\n break;\n }\n else{\n System.out.println(\"Sorry! This role is either spelt wrong, not in this room, already has someone acting on it, the room is wrapped, or you aren't the right rank!\\n\");\n actionMode = \"\";\n }\n } catch(ArrayIndexOutOfBoundsException ex){\n System.out.println(\"Whoops, looks like your syntax is wrong. 
If you need to see what roles there are, type 'role options'\\n\");\n                        actionMode = \"\";\n                    }\n                }else if(playerInput.equals(\"Act\")){\n                    if(attemptToAct()){\n                        actionMode = \"\";\n                        endTurn();\n                        break;\n                    }\n                    else{\n                        actionMode = \"\";\n                        endTurn();\n                        break;\n                    }\n                }else if(playerInput.equals(\"Rehearse\")){\n                    if(rehearse()){\n                        actionMode = \"\";\n                        endTurn();\n                        break;\n                    }\n                }else if(playerInput.contains(\"move\")){\n                    String[] moveLocation = playerInput.split(\"-\");\n                    String location = moveLocation[1];\n                    for(int x = 0; x < rooms.length; x++){\n                        if(rooms[x].getName().equals(location) && !currentPlayer.getMoveFlag()){\n                            movePlayer(currentPlayer, rooms[x]);\n                            currentPlayer.setMoveFlag(true);\n                            if(rooms[x].getCard() == null){\n                                //not equal to trailers or casting office\n                                if(!(rooms[x].equals(rooms[0])) && !(rooms[x].equals(rooms[1]))){\n                                    flipCard(rooms[x]);\n                                }\n                            }\n                            gui.displayVisibleButtons(currentPlayer);\n                        }\n                    }\n                    actionMode = \"\";\n                }else if(playerInput.contains(\"upgrade\")){\n                    int rankChoice = 0;\n                    String[] upgradePlayer = playerInput.split(\"-\");\n                    if(upgradePlayer.length == 1 && currentPlayer.getCurrentRoom().getName().equals(\"Casting Office\")){\n                        // bank.displayPrices();\n                    } else if((upgradePlayer[1].toLowerCase().equals(\"c\") || upgradePlayer[1].equals(\"$\")) && upgradePlayer.length == 3 && currentPlayer.getCurrentRoom().getName().equals(\"Casting Office\")){\n                        try{\n                            rankChoice = Integer.parseInt(upgradePlayer[2]);\n                            actionMode = \"\";\n                        }catch(Exception z){\n                            System.out.println(\"WRONG SYNTAX\");\n                            actionMode = \"\";\n                            break;\n                        }\n                        if(bank.upgrade(currentPlayer, rankChoice, upgradePlayer[1].toLowerCase().charAt(0))){\n                            break;\n                        }\n                    }\n                }\n                playerInput = \"\";\n            }//end of while that checks for player input\n            currentPlayer.setMoveFlag(false);\n        } //end of while check for days\n    }\n}"
},
{
"alpha_fraction": 0.5591264963150024,
"alphanum_fraction": 0.5615986585617065,
"avg_line_length": 23.02970314025879,
"blob_id": "42c3531895c8f555383eeccf5e6f9623c56031f3",
"content_id": "aacf06464e7368d4f7de019250791876dd6a6885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2427,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 101,
"path": "/DeadWood/DeadwoodTextBased/Room.java",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "import java.util.*;\n\npublic class Room {\n private Card card;\n private int shotCounter;\n private Role[] roles;\n private String name;\n //ArrayList<Player> players represents players actively in the room (note: not players on the card)\n private ArrayList<Player> players = new ArrayList<Player>(); \n private boolean wrapped = false;\n\n public Room(String n, int shots, Card c){\n name = n;\n shotCounter = shots;\n card = c;\n wrapped = false;\n if(n == \"Trailers\" || n == \"Casting Office\"){\n wrapped = true;\n }\n }\n\n public String getName(){\n return name;\n }\n\n public Card getCard(){\n return card;\n }\n\n public void setCard(Card c){\n card = c;\n }\n\n public int getShots(){\n return shotCounter;\n }\n\n public void removeShot(){\n shotCounter = shotCounter - 1;\n }\n\n public Role[] getRoles(){\n return roles;\n }\n\n /* All four iterations of setRoles used when the specific rooms are created\n * What method called depends on how many roles are in each room (4, 3, 2, 1)\n * Note: roles only for the room, NOT for the card\n */\n public void setRoles(Role a, Role b, Role c, Role d){\n roles = new Role[] {a, b, c, d};\n }\n \n public void setRoles(Role a, Role b, Role c){\n roles = new Role[] {a, b, c};\n }\n \n public void setRoles(Role a, Role b){\n roles = new Role[] {a, b};\n }\n\n public void setRoles(Role a){\n roles = new Role[] {a};\n }\n\n public String hasWrapped(){\n if(wrapped){\n return \"wrapped\";\n }\n else{\n return \"unwrapped\";\n }\n }\n\n public void updateWrapped(boolean update){\n wrapped = update;\n }\n\n public ArrayList<Player> getPlayers(){\n return players;\n }\n\n /* Player is added to room when they enter the room\n * Necessary to keep track for when the room wraps\n */\n public void addPlayer(Player player){\n players.add(player);\n }\n\n /* Player is removed from room when they leave room\n * Iterates through players to find the player specified and\n * removes them from players arrayList\n */\n public void removePlayer(Player player){\n for(int x = 0; x < players.size(); x++){\n if(players.get(x) == player){\n players.remove(x);\n }\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5709897875785828,
"alphanum_fraction": 0.5781570076942444,
"avg_line_length": 27.851484298706055,
"blob_id": "3dfb9337ed71b49d4223b1a11a03cd16a55b9516",
"content_id": "14767b4e627f85b49b915bd51d23c62f8525c4a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5860,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 202,
"path": "/sudoku-master/src/sudoku/Sudoku.java",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "package sudoku; // package name\n\n\npublic class Sudoku {\n\t\n\tpublic static void main(String[] args) {\n\t\t//generates a random puzzle\n\t\tchar[][] puzzle = SudokuP.puzzle();\n\t \n\t System.out.println(\"the unsolved puzzle is: \");\n\t System.out.println();\n\t print_puzzle(puzzle);\n\t System.out.println();\n\t \n\t //if the puzzle is not already invalid, try to solve it\n\t if (check(puzzle)) {\n\t\t System.out.println(\"the solved puzzle is: \");\n\t\t System.out.println();\n\t\t solve(puzzle);\n\t }\n\t else \n\t \t System.out.println(\"this puzzle is invalid\");\n\t \n\t}\n\t\n\t\n\tpublic static boolean solveSudoku(char[][] puzzle) {\n\n\t\t//traverses looking for blank spaces to fill\n\t\tfor(int row=0;row<9;row++) {\n\t\t\tfor(int col=0;col<9;col++) { \n\t\t\t\tif(puzzle[row][col]=='.') {\n\t\t\t\t\t\n\t // we found an empty cell. Try filling it with each of the 9 digits:\n\t\t\t\t\tfor( int i=1; i<=9;i++) {\n\t\t\t\t\t\tchar num=Integer.toString(i).charAt(0); \n\t\t\t\t\t\tpuzzle[row][col]=num; //places a number in puzzle\n\t\t\t\t\t\t\n\t\t\t\t\t\t// Check if adding the number in the puzzle would cause the puzzle to be invalid\n\t\t\t\t\t\tif (guess_check(row, col, puzzle)) {\n\t\t\t\t\t\t\tif(solveSudoku(puzzle)) \n\t\t\t\t\t\t\t\treturn true; //we should keep pursuing this path\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpuzzle[row][col]='.'; // we failed to solve on the current path, so restore and resume recursion\n\t\t\t\t }\n\t\t\t\t\treturn false; //we ran out of guesses, there are no numbers that will be valid in this cell, so backtrack\n\t\t\t }\n\t\t\t }\n\t\t }\n\t\t return true; // this is the last time, its out of the blank space checking for loop \n\t\t //so all the cells must be filled and the fact that it got to this point means they are all valid\n\t}\n\t\n\t\n\t\tpublic static void solve(char[][] puzzle) { \n\t\t\t//wrapper method\n\t\t\tif(puzzle == null || puzzle.length == 0) \n\t\t\t\treturn; \n\t\t\n\t\t\tif(solveSudoku(puzzle))\n\t\t\t\tprint_puzzle(puzzle);\n\t\t\telse\n\t\t\t\tSystem.out.println(\"no solution\");\n\t\n\t\t}\n\t\t\n\t\n\t public static boolean guess_check(int row, int col, char[][] puzzle) {\n\t\t //this function validates individual boxes, rows, and columns for the solving purpose\n\t \t if(validRow(puzzle[row])==false) \n\t \t\t return false;\n\t \t if(validCol(col, puzzle)==false)\n\t \t\t return false;\n\t \t if (validBox(row-row%3,col-col%3,puzzle)==false) //this should get the valid starting position for the traverising \n\t \t\t return false;\n\t \t \n\t \t return true;\n\t }\n\t \n\t\t\t\n\t\tpublic static boolean check(char[][] puzzle) { \n\t\t\t//check automatically all rows and columns and boxes of the 9x9 this is for validating\n\t\t\t//the entire initial puzzle\n\t\t\t\n\t\t\t//checks each row\n\t\t\tfor(int i=0; i<9;i++ ) {\t\t\n\t\t\t\tif(!validRow(puzzle[i])) \n\t\t\t\t\treturn false;\n\t\t\t}\n\t\t\t//check each column\n\t\t\tfor(int i=0; i<9;i++ ) {\n\t\t\t\tif(!validCol(i, puzzle)) \n\t\t\t\t\treturn false;\n\t\t\t}\t\t\t\n\t\t\t\n\t\t\t//checks each box 9 boxes total\n\t\t\tint i=0;\n\t\t\twhile(i<=6) {\n\t\t\t\tint j=0;\n\t\t\t\twhile(j<=6) {\n\t\t\t\t\tif(!validBox(i,j,puzzle)) { //i and j are the starting location of the box\n\t\t\t\t\t\treturn false;\n\t\t\t\t }\n\t\t\t\t\tj=j+3;\n\t\t\t\t}\n\t\t\t i=i+3;\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t\t\t\n\t\t\n\t\t\n\t\t//the variable boolarray is used\n\t\t//when a number is found, the element of boolarray at the index of that number will be set to true\n\t\t//if it 
ever runs into an element that has already been set to true then valid will\n\t\t//be false because it will have already found that number\n\t\t\n\t\tpublic static boolean validCol(int Col, char[][] puzzle) {\n\t\t\t//traverses through one column and checks validity\n\t\t\tint index;\n\t\t\tboolean[] boolarray= {false,false,false,false,false,false,false,false,false};\n\t\t\t\tfor(int i=0; i<9; i++) {\n\t\t\t\t\tif(puzzle[i][Col]!='.') {\n\t\t\t\t\t\tindex=Character.getNumericValue(puzzle[i][Col]-1);\n\t\t\t\t\t\t\n\t\t\t\t\t\tif(boolarray[index] == false) {\n\t\t\t\t\t\t\tboolarray[index] = true; \n\t\t\t\t\t\t\t}\n\t\t\t\t\t\telse \n\t\t\t\t\t\t\treturn false; //means it already has seen that number\n\t\t\t\t\t\t\n\t\t\t\t\t}\t\n\t\t\t\t}\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\n\t\t \n\t\t\n\t\tpublic static boolean validRow(char[] row) {\n\t\t\t//traverses through one row and checks validity \n\t\t\tboolean[] boolarray= {false,false,false,false,false,false,false,false,false};\n\t\t\tint index;\n\t\t\t \n\t\t\t\tfor(int i=0; i<9; i++) { //for each character in the row\n\t\t\t\t\tif(row[i]!='.') { //if the character is not a period/blank\n\t\t\t\t\t\tindex=Character.getNumericValue(row[i])-1; //change the character to an int value-1\n\t\t\t\t\t\t\n\t\t\t\t\t\tif(boolarray[index] == false) { //changes the index of boolarray to true each time a number is found\n\t\t\t\t\t\t\tboolarray[index] = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse { //else boolarray at an index is already true return false\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\treturn true;//if the rows are valid return true\n\t\t }\n\t\t\n\t\t\n\t\t \n\t\tpublic static boolean validBox(int i,int j,char[][] puzzle) { \n\t\t\t//at any given i,j location this function will be called with i-i%3 and j-j%3 \n\t\t\t//because this will give us the top left most corner location of the box so we\n\t\t\t//know where to start traversing \n\t\t\tboolean[] boolarray= {false,false,false,false,false,false,false,false,false};\n\t\t\tint index;\n\t\t\n\t\t\tfor(int row=i;row<i+3;row++) { //checks 3x3 box\n\t\t\t\tfor(int column=j;column<j+3;column++) {\n\t\t\t\t\tif(puzzle[row][column]!='.') {\n\t\t\t\t\t\tindex=Character.getNumericValue(puzzle[row][column]-1);\n\t\t\t\t\t\tif(boolarray[index] == false) {\n\t\t\t\t\t\t\tboolarray[index] = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse \n\t\t\t\t\t\t\treturn false;\t\t\t\t\t\t\t \n\t\t\t\t\t}\t\t\n\t\t\t }\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t\n\t\t\n\t\t\n\t\t\n\n\t\tpublic static void print_puzzle(char[][] puzzle){\n\t //prints the puzzle \n\t for (int x = 0; x < 9; x++) {\n\t for (int y = 0; y < 9; y++) {\n\t \n\t System.out.print(puzzle[x][y]+\" \");\n\t }\n\t \n\t System.out.println();\n\t }\n\t \n\t }\n\t\n}\n\t\t\n\t\t\t\n\t\t\n\t\t\n\t\n\t\t\t\n\t\t\t\n\t\t\n\t\n\t\t\n\n"
},
{
"alpha_fraction": 0.536034107208252,
"alphanum_fraction": 0.5445628762245178,
"avg_line_length": 23.67894744873047,
"blob_id": "5687cfecd77e49cf82b0f4f49ecbe85973a4328a",
"content_id": "ab2f77a822a507e68ecf185743400fe5af32ed07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4690,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 190,
"path": "/DeadWood/Room.java",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "import java.util.*;\nimport javax.swing.*;\n\npublic class Room {\n private Card card;\n private int shotCounter;\n private Role[] roles;\n private String name;\n //ArrayList<Player> players represents players actively in the room (note: not players on the card)\n private ArrayList<Player> players = new ArrayList<Player>(); \n private boolean wrapped = false;\n private int ID;\n private int cardSlotX;\n private int cardSlotY;\n private ArrayList<Integer> shotCounterCoord = new ArrayList<Integer>();// Format: [X1][Y1][X2][Y2]...\n private int roomXCoord;\n private int roomYCoord;\n private JLabel[] shotLabels;\n private JLabel roomLabel = new JLabel();\n private int[] playerHolders = new int[16];// Format: [X1][Y1][X2][Y2]...\n\n public Room(String n, int shots, Card c, int id, int roomX, int roomY, int cardX, int cardY, int[] shotCCoord, int playerHolderX, int playerHolderY){\n name = n;\n shotCounter = shots;\n card = c;\n ID = id;\n cardSlotX = cardX;\n cardSlotY = cardY;\n roomXCoord = roomX;\n roomYCoord = roomY;\n for(int i : shotCCoord){\n shotCounterCoord.add(i);\n }\n wrapped = false;\n if(n == \"Trailers\" || n == \"Casting Office\"){\n wrapped = true;\n }\n shotLabels = new JLabel[shotCounter];\n\n int offset = 0;\n for(int j = 1; j <= 16; j+=2){\n if(j == 9){\n offset = 0;\n playerHolderY+=50;\n }\n playerHolders[j-1] = playerHolderX+offset;\n playerHolders[j] = playerHolderY;\n offset+=50;\n }\n }\n\n public Room(String n, int shots, Card c, int roomX, int roomY){\n name = n;\n shotCounter = shots;\n card = c;\n roomXCoord = roomX;\n roomYCoord = roomY;\n wrapped = false;\n if(n == \"Trailers\" || n == \"Casting Office\"){\n wrapped = true;\n }\n\n int offset = 0;\n for(int j = 1; j <= 16; j+=2){\n if(j == 9){\n offset = 0;\n roomY+=50;\n }\n playerHolders[j-1] = roomX+offset;\n playerHolders[j] = roomY;\n offset+=50;\n }\n }\n\n public String getName(){\n return name;\n }\n\n public Card getCard(){\n return card;\n }\n\n public int getID(){\n return ID;\n }\n\n public void setCard(Card c){\n card = c;\n }\n\n public int getShots(){\n return shotCounter;\n }\n\n public void removeShot(){\n shotCounter = shotCounter - 1;\n }\n\n public Role[] getRoles(){\n return roles;\n }\n\n /* All four iterations of setRoles used when the specific rooms are created\n * What method called depends on how many roles are in each room (4, 3, 2, 1)\n * Note: roles only for the room, NOT for the card\n */\n public void setRoles(Role a, Role b, Role c, Role d){\n roles = new Role[] {a, b, c, d};\n }\n \n public void setRoles(Role a, Role b, Role c){\n roles = new Role[] {a, b, c};\n }\n \n public void setRoles(Role a, Role b){\n roles = new Role[] {a, b};\n }\n\n public void setRoles(Role a){\n roles = new Role[] {a};\n }\n\n public String hasWrapped(){\n if(wrapped){\n return \"wrapped\";\n }\n else{\n return \"unwrapped\";\n }\n }\n\n public void updateWrapped(boolean update){\n wrapped = update;\n }\n\n public ArrayList<Player> getPlayers(){\n return players;\n }\n\n /* Player is added to room when they enter the room\n * Necessary to keep track for when the room wraps\n */\n public void addPlayer(Player player){\n players.add(player);\n }\n\n /* Player is removed from room when they leave room\n * Iterates through players to find the player specified and\n * removes them from players arrayList\n */\n public void removePlayer(Player player){\n for(int x = 0; x < players.size(); x++){\n if(players.get(x) == player){\n players.remove(x);\n }\n }\n }\n\n public int getCardX(){\n return 
cardSlotX;\n }\n\n public int getCardY(){\n return cardSlotY;\n }\n\n public int getRoomX(){\n return roomXCoord;\n }\n\n public int getRoomY(){\n return roomYCoord;\n }\n\n public ArrayList<Integer> getShotCounterCoords(){\n return shotCounterCoord;\n }\n\n public JLabel[] getShotLabels(){\n return shotLabels;\n }\n\n public JLabel getJLabel(){\n return roomLabel;\n }\n\n public int[] getPlayerHolderCoord(){\n return playerHolders;\n }\n}\n\n"
},
{
"alpha_fraction": 0.6582781672477722,
"alphanum_fraction": 0.6687537431716919,
"avg_line_length": 32.8979606628418,
"blob_id": "24ca5fbdd2d812d454b3b6c48db3b7ce86a5a643",
"content_id": "065500598d252b5ca692628fa1ab0b3bf1ec4719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8305,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 245,
"path": "/Toxic_Comment_Classifier/toxic_classifier.py",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "import csv\nfrom collections import defaultdict\nfrom collections import Counter\nimport nltk\nfrom nltk.util import ngrams\nimport copy\nimport math\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\n\ndef calcPr(dict, feature_vector, initial_pr):#This calculates the probability of a comment being toxic or nontoxic(depending on which dict you passed in)\n prc = math.log(initial_pr)\n total_prob = prc\n comment_wc = 0\n total_vocab = 50000\n for key in dict:\n comment_wc += dict[key]\n for key in feature_vector:\n total_prob = total_prob + math.log((dict[key] + 1)/float((comment_wc + total_vocab)))\n return total_prob\n\n#takes a list of 'uncleaned' sentences.\n#returns a dictionary with index as key and a list of cleaned tokens for the values.\ndef clean_text(comment_list):\n tokens = {}\n index = 0\n stop_words = set(stopwords.words('english'))\n porter = PorterStemmer()\n for comment in comment_list:\n tokenized_comment = word_tokenize(comment) #tokenize comments\n remove_punc = [word for word in tokenized_comment if word.isalpha()] #get rid of punctuation\n remove_stop_words = [w for w in remove_punc if not w in stop_words] # remove stop words\n tokens[index] = [porter.stem(word) for word in remove_stop_words] # stem words\n index += 1\n\n return tokens\n\ncolumns = defaultdict(list)\nwith open('train.csv', encoding=\"utf8\") as train:\n reader = csv.DictReader(train)\n for row in reader:\n for (k, v) in row.items():\n columns[k].append(v.lower())\n\ncomment_list = columns['comment_text'] #list of all comments\ntoxic_list = columns['toxic']\ns_toxic_list = columns['severe_toxic']\nobs_list = columns['obscene']\ninsult_list = columns['insult']\nid_hate_list = columns['identity_hate']\ncolumn_len = len(comment_list)\ntoxic_count = 0\nnontoxic_count = 0\nnew_toxic_list = [] #list of toxic/nontoxic classifications for each comment. 
1 = toxic, 0 = nontoxic\n\nfor i in range (0, column_len):\n    if (toxic_list[i] == '1' or s_toxic_list[i] == '1' or obs_list[i] == '1' or insult_list[i] == '1' or id_hate_list[i] == '1'):\n        new_toxic_list.append(1)\n        toxic_count += 1\n    else:\n        new_toxic_list.append(0)\n        nontoxic_count += 1\n\n# Make a token dictionary, each key is a comment index mapped to that comment's cleaned tokens\ntokens = clean_text(comment_list)\n\nindex = 0\nall_tokens_list = []\ntoxic_tokens_list = []\nnontoxic_tokens_list = []\nfor key in tokens:\n    if (new_toxic_list[index] == 1):\n        for i in tokens[key]:\n            all_tokens_list.append(i)\n            toxic_tokens_list.append(i)\n    else:\n        for i in tokens[key]:\n            all_tokens_list.append(i)\n            nontoxic_tokens_list.append(i)\n    index +=1\n\nall_counts_dict = Counter(all_tokens_list)\nall_counts_dict_copy = copy.deepcopy(all_counts_dict)\n\ntoxic_counts_dict = Counter(toxic_tokens_list)\ntoxic_counts_dict_copy = copy.deepcopy(toxic_counts_dict)\n\nnontoxic_counts_dict = Counter(nontoxic_tokens_list)\nnontoxic_counts_dict_copy = copy.deepcopy(nontoxic_counts_dict)\n\n# Keep only top 50000 words\ncount = 0\nfor key in all_counts_dict_copy:\n    count += 1\n    if (count > 50000): # drop every key past the 50000th, not just the 50000th one\n        del all_counts_dict[key]\n\nfor key in toxic_counts_dict_copy:\n    if(all_counts_dict.get(key, \"<NONE>\") == \"<NONE>\"):\n        del toxic_counts_dict[key]\n\nfor key in nontoxic_counts_dict_copy:\n    if(all_counts_dict.get(key, \"<NONE>\") == \"<NONE>\"):\n        del nontoxic_counts_dict[key]\n\n# Begin making feature vectors\n#row_counter_for_testing_purpose = 0\ntest_columns = defaultdict(list)\ntest_labels_columns = defaultdict(list)\nwith open('test.csv', encoding=\"utf8\") as test:\n    reader = csv.DictReader(test)\n    for row in reader:\n        for (k, v) in row.items():\n            test_columns[k].append(v.lower())\n\nrow_counter_for_testing_purpose = 0 #test on less data so it doesn't take an hour to get results\nwith open('test_labels.csv', encoding=\"utf8\") as labels:\n    labels_reader = csv.DictReader(labels)\n    for row in labels_reader:\n        for (k, v) in row.items():\n            test_labels_columns[k].append(v.lower())\n\ntest_comment_list = test_columns['comment_text'] #list of all comments\ntest_toxic_list = test_labels_columns['toxic']\ntest_s_toxic_list = test_labels_columns['severe_toxic']\ntest_obs_list = test_labels_columns['obscene']\ntest_insult_list = test_labels_columns['insult']\ntest_id_hate_list = test_labels_columns['identity_hate']\ntest_column_len = len(test_comment_list)\ntest_new_toxic_list = []\ntest_new_comment_list = []\n\nfor i in range (0, test_column_len):\n    if (test_toxic_list[i] == '1' or test_s_toxic_list[i] == '1' or test_obs_list[i] == '1' or test_insult_list[i] == '1' or test_id_hate_list[i] == '1'):\n        test_new_toxic_list.append(1)\n        test_new_comment_list.append(test_comment_list[i])\n    elif(test_toxic_list[i] == '-1'):\n        continue\n    else:\n        test_new_toxic_list.append(0)\n        test_new_comment_list.append(test_comment_list[i])\n\ntest_tokens = clean_text(test_new_comment_list)\ntest_toxic_comment_list = []\ntest_nontoxic_comment_list = []\ntest_comment_len = len(test_new_comment_list)\n\nindex = 0\nfor key in test_tokens:\n    if (test_new_toxic_list[index] == 1):\n        test_toxic_comment_list.append(test_tokens[index])\n    else:\n        test_nontoxic_comment_list.append(test_tokens[index])\n    index += 1\n\ntoxic_vectors = []\nnontoxic_vectors = []\n\nfor comment in test_toxic_comment_list:\n    vect_dict = {}\n    for token in comment:\n        if (all_counts_dict.get(token) != None):\n            if (vect_dict.get(token) == None):\n                vect_dict[token] = 1\n            else:\n                vect_dict[token] += 1\n    toxic_vectors.append(vect_dict)\n\nfor 
comment in test_nontoxic_comment_list:\n vect_dict = {}\n for token in comment:\n if (all_counts_dict.get(token) != None):\n if (vect_dict.get(token) == None):\n vect_dict[token] = 1\n else:\n vect_dict[token] += 1\n nontoxic_vectors.append(vect_dict)\n\n\n# Calculate toxic/nontoxic classification on test set\ntoxic_initial_prob = toxic_count/column_len\nnontoxic_initial_prob = nontoxic_count/column_len\nprint(\"Toxic initial prob: \", toxic_initial_prob)\nprint(\"Non toxic initial prob: \", nontoxic_initial_prob)\nprint(\"this is len of toxic vectors: \", len(toxic_vectors))\nprint(\"this is len of nontoxic vectors: \", len(nontoxic_vectors))\nnontoxic_vector_classification = []\ntoxic_vector_classification = []\n\nfor feature_vector in nontoxic_vectors:\n toxic_pr = calcPr(toxic_counts_dict, feature_vector, toxic_initial_prob)\n nontoxic_pr = calcPr(nontoxic_counts_dict, feature_vector, nontoxic_initial_prob)\n if(toxic_pr > nontoxic_pr):\n nontoxic_vector_classification.append(\"T\")\n else:\n nontoxic_vector_classification.append(\"N\")\n\nfor feature_vector in toxic_vectors:\n toxic_pr = calcPr(toxic_counts_dict, feature_vector, toxic_initial_prob)\n nontoxic_pr = calcPr(nontoxic_counts_dict, feature_vector, nontoxic_initial_prob)\n if(toxic_pr > nontoxic_pr):\n toxic_vector_classification.append(\"T\")\n else:\n toxic_vector_classification.append(\"N\")\n\n#Show results from running on test set\nfalse_positive = 0\ntrue_positive = 0\nfalse_negative = 0\ntrue_negative = 0\nprint(\"Trained with : 50,000 total comments\")\nprint(\"Toxic vectors:\")\nprint(toxic_vector_classification)\nprint()\nprint(\"Nontoxic vectors:\")\nprint(nontoxic_vector_classification)\nprint()\n\nfor comment in toxic_vector_classification:#Toxic is our positive, nontoxic is our negative\n if(comment == 'T'):\n true_positive += 1\n elif(comment == 'N'):\n false_negative += 1\n\nfor comment in nontoxic_vector_classification:\n if(comment == 'T'):\n false_positive += 1\n elif(comment == 'N'):\n true_negative += 1\n\nprint(\"True Positives: \", true_positive)\nprint(\"True Negatives: \", true_negative)\nprint(\"False Positives: \", false_positive)\nprint(\"False Negatives: \", false_negative)\n\nprecision = true_positive/(true_positive + false_positive)\nrecall = true_positive/(true_positive + false_negative)\nb = .5\nf_score = 1/((b*(1/precision)) + (1 - b) * (1/recall))\nprint(\"Precision: \", precision * 100)\nprint(\"Recall: \", recall * 100)\nprint(\"F1 Score: \", f_score * 100)\nprint()\n"
},
{
"alpha_fraction": 0.7816593647003174,
"alphanum_fraction": 0.7991266250610352,
"avg_line_length": 27.625,
"blob_id": "7466f02721a3107b9af89da56ed5cbddcd140da9",
"content_id": "ccec3dcf3e351464630827621dd90bb49e3fb262",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 8,
"path": "/README.md",
"repo_name": "ParkLC/Projects",
"src_encoding": "UTF-8",
"text": "# Projects\nParker Carlson,\nBachelor of Science in Computer Science\n\nWWU 2021 Graduate\n\nThis repository is to act as a window into some of the projects I have done.\nPrimarily for the purpose of showing employers relevant work experience.\n"
}
] | 8 |
jlei00/football-data-analysis | https://github.com/jlei00/football-data-analysis | 70e7f584d6147c4994121987f4fb66d0c1d16234 | 8e8c034850f78d690120ad0fa2fcc1c1962712c8 | 7ea176272b1fa903e1c4cfbc89e3c5bbada4ea14 | refs/heads/master | 2018-09-30T17:19:40.166891 | 2018-06-11T17:26:11 | 2018-06-11T17:26:11 | 135,358,536 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6350498199462891,
"alphanum_fraction": 0.6541528105735779,
"avg_line_length": 28.082124710083008,
"blob_id": "f4daac0d44e03829bdc88800be571d66352217e6",
"content_id": "b13304635c94774ca3c93cdf3b4ed3e61e882ebc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 6020,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 207,
"path": "/finalproject.html",
"repo_name": "jlei00/football-data-analysis",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html>\n\n<head>\n <title>\n Football Analysis\n </title>\n</head>\n\n<style>\n\nbody {\n margin: 0;\n background: black;\n}\n\n.title {\n background-color: white;\n color: black;\n text-align: center;\n font-family: Tahoma, Geneva, sans-serif;\n}\n\n.title h2 {\n padding: 22px 16px;\n font-size: 54px;\n margin-bottom: 0;\n}\n\n.title h3 {\n padding: 22px 16px;\n font-size: 34px;\n margin-top: 0;\n margin-bottom: 0;\n}\n\n.desc {\n color: white;\n}\n\n.desc h3 {\n text-align: center;\n font-size: 34px;\n margin: 0;\n margin-top: 25px;\n padding: 12px 16px;\n}\n\n.desc p {\n border: 1px solid black;\n background-color: white;\n color: black;\n font-weight: bold;\n margin-left: 25%;\n margin-right: 25%;\n padding: 24px 32px;\n}\n\n.instruction {\n color: white;\n}\n\n.instruction h3 {\n text-align: center;\n font-size: 34px;\n margin: 0;\n padding: 12px 16px;\n}\n\n.instruction p {\n margin-left: 33%;\n margin-right: 33%;\n padding: 24px 32px;\n background-color: white;\n color: black;\n font-weight: bold;\n border: 1px solid black;\n}\n\n.features {\n color: white;\n}\n\n.features h3{\n text-align: center;\n font-size: 34px;\n margin: 0;\n padding: 12px 16px;\n}\n\n.features ul {\n margin-left: 31%;\n margin-right: 31%;\n list-style-type: none;\n padding: 24px 32px;\n background-color: white;\n color: black;\n font-weight: bold;\n border: 1px solid black;\n}\n\n.starting {\n color: white;\n}\n\n.starting h3 {\n margin-bottom: 85px;\n font-size: 34px;\n margin-top: 65px;\n text-align: center;\n}\n\n.starting a {\n text-decoration: none;\n border: 1px solid white;\n color:white;\n padding: 12px 16px;\n font-size: 48px;\n}\n\n.starting a:hover {\n background-color: white;\n color: black;\n border: 1px solid black;\n}\n\n</style>\n\n<body>\n\n<div class=\"title\">\n <h2> Football Data Analysis </h2>\n <h3> Created By: Jun Tao Lei and Nicholas Yun </h3>\n</div>\n\n<div class=\"desc\">\n <h3> Brief Description </h3>\n\n <p>\n Our website focused on the analysis of several football statistics of both teams and players.\n There are two main pages that present general informations about the teams and players. Some\n basic information available in the main team page are wins, draws, defeats, matches played, etc.\n Similarly, the information available in the main player page are appearances, goals, matches\n started, etc. In these pages, the user can click on the name of the teams or the players to\n open a page that has more specific stats on the team or player chosen. Alternatively, the user can also\n search for a team or player by name that disregards capitalization errors. If the name is not\n found when searched, the user will be redirected to the main page. Furthermore, there are quick\n links on both main pages that quickly scroll down to the first team or player of the top 5 leagues.\n </p>\n\n <p>\n In the specific team page or player page, the user can view expanded stats on the team or player\n chosen. In the specific team page, the user can see the standings of the team over the past four\n seasons except for the seasons that the team was not in the leagues whether due to relegations or\n some other factors. The user can also view the away and home forms of the team over the seasons\n available. There is also a graph that shows the average attendance of a team over the past few\n seasons if the data is available. In the specific player page, the user can also view the expanded\n stats on the player chosen. 
The user can view more stats like assists, minutes per goal, minutes\n        played, etc. The teams that the player played for over the past few seasons can be viewed. There\n        are also average stats like average appearances, average goals, and average assists. There are\n        also graphs of the appearances versus goals and appearances versus assists.\n    </p>\n</div>\n\n<div class=\"instruction\">\n    <h3> Instructions </h3>\n    <p>\n        The instructions for using this website are intuitive. The user can switch between the main\n        team page and main player page by choosing the particular page on the navigation bar. With\n        the search bar, the user can also search for a specific team or a specific player.\n        However, one must keep in mind that one can only search for a team in the team pages\n        or a player in the player pages. There are also quick links on both main pages\n        that link to the first player or team in the main pages.\n    </p>\n</div>\n\n<div class=\"features\">\n    <h3> Working Features </h3>\n    <ul>\n        <li> Displaying Player Stats </li>\n        <li> Displaying Team Stats </li>\n        <li> Expanding Player Stats </li>\n        <li> Expanding Team Stats </li>\n        <li> Viewing Player Stats For The Previous Seasons </li>\n        <li> Viewing Team Stats For The Previous Seasons </li>\n        <li> Quick Links Navigation</li>\n        <li> Search Bar </li>\n        <li> Clickable Team Names </li>\n        <li> Clickable Player Names </li>\n        <li> Average Attendance Over Seasons Graph </li>\n        <li> Average Standings Over Seasons Graph </li>\n        <li> Appearances Versus Goals Graph </li>\n        <li> Appearances Versus Assists Graph </li>\n        <li> Average Appearances, Goals, Assists For Players </li>\n    </ul>\n</div>\n\n<div class=\"starting\">\n    <h3> Choose the other server in case one server is down. </h3>\n    <h3> <a href=\"http://homer.stuy.edu/~jlei00/finalproject/TeamMain.py\"> Team Page (Homer Server) </a> </h3>\n    <h3> <a href=\"http://marge.stuy.edu/~jlei00/finalproject/TeamMain.py\"> Team Page (Marge Server) </a> </h3>\n    <h3> <a href=\"http://homer.stuy.edu/~jlei00/finalproject/PlayerMain.py\"> Player Page (Homer Server) </a> </h3>\n    <h3> <a href=\"http://marge.stuy.edu/~jlei00/finalproject/PlayerMain.py\"> Player Page (Marge Server) </a> </h3>\n</div>\n\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6061686277389526,
"alphanum_fraction": 0.6254234313964844,
"avg_line_length": 24.1524658203125,
"blob_id": "13ae11978c0d1ed7bd686eab926902b8aa916d96",
"content_id": "8dafb2ee7fdb59c052dd9efd2244355902967885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5609,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 223,
"path": "/PlayerSpecific.py",
"repo_name": "jlei00/football-data-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nprint \"Content-type: text/html\\n\"\n\nimport cgi\n\nimport cgitb\ncgitb.enable()\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nhead = '''<html>\n<head>\n\t<title> Football Statistics </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body>\n\n<form method=\"GET\" action=\"PlayerSpecific.py\">\n\n<div class=\"topnav\">\n <a class=\"active\" href=\"TeamMain.py\"> Team Page </a>\n <a href=\"PlayerMain.py\"> Player Page </a>\n <div class=\"search-container\">\n <input type=\"text\" name=\"PlayerNameOption2\" size=\"20\" placeholder=\"Enter a player name...\">\n\t<button type=\"submit\"> Search </button>\n </div>\n</div>\n'''\n\nheadAlt = '''<html>\n<head>\n\t<meta http-equiv=\"refresh\" content=\"0;url=PlayerMain.py\" />\n\t<title> Error Page </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body id='error'>\n\t<h1> Invalid Team Name Entered! </h1>\n</body>\n</html>\n'''\n\nfoot = '''</body>\n</html>'''\n\ndef cgiFieldStorageToDict( fieldStorage ):\n \tans = {}\n \tfor key in fieldStorage.keys():\n \t\tans[ key ] = fieldStorage[ key ].value\n \treturn ans\n\ndef makeTable(s):\n\tTable = '\\n<table>\\n\\n'\n\tkeyIndex = s[0].index('KEY')\n\tfor i in range(0,len(s)):\n\t\tTable += '\t<tr>\\n'\n\t\tfor x in range(0,len(s[i])):\n\t\t\tif i == 0 and x != keyIndex:\n\t\t\t\tTable += '\t\t<th> ' + s[i][x] + ' </th>\\n'\n\t\t\telse:\n\t\t\t\tif x != keyIndex:\n\t\t\t\t\tTable += '\t\t<td> ' + s[i][x] + ' </td>\\n'\n\t\tTable += '\t</tr>\\n\\n'\n\treturn '<div style=\"overflow-x:auto;\">' + Table + '</table>\\n' + '</div>'\n\ndef getData(s):\n\ts = s.replace('\\xef\\xbb\\xbf','')\n\ts = s.replace('\"','\\'')\n\ts = s.strip()\n\ts = s.split('\\n')\n\tfor i in range(len(s)):\n\t\ts[i] = s[i].split(',')\n\treturn s\n\ndef stripInfo(cutoff, s):\n\tfor i in range(len(s)):\n\t\ts[i] = s[i][:cutoff] + s[i][32:36]\n\treturn s\n\ndef getApperancesVsGoal(s):\n\tAppearances = s[0].index('Appearances')\n\tGoals = s[0].index('Goals')\n\tAppearancesList = []\n\tGoalsList = []\n\tfor i in range(len(s)):\n\t\tfor x in range(len(s[i])):\n\t\t\tif x == Appearances and i != 0:\n\t\t\t\tAppearancesList.append(int(s[i][x]))\n\t\t\tif x == Goals and i != 0:\n\t\t\t\tGoalsList.append(int(s[i][x]))\n\treturn AppearancesList, GoalsList\n\ndef getApperancesVsAssist(s):\n\tAppearances = s[0].index('Appearances')\n\tAssists = s[0].index('Assists')\n\tAppearancesList = []\n\tAssistsList = []\n\tfor i in range(len(s)):\n\t\tfor x in range(len(s[i])):\n\t\t\tif x == Appearances and i != 0:\n\t\t\t\tAppearancesList.append(int(s[i][x]))\n\t\t\tif x == Assists and i != 0:\n\t\t\t\tif s[i][x] == '':\n\t\t\t\t\tAssistsList.append(s[i][x])\n\t\t\t\telse:\n\t\t\t\t\tAssistsList.append(int(s[i][x]))\n\tfor i in range(len(AssistsList)):\n\t\tif AssistsList[i] == '':\n\t\t\tAppearancesList[i] = ''\n\twhile '' in AppearancesList:\n\t\tAppearancesList.remove('')\n\twhile '' in AssistsList:\n\t\tAssistsList.remove('')\n\treturn AppearancesList, AssistsList\n\ndef getAverage(s):\n\tif s == []:\n\t\treturn 'Invalid'\n\telse:\n\t\treturn sum(s) * 1.0 / len(s)\n\ndef makeStatsTable(s,name):\n\ttable = ''\n\tfor i in range(len(s)):\n\t\ttable += '\t<tr>\\n'\n\t\ttable += '\t\t<th> ' + name[i] + ' </th>\\n'\n\t\tif s[i] == 'Invalid':\n\t\t\ttable += '\t\t<td> N/A </td>\\n'\n\t\telse:\n\t\t\ttable += '\t\t<td> ' + str(s[i]) + ' </td>\\n'\n\t\ttable += '\t</tr>\\n\\n'\n\treturn '<table>\\n\\n' + table + 
'</table>\\n'\n\n#Get the team selected from TeamMain.py\ndef getOneTeam(player,s):\n\tNewList = []\n\tNewList.append(s[0][0])\n\tfor x in range(len(s)):\n\t\tfor i in range(len(s[x])):\n\t\t\tif s[x][i][4] == player:\n\t\t\t\tNewList.append(s[x][i])\n\tif len(NewList) == 1:\n\t\treturn []\n\treturn NewList\n\n#2017-18\nf = open('data/Player 2017-18.csv').read()\nf = stripInfo(11,getData(f))\n#2016-17\ng = open('data/Player 2016-17.csv').read()\ng = stripInfo(11,getData(g))\n#2015-16\nh = open('data/Player 2015-16.csv').read()\nh = stripInfo(11,getData(h))\n#2014-15\nj = open('data/Player 2014-15.csv').read()\nj = stripInfo(11,getData(j))\n#2014-18\nAll = [f,g,h,j]\n\ndef main():\n \tform = cgiFieldStorageToDict(cgi.FieldStorage())\n\n\tplayer = ''\n \tif \"PlayerName\" in form:\n \t\tplayer = form[\"PlayerName\"]\n \tplayer2 = ''\n \tif \"PlayerNameOption2\" in form:\n \t\tplayername = form[\"PlayerNameOption2\"].split()\n\t\tfor i in range(len(playername)):\n\t\t\tplayername[i] = playername[i].lower().capitalize()\n\t\tfor i in range(len(playername)):\n\t\t\tif i == 0:\n\t\t\t\tplayer2 += playername[i]\n\t\t\telse:\n\t\t\t\tplayer2 += ' ' + playername[i]\n\tif player == '' and player2 != '':\n\t\tplayer = player2\n\n \tPlayer = getOneTeam(player,All)\n\n\tif Player != []:\n\n\t\tappearances, goals = getApperancesVsGoal(Player)\n\n\t\tplt.title('Appearances Versus Goals')\n\t\tplt.xlabel('Appearances')\n\t\tplt.ylabel('Goals')\n\t\tplt.scatter(appearances,goals,alpha='0.5')\n\t\tplt.savefig('img/' + player + 'appearancesvgoals.png')\n\n\t\tplt.clf()\n\n\t\tappearances, assists = getApperancesVsAssist(Player)\n\n\t\tplt.title('Appearances Versus Assists')\n\t\tplt.xlabel('Appearances')\n\t\tplt.ylabel('Assists')\n\t\tplt.scatter(appearances,assists,alpha='0.5')\n\t\tplt.savefig('img/' + player + 'appearancesvassists.png')\n\n\t\tappearances, goals = getApperancesVsGoal(Player)\n\n\t\taverageGoals = getAverage(goals)\n\t\taverageAssists = getAverage(assists)\n\t\taverageAppearances = getAverage(appearances)\n\t\tAverageStats = [averageAppearances, averageGoals, averageAssists]\n\t\tnames = ['Average Appearances', 'Average Goals', 'Average Assists']\n\n\t\tprint(head)\n\t\tprint('<table>\\n\\n\t<tr>')\n\t\tprint(\"\t\t<td>\\n\t\t<img src='img/\" + player + \"appearancesvgoals.png' width='500px'>\\n\t\t</td>\\n\")\n\t\tprint(\"\t\t<td>\\n\t\t<img src='img/\" + player + \"appearancesvassists.png' width='500px'>\\n\t\t</td>\")\n\t\tprint('\t</tr>\\n')\n\t\tprint('</table>\\n')\n\t\tprint(makeStatsTable(AverageStats,names))\n\t\tprint(makeTable(Player))\n\t\tprint(foot)\n\telse:\n\t\tprint(headAlt)\n\nmain()\n"
},
{
"alpha_fraction": 0.5959511995315552,
"alphanum_fraction": 0.6039933562278748,
"avg_line_length": 27.847999572753906,
"blob_id": "e85d1afe977f400ce1097c3a90c4b7de64ee3b08",
"content_id": "a82f53773fd52caf1ade5579d98945aa30785d39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3606,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 125,
"path": "/PlayerMain.py",
"repo_name": "jlei00/football-data-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nprint \"Content-type: text/html\\n\"\n\nimport cgi\n\nhead = '''<html>\n<head>\n\t<title> Football Statistics </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body>\n\n<form method=\"GET\" action=\"PlayerSpecific.py\">\n\n<div class=\"topnav\">\n <a class=\"active\" href=\"PlayerMain.py\"> Player Page </a>\n <a href=\"TeamMain.py\"> Team Page </a>\n <div class=\"dropdown\">\n \t<a class=\"dropbtn\"> Quick Links </a>\n \t<div class=\"dropdown-content\">\n \t<a href=\"#En\"> Premier League </a>\n\t\t<a href=\"#Sp\"> La Liga </a>\n\t\t<a href=\"#Ge\"> Bundesliga </a>\n\t\t<a href=\"#It\"> Serie A </a>\n\t\t<a href=\"#Fr\"> Ligue 1 </a>\n\t</div>\n </div>\n <div class=\"search-container\">\n <input type=\"text\" name=\"PlayerNameOption2\" size=\"20\" placeholder=\"Enter a player name...\">\n\t<button type=\"submit\"> Search </button>\n </div>\n</div>\n'''\n\nfoot = '''</body>\n</html>'''\n\ndef getData(s):\n\ts = s.replace('\\xef\\xbb\\xbf','')\n\ts = s.replace('\"','\\'')\n\ts = s.strip()\n\ts = s.split('\\n')\n\tfor i in range(len(s)):\n\t\ts[i] = s[i].split(',')\n\treturn s\n\ndef stripInfo (cutoff, s):\n\tfor i in range(len(s)):\n\t\tif s[i][4] == '':\n\t\t\ts[i] = 'none'\n\t\telse:\n\t\t\ts[i] = s[i][:cutoff]\n\twhile 'none' in s:\n\t\ts.remove('none')\n\treturn s\n\ndef makeTable(s):\n\tTable = '\\n<table>\\n\\n'\n\tExpandedMeaning = {'Pos':'League Position','P':'Matches Played','W':'Win','D':'Draws','L':'Defeats','F':'Goals For','A':'Goals Against','GD':'Goal Difference','Pts':'Points'}\n\tkeyIndex = s[0].index('KEY')\n\tseasonIndex = s[0].index('Season')\n\ttitles = []\n\tQuickLinks = {'Premier League':'En','La Liga':'Sp','Bundesliga':'Ge','Serie A':'It','French Ligue 1':'Fr'}\n\tfor i in range(0, len(s)):\n\t\tTable += '\t<tr>\\n'\n\t\tfor x in range(0,len(s[i])):\n\t\t\tif i == 0:\n\t\t\t\tif x != keyIndex and x != seasonIndex:\n\t\t\t\t\tif s[i][x] in ExpandedMeaning:\n\t\t\t\t\t\tTable += '\t\t<th> ' + ExpandedMeaning[s[i][x]] + ' </th>\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tTable += '\t\t<th> ' + s[i][x] + ' </th>\\n'\n\t\t\telse:\n\t\t\t\tif x != keyIndex and x != seasonIndex:\n\t\t\t\t\tif s[i][x] in QuickLinks and not s[i][x] in titles:\n\t\t\t\t\t\tTable += '\t\t<td> <a name=\"' + QuickLinks[s[i][x]] + '\"> ' + s[i][x] + ' </td>\\n'\n\t\t\t\t\t\ttitles.append(s[i][x])\n\t\t\t\t\telse:\n\t\t\t\t\t\tTable += '\t\t<td> ' + s[i][x] + ' </td>\\n'\n\t\tTable += '\t</tr>\\n\\n'\n\treturn '<div style=\"overflow-x:auto;\">' + Table + '</table>\\n' + '</div>'\n\ndef makeLink(s):\n\tfor i in range(1,len(s)):\n\t\tfor x in range(len(s[i])):\n\t\t\tif x == 4:\n\t\t\t\ts[i][x] = '<input type=\"submit\" name=\"PlayerName\" value=\"' + s[i][x] + '\">'\n\treturn s\n\ndef separateByLeague(League, restrictTableType, s):\n\tNewList = []\n\tif restrictTableType != '':\n\t\tExcept = s[0].index('Table Type')\n\tfor i in range(1,len(s)):\n\t\tif League == s[i][0]:\n\t\t\tif restrictTableType != '':\n\t\t\t\tif s[i][Except] == restrictTableType:\n\t\t\t\t\tNewList.append(s[i])\n\t\t\telse:\n\t\t\t\tNewList.append(s[i])\n\treturn NewList\n\ndef mergeLeague(League,s):\n\tNewList = []\n\tNewList.append(s[0])\n\tfor x in range(len(League)):\n\t\tfor i in range(len(League[x])):\n\t\t\tNewList.append(League[x][i])\n\treturn NewList\n\ng = open('data/Player 2017-18.csv').read()\ng = stripInfo(9, getData(g))\n\nPlayerPremierLeague = separateByLeague('Premier League', '', g)\nPlayerLaLiga = separateByLeague('La Liga', '', g)\nPlayerBundesliga = 
separateByLeague('Bundesliga', '', g)\nPlayerFrenchLigue1 = separateByLeague('French Ligue 1', '', g)\nPlayerSerieA = separateByLeague('Serie A', '', g)\nPlayerTop5 = [PlayerPremierLeague, PlayerLaLiga, PlayerBundesliga, PlayerFrenchLigue1, PlayerSerieA]\nPlayerLeagueMerged = mergeLeague(PlayerTop5, g)\nPlayerLeagueMerged = makeLink(PlayerLeagueMerged)\n\nprint(head)\nprint(makeTable(PlayerLeagueMerged))\nprint(foot)\n"
},
{
"alpha_fraction": 0.5852559208869934,
"alphanum_fraction": 0.618270993232727,
"avg_line_length": 25.370630264282227,
"blob_id": "85b60d6f79b221d9a7beb655a111237f7f6aaa96",
"content_id": "22787898e4a95a50a7ae9e71d2a9299cea62c618",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7542,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 286,
"path": "/TeamSpecific.py",
"repo_name": "jlei00/football-data-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nprint \"Content-type: text/html\\n\"\n\nimport cgi\n\nimport cgitb\ncgitb.enable()\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nhead = '''<html>\n<head>\n\t<title> Football Statistics </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body>\n\n<form method=\"GET\" action=\"TeamSpecific.py\">\n\n<div class=\"topnav\">\n <a class=\"active\" href=\"TeamMain.py\"> Team Page </a>\n <a href=\"PlayerMain.py\"> Player Page </a>\n <div class=\"search-container\">\n <input type=\"text\" name=\"TeamNameOption2\" size=\"20\" placeholder=\"Enter a team name...\">\n\t<button type=\"submit\"> Search </button>\n </div>\n</div>\n'''\n\nheadAlt = '''<html>\n<head>\n\t<meta http-equiv=\"refresh\" content=\"0;url=TeamMain.py\" />\n\t<title> Error Page </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body id='error'>\n\t<h1> Invalid Team Name Entered! </h1>\n</body>\n</html>\n'''\n\nfoot = '''</body>\n</html>'''\n\ndef cgiFieldStorageToDict( fieldStorage ):\n \t\"\"\"Get a plain dictionary, rather than the '.value' system used by the cgi module.\"\"\"\n \tans = {}\n \tfor key in fieldStorage.keys():\n \t\tans[ key ] = fieldStorage[ key ].value\n \treturn ans\n\ndef makeTable(s):\n\tExpandedMeaning = {'Pos':'League Position','P':'Matches Played','W':'Win','D':'Draws','L':'Defeats','F':'Goals For','A':'Goals Against','GD':'Goal Difference','Pts':'Points'}\n\tTable = '\\n<table>\\n\\n'\n\tkeyIndex = s[0].index('KEY')\n\ttableIndex = s[0].index('Table Type')\n\ttitleList = ['Table Type']\n\tfor i in range(0,len(s)):\n\t\tTable += '\t<tr>\\n'\n\t\ttitle = s[i][tableIndex]\n\t\twhile not title in titleList:\n\t\t\tTable += '\\n<div class=\"flex-container\">\\n\t<div>\\n\t\t' + title + '\\n\t</div>\\n</div>\\n'\n\t\t\ttitleList.append(title)\n\t\tfor x in range(0,len(s[i])):\n\t\t\tif i == 0:\n\t\t\t\tif x != keyIndex and x != tableIndex:\n\t\t\t\t\tif s[i][x] in ExpandedMeaning:\n\t\t\t\t\t\tTable += '\t\t<th> ' + ExpandedMeaning[s[i][x]] + ' </th>\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tTable += '\t\t<th> ' + s[i][x] + ' </th>\\n'\n\t\t\telse:\n\t\t\t\tif x != keyIndex and x != tableIndex:\n\t\t\t\t\tTable += '\t\t<td> ' + s[i][x] + ' </td>\\n'\n\t\tTable += '\t</tr>\\n\\n'\n\treturn '<div style=\"overflow-x:auto;\">' + Table + '</table>\\n' + '</div>'\n\ndef getData(s,mode):\n\ts = s.replace('\\xef\\xbb\\xbf','')\n\ts = s.replace('\\r','')\n\ts = s.strip()\n\ts = s.split('\\n')\n\tfor i in range(len(s)):\n\t\tif mode == 'Attendance':\n\t\t\ts[i] = s[i].split('\"')\n\t\t\tnewString = ''\n\t\t\tfor x in range(len(s[i])):\n\t\t\t\tif s[i][x].replace(',','').isdigit():\n\t\t\t\t\ts[i][x] = s[i][x].replace(',','')\n\t\t\t\tif x != 0 and s[i][x] != '':\n\t\t\t\t\tnewString += ',' + s[i][x]\n\t\t\t\telse:\n\t\t\t\t\tif s[i][x] != '':\n\t\t\t\t\t\tnewString += s[i][x]\n\t\t\ts[i] = newString.split(',')\n\t\t\twhile '' in s[i]:\n\t\t\t\ts[i].remove('')\n\t\t\t#print s[i]\n\t\telse:\n\t\t\ts[i] = s[i].split(',')\n\treturn s\n\n#Get the team selected from TeamMain.py\ndef getOneTeam(team,s):\n\tNewList = []\n\tNewList.append(s[0][0])\n\tfor x in range(len(s)):\n\t\tfor i in range(len(s[x])):\n\t\t\tif team == 'Borussia Mgladbach':\n\t\t\t\tif \"Bundesliga|Borussia M'gladbach\" in s[x][i][7]:\n\t\t\t\t\tNewList.append(s[x][i])\n\t\t\t\tif len(s[x][i]) > 8:\n\t\t\t\t\tif \"Bundesliga|Borussia M'gladbach\" in s[x][i][13]:\n\t\t\t\t\t\tNewList.append(s[x][i])\n\t\t\telse:\n\t\t\t\tif 
s[x][i][2] == team or s[x][i][1] == team:\n\t\t\t\t\tNewList.append(s[x][i])\n\tif len(NewList) == 1:\n\t\treturn []\n\treturn NewList\n\ndef separateByTableType(s,Type):\n\theading = s[0]\n\tNewList = []\n\tNewList.append(heading)\n\tfor i in range(len(s)):\n\t\tif s[i][11] == Type:\n\t\t\tNewList.append(s[i])\n\treturn NewList\n\ndef getStandings(s):\n\tStandings = []\n\tPeriod = []\n\tCount = 1\n\tTableType = ['Away League Table','League Table','Home League Table','Table Type']\n\tYears = {4:'2014/15',3:'2015/16',2:'2016/17',1:'2017/18'}\n\tYearsInvert = {'2014/15':4,'2015/16':3,'2016/17':2,'2017/18':1}\n\tfor i in range(len(s)):\n\t\tif s[i][11] == 'League Table':\n\t\t\tStandings.append(int(s[i][1]))\n\t\t\tif s[i][12] == Years[Count]:\n\t\t\t\tPeriod.append(Years[Count])\n\t\t\t\tif Count != 4:\n\t\t\t\t\tCount += 1\n\t\t\telse:\n\t\t\t\tCount = YearsInvert[s[i][12]]\n\t\t\t\tPeriod.append(Years[Count])\n\treturn Standings, Period\n\ndef getAttendance(s):\n\tAttendance = []\n\tfor i in range(1,len(s)):\n\t\tif '\\\"' in s[i][2]:\n\t\t\ts[i][2].replace('\\\"','')\n\t\tAttendance.append(int(s[i][2]))\n\treturn Attendance\n\ndef reverse(s):\n\tReversed = []\n\tfor i in range(len(s)):\n\t\tReversed.append(s.pop(-1))\n\treturn Reversed\n\ndef equalize(attendance,standings):\n\tif attendance != standings:\n\t\tif 25 in standings:\n\t\t\tstandings.remove(25)\n\treturn attendance,standings\n\ndef generateX(s):\n\tX = []\n\tfor i in range(1,len(s)+1):\n\t\tX.append(i)\n\treturn X\n\ndef trimAttendance(s,x):\n\twhile len(s) != len(x):\n\t\ts.pop(len(x))\n\treturn s\n\n###HOME ATTENDANCE CSV###\n#2017-18\na = open('data/Home attendance 2017-18.csv').read()\na = getData(a,'Attendance')\n#2016-17\nb = open('data/Home attendance 2016-17.csv').read()\nb = getData(b,'Attendance')\n#2015-16\nc = open('data/Home attendance 2015-16.csv').read()\nc = getData(c,'Attendance')\n#2014-15\nd = open('data/Home attendance 2014-15.csv').read()\nd = getData(d,'Attendance')\nAllAttendanceSeasons = [a,b,c,d]\n\n###LEAGUE STANDINGS CSV###\n#2017-18\nf = open('data/Standings 2017-18.csv').read()\nf = getData(f,'')\n#2016-17\ng = open('data/Standings 2016-17.csv').read()\ng = getData(g,'')\n#2015-16\nh = open('data/Standings 2015-16.csv').read()\nh = getData(h,'')\n#2014-15\nj = open('data/Standings 2014-15.csv').read()\nj = getData(j,'')\n#2014-18\nAll = [f,g,h,j]\n\ndef main():\n \tform = cgiFieldStorageToDict(cgi.FieldStorage())\n \tteam = ''\n \tif \"TeamName\" in form:\n \t\tteam = form[\"TeamName\"]\n \tteam2 = ''\n \tif \"TeamNameOption2\" in form:\n \t\tteamName = form[\"TeamNameOption2\"].split()\n\t\tfor i in range(len(teamName)):\n\t\t\tteamName[i] = teamName[i].lower().capitalize()\n\t\tfor i in range(len(teamName)):\n\t\t\tif i == 0:\n\t\t\t\tteam2 += teamName[i]\n\t\t\telse:\n\t\t\t\tteam2 += ' ' + teamName[i]\n\tif team == '' and team2 != '':\n\t\tteam = team2\n\n\tTeam = getOneTeam(team,All)\n\n\tif Team != []:\n\t\t#print(Team)\n\t\tRegularResult = separateByTableType(Team,'League Table')\n\t\tAwayResult = separateByTableType(Team,'Home League Table')\n\t\tHomeResult = separateByTableType(Team,'Away League Table')\n\t\tAttendanceAll = getOneTeam(team,AllAttendanceSeasons)\n\t\tAverageAttendance = reverse(getAttendance(AttendanceAll))\n\t\tStandingsHistory, TeamPeriod = getStandings(Team)\n\t\tStandingsHistory = reverse(StandingsHistory)\n\t\tTeamPeriod = reverse(TeamPeriod)\n\t\tAverageAttendance, StandingsHistory = equalize(AverageAttendance,StandingsHistory)\n\t\t###Line Graph###\n\t\t#TeamPeriod = 
['2014-15','2015-16','2016-17','2017-18']\n\t\t#x = [1,2,3,4]\n\t\tx = generateX(TeamPeriod)\n\t\tplt.title('Standings Over Time')\n\t\tplt.xlabel('Seasons')\n\t\tplt.ylabel('Standings')\n\t\tplt.ylim(1,20)\n\t\tplt.gca().invert_yaxis()\n\t\tplt.xticks(x,TeamPeriod)\n\t\tplt.plot(x,StandingsHistory,marker='o')\n\t\tplt.savefig(\"img/\" + team + \"Standings.png\")\n\n\t\t###Clear Config###\n\t\tplt.clf()\n\n\t\tx = reverse(x)\n\t\tAverageAttendance = trimAttendance(AverageAttendance,x)\n\t\tplt.title('Average Attendance Over Seasons')\n\t\tplt.ylabel('Seasons')\n\t\tplt.xlabel('Average Attendance')\n\t\tplt.gca().invert_yaxis()\n\t\tplt.yticks(x,TeamPeriod)\n\t\tplt.scatter(AverageAttendance,x,alpha=.5)\n\t\tplt.savefig('img/' + team + 'Attendance.png')\n\n\t\tprint(head)\n\t\tprint('<table>\\n\\n\t<tr>')\n\t\tprint(\"\t\t<td>\\n\t\t<img src='img/\" + team + \"Standings.png' width='500px'>\\n\t\t</td>\\n\")\n\t\tprint(\"\t\t<td>\\n\t\t<img src='img/\" + team + \"Attendance.png' width='500px'>\\n\t\t</td>\")\n\t\tprint('\t</tr>\\n')\n\t\tprint('</table>\\n')\n\t\tprint(makeTable(RegularResult))\n\t\tprint('<br>')\n\t\tprint(makeTable(AwayResult))\n\t\tprint('<br>')\n\t\tprint(makeTable(HomeResult))\n\t\tprint('<br>')\n\t\tprint(foot)\n\telse:\n\t\tprint(headAlt)\n\nmain()\n"
},
{
"alpha_fraction": 0.5989263653755188,
"alphanum_fraction": 0.6068507432937622,
"avg_line_length": 27.764705657958984,
"blob_id": "2c67fff6cef1b8779fa74090838c062320b25155",
"content_id": "9914e5422a486e2121c51ac655a05bf6cf166ddd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3912,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 136,
"path": "/TeamMain.py",
"repo_name": "jlei00/football-data-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nprint \"Content-type: text/html\\n\"\n\nhead = '''<html>\n<head>\n\t<title> Football Statistics </title>\n\t<link rel=\"stylesheet\" type=\"text/css\" href=\"tableStyle.css\">\n</head>\n<body>\n\n<form method=\"GET\" action=\"TeamSpecific.py\">\n\n<div class=\"topnav\">\n <a class=\"active\" href=\"TeamMain.py\"> Team Page </a>\n <a href=\"PlayerMain.py\"> Player Page </a>\n <div class=\"dropdown\">\n \t<a class=\"dropbtn\"> Quick Links </a>\n \t<div class=\"dropdown-content\">\n \t<a href=\"#En\"> Premier League </a>\n\t\t<a href=\"#Sp\"> La Liga </a>\n\t\t<a href=\"#Ge\"> Bundesliga </a>\n\t\t<a href=\"#It\"> Serie A </a>\n\t\t<a href=\"#Fr\"> Ligue 1 </a>\n\t</div>\n </div>\n <div class=\"search-container\">\n <input type=\"text\" name=\"TeamNameOption2\" size=\"20\" placeholder=\"Enter a team name...\">\n\t<button type=\"submit\"> Search </button>\n </div>\n</div>\n'''\n\nfoot = '''</body>\n</html>'''\n\nf = open('data/Standings 2017-18.csv').read()\n\n# Extract data\n# Separate by new-lines and commas\ndef getData(s):\n\ts = s.replace('\\xef\\xbb\\xbf','')\n\ts = s.replace('\\r','')\n\ts = s.strip()\n\ts = s.split('\\n')\n\tfor i in range(len(s)):\n\t\ts[i] = s[i].split(',')\n\treturn s\n\n# Cut data\n# Strip everything on the cutoff index\n# Currently not used for TeamMain.py\ndef stripInfo (cutoff, s):\n\tfor i in range(len(s)):\n\t\tif s[i][4] == '':\n\t\t\ts[i] = 'none'\n\t\telse:\n\t\t\ts[i] = s[i][:cutoff]\n\twhile 'none' in s:\n\t\ts.remove('none')\n\treturn s\n\n# Make table from data\ndef makeTable(s):\n\tExpandedMeaning = {'Pos':'League Position','P':'Matches Played','W':'Win','D':'Draws','L':'Defeats','F':'Goals For','A':'Goals Against','GD':'Goal Difference','Pts':'Points'}\n\tTable = '\\n<table>\\n\\n'\n\tkeyIndex = s[0].index('KEY')\n\tseasonIndex = s[0].index('Season')\n\ttableIndex = s[0].index('Table Type')\n\tQuickLinks = {'Premier League':'En','La Liga':'Sp','Bundesliga':'Ge','Serie A':'It','French Ligue 1':'Fr'}\n\ttitles = []\n\tfor i in range(0, len(s)):\n\t\tTable += '\t<tr>\\n'\n\t\tfor x in range(0,len(s[i])):\n\t\t\tif i == 0:\n\t\t\t\tif x != keyIndex and x != tableIndex and x != seasonIndex:\n\t\t\t\t\tif s[i][x] in ExpandedMeaning:\n\t\t\t\t\t\tTable += '\t\t<th> ' + ExpandedMeaning[s[i][x]] + ' </th>\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tTable += '\t\t<th> ' + s[i][x] + ' </th>\\n'\n\t\t\telse:\n\t\t\t\tif x != keyIndex and x != tableIndex and x != seasonIndex:\n\t\t\t\t\tif s[i][x] in QuickLinks and not s[i][x] in titles:\n\t\t\t\t\t\tTable += '\t\t<td> <a name=\"' + QuickLinks[s[i][x]] + '\"> ' + s[i][x] + ' </td>\\n'\n\t\t\t\t\t\ttitles.append(s[i][x])\n\t\t\t\t\telse:\n\t\t\t\t\t\tTable += '\t\t<td> ' + s[i][x] + ' </td>\\n'\n\t\tTable += '\t</tr>\\n\\n'\n\treturn '<div style=\"overflow-x:auto;\">' + Table + '</table>\\n' + '</div>'\n\ndef makeLink(s):\n\tfor i in range(1,len(s)):\n\t\tfor x in range(len(s[i])):\n\t\t\tif x == 2:\n\t\t\t\tif i == 49:\n\t\t\t\t\ts[i][x] = 'Borussia Mgladbach'\n\t\t\t\ts[i][x] = '<input type=\"submit\" name=\"TeamName\" value=\"' + s[i][x] + '\">'\n\treturn s\n\nf = getData(f)\n\n# Get the stats for a league\n# Restricts the data to certain condition\ndef separateByLeague(League, restrictTableType, s):\n\tNewList = []\n\tif restrictTableType != '':\n\t\tExcept = s[0].index('Table Type')\n\tfor i in range(1,len(s)):\n\t\tif League == s[i][0]:\n\t\t\tif restrictTableType != '':\n\t\t\t\tif s[i][Except] == 
restrictTableType:\n\t\t\t\t\tNewList.append(s[i])\n\t\t\telse:\n\t\t\t\tNewList.append(s[i])\n\treturn NewList\n\n# Merge lists into one\ndef mergeLeague(League,s):\n\tNewList = []\n\tNewList.append(s[0])\n\tfor x in range(len(League)):\n\t\tfor i in range(len(League[x])):\n\t\t\tNewList.append(League[x][i])\n\treturn NewList\n\nPremierLeague = separateByLeague('Premier League', 'League Table', f)\nLaLiga = separateByLeague('La Liga', 'League Table', f)\nBundesliga = separateByLeague('Bundesliga', 'League Table', f)\nFrenchLigue1 = separateByLeague('French Ligue 1', 'League Table', f)\nSerieA = separateByLeague('Serie A', 'League Table', f)\nTop5 = [PremierLeague, LaLiga, Bundesliga, FrenchLigue1, SerieA]\nLeagueMerged = mergeLeague(Top5, f)\nLeagueMerged = makeLink(LeagueMerged)\n\nprint(head)\nprint(makeTable(LeagueMerged))\nprint(foot)\n"
}
] | 5 |
fengjingchao/13ccta | https://github.com/fengjingchao/13ccta | df2564016518771623b0f015d53eec0fe28b2db1 | 3d790ee07cc97770906fb00aa9dde77ba7aadd34 | 2411aeb2818b91add2c6084aa6283d8f9a5f9de7 | refs/heads/master | 2015-08-09T11:35:32.873852 | 2013-11-27T15:54:27 | 2013-11-27T15:54:27 | 14,351,049 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.7128027677536011,
"alphanum_fraction": 0.7491349577903748,
"avg_line_length": 24.04347801208496,
"blob_id": "287165cab5169e6a28f8d1c85b31ab1754d65844",
"content_id": "2af78a45b1ae34d144263cac5c1a824b89d701e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 578,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 23,
"path": "/restore_data/loaddata.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"put data\"\nhadoop fs -put /mnt/tb2 /\nhadoop fs -put /mnt/tb3 /\nhadoop fs -put /mnt/tb4 /\n\necho \"done\"\n\necho \"create tables\"\nhbase org.apache.hadoop.hbase.util.RegionSplitter tb2 -c 200 -f ut\nhbase org.apache.hadoop.hbase.util.RegionSplitter tb3 -c 200 -f tw\nhbase org.apache.hadoop.hbase.util.RegionSplitter tb4 -c 200 -f usr\n\necho \"done\"\n\necho \"import data\"\n\nhbase org.apache.hadoop.hbase.mapreduce.Import tb2 /tb2 \nhbase org.apache.hadoop.hbase.mapreduce.Import tb3 /tb3 \nhbase org.apache.hadoop.hbase.mapreduce.Import tb4 /tb4 \n\necho \"done starting jobs\"\n\n\n"
},
{
"alpha_fraction": 0.5688143372535706,
"alphanum_fraction": 0.5855889916419983,
"avg_line_length": 28.14444351196289,
"blob_id": "ff1d3453466e1ed205a0c5a009dd5881054b31e4",
"content_id": "90d539ea082c16d467f301b55700139186355f88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2623,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 90,
"path": "/backend/getdata.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom thrift.transport.TSocket import TSocket\nfrom thrift.transport.TTransport import TBufferedTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom hbase import Hbase\nfrom hbase.ttypes import *\nimport struct\nimport json\n\ndef encode(num):\n return struct.pack('>q', num)\n\ndef decode(raw):\n return struct.unpack('>q', raw)[0]\n\nclass getdata():\n \"\"\"\n get the data from databse\n \"\"\"\n \n def __init__(self, config):\n \"\"\"\n\t print (i.split(':')[1])\n init the class, config is the path to the table config file\n \"\"\"\n\n self.params = {}\n f = open(config,'r')\n for line in f.readlines():\n if len(line) > 1: # 1 for \\n\n self.params[line.split(':')[0]] = line.split(':')[1][:-1]\n f.close()\n \n self.transport = TBufferedTransport(TSocket(self.params['address'], int(self.params['port'])))\n self.transport.open()\n self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)\n self.client = Hbase.Client(self.protocol)\n\n def query2(self, ts):\n table = self.params['table_2']\n result = self.client.getRowWithColumns(table, encode(ts), [], {})\n\t#print result\n\tif len(result) <= 0:\n\t return []\n\t\n\ttw = []\n\tcols = {}\n\t#print result[0].columns.keys()\n\t\n\tfor i in result[0].columns:\n\t cols[decode(i[len('ut:'):])] = result[0].columns[i].value\t\n\tfor i in sorted(cols.keys()):\n\t #print repr(cols[i].decode('utf-8'))[2:]\n\t tw.append(str(i)+':'+ json.dumps(cols[i].decode('utf-8'))[1:-1].replace('/', '\\\\/'))\n\t #tw.append(str(i)+':'+ repr(cols[i].decode('utf-8'))[2:-1].replace('\\\\\\'', '\\''))\n return tw\n\n def query3(self, startId, endId):\n table = self.params['table_3']\n\tcnt = 0\n scanner = self.client.scannerOpenWithStop(table, encode(startId), encode(endId+1), [], {})\n result = self.client.scannerGet(scanner)\n while (len(result) > 0):\n\t cnt = cnt + len(result[0].columns)\n #print result\n result = self.client.scannerGet(scanner)\n\tself.client.scannerClose(scanner)\n\n\treturn cnt\n\n def query4(self, rtid):\n table = self.params['table_4']\n\t#print 'try get'\n result = self.client.getRowWithColumns(table, encode(rtid), [], {})\n\tif len(result) <= 0:\n\t return []\n\n\tusr = []\n\t#print result[0].columns\n\tfor i in result[0].columns:\n\t usr.append(decode(i[len('usr:'):]))\n\t #print decode(i.split(':')[1])\n\t #usr.append(i['usr'])print i\n return usr\n \nif __name__ == '__main__':\n a = getdata('table_config.txt')\n r = a.query2(1380644973)\n print r\n"
},
{
"alpha_fraction": 0.45869946479797363,
"alphanum_fraction": 0.46836555004119873,
"avg_line_length": 20.452829360961914,
"blob_id": "9a4fbbaecfb3a80417155bdfb747b1305885ca2e",
"content_id": "8c9c6cb05c8973ab9f2d77eefdb6c8a04f956cea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1138,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 53,
"path": "/load_redis/l2s.js",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env node\n\nvar redis = require(\"redis\");\n\nclient = redis.createClient();\n\nclient.on(\"error\", function (err) {\n console.log(\"error event - \" + client.host + \":\" + client.port + \" - \" + err);\n});\n\n\nclient.keys(\"*\", function (err, keys) {\n var counter = 0;\n var total = keys.length;\n keys.forEach(function (key, pos) {\n client.lrange(key, 0, -1, function (err, data) {\n if(err){\n console.log(err);\n return;\n }\n var arr = []\n for (var i=0; i < data.length; ++i) {\n arr.push(data[i].split(':', 2))\n }\n arr.sort(function (a, b) {\n return a[0] - b[0];\n });\n\n for (var i=0; i < arr.length; ++i) {\n arr[i] = arr[i][0] + ':' + arr[i][1]\n }\n\n var value = arr.join('\\n');\n\n client.set(key, value, function (err) {\n if(err){\n console.log(err);\n return;\n }\n counter = counter + 1;\n if(counter == total){\n console.log(\"finished!\");\n client.quit(function (err, res) {\n console.log(\"Exiting from quit command.\");\n });\n }\n });\n\n });\n\n });\n\n});\n\n"
},
{
"alpha_fraction": 0.5114285945892334,
"alphanum_fraction": 0.5342857241630554,
"avg_line_length": 22.33333396911621,
"blob_id": "d3ad66a5ea8d4d67a7ee490661116a62451e5a87",
"content_id": "7f237e5b752ec3f2d3020c6951392c4da98281f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 15,
"path": "/load_data/gets3.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport subprocess\n\nd = open('./jsonlist.txt', 'w')\nwith open(\"./rawlist.txt\", \"r\") as f:\n for s3_path in f:\n subprocess.check_call(\n \"s3cmd get {0}\".format(s3_path.split()[-1]),\n shell=True\n )\n filename = s3_path.split()[-1].split('/')[-1]\n d.write(filename + '\\n')\nf.close()\nd.close()\n"
},
{
"alpha_fraction": 0.44117647409439087,
"alphanum_fraction": 0.44117647409439087,
"avg_line_length": 16,
"blob_id": "f6e21185779e2f4a65b061b167f3338b623fcd32",
"content_id": "54a952d3de8624ddb5c7af3f63ab961d227959f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/README.md",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "twitter_analysis\n================\n"
},
{
"alpha_fraction": 0.44690266251564026,
"alphanum_fraction": 0.4594395160675049,
"avg_line_length": 25.076923370361328,
"blob_id": "ac7ba68b6ca67694f017d8d02d48481148051f53",
"content_id": "20f77b2facce5fb4944ac84d62bffe35d54be0e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1356,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 52,
"path": "/script/load.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys,os\nimport struct\n\ndef barr2str(arr):\n return \"\\\\x\" + \"\\\\x\".join(\"{:02x}\".format(ord(i)) for i in arr)\n\nts_output = open('hb_ts', \"w\")\nud_output = open('hb_uid', \"w\")\nrt_output = open('hb_rt', \"w\")\n\ndef load(line):\n tweet = line.split('\\t', 4)\n\n tid = int(tweet[0])\n uid = int(tweet[1])\n ts = int(tweet[3])\n tx = tweet[4]\n\n ts_output.write(\n # put timestamp, tweet_ts, tweet:tw_id, text\n 'put \"timestamp\", \"{}\", \"tweet:{}\",\"{}\"\\n'.format(\n barr2str(struct.pack(\">Q\", ts)),\n barr2str(struct.pack(\">Q\", tid)),\n tx)\n )\n ud_output.write(\n # put user, uid, tweet:tw_id, \"\"\n 'put \"user\", \"{}\", \"tweet:{}\",\"\"\\n'.format(\n barr2str(struct.pack(\">Q\", uid)),\n barr2str(struct.pack(\">Q\", tid)) )\n )\n\n if tweet[2] == \"\":\n return\n\n rt_uid = int(tweet[2])\n\n rt_output.write(\n # put retweet, rt_uid, tweet:tw_id, uid\n 'put \"retweet\", \"{}\", \"tweet:{}\",\"{}\"\\n'.format(\n barr2str(struct.pack(\">Q\", rt_uid)),\n barr2str(struct.pack(\">Q\", tid)),\n barr2str(struct.pack(\">Q\", uid)) )\n )\n\n\n\nwith open(\"sample_output\",\"r\") as f:\n for line in f:\n load(line.rstrip())\n"
},
{
"alpha_fraction": 0.5977011322975159,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 42.5,
"blob_id": "1cf3d981d494b7106e79b063bea5ec49cd2f440a",
"content_id": "8b010d96c6d270ced58b2a9f982d433b1faf3cb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 2,
"path": "/load_data/Makefile",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "all: loadData.java\n\tjavac -cp \".:hbase-0.94.13.jar:lib/*:gson-2.2.4.jar\" loadData.java\n"
},
{
"alpha_fraction": 0.58746737241745,
"alphanum_fraction": 0.6031331419944763,
"avg_line_length": 21.52941131591797,
"blob_id": "9285768d4cc69739ead66858303c768e54153f77",
"content_id": "b3fd33a74dba6d2091bdc01241782474c1eaff13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 17,
"path": "/load_redis/gao.js",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env node\n\nvar fs = require('fs');\n\nfunction gao(filename) {\n var data = fs.readFileSync(filename, 'utf8');\n var content = data.split('\\n').filter(function (line) {\n return line;\n });\n for (var i=0; i < content.length; ++i) {\n var tweet = JSON.parse(content[i]);\n var ts = Date.parse(tweet['created_at']) / 1000;\n console.log(ts);\n }\n}\n\ngao('small.json');\n"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6136363744735718,
"avg_line_length": 16.600000381469727,
"blob_id": "6407e4f36f5cabaa380ff566cbd0bb8d89f1a788",
"content_id": "60a18eccac266e5b6ef031fa823a3cd2f27fcff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 5,
"path": "/load_redis/merge.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\npython gao.py > out_se\nnode gao.js > out_ts\npaste -d\" \" out_ts out_se >> out\n"
},
{
"alpha_fraction": 0.5173978805541992,
"alphanum_fraction": 0.5249621868133545,
"avg_line_length": 19.65625,
"blob_id": "5ad13c8d47144b0f6d3c7ac7c1895fe1488b9d20",
"content_id": "84347958bc0de3a600a76c64ae1fafec7fac90c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 661,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 32,
"path": "/load_redis/gao.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport json\nimport re\n\n# objective\n# created_at: timestamp\n# tweet id:\n# text:\n\n\ndef chaogao(tweet_json):\n #print tweet_json\n tweet = json.loads(tweet_json)\n #text = (repr(tweet['text']))\n #if(text[0] == 'u'):\n #text = text[2:-1]\n #text = text.replace('/','\\\\/')\n i = tweet_json.find(\"text\\\":\\\"\") + 7\n pattern = re.compile(r'[^\\\\]\"')\n m = pattern.search(tweet_json, i)\n j = m.end()-1\n text = tweet_json[i:j]\n print \"{}:{}\".format(tweet['id'], text)\n #print \"\"\n\ndef gao(f):\n for line in f:\n if line.strip() != \"\":\n chaogao(line.rstrip())\n\ngao(open('small.json', \"r\"))\n"
},
{
"alpha_fraction": 0.42060086131095886,
"alphanum_fraction": 0.43347638845443726,
"avg_line_length": 19.2608699798584,
"blob_id": "06deb1c3139b355f97b8a09e4f0957277670a156",
"content_id": "e5a4ad90a4fd04dcbc58a32a408e8c19f03e65eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 466,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 23,
"path": "/setbiding.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nps afx | grep \"main.js\" | while read line\ndo\n if [[ $line == *grep* || $line == *$0* ]];\n then\n continue\n fi\n arr=($line)\n nodepid=${arr[0]}\n taskset -p 1 $nodepid\ndone\n\nps afx | grep redis | while read line\ndo\n if [[ $line == *grep* || $line == *$0* ]];\n then\n continue\n fi\n arr=($line)\n redispid=${arr[0]}\n taskset -p 2 $redispid\ndone\n"
},
{
"alpha_fraction": 0.6223176121711731,
"alphanum_fraction": 0.6866952776908875,
"avg_line_length": 28,
"blob_id": "674aef4f5078d77a12d9915bb75a604b3d4e4eeb",
"content_id": "5b40c2462cd46f43b238f24c0d6836bd0c8a92a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 8,
"path": "/restore_data/download_data.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nmkdir /mnt/tb2\nmkdir /mnt/tb3\nmkdir /mnt/tb4\n\ns3cmd get s3://proj_hbase_backup/export_tb2/* /mnt/tb2/ &\ns3cmd get s3://proj_hbase_backup/export_tb3/* /mnt/tb3/ &\ns3cmd get s3://proj_hbase_backup/export_tb4/* /mnt/tb4/ &\n\n"
},
{
"alpha_fraction": 0.4988066852092743,
"alphanum_fraction": 0.5155131220817566,
"avg_line_length": 22.22222137451172,
"blob_id": "ccf7a92d86e7b729732120359ce0db3dd0b0a47e",
"content_id": "220f27a6f42e9f0829742c63e3484cbb6166da54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 18,
"path": "/load_redis/s3_load.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport subprocess\n\ndef s3_get(s3_path):\n filename = s3_path.split('/')[-1]\n subprocess.check_call(\n \"s3cmd get {} && mv {} small.json\".format(s3_path, filename),\n shell=True\n )\n subprocess.check_call(\n \"./merge.sh\"\n )\n\nwith open('jsonlist.txt', 'r') as f:\n for line in f:\n if line.strip() != \"\":\n s3_get(line.strip())\n\n"
},
{
"alpha_fraction": 0.5490196347236633,
"alphanum_fraction": 0.5490196347236633,
"avg_line_length": 16,
"blob_id": "506eece17191172f4e9b27b354288943b535c9de",
"content_id": "406dbe28b02e46e1838d00aec8576ee1af8cbfac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 3,
"path": "/load_redis/red_load.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n./load.rb $(ls out*) | redis-cli --pipe\n"
},
{
"alpha_fraction": 0.5492957830429077,
"alphanum_fraction": 0.6478873491287231,
"avg_line_length": 34.5,
"blob_id": "74dae3275beee09681f224b19864d701f455adb9",
"content_id": "fd149c69c97dfb6ccf828bc2ae9b3de59cf8e3bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 4,
"path": "/load_data/run.sh",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\njava -cp \".:hbase-0.94.13.jar:hadoop-core-1.2.1.jar:lib/*:gson-2.2.4.jar\" loadData $1 > load.log 2> load.log\n\n# $1 is config file\n"
},
{
"alpha_fraction": 0.6752350926399231,
"alphanum_fraction": 0.714106559753418,
"avg_line_length": 23.538461685180664,
"blob_id": "9196237453a523e831e4afa7dad916beb6e37cdf",
"content_id": "b9403b3f5985d5fe54eb2a94eb4bba9c39213fd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1595,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 65,
"path": "/backend/test_thrift.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom thrift.transport.TSocket import TSocket\nfrom thrift.transport.TTransport import TBufferedTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom hbase import Hbase\nfrom hbase.ttypes import *\nimport struct\n\n#a = u'\\u4f60'.encode('utf8')\n#print a\n\ntransport = TBufferedTransport(TSocket('localhost', 9090))\ntransport.open()\nprotocol = TBinaryProtocol.TBinaryProtocol(transport)\n\nclient = Hbase.Client(protocol)\n\n#print dir(client)\n\nl = client.getTableNames()\n\n#print l\n\n\ntable = 'test'\n\n#scan = TScan()\n#\n#scan.filterString = 'SingleColumnValueFilter(\\'cf\\', \\'a\\', =, \\'binary:\\xe3\\x80\\x90\\xe3\\x81\\x8b\\xe3\\x82\\x8f\\xe3\\x81\\x84\\xe3\\x81\\x99\\xe3\\x81\\x8e\\xe3\\x82\\x8b\\xe3\\x80\\x91\\', true, false)'\n#\n##scan.filterString = 'SingleColumnValueFilter(\\'cf\\', \\'a\\', >, \\'binary:3\\', true, false)'\n#\n#scanner = client.scannerOpenWithScan(table, scan, {})\n#\n#result = client.scannerGet(scanner)\n#while result:\n#\tprint result\n#\tresult = client.scannerGet(scanner)\n\n\n#scanner2 = client.scannerOpenWithScan(table, scan, {})\n#\n#result = client.scannerGet(scanner2)\n#print 'result'\n#print result\n#while result:\n#\tprint result\n#\tresult = client.scannerGet(scanner2)\n\n\nl = client.getRowWithColumns(table, '\\x00\\x00\\x00\\x7b', ['ccf:'], {})\n#print l[0].columns\n\na = struct.pack('>i', 23)\nb = struct.pack('>i', 44)\n#print a\nscanner = client.scannerOpenWithStop('test', a, b, [], {})\n\n#result = client.scannerGet(scanner)\nwhile (result = client.scannerGet(scanner)):\n print struct.unpack('>i',result[0].row)[0]\n #result = client.scannerGet(scanner)\n\nclient.scannerClose(scanner)\n"
},
{
"alpha_fraction": 0.5675675868988037,
"alphanum_fraction": 0.5883575677871704,
"avg_line_length": 21.904762268066406,
"blob_id": "cb560ff505ed891a5721d262f7a8680fc16c24b7",
"content_id": "42c115f7e9ebbaeb0f3e759caaf65c013fe31219",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1443,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 63,
"path": "/main.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "import datetime\nimport calendar\nfrom flask import Flask\nfrom flask import request\nfrom backend import getdata\nimport traceback\nfrom time import gmtime, strftime\n\napp = Flask(__name__)\ng = getdata.getdata('backend/table_config.txt')\nteam_str = 'supercloud,5830-2688-4282\\n'\n\[email protected]('/')\ndef hello_world():\n return 'Ehh! Please choose between q1, q2, q3, q4.'\n\n\[email protected]('/q1')\ndef q1():\n return team_str + strftime('%Y-%m-%d %H:%M:%S', gmtime()) + '\\n'\n\n\[email protected]('/q2')\ndef q2():\n ts = calendar.timegm( datetime.datetime.strptime(\n request.args.get('time', ''),\n \"%Y-%m-%d %H:%M:%S\").utctimetuple()\n )\n result = g.query2(ts)\n if len(result) <= 0:\n\treturn team_str + 'Nothing found'\n\n #print result\n buf = '\\n'.join(result)\n return team_str + buf + '\\n'\n\n\[email protected]('/q3')\ndef q3():\n start = request.args.get('userid_min', '')\n end = request.args.get('userid_max', '')\n return team_str + str(g.query3(int(start), int(end))) + '\\n'\n #return g.query3(int(\"{}, {}\".format(\n # request.args.get('userid_min', ''),\n # request.args.get('userid_max', '')\n #)\n\n\[email protected]('/q4')\ndef q4():\n result = sorted(g.query4(int(request.args.get('userid', ''))))\n if len(result) <= 0:\n\treturn team_str + 'Nothing found'\n buf = ''\n #print result\n for i in result:\n\tbuf += str(i) + '\\n'\n #print buf\n return team_str + buf\n\t\n\nif __name__ == '__main__':\n app.run()\n"
},
{
"alpha_fraction": 0.42507702112197876,
"alphanum_fraction": 0.4444645643234253,
"avg_line_length": 29.158470153808594,
"blob_id": "9d59a91dc37a0ba675e1ad0cf6730c2f32a659ed",
"content_id": "2e6f56596201eb7117a88789f046e05b4ed2456c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5519,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 183,
"path": "/main.js",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "var http = require('http');\n//var cluster = require('cluster');\nvar async = require('async');\nvar mysql = require('mysql');\nvar NodeCache = require('node-cache');\nvar cache = new NodeCache( { stdTTL: 1, checkperiod: 0 } );\nrequire(\"date-format-lite\");\nDate.masks.default = 'YYYY-MM-DD+hh:mm:ss';\nvar teamstr = \"supercloud,5830-2688-4282\\n\";\n\nvar redis = require(\"redis\");\n\nclient = redis.createClient();\n\n//var express = require('express');\n//var app = express();\nvar map = {};\n//var arypos = [];\n//var arycnt = [];\nvar conn;\n\nasync.series({\n loadtb4: function(callback) {\n pool = mysql.createPool({\n host : 'localhost',\n user : 'root',\n database : 'cloud',\n connectionLimit: 10,\n });\n\n pool.getConnection(function(err, conn) {\n conn.query('select * from tb4', function(err, rows) {\n var i = 0;\n rows.forEach(function(row) {\n map[row['id']] = row['users'];\n\n if (++i % 100000 == 0) {\n console.log('line ' + i);\n }\n });\n\n console.log('tb4 load done');\n callback(null, 1);\n });\n });\n },\n\n create_server: function(callback) {\n /*if (cluster.isMaster) {\n for (var i = 0; i < 0; i++) {\n cluster.fork();\n }\n } else {*/\n http.createServer(function (req, res) {\n try{\n //var body = \"hello, world\";\n //res.writeHead(200, {'Content-Type': 'text/plain', 'Content-Length': body.length});\n //res.write(\"hello, world\");\n qqq = req.url.substring(0, 3);\n\n if(qqq == \"/q1\"){\n cache.get(\"time\", function(err, kv) {\n //console.log(kv);\n var datestr;\n if (Object.keys(kv).length == 0) {\n var now = new Date();\n datestr = now.format();\n cache.set(\"time\", datestr);\n } else {\n datestr = kv[\"time\"]\n }\n\n res.writeHead(200, {'Content-Type': 'text/plain'});\n res.end(teamstr + datestr + '\\n');\n });\n\n }\n else if(qqq == \"/q2\"){\n var create_time = req.url.substring(9);\n var pattern = /(\\d{4})-(\\d{2})-(\\d{2})\\+(\\d{2}):(\\d{2}):(\\d{2})/;\n var myArray = pattern.exec(create_time);\n var timestamp = Date.UTC(\n parseInt(myArray[1]),\n parseInt(myArray[2]) - 1,\n parseInt(myArray[3]),\n parseInt(myArray[4]),\n parseInt(myArray[5]),\n parseInt(myArray[6])\n ) / 1000;\n\n //console.log(timestamp);\n client.lrange(timestamp, 0, -1, function (err, data) {\n res.writeHead(200, {'Content-Type': 'text/plain'});\n if(err){\n res.end(teamstr + 'Nothing found');\n }\n else{\n if(!data.length){\n res.end(teamstr + 'Nothing found');\n }else{\n\n var arr = new Array(data.length);\n var str = new Array(data.length);\n\n for (var i=0; i < data.length; ++i) {\n var index = data[i].indexOf(':');\n arr[i] = [data[i].substring(0,index), data[i].substring(index)];\n }\n //data.sort(function (a, b) {\n //return a.split(':')[0] - b.split(':')[0];\n //});\n arr.sort(function (a, b) {\n return a[0] - b[0];\n });\n\n for (var i=0; i < arr.length; ++i) {\n str[i] = arr[i][0] + arr[i][1]\n }\n\n var value = str.join('\\n');\n //var value = data.join('\\n');\n res.end(teamstr + value + '\\n');\n }\n }\n });\n }\n else if(qqq == \"/q3\"){\n var i = req.url.indexOf('max');\n var uid_min = req.url.substring(15, i - 8);\n var uid_max = req.url.substring(i+4);\n var min_cnt;\n var max_cnt\n\n //console.log(uid_min);\n //console.log(uid_max);\n pool.getConnection(function(err, conn) {\n conn.query('select count from tb3 where id < ' + uid_min + ' order by id desc limit 1', function(err, rows){\n if (!rows) {\n min_cnt = 0;\n } else {\n min_cnt = rows[0]['count'];\n }\n\n conn.query('select count from tb3 where id <= ' + uid_max + ' order by id desc limit 1', 
function(err, rows){\n // TODO out of bound\n if (!rows) {\n max_cnt = 0;\n } else {\n max_cnt = rows[0]['count'];\n }\n //console.log(min_cnt);\n //console.log(max_cnt);\n conn.release();\n res.writeHead(200, {'Content-Type': 'text/plain'});\n res.end(teamstr + (max_cnt - min_cnt) + '\\n');\n });\n });\n });\n }\n else if(qqq == \"/q4\"){\n var uid= req.url.substring(11);\n res.writeHead(200, {'Content-Type': 'text/plain'});\n data = map[uid];\n //console.log(data);\n if (data) {\n res.end(teamstr + data);\n } else {\n res.end(teamstr);\n }\n }\n } catch(e){\n console.log(e);\n res.writeHead(200, {'Content-Type': 'text/plain'});\n res.end(teamstr);\n }\n }).listen(80);\n //}).listen(3000);\n\n console.log(\"start listening on port 80\");\n //}\n callback(null, 2);\n },\n});\n"
},
{
"alpha_fraction": 0.3126843571662903,
"alphanum_fraction": 0.4365781843662262,
"avg_line_length": 21.600000381469727,
"blob_id": "d4eb6167f37ff6522257d1fd6aab091c47264a18",
"content_id": "44435fd0232fe8100963b02c10982c867b2d9231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 15,
"path": "/script/time.js",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "console.log(\n (new Date(\"Wed Aug 27 13:08:45 +0000 2008\")).getTime()\n);\n\nvar re = /(\\d{4})-(\\d{2})-(\\d{2})\\+(\\d{2}):(\\d{2}):(\\d{2})/;\narray = re.exec(\"2013-08-27+13:08:45\");\nconsole.log(\n (new Date(array[1],\n array[2],\n array[3],\n array[4],\n array[5],\n array[6]\n )).getTime()\n);\n"
},
{
"alpha_fraction": 0.46710240840911865,
"alphanum_fraction": 0.4745098054409027,
"avg_line_length": 28.05063247680664,
"blob_id": "a3e27ef351db2419ffa140300b45c2bc78134918",
"content_id": "8c934fca935a4b016247fadf10db92255f827ceb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2295,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 79,
"path": "/load_data/gao2p0.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys,os\nimport subprocess\nimport json\nimport datetime\nimport calendar\nimport struct\n\n\ndef load(js_output, tid, uid, ts, tx, rt):\n\n if len(rt) > 0:\n js_output.write(\n json.dumps({'tid':tid, 'uid':uid, 'ts':ts, 'tx':tx, 'rt':rt}) + '\\n'\n )\n else:\n js_output.write(\n json.dumps({'tid':tid, 'uid':uid, 'ts':ts, 'tx':tx}) + '\\n'\n )\n\ndef cao(js_output, tweet_json):\n tweet = json.loads(tweet_json)\n # tweet id\n # user id\n # retweet uid\n # timestamp\n ts = calendar.timegm( datetime.datetime.strptime(\n ' '.join(tweet['created_at'].split(' ')[:4] + tweet['created_at'].split(' ')[5:]) ,\n \"%a %b %d %X %Y\").utctimetuple()\n )\n # text\n #text = tweet['text'].encode('utf8').replace('\\n', '\\\\n').replace('\"', '\\\\\"').replace('\\\\', '\\\\\\\\')\n #text = \"\\\\n\".join(sp)\n #output.write( u\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\n #tweet['id_str'],\n #tweet['user']['id_str'],\n #tweet['retweeted_status']['user']['id_str'] if tweet.has_key('retweeted_status') else \"\",\n #ts,\n #text.decode('utf8')\n #).encode('utf8')\n #)\n load( js_output,\n int(tweet['id_str'] ),\n int(tweet['user']['id_str'] ),\n int(ts ),\n tweet['text'],\n tweet['retweeted_status']['user']['id_str'] if tweet.has_key('retweeted_status') else \"\",\n )\n\n\ndef gao(s3_path, suffix):\n\n filename = s3_path.split('/')[-1]\n subprocess.check_call(\n \"s3cmd get {0}\".format(s3_path),\n shell=True\n )\n js_output = open(filename+suffix, 'w')\n with open(filename, \"r\") as f:\n for line in f:\n if line.rstrip() != \"\":\n cao(js_output, line.rstrip() )\n\n f.close()\n js_output.close()\n os.remove(filename )\n\njs_list = open('jsonlist.txt', 'w')\nwith open(\"./raw_list.txt\", \"r\") as f:\n for line in f:\n if line.rstrip() != \"\":\n s3path = line.rstrip().split()[-1]\n suffix = '_processed'\n gao(s3path, suffix)\n js_list.write(s3path.split('/')[-1]+suffix+'\\n')\n\nf.close()\njs_list.close()\n"
},
{
"alpha_fraction": 0.5435779690742493,
"alphanum_fraction": 0.5504587292671204,
"avg_line_length": 20.799999237060547,
"blob_id": "dead56fadf96c9a3d175d43d5dbe0038a753f44c",
"content_id": "d292bafff098b044ab706a1e8779a80d03cec65d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 20,
"path": "/load_redis/load.rb",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env ruby\n\ndef gen_redis_proto(*cmd)\n proto = \"\"\n proto << \"*\"+cmd.length.to_s+\"\\r\\n\"\n cmd.each{|arg|\n proto << \"$\"+arg.to_s.bytesize.to_s+\"\\r\\n\"\n proto << arg.to_s+\"\\r\\n\"\n }\n proto\nend\n\nARGV.each do |filename|\n file = File.new(filename, \"r\")\n while (line = file.gets)\n data = line.chomp(\"\\n\").split(' ', 2)\n STDOUT.write(gen_redis_proto(\"LPUSH\", data[0], data[1]))\n end\n file.close\nend\n"
},
{
"alpha_fraction": 0.4525845944881439,
"alphanum_fraction": 0.46002230048179626,
"avg_line_length": 27.90322494506836,
"blob_id": "b24bd1ba0582ab597289eed912b3b70e918c69c0",
"content_id": "e6e2864458fd538b2a2ef7a6027efa05c3fbff5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2689,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 93,
"path": "/backend/gao.py",
"repo_name": "fengjingchao/13ccta",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys,os\nimport subprocess\nimport json\nimport datetime\nimport calendar\nimport struct\n\nts_output = open('hb_ts', \"w\")\nud_output = open('hb_uid', \"w\")\nrt_output = open('hb_rt', \"w\")\n\ndef load(tid, uid, ts, tx, rt):\n\n def barr2str(arr):\n return \"\\\\x\" + \"\\\\x\".join(\"{:02x}\".format(ord(i)) for i in arr)\n\n ts_output.write(\n # put timestamp, tweet_ts, tweet:tw_id, text\n 'put \"timestamp\", \"{}\", \"tweet:{}\",\"{}\"\\n'.format(\n barr2str(struct.pack(\">Q\", ts)),\n barr2str(struct.pack(\">Q\", tid)),\n tx)\n )\n ud_output.write(\n # put user, uid, tweet:tw_id, \"\"\n 'put \"user\", \"{}\", \"tweet:{}\",\"\"\\n'.format(\n barr2str(struct.pack(\">Q\", uid)),\n barr2str(struct.pack(\">Q\", tid)) )\n )\n\n if rt == \"\":\n return\n\n rt_uid = int(rt)\n\n rt_output.write(\n # put retweet, rt_uid, tweet:tw_id, uid\n 'put \"retweet\", \"{}\", \"tweet:{}\",\"{}\"\\n'.format(\n barr2str(struct.pack(\">Q\", rt_uid)),\n barr2str(struct.pack(\">Q\", tid)),\n barr2str(struct.pack(\">Q\", uid)) )\n )\n\ndef cao(tweet_json):\n tweet = json.loads(tweet_json)\n # tweet id\n # user id\n # retweet uid\n # timestamp\n ts = calendar.timegm( datetime.datetime.strptime(\n ' '.join(tweet['created_at'].split(' ')[:4] + tweet['created_at'].split(' ')[5:]) ,\n \"%a %b %d %X %Y\").utctimetuple()\n )\n # text\n sp = tweet['text'].encode('utf8').split('\\n')\n text = \"\\\\n\".join(sp)\n #output.write( u\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\n #tweet['id_str'],\n #tweet['user']['id_str'],\n #tweet['retweeted_status']['user']['id_str'] if tweet.has_key('retweeted_status') else \"\",\n #ts,\n #text.decode('utf8')\n #).encode('utf8')\n #)\n load( int(tweet['id_str'] ),\n int(tweet['user']['id_str'] ),\n int(ts ),\n text,\n tweet['retweeted_status']['user']['id_str'] if tweet.has_key('retweeted_status') else \"\",\n )\n\n\ndef gao(s3_path):\n\n filename = os.path.basename(s3_path)\n #subprocess.check_call(\n #\"s3cmd get {0}\".format(s3_path),\n #shell=True\n #)\n with open(filename, \"r\") as f:\n for line in f:\n if line.rstrip() != \"\":\n cao( line.rstrip() )\n\n #os.remove(filename )\n\n\nwith open(\"./json_files\", \"r\") as f:\n for line in f:\n if line.rstrip() != \"\":\n gao(line.rstrip())\n\n"
}
] | 22 |
TheCatTree/audio-transcriber | https://github.com/TheCatTree/audio-transcriber | f62dba88c3597430df2f0043762eff7b75c3b811 | 5cb55965eecc0a74c39b894f54980e87d24340ae | 0d5da4e551d35f26699aed354dc60ace32414f9a | refs/heads/master | 2023-01-07T02:02:40.437512 | 2020-11-10T03:04:57 | 2020-11-10T03:04:57 | 309,576,701 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7551169395446777,
"alphanum_fraction": 0.7573099136352539,
"avg_line_length": 37,
"blob_id": "02c8f5b031d90785af571d46cc4f017d86c66a6f",
"content_id": "a68ad585342efbc59cad1be14f3d1e474846d191",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1368,
"license_type": "permissive",
"max_line_length": 178,
"num_lines": 36,
"path": "/README.md",
"repo_name": "TheCatTree/audio-transcriber",
"src_encoding": "UTF-8",
"text": "# Python Audio Transcriber\n\n<!--- These are examples. See https://shields.io for others or to customize this set of shields. You might want to include dependencies, project status and licence info here --->\n\n\n\n\n\nProject name is a tool, that allows windows users to quickly transcribe an audio file.\n\nUses the Python speech recognition library, to transcribe audio files.\n\n## Prerequisites\n\nBefore you begin, ensure you have met the following requirements:\n<!--- These are just example requirements. Add, duplicate or remove as required --->\n* Python >= 3.6 installed.\n* Speech recognition library.\n* For mp3s, Pydub & ffmpeg\n\n```\n## Using Python Audio Transcriber\n\nTo use Python Audio Transcriber, follow these steps:\nDrag and drop audio file, on \"drop here.bat\"\n\n```\n\n## Contact\n\nIf you want to contact me you can reach me at <[email protected]>.\n\n## License\n<!--- If you're not sure which open license to use see https://choosealicense.com/--->\n\nThis project uses the following license: MIT License.\n"
},
{
"alpha_fraction": 0.6928648948669434,
"alphanum_fraction": 0.6997464895248413,
"avg_line_length": 32.253013610839844,
"blob_id": "376544628cee2caaef0c9993c18c8acf7b28fd85",
"content_id": "bde7f510a54228dde4e26e68e78d1d950da5b31a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2761,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 83,
"path": "/Transcriber.py",
"repo_name": "TheCatTree/audio-transcriber",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport ntpath\nimport sys\nimport json\nimport concurrent.futures\nimport speech_recognition as sr\nfrom pprint import pprint\nfrom pydub import AudioSegment\nfrom pydub.utils import make_chunks\nfrom os import path\n\nprint ('Number of arguments:', len(sys.argv), 'arguments.')\nprint ('Argument List:', str(sys.argv))\nlocation = os.path.dirname(os.path.abspath(__file__))\nsound_string = sys.argv[1]\nsound_location = sound_string\nsound_name = ntpath.basename(sound_string)\nout_location = os.path.dirname(sound_string)\n\nprint('Sound name', sound_name)\nr = sr.Recognizer()\n\n# Test for mp3\nname, extension = os.path.splitext(sound_name)\nprint('File extension:', extension )\n\nif extension.lower() == \".mp3\":\n print(\"Changing MP3 to Wav.\")\n temp_sound = AudioSegment.from_mp3(sound_location)\n temp_out = '{path}/temp/temp_mptowav.wav'.format(path=location)\n temp_sound.export(temp_out, format=\"wav\")\n sound_location = temp_out\n\n# Brake audio, into 40 second chunks.\ntemp_sound = AudioSegment.from_file(sound_location, format=\"wav\")\nchunks = make_chunks(temp_sound,40000)\nprint(\"Number of Chunks\",len(chunks))\n\n# Export chunks\nfor i, chunk in enumerate(chunks):\n chunk_name = \"chunk{0}.wav\".format(i)\n print (\"exporting\", chunk_name)\n chunk.export(\"{path}/temp/{name}\".format(path=location,name=chunk_name), format=\"wav\")\n\n\n\ndef transcribe(chunk_id):\n harvard = sr.AudioFile(\"{path}/temp/chunk{number}.wav\".format(path=location,number=chunk_id))\n with harvard as source:\n audio = r.record(source)\n\n # recognize speech using Google Cloud Speech\n g_cloud_json = open('{path}/credentials/googlecloud.json'.format(path=location))\n GOOGLE_CLOUD_SPEECH_CREDENTIALS = json.dumps(json.load(g_cloud_json))\n\n try:\n out = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS) # pretty-print the recognition result\n pprint(\"Google Cloud Speech recognition results chunk {0}:\".format(chunk_id))\n pprint(\"Chunk:{0} {1}\".format(chunk_id, out))\n # out = r.recognize_google(audio)\n except sr.UnknownValueError:\n print(\"Google Cloud Speech could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Cloud Speech service; {0}\".format(e))\n\n \n \n return {\n \"idx\": chunk_id,\n \"text\": out\n }\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n all_text = executor.map(transcribe, range(len(chunks)))\n\ntranscript = \"\"\nfor text in sorted(all_text, key=lambda x: x['idx']):\n transcript = transcript + \"{0}\".format(text)\n\nwith open('{path}/{name}.rtf'.format(name=sound_name, path=out_location), 'w') as file_out: \n file_out.write(transcript)\n\n"
}
] | 2 |
ODYTRON/US-total-births-by-day-1994-2003 | https://github.com/ODYTRON/US-total-births-by-day-1994-2003 | c5a3740b35c55204f566d80a90c1befaf61570ae | 8462e5da57f65472163629ced49820038beb3db6 | 14e0fe4d335685f1d03ac1e60f24dcf74c0b3987 | refs/heads/master | 2021-01-22T03:40:09.574015 | 2017-05-25T11:00:51 | 2017-05-25T11:00:51 | 92,383,836 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6553497910499573,
"alphanum_fraction": 0.6594650149345398,
"avg_line_length": 34,
"blob_id": "793251acd36cef32923bfac972a0bf43f08aa228",
"content_id": "7ebee19870cea63377cce30b9b00ba8821812fd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 972,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 27,
"path": "/US_births_total_by_day.py",
"repo_name": "ODYTRON/US-total-births-by-day-1994-2003",
"src_encoding": "UTF-8",
"text": "# the dictionary \r\nthe_dict = dict() \r\n# open the dataset\r\nf = open(\"births.csv\", 'r')\r\n# read the dataset\r\ntext = f.read()\r\n# split the dataset aka put it in list\r\ndata = text.split('\\n')\r\n# cut the heading year /year,month,date_of_month,day_of_week,births\r\ndata_no_head = (data[1:-1])\r\n# with this iteration we access the data so far\r\nfor row in data_no_head:\r\n# then we split every element with comma to make list of lists\r\n split_row = row.split(',')\r\n# store the day of week \r\n day_of_week = split_row[3] \r\n# store births\r\n births = int(split_row[4])\r\n# if the day of the week key is in the_dict\r\n if day_of_week in the_dict:\r\n# set the specific key in the dict and add the number of births\r\n the_dict[day_of_week] = the_dict[day_of_week] + births \r\n# else else just set the key with the births / this is the initial trigger to fill the_dict \r\n else:\r\n the_dict[day_of_week] = births\r\n# print the dict out of the loop \r\nprint (the_dict)\r\n"
},
{
"alpha_fraction": 0.718142569065094,
"alphanum_fraction": 0.7613390684127808,
"avg_line_length": 45.20000076293945,
"blob_id": "0a7c66a823ff5ee5ee0db3d168bde844a77dfb76",
"content_id": "2e3ded4fc5b72176c2b2346c40aec972c5cd1762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 20,
"path": "/README.md",
"repo_name": "ODYTRON/US-total-births-by-day-1994-2003",
"src_encoding": "UTF-8",
"text": "# US-total-births-by-day-1994-2003\nCalculate number of births by day in the United States of America for the years 1994 - 2003 using jupyter notebook\n\n## Calculate number of births by day in the United States of America for the years 1994 - 2003 using jupyter notebook\n** This is the first guided project i did from dataquest.io (thanks Vik)\n\nThe dataset of this project is taken from the Centers for Disease control AND Prevention's National Center for health Statistics. The structure of the dataset is described below :\n\n- `year` (1994 to 2003)\n- `month` (1 to 12)\n- `date_of_month` Day number of the month (1 to 31)\n- `day_of_week` Day of week (1 to 7)\n- `births` Number of births that day\n\n\n** Instructions \n\nWrite a code that returns a dictionary containing the total number of births for each unique day of the week.\n\nthe original dataset can be found here: https://github.com/fivethirtyeight/data/tree/master/births \n"
}
] | 2 |
pprasitanond/Python-Challenge | https://github.com/pprasitanond/Python-Challenge | 8c810e11ff3b7ae27b10999b134076244435bb3e | 40b1a21632b50447c5349194f32763257b9f6b59 | e9106d52bc91f2d171c09775e11773796522043b | refs/heads/master | 2022-11-11T03:49:48.926583 | 2020-07-08T00:31:19 | 2020-07-08T00:31:19 | 276,618,145 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5172233581542969,
"alphanum_fraction": 0.5234864354133606,
"avg_line_length": 31.491525650024414,
"blob_id": "5c6086a7ce2556dff615070f059bec75efaf4bd2",
"content_id": "b83011d39878cce66cdce5c42d27d0ea1e78ca91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1916,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 59,
"path": "/PyPoll/main.py",
"repo_name": "pprasitanond/Python-Challenge",
"src_encoding": "UTF-8",
"text": "# import and create file pat\nimport os\nimport csv\n\n#path \ncsvpath = os.path.join('..','Resources','election_data_copy.csv')\n\n#declaring variables\ntotal_votes = 0\ncandidates = {}\ncandidates_per = {}\nwinner = \"\"\nwinner_count = 0\n\n# read file \nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csvheader = next(csvfile)\n\n for row in csvreader:\n total_votes = total_votes + 1\n if row[2] in candidates.keys():\n candidates[row[2]] += 1\n else:\n candidates[row[2]] = 1\n\nfor key, value in candidates.items():\n candidates_per[key] = round((value/total_votes)*100,2)\n\nfor key in candidates.keys():\n if candidates[key] > winner_count:\n winner = key\n winner_count = candidates[key]\n\n#print analysis result\nprint(\"Election Results\")\nprint(\"-------------------------------------\")\nprint(\"Total Votes: \" + str(total_votes))\nprint(\"-------------------------------------\")\nfor key, value in candidates.items():\n print(key + \": \" + str(candidates_per[key]) + \"% (\" + str(value) + \")\")\nprint(\"-------------------------------------\")\nprint(\"Winner: \" + winner)\nprint(\"-------------------------------------\")\n\n#path to output folder\noutput_path = os.path.join(\"..\",\"Output\", \"budget_data.txt\")\nwith open(output_path, 'w', newline='') as text_file:\n print(\"Election Results\", file=text_file)\n print(\"-------------------------------------\", file=text_file)\n print(\"Total Votes: \" + str(total_votes), file=text_file)\n print(\"-------------------------------------\", file=text_file)\n for key, value in candidates.items():\n print(key + \": \" + str(candidates_per[key]) + \"% (\" + str(value) + \")\", file=text_file)\n print(\"-------------------------------------\", file=text_file)\n print(\"Winner: \" + winner, file=text_file)\n print(\"-------------------------------------\", file=text_file)\n\n csvfile.close()"
},
{
"alpha_fraction": 0.6697782874107361,
"alphanum_fraction": 0.6826137900352478,
"avg_line_length": 32.671051025390625,
"blob_id": "a8146c917205b649963b2afc4857c1fa379ad507",
"content_id": "e2d5f5985aac43a56fbf1fb29b80c3b7cce781c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2571,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 76,
"path": "/PyBank/main.py",
"repo_name": "pprasitanond/Python-Challenge",
"src_encoding": "UTF-8",
"text": "# import and create file path\nimport os\nimport csv\n\ncsvpath = os.path.join('..','Resources','budget_datacopy.csv')\n\n# define and initiate some variables\nmonths = []\nprofit_loss = []\nprofitchangelist = []\ndate_list = []\n\ncount_months = 0\nnet_profit_loss = 0\ncurrent_month_profit = 0\nprevious_month_profit = 0\nprofitchange = 0\nmax_net_change = 0\nmin_net_change = 0\n\n# read file \nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csvheader = next(csvreader)\n\n for row in csvreader:\n count_months = count_months + 1\n\n current_month_profit = int(row[1])\n net_profit_loss += (current_month_profit)\n net_final = \"${:,.2f}\".format(net_profit_loss)\n\n current_month_profit = int(row[1])\n profitchange = float(current_month_profit) - float(previous_month_profit)\n profitchangelist.append(profitchange)\n previous_month_profit = current_month_profit\n\ndef average(profitchangelist):\n x = len(profitchangelist)\n total = sum(profitchangelist) - profitchangelist[0]\n avg = total / (x - 1)\n return avg\n\naverage_change = round(average(profitchangelist), 2)\naverage_change_final = \"${:,.2f}\".format(average_change)\n\n#greatest increase/decrease\ngreatest_increase = max(profitchangelist)\ngreatest_increase_final = \"${:,.2f}\".format(greatest_increase)\nincrease_index = profitchangelist.index(greatest_increase)\n\ngreatest_decrease = min(profitchangelist)\ngreatest_decrease_final = \"${:,.2f}\".format(greatest_decrease)\ndecrease_index = profitchangelist.index(greatest_decrease)\n\n#print\nprint(\"Financial Analysis\")\nprint(\"---------------------------\")\nprint(f\"Total Months: {count_months}\")\nprint(f\"Total: {net_final}\")\nprint(f\"Average Change: {average_change_final}\")\nprint(f\"Greatest Increase in Profits: Feb-2012 ({greatest_increase_final})\")\nprint(f\"Greatest Decrease in Profits: Sep-2013 ({greatest_decrease_final}\")\n\n#path to output folder\noutput_path = os.path.join(\"..\",\"Output\", \"Financial_Analysis.txt\")\nwith open(output_path, 'w', newline='') as text_file:\n print(\"Financial Analysis\", file = text_file)\n print(\"---------------------------\", file = text_file)\n print(f\"Total Months: {count_months}\", file = text_file)\n print(f\"Total: {net_final}\", file = text_file)\n print(f\"Average Change: {average_change_final}\", file = text_file)\n print(f\"Greatest Increase in Profits: Feb-2012 ({greatest_increase_final})\", file = text_file)\n print(f\"Greatest Decrease in Profits: Sep-2013 ({greatest_decrease_final}\", file = text_file)\n\n csvfile.close()\n\n\n\n\n "
}
] | 2 |
shivjain123/Brown_Field_Stars-Gravity_Calculated | https://github.com/shivjain123/Brown_Field_Stars-Gravity_Calculated | 71ddafca4ba96eb0976c76c6d9dedec2505a4d17 | c1073bf86fcf7574359e07c5026daca2973139e9 | f98e04f6fae6420f1adef4af1987dd5de921456d | refs/heads/master | 2023-07-30T00:25:19.388494 | 2021-09-05T11:45:11 | 2021-09-05T11:45:11 | 403,293,541 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5944540500640869,
"alphanum_fraction": 0.6273829936981201,
"avg_line_length": 22.125,
"blob_id": "a3a739cfff4948a04e940f7aa2cc2f35af4b105a",
"content_id": "9be8d4a114f31847047fc4c85d3d08a2802d841c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 24,
"path": "/gravity.py",
"repo_name": "shivjain123/Brown_Field_Stars-Gravity_Calculated",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\n\r\ndf = pd.read_csv('Stars/cleaned.csv')\r\n\r\ndf.drop([\"Unnamed: 0\"], axis = 1, inplace = True)\r\n\r\ndf['Mass']=df['Mass'].astype('float')\r\ndf[\"Mass\"] *= 1.989e+30\r\n\r\ndf[\"Radius\"] = df[\"Radius\"].astype(float)\r\ndf[\"Radius\"] *= 6.957e+8\r\n\r\nmass_list = df[\"Mass\"].tolist()\r\nradius_list = df[\"Radius\"].tolist()\r\n\r\ngravity_list = []\r\n\r\nfor index in range(len(mass_list)):\r\n gravity = (6.674e-11*mass_list[index])/(radius_list[index]*radius_list[index])\r\n gravity_list.append(gravity)\r\n\r\ndf[\"Gravity\"] = gravity_list\r\n\r\ndf.to_csv(\"Stars/Stars_with_Gravity.csv\")"
}
] | 1 |
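The gravity.py loop above applies Newton's surface-gravity formula g = G*M/R^2 row by row. A quick sanity check with the script's own conversion constants, using the Sun (mass 1 and radius 1 in solar units) as the obvious test case:

    G = 6.674e-11            # gravitational constant, N m^2 kg^-2
    M = 1.0 * 1.989e+30      # one solar mass in kg
    R = 1.0 * 6.957e+8       # one solar radius in m
    print(G * M / (R * R))   # ~274 m/s^2, the Sun's surface gravity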
weigriffiths/django_blog | https://github.com/weigriffiths/django_blog | 43d7cfae0e495aa2518ec21e7a7586637e95067f | 2313caff53174af45e64656adfb1a6c8063ee880 | 88fd8c05aa57715e8253bbd92471f6528f5e93f0 | refs/heads/main | 2023-02-27T07:02:16.097630 | 2021-02-07T11:17:16 | 2021-02-07T11:17:16 | 336,767,795 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7854406237602234,
"alphanum_fraction": 0.7854406237602234,
"avg_line_length": 36.42856979370117,
"blob_id": "31dcb4b1b5005652959e141a9de60080387f709f",
"content_id": "146f2ddaa8f87a1039adb95a5e043848435eb7d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 7,
"path": "/README.md",
"repo_name": "weigriffiths/django_blog",
"src_encoding": "UTF-8",
"text": "# Django Blog\nThis is a simple blog site run on localhost using Django.\n\nFully functioning, you can create an account, login, add post, delete post, view posts.\n\n# Installation\nRequires Django install and several dependencies (see <code>requirements.txt</code>)"
},
{
"alpha_fraction": 0.7450980544090271,
"alphanum_fraction": 0.7450980544090271,
"avg_line_length": 20.85714340209961,
"blob_id": "4ede11a716f77235f1d13e31211539539d37749a",
"content_id": "0e877d41d7c0994a93c1f3b40f31800f9c522eb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 7,
"path": "/blog/apps.py",
"repo_name": "weigriffiths/django_blog",
"src_encoding": "UTF-8",
"text": "# Remember to add the app to 'settings.py' file under INSTALLED_APPS\n\nfrom django.apps import AppConfig\n\n\nclass BlogConfig(AppConfig):\n name = 'blog'\n"
}
] | 2 |
TRYM-YOSHIKI/DNN_TPS_jul | https://github.com/TRYM-YOSHIKI/DNN_TPS_jul | 89897a607d1810a098940a8982b34bd7791e7d2e | 2596aac242836c40e005ecd899aa34f2c061b382 | 767bfb02ab23175cd17b92deffcdbeb1a54e648f | refs/heads/main | 2023-06-20T14:50:07.036846 | 2021-07-16T14:46:27 | 2021-07-16T14:46:27 | 386,669,173 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7762237787246704,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 16.875,
"blob_id": "522c1e312a30b514bea3701bc44954fafb7e46f1",
"content_id": "32fa3acd3ff03bd3f02f096e73b869ab672210bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 8,
"path": "/README.md",
"repo_name": "TRYM-YOSHIKI/DNN_TPS_jul",
"src_encoding": "UTF-8",
"text": "# DNN_TPS_jul\nディープニューラルネットワークを用いてKaggleのTablar Playground Series ー Jul 2021 に挑戦\n\n1.ベイズ最適化を用いてニューラルネットワークのパラメータを調整する\n\n↓↓↓↓↓↓\n\n2.データを学習させてモデルを作る\n"
},
{
"alpha_fraction": 0.5134142637252808,
"alphanum_fraction": 0.5309234857559204,
"avg_line_length": 30.070175170898438,
"blob_id": "13b059666c70785f92d22dc01a4c90e3b38ef08a",
"content_id": "81923e5f53ea308ef88a3027dbc70351899d2bc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3955,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 114,
"path": "/Beyesian_DNN.py",
"repo_name": "TRYM-YOSHIKI/DNN_TPS_jul",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.metrics import r2_score\nfrom bayes_opt import BayesianOptimization\nfrom sklearn.model_selection import KFold\n\n\n# データの前処理------------------------------------------------------\n# csvファイルからPandas DataFrameへ読み込み\ntrain = pd.read_csv('train.csv', delimiter=',', low_memory=False)\n\n# trainデータを入力データとラベルに分割する\nX = train.drop(['date_time'], axis=1).drop(['target_carbon_monoxide'], axis=1).drop(['target_benzene'], axis=1).drop(['target_nitrogen_oxides'], axis=1).values\nY_1 = train.target_carbon_monoxide.values # carbon_monoxide(一酸化炭素)\nY_2 = train.target_benzene.values # benzene(ベンゼン)\nY_3 = train.target_nitrogen_oxides.values # nitrogen_oxides(窒素酸化物)\nY_lst = [Y_1, Y_2, Y_3]\n\n\n# RMSLE カスタム評価関数 #####################\nfrom keras import backend as K\nmsle = keras.metrics.MeanSquaredLogarithmicError()\ndef root_mean_squared_logarithmic_error(y_true, y_pred):\n return K.sqrt(msle(y_true, y_pred))\n\n\n#メイン-------------------------------------------------------------\ndef main():\n # ベイズ最適化実行\n global y\n for y in Y_lst:\n optimizer = bayesOpt()\n print(optimizer.res)\n\n\n#ベイズ最適化---------------------------------------------------------\ndef bayesOpt():\n # 最適化するパラメータの下限・上限\n pbounds = {\n 'l1': (10, 100),\n 'l2': (10, 100),\n 'l1_drop': (0.0, 0.5),\n 'l2_drop': (0.0, 0.5),\n 'epochs': (5, 500),\n 'batch_size': (64, 2048)\n }\n # 関数と最適化するパラメータを渡す\n optimizer = BayesianOptimization(f=validate, pbounds=pbounds)\n # 最適化\n optimizer.maximize(init_points=5, n_iter=10, acq='ucb')\n return optimizer\n\n\n#評価------------------------------------------------------------------\ndef validate(l1, l2, l1_drop, l2_drop, epochs, batch_size):\n\n #モデルを構築&コンパイル----------------------\n def set_model():\n #モデルを構築\n model = keras.Sequential([\n keras.layers.Flatten(input_shape=(input_num,)),\n keras.layers.Dense(int(l1), activation='relu'),\n keras.layers.Dropout(l1_drop),\n keras.layers.Dense(int(l2), activation='relu'),\n keras.layers.Dropout(l2_drop),\n keras.layers.Dense(1, activation='linear')\n ])\n #モデルをコンパイル\n model.compile(optimizer='adam', \n loss='mean_squared_error')\n return model\n\n\n #交叉検証------------------------------------\n def Closs_validate():\n # 交差検証を実行\n valid_scores = [] # 評価を格納する配列\n kf = KFold(n_splits=5, shuffle=True, random_state=42) #データの分割の仕方を決定\n for fold, (train_indices, valid_indices) in enumerate(kf.split(X)):\n X_train, X_valid = X[train_indices], X[valid_indices]\n y_train, y_valid = y[train_indices], y[valid_indices]\n\n global input_num\n input_num = X_train.shape[1]\n\n # モデルをセット\n model = set_model()\n \n # 学習させる\n model.fit(X_train, y_train,\n validation_data=(X_valid, y_valid),\n epochs=int(epochs),\n batch_size=int(batch_size),\n verbose=0)\n\n # テストデータを適用する\n y_valid_pred = model.predict(X_valid)[:,0]\n \n # スコアを求める\n score = r2_score(y_valid, y_valid_pred)\n\n # 評価を格納する\n valid_scores.append(score)\n\n cv_score = np.mean(valid_scores)\n return cv_score\n \n return Closs_validate()\n\n\nif __name__ == '__main__':\n main()"
}
] | 2 |
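The Beyesian_DNN.py script above drives bayes_opt with a Keras cross-validation objective; the library's maximize loop is easier to see on a toy function first. A minimal sketch (the quadratic objective and its bounds are invented for illustration):

    from bayes_opt import BayesianOptimization

    def objective(x):
        return -(x - 2) ** 2                  # maximum at x = 2

    opt = BayesianOptimization(f=objective, pbounds={'x': (-5, 5)})
    opt.maximize(init_points=3, n_iter=10)
    print(opt.max)                            # best target/params found; x should approach 2

The real script follows the same pattern, with validate() as f and the network hyperparameters as pbounds.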
rudra012/dj_celery_docker | https://github.com/rudra012/dj_celery_docker | 5462480d1d6bb59919084ae4fdc734ea6e9d0d92 | 37ba5f11eda472a445540605c33037e53f09a15f | 394366bf581cc9f1bf96b07f8775db2c4092d8f3 | refs/heads/master | 2022-12-10T10:18:40.504053 | 2019-05-28T07:20:55 | 2019-05-28T07:20:55 | 186,407,466 | 0 | 0 | MIT | 2019-05-13T11:35:35 | 2019-05-28T07:21:36 | 2022-12-08T05:06:17 | Python | [
{
"alpha_fraction": 0.6167483925819397,
"alphanum_fraction": 0.619766116142273,
"avg_line_length": 32.987178802490234,
"blob_id": "2bd8bb21fde5e0fc1aee9b06f8d6dae9baccc4f3",
"content_id": "f1290c083d13d04696ce9bed4f35c11d02cb6b81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2651,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 78,
"path": "/app/celerydemo/models.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import, unicode_literals\n\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.db.models import signals\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_celery_beat.models import PeriodicTask, PeriodicTasks\n\nfrom . import schedules\n\n\nclass TaskLog(models.Model):\n task_name = models.CharField(max_length=255)\n created = models.DateTimeField(auto_now_add=True, editable=False)\n modified = models.DateTimeField(auto_now=True)\n\n\nclass CustomPeriodicTask(PeriodicTask):\n PERIOD_CHOICES = (\n ('ONCE', _('Once')),\n ('DAILY', _('Daily')),\n ('WEEKLY', _('Weekly')),\n ('MONTHLY', _('Monthly')),\n )\n MONTHLY_CHOICES = (\n ('DAY', _('Day')),\n ('FIRSTWEEK', _('First Week')),\n ('SECONDWEEK', _('Second Week')),\n ('THIRDWEEK', _('Third Week')),\n ('FOURTHWEEK', _('Fourth Week')),\n ('LASTWEEK', _('Last Week')),\n ('LASTDAY', _('Last Day')),\n )\n end_time = models.DateTimeField(\n _('End Datetime'), blank=True, null=True,\n help_text=_(\n 'Datetime when the scheduled task should end')\n )\n every = models.PositiveSmallIntegerField(\n _('every'), null=False, default=1,\n help_text=_('For Weekly and Monthly Repeat')\n )\n scheduler_type = models.CharField(\n _('scheduler_type'), max_length=24, choices=PERIOD_CHOICES,\n null=True, blank=True\n )\n monthly_type = models.CharField(\n _('monthly_type'), max_length=24, choices=MONTHLY_CHOICES,\n null=True, blank=True\n )\n max_run_count = models.PositiveIntegerField(\n null=True, blank=True,\n help_text=_('To end scheduled task after few occurrence')\n )\n last_executed_at = models.DateTimeField(null=True, blank=True)\n last_executed_days = JSONField(null=True, blank=True)\n\n @property\n def schedule(self):\n if self.interval:\n return self.interval.schedule\n if self.crontab:\n crontab = schedules.my_crontab(\n minute=self.crontab.minute,\n hour=self.crontab.hour,\n day_of_week=self.crontab.day_of_week,\n day_of_month=self.crontab.day_of_month,\n month_of_year=self.crontab.month_of_year,\n )\n return crontab\n if self.solar:\n return self.solar.schedule\n if self.clocked:\n return self.clocked.schedule\n\n\nsignals.pre_delete.connect(PeriodicTasks.changed, sender=CustomPeriodicTask)\nsignals.pre_save.connect(PeriodicTasks.changed, sender=CustomPeriodicTask)\n"
},
{
"alpha_fraction": 0.7293814420700073,
"alphanum_fraction": 0.7293814420700073,
"avg_line_length": 28.846153259277344,
"blob_id": "9c35411410aaa0afd6c720fd3cd8bd6bb5b4e464",
"content_id": "6459e574b0f90505770141872a7134528457ad44",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 39,
"path": "/app/hello_django/urls.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.urls import path, include\nfrom rest_framework import serializers, viewsets\nfrom rest_framework.routers import DefaultRouter\n# from upload.views import image_upload\n\n\nclass AccountSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('first_name', 'username', 'email', 'password')\n\n\nclass SnippetViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = AccountSerializer\n\n\n# Create a router and register our viewsets with it.\nrouter = DefaultRouter()\nrouter.register(r'users', SnippetViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n # path('', image_upload, name='upload'),\n]\n\nif bool(settings.DEBUG):\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.48220065236091614,
"alphanum_fraction": 0.6990291476249695,
"avg_line_length": 16.22222137451172,
"blob_id": "ddca18ee0fab3333392b6468f3d0d79112aa82ac",
"content_id": "fc277ba71fe4f59030eb6432bda4f40fd1c782bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 309,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 18,
"path": "/app/requirements.txt",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "amqp==2.4.2\nanyjson==0.3.3\nbilliard==3.5.0.5\ncelery==4.0.2\ncertifi==2019.3.9\nDjango==2.2.1\ndjangorestframework==3.9.4\nkombu==4.5.0\npipenv==2018.11.26\npkg-resources==0.0.0\npsycopg2==2.8.2\npytz==2019.1\nredis==3.2.1\nsqlparse==0.3.0\nvine==1.3.0\nvirtualenv==16.5.0\nvirtualenv-clone==0.5.3\ndjango-celery-beat==1.5.0"
},
{
"alpha_fraction": 0.6918919086456299,
"alphanum_fraction": 0.6918919086456299,
"avg_line_length": 19.55555534362793,
"blob_id": "734d9596f703d5d9bcdde8cd2283d55a19c2b10e",
"content_id": "c0326266597fc02bab4cf5a938f46ce234430831",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 9,
"path": "/app/celerydemo/tasks.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from celery import shared_task\n\nfrom .models import TaskLog\n\n\n@shared_task\ndef logging_task():\n print('Logging task invoked...........')\n TaskLog.objects.create(task_name='test')\n"
},
{
"alpha_fraction": 0.6060009598731995,
"alphanum_fraction": 0.6069847345352173,
"avg_line_length": 31.26984214782715,
"blob_id": "7451423c800cf5058273638f62109a22ba996074",
"content_id": "3a7af876fad6428ad0532d5200112ffd3158ff7c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2033,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 63,
"path": "/app/celerydemo/clockedschedule.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "\"\"\"Clocked schedule Implementation.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\n\nfrom celery import schedules\nfrom celery.utils.time import maybe_make_aware\nfrom collections import namedtuple\n\n\nschedstate = namedtuple('schedstate', ('is_due', 'next'))\n\n\nclass clocked(schedules.BaseSchedule):\n \"\"\"clocked schedule.\n\n It depend on PeriodicTask once_off\n \"\"\"\n\n def __init__(self, clocked_time, enabled=True,\n model=None, nowfun=None, app=None):\n \"\"\"Initialize clocked.\"\"\"\n self.clocked_time = maybe_make_aware(clocked_time)\n self.enabled = enabled\n self.model = model\n super(clocked, self).__init__(nowfun=nowfun, app=app)\n\n def remaining_estimate(self, last_run_at):\n return self.clocked_time - self.now()\n\n def is_due(self, last_run_at):\n # actually last run at is useless\n print('is_due', last_run_at)\n last_run_at = maybe_make_aware(last_run_at)\n print('aware is_due', last_run_at)\n rem_delta = self.remaining_estimate(last_run_at)\n remaining_s = max(rem_delta.total_seconds(), 0)\n print('remaining_s: ', remaining_s)\n print('schedstate: ', schedstate)\n if not self.enabled:\n return schedstate(is_due=False, next=None)\n\n if remaining_s == 0:\n if self.model:\n self.model.enabled = False\n self.model.save()\n print('Executing function')\n return schedstate(is_due=True, next=None)\n return schedstate(is_due=False, next=remaining_s)\n\n def __repr__(self):\n return '<clocked: {} {}>'.format(self.clocked_time, self.enabled)\n\n def __eq__(self, other):\n if isinstance(other, clocked):\n return self.clocked_time == other.clocked_time and \\\n self.enabled == other.enabled\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __reduce__(self):\n return self.__class__, (self.clocked_time, self.nowfun)\n"
},
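The clocked schedule above reports due/not-due through the same schedstate tuples as the rest of the scheduler code, firing once at clocked_time and disabling its model afterwards. A small sketch of driving it directly (the datetimes are illustrative, and model=None so the self-disabling branch is skipped; clocked itself makes the naive datetimes timezone-aware):

    import datetime

    c = clocked(clocked_time=datetime.datetime(2019, 6, 1, 12, 0))
    # before clocked_time: (is_due=False, next=seconds remaining); at or after: (True, None)
    print(c.is_due(last_run_at=datetime.datetime.utcnow()))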
{
"alpha_fraction": 0.7377049326896667,
"alphanum_fraction": 0.744990885257721,
"avg_line_length": 26.450000762939453,
"blob_id": "9d421da240eaa3759392f8f5965f02a80ea610bd",
"content_id": "d38fe4791ec527482305f2c53fa409a771d6b85e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 20,
"path": "/app/hello_django/celery_app.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom celery import Celery\nfrom django.conf import settings \n# Set default Django settings\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_django.settings')\n# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dcs.settings')\n\napp = Celery('hello_django')\napp.config_from_object('django.conf:settings')\n# app.autodiscover_tasks()\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n# Optional configuration, see the application user guide.\napp.conf.update(\n result_expires=3600,\n)\n\n#if __name__ == '__main__':\n# app.start()\n"
},
{
"alpha_fraction": 0.5858148336410522,
"alphanum_fraction": 0.5858148336410522,
"avg_line_length": 36.693878173828125,
"blob_id": "436a4ba5c466f6ac19d205f02ad6519bd9857d3c",
"content_id": "e6c635183288229d0f4ab71a7d12c4e456bd035b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1847,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 49,
"path": "/app/celerydemo/admin.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django_celery_beat.admin import PeriodicTaskAdmin\nfrom django_celery_beat.models import SolarSchedule\n\nfrom .models import TaskLog, CustomPeriodicTask\n\n\nclass CustomPeriodicTaskAdmin(PeriodicTaskAdmin):\n fieldsets = (\n (None, {\n 'fields': ('name', 'description', ('regtask', 'task'), 'enabled',),\n 'classes': ('extrapretty', 'wide'),\n }),\n ('Schedule', {\n 'fields': (\n ('scheduler_type', 'monthly_type'), ('start_time', 'end_time'),\n ('every', 'max_run_count'), 'one_off', 'crontab', 'interval', 'clocked'),\n 'classes': ('extrapretty', 'wide'),\n }),\n ('Schedule Run Details', {\n 'fields': ('total_run_count', 'last_run_at', 'last_executed_at',\n 'last_executed_days'),\n 'classes': ('extrapretty', 'wide'),\n }),\n ('Arguments', {\n 'fields': ('args', 'kwargs'),\n 'classes': ('extrapretty', 'wide', 'collapse', 'in'),\n }),\n ('Execution Options', {\n 'fields': ('expires', 'queue', 'exchange', 'routing_key',\n 'priority'),\n 'classes': ('extrapretty', 'wide', 'collapse', 'in'),\n }),\n )\n readonly_fields = ('total_run_count', 'last_run_at')\n\n def get_queryset(self, request):\n qs = super(PeriodicTaskAdmin, self).get_queryset(request)\n return qs.select_related('interval', 'crontab', 'solar', 'clocked')\n\n\nadmin.site.register(TaskLog)\nadmin.site.register(CustomPeriodicTask, CustomPeriodicTaskAdmin)\n# admin.site.unregister(PeriodicTask)\nadmin.site.unregister(SolarSchedule)\n# admin.site.unregister(IntervalSchedule)\n# admin.site.unregister(CrontabSchedule)\n# admin.site.register(IntervalSchedule)\n# admin.site.register(CrontabSchedule)\n"
},
{
"alpha_fraction": 0.605956494808197,
"alphanum_fraction": 0.6288659572601318,
"avg_line_length": 57.20000076293945,
"blob_id": "1c34bcdcea828a6b3a110cc38923fa1412b5e79d",
"content_id": "885079ddae529b50022969c4ed767440571386e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1746,
"license_type": "permissive",
"max_line_length": 316,
"num_lines": 30,
"path": "/app/celerydemo/migrations/0002_customperiodictask.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.1 on 2019-05-24 09:12\n\nimport django.contrib.postgres.fields.jsonb\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_celery_beat', '0011_auto_20190508_0153'),\n ('celerydemo', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CustomPeriodicTask',\n fields=[\n ('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_celery_beat.PeriodicTask')),\n ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='end_time')),\n ('every', models.IntegerField(default=1, verbose_name='every')),\n ('scheduler_type', models.CharField(blank=True, choices=[('ONCE', 'Once'), ('DAILY', 'Daily'), ('WEEKLY', 'Weekly'), ('MONTHLY', 'Monthly')], max_length=24, null=True, verbose_name='scheduler_type')),\n ('monthly_type', models.CharField(blank=True, choices=[('DAY', 'Day'), ('FIRSTWEEK', 'First Week'), ('SECONDWEEK', 'Second Week'), ('THIRDWEEK', 'Third Week'), ('FOURTHWEEK', 'Fourth Week'), ('LASTWEEK', 'Last Week'), ('LASTDAY', 'Last Day')], max_length=24, null=True, verbose_name='monthly_type')),\n ('max_run_count', models.PositiveIntegerField(blank=True, null=True)),\n ('last_executed_at', models.DateTimeField(blank=True, null=True)),\n ('last_executed_days', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),\n ],\n bases=('django_celery_beat.periodictask',),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5672082901000977,
"alphanum_fraction": 0.5745937824249268,
"avg_line_length": 34.6315803527832,
"blob_id": "de0ac705c6d812193513cc170d87eb505a545923",
"content_id": "3228010bbac9a208c2e92d3f731bb6180e368936",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 677,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 19,
"path": "/app/celerydemo/schedules.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from celery import schedules\n\n\nclass my_crontab(schedules.crontab):\n def is_due(self, last_run_at):\n print('cron is_due: ', last_run_at)\n # if last_run_at - date\n # if True:\n # return schedules.schedstate(False, 5.0)\n rem_delta = self.remaining_estimate(last_run_at)\n rem = max(rem_delta.total_seconds(), 0)\n print('rem', rem)\n due = rem == 0\n if due:\n rem_delta = self.remaining_estimate(self.now())\n rem = max(rem_delta.total_seconds(), 0)\n print('due, rem', due, rem)\n return schedules.schedstate(due, rem)\n # return super(my_crontab, self).is_due(last_run_at)\n"
},
{
"alpha_fraction": 0.7684210538864136,
"alphanum_fraction": 0.7684210538864136,
"avg_line_length": 18,
"blob_id": "1dc7aaa3f3fadac36d68ed758cfbe09d2c658529",
"content_id": "1fd9f6e70a9276de9e61e11bb09bc1094e862468",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 5,
"path": "/app/celerydemo/apps.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass CelerydemoConfig(AppConfig):\n name = 'celerydemo'\n"
},
{
"alpha_fraction": 0.5142515897750854,
"alphanum_fraction": 0.5169585943222046,
"avg_line_length": 50.05691146850586,
"blob_id": "50e8bada5774ac049312910f70dc42b1dc2b22e1",
"content_id": "e07a71eb9b4800edc187cc02d4aefdbbee8a2c19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12560,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 246,
"path": "/app/celerydemo/schedulers.py",
"repo_name": "rudra012/dj_celery_docker",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport math\n\nfrom celery import schedules\nfrom celery.utils.time import maybe_make_aware\nfrom dateutil.relativedelta import relativedelta\nfrom django.conf import settings\nfrom django_celery_beat.schedulers import ModelEntry, DatabaseScheduler\nfrom django_celery_beat.utils import make_aware\n\nfrom .models import (\n CustomPeriodicTask)\n\ntry:\n from celery.utils.time import is_naive\nexcept ImportError: # pragma: no cover\n pass\n\nMONTH_FORMAT = \"%m-%Y\"\nDATETIME_FORMAT = \"%d-%m-%YT%H:%M:%SZ\"\n\n\ndef months_difference(date1, date2):\n return date1.month - date2.month + 12 * (date1.year - date2.year)\n\n\nclass CustomModelEntry(ModelEntry):\n max_interval = 60\n\n def is_due(self):\n # return super(CustomModelEntry, self).is_due()\n # Here write checks to be execute before calling scheduler\n print('\\n\\n\\nself.app.now: ', self.app.now())\n print('******', self.schedule, self.model._meta.model_name, '******', )\n print(self.model.name, self.model.task, self.model.enabled)\n if not self.model.enabled:\n # max interval second delay for re-enable.\n return schedules.schedstate(False, self.max_interval)\n\n # START DATE: only run after the `start_time`, if one exists.\n if self.model.start_time is not None:\n now = self._default_now()\n if getattr(settings, 'DJANGO_CELERY_BEAT_TZ_AWARE', True):\n now = maybe_make_aware(self._default_now())\n\n if now < self.model.start_time:\n # The datetime is before the start date - don't run.\n # send a delay to retry on start_time\n delay = math.ceil(\n (self.model.start_time - now).total_seconds()\n )\n print('Call function after {} seconds'.format(delay))\n return schedules.schedstate(False, delay)\n\n # ONE OFF TASK: Disable one off tasks after they've ran once\n def disable_task():\n self.model.enabled = False\n # self.model.total_run_count = 0 # Reset\n self.model.no_changes = False # Mark the model entry as changed\n self.model.save()\n # self.model.save(update_fields=[\"enabled\", ])\n print('Disable the periodic task', self.model)\n return schedules.schedstate(False, None) # Don't recheck\n\n print('self.model.__class__.__name__: ', self.model.__class__.__name__)\n if self.model.__class__.__name__ == 'CustomPeriodicTask':\n print('self.model.max_run_count, self.model.total_run_count')\n print(self.model.max_run_count, self.model.total_run_count)\n if self.model.one_off and self.model.enabled and self.model.total_run_count > 0:\n return disable_task()\n\n # if task executed max_run_count times then disable task\n if self.model.max_run_count and self.model.max_run_count <= self.model.total_run_count:\n return disable_task()\n\n if self.model.end_time is not None:\n now = self._default_now()\n if getattr(settings, 'DJANGO_CELERY_BEAT_TZ_AWARE', True):\n now = maybe_make_aware(self._default_now())\n\n if now >= self.model.end_time:\n # disable task if end date is passed\n return disable_task()\n\n print('self.model.scheduler_type: ', self.model.scheduler_type)\n print('last_run_at', self.last_run_at, self.model.last_run_at)\n last_executed_at = self.model.last_executed_at\n print('last_executed_at', last_executed_at)\n today = self.app.now()\n if self.model.scheduler_type == 'MONTHLY':\n # Get this month's last date\n month_last_date = datetime.datetime(\n today.year, today.month, 1) + relativedelta(\n months=1, days=-1)\n month_first_date = today.replace(day=1)\n today_week_no = today.isocalendar()[1]\n print('today_week_no:', today_week_no)\n\n if 
last_executed_at and last_executed_at.date() == today.date():\n # If task executed today then skip execution for today\n print('Executed today')\n return schedules.schedstate(False, self.max_interval)\n\n if self.model.monthly_type == 'LASTDAY':\n # Check if today is not month's last day then return False\n if month_last_date.date() != today.date():\n print('Not today so execute after {} seconds'.format(\n self.max_interval))\n return schedules.schedstate(False, self.max_interval)\n # return schedules.schedstate(False, self.max_interval)\n elif self.model.monthly_type in ['FIRSTWEEK', 'SECONDWEEK',\n 'THIRDWEEK', 'FOURTHWEEK']:\n first_week_no = month_first_date.isocalendar()[1]\n print('first_week_no:', first_week_no)\n week_diff = 0\n if self.model.monthly_type == 'SECONDWEEK':\n week_diff = 1\n elif self.model.monthly_type == 'THIRDWEEK':\n week_diff = 2\n elif self.model.monthly_type == 'FOURTHWEEK':\n week_diff = 3\n\n if today_week_no - first_week_no == week_diff:\n print('Week number pass')\n last_executed_days = self.model.last_executed_days\n print('last_executed_days: ', last_executed_days)\n # Check whether task executed before or not\n if last_executed_days:\n # If task executed then get month of execution\n last_executed_month_str = list(last_executed_days)[\n 0]\n print('last_executed_month_str: ',\n last_executed_month_str)\n # Validate for month string format\n if len(last_executed_month_str.split('-')) == 2:\n # Month of task execution\n last_executed_month = datetime.datetime.strptime(\n last_executed_month_str, MONTH_FORMAT)\n print('last_executed_month: ',\n last_executed_month)\n # Check whether task last executed task date is\n # this month or specified interval\n if months_difference(\n today, last_executed_month) not in [\n 0, self.model.every]:\n return schedules.schedstate(\n False, self.max_interval)\n\n elif self.model.monthly_type == 'LASTWEEK':\n last_week_no = month_last_date.isocalendar()[1]\n print('last_week_no:', last_week_no)\n if today_week_no == last_week_no:\n print('Last Week pass')\n last_executed_days = self.model.last_executed_days\n print('last_executed_days: ', last_executed_days)\n # Check whether task executed before or not\n if last_executed_days:\n # If task executed then get month of execution\n last_executed_month_str = list(last_executed_days)[\n 0]\n print('last_executed_month_str: ',\n last_executed_month_str)\n # Validate for month string format\n if len(last_executed_month_str.split('-')) == 2:\n # Month of task execution\n last_executed_month = datetime.datetime.strptime(\n last_executed_month_str, MONTH_FORMAT)\n print('last_executed_month: ',\n last_executed_month)\n # Check whether task last executed task date is\n # this month or specified interval\n if months_difference(\n today, last_executed_month) not in [\n 0, self.model.every]:\n return schedules.schedstate(\n False, self.max_interval)\n elif self.model.monthly_type == 'DAY' and self.model.crontab:\n month_day = self.model.crontab.day_of_month.isdigit()\n print('month_day: ', month_day)\n if self.model.last_executed_at and int(month_day) == int(\n today.day):\n current_month = today.month\n last_executed_month = self.model.last_executed_at.month\n if current_month - last_executed_month != self.model.every:\n return schedules.schedstate(\n False, self.max_interval)\n\n elif self.model.scheduler_type == 'WEEKLY':\n day_number = today.strftime(\"%w\")\n day_last_executed_at = self.model.last_executed_days.get(\n day_number) if self.model.last_executed_days else None\n 
print('day_last_executed_at: ', day_last_executed_at)\n if day_last_executed_at:\n day_last_executed_at = datetime.datetime.strptime(\n day_last_executed_at, DATETIME_FORMAT)\n print('day_last_executed_at: ', day_last_executed_at)\n if today.isocalendar()[1] - \\\n day_last_executed_at.isocalendar()[\n 1] != self.model.every:\n print(\"Already executed on last week on the same day\")\n return schedules.schedstate(False, self.max_interval)\n elif last_executed_at:\n if today.isocalendar()[1] - last_executed_at.isocalendar()[\n 1] != self.model.every:\n print(\"Already executed on last week on some day\")\n return schedules.schedstate(False, self.max_interval)\n\n print('Calling scheduler function: ', self.schedule, '####')\n return self.schedule.is_due(make_aware(self.last_run_at))\n\n def __next__(self):\n cls_obj = super(CustomModelEntry, self).__next__()\n\n # Changes on execution of task\n last_executed_days = self.model.last_executed_days or {}\n if self.model.scheduler_type == 'WEEKLY':\n today = self.app.now()\n last_executed_days[today.strftime(\"%w\")] = today.strftime(\n DATETIME_FORMAT)\n elif self.model.scheduler_type == 'MONTHLY':\n today = self.app.now()\n print('last_executed_days: ', last_executed_days)\n if last_executed_days and list(last_executed_days)[\n 0] == today.strftime(MONTH_FORMAT):\n print('Same month')\n month_dict = last_executed_days[today.strftime(MONTH_FORMAT)]\n month_dict[today.strftime(\"%w\")] = today.strftime(\n DATETIME_FORMAT)\n last_executed_days[today.strftime(MONTH_FORMAT)] = month_dict\n else:\n print('Different month')\n last_executed_days = {today.strftime(MONTH_FORMAT): {\n today.strftime(\"%w\"): today.strftime(DATETIME_FORMAT)}}\n print('last_executed_days: ', last_executed_days)\n self.model.last_executed_days = last_executed_days\n self.model.last_executed_at = self.app.now()\n self.model.save()\n # self.model.save(update_fields=[\"last_run_at\", \"total_run_count\"])\n return cls_obj\n\n\nclass CustomDatabaseScheduler(DatabaseScheduler):\n Entry = CustomModelEntry\n Model = CustomPeriodicTask\n"
}
] | 11 |
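Across the custom scheduler files above, every decision is communicated as schedules.schedstate(is_due, next): is_due says whether beat should fire the task now, next is how many seconds to wait before asking again. A compact illustration of how a caller consumes that contract (the 60-second recheck mirrors the max_interval used above; poll and entry_is_due are invented names):

    from celery.schedules import schedstate

    def poll(entry_is_due):
        state = entry_is_due()                 # e.g. CustomModelEntry.is_due
        if state.is_due:
            print('run the task now')
        if state.next is not None:
            print('recheck in', state.next, 'seconds')

    poll(lambda: schedstate(is_due=False, next=60))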
itzketan/23th-day | https://github.com/itzketan/23th-day | c1040966e58d6c67d979382ab9c536fb83daca6e | 40bf9778bb343614d19c80deecca1ff4b48d9cb4 | 73ff3a7b0dad9a34e2a3e5e6d87254ff85fc3d3c | refs/heads/main | 2023-06-10T23:17:25.771334 | 2021-07-07T15:45:18 | 2021-07-07T15:45:18 | 383,847,753 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6045876741409302,
"alphanum_fraction": 0.6542872786521912,
"avg_line_length": 30.157894134521484,
"blob_id": "3955add0de04fcbccbd515e68fdad311bb3edcb7",
"content_id": "25ff752d19adf8c181c5977e9156720d6ca75629",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1832,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 57,
"path": "/Weather.py",
"repo_name": "itzketan/23th-day",
"src_encoding": "UTF-8",
"text": "import requests\r\nimport tkinter as tk\r\nfrom tkinter import font\r\nfrom PIL import Image, ImageTk\r\n\r\nroot = tk.Tk()\r\n\r\nWIDTH = 620\r\nHEIGHT = 450\r\n\r\n\r\ndef get_weather(city):\r\n weather_key = \"7955ac19d8643e68ec645a36a86ca971\"\r\n url = 'https://api.openweathermap.org/data/2.5/weather'\r\n params = {'appid': weather_key, 'q': city, 'units': 'Metric'}\r\n response = requests.get(url, params=params)\r\n report = response.json()\r\n\r\n label['text'] = show_weather_report(report)\r\n\r\n\r\ndef show_weather_report(report):\r\n try:\r\n city_name = report['name']\r\n weather_condition = report['weather'][0]['description']\r\n temp = report['main']['temp']\r\n output = 'City: %s \\nCondition: %s \\nTemperature(°C): %s' % (city_name, weather_condition, temp)\r\n except:\r\n output = 'There was a problem while retrieving that information'\r\n return output\r\n\r\n\r\ncanvas = tk.Canvas(root, width=WIDTH, height=HEIGHT)\r\ncanvas.pack()\r\n\r\nframe = tk.Frame(root, bg='#0B90A9', bd=5)\r\nframe.place(relx=0.5, rely=0.1, relheight=0.1, relwidth=0.75, anchor='n')\r\n\r\nentry = tk.Entry(frame, font=('Courier New Baltic', 20))\r\nentry.place(relheight=1, relwidth=0.7)\r\n\r\nbtn = tk.Button(frame, text=\"Get Weather\", relief='raised', bg=\"gray\", font=20,\r\n command=lambda: get_weather(entry.get()))\r\nbtn.place(relx=0.72, relheight=1, relwidth=0.28)\r\n\r\nlow_frame = tk.Frame(root, bg='#0B90A9', bd=5)\r\nlow_frame.place(relx=0.5, rely=0.25, relheight=0.65, relwidth=0.75, anchor='n')\r\n\r\nbg_color = 'white'\r\nlabel = tk.Label(low_frame, font=('Calibri', 20), justify='center', bd=4)\r\nlabel.config(font=40, bg=bg_color)\r\nlabel.place(relheight=1, relwidth=1)\r\n\r\n\"\"\" weather_icon=tk.Canvas(label,bg=bg_color,bd=0,highlightthickness=0)\r\nweather_icon.place(relx=0.75,rely=0,relwidth=1,relheight=0.5) \"\"\"\r\n\r\nroot.mainloop()"
}
] | 1 |
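Stripped of the Tk plumbing, the weather lookup above is a single GET against OpenWeatherMap. A minimal command-line version hitting the same endpoint and fields (substitute your own API key; the one below is a placeholder):

    import requests

    params = {'appid': 'YOUR_API_KEY', 'q': 'London', 'units': 'metric'}
    url = 'https://api.openweathermap.org/data/2.5/weather'
    report = requests.get(url, params=params).json()
    print(report['name'], report['weather'][0]['description'], report['main']['temp'])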
tunap/rmfwsa | https://github.com/tunap/rmfwsa | d476f090368344a5147afe0c75e2e889481c0c9b | fb070c1b70df28a42bf16338d8c8043c7c270538 | b2c1f63a425a4102ca24dbef7105c16b63b6d526 | refs/heads/master | 2020-04-08T15:12:49.869121 | 2019-04-10T17:30:30 | 2019-04-10T17:30:30 | 159,470,091 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6467955112457275,
"alphanum_fraction": 0.6579321026802063,
"avg_line_length": 33.56226348876953,
"blob_id": "972bb19c4c81e9f95fa8aa528bb2664b0e50d207",
"content_id": "c767ce9b2ff628705d405f7dd8056b875b9dd907",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9159,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 265,
"path": "/regsoft/views.py",
"repo_name": "tunap/rmfwsa",
"src_encoding": "UTF-8",
"text": "# Imports\nfrom django.shortcuts import render\nimport pyrebase\nimport datetime\nimport requests\nimport pytz\nfrom django.contrib import auth\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nimport json\nfrom django.template.defaultfilters import slugify\nfrom django.utils.crypto import get_random_string\n\n\n# Initialize Firebase\nconfig = {\n 'apiKey': \"AIzaSyAqy5TutrzfZ6vq0P9b7Vz-UsH7gKeUfY0\",\n 'authDomain': \"rmfwsa-678cd.firebaseapp.com\",\n 'databaseURL': \"https://rmfwsa-678cd.firebaseio.com\",\n 'projectId': \"rmfwsa-678cd\",\n 'storageBucket': \"rmfwsa-678cd.appspot.com\",\n 'messagingSenderId': \"564967817070\"\n}\n\n# firebase.initializeApp(config);\nfirebase = pyrebase.initialize_app(config)\nauthorisation = firebase.auth()\ndb = firebase.database() # gets the database\ncurrentDate = datetime.datetime.now().strftime(\"%d/%m/%Y\") \ndayofweek = datetime.datetime.today().strftime(\"%A\")\nprint(dayofweek)\nprint(currentDate)\n\n\ndef foodmenuAPIRequest():\n\tloginUrl = 'http://118.185.138.207:8088/mobapi/login'\n\tmenuUrl = 'http://118.185.138.207:8088/mobapi/foodmenu'\n\n\n\theaders = {\n\t 'Host':'118.185.138.207:8088',\n\t 'User-Agent': 'Mozilla/5.0 (compatible; Rigor/1.0.0; http://rigor.com)',\n\t 'Content-Type': 'application/json',\n\t }\n\n\tr = requests.post(loginUrl, data='BUAPI', headers = headers)\n\tloginResponse=json.loads(r.text)\n\trefreshtoken = loginResponse['refreshtoken']\n\n\tr1 = requests.post(menuUrl, data=refreshtoken, headers = headers)\n\tmenuResponse=json.loads(r1.text)\n\tdays = {'Mon':'Monday','Tue':'Tuesday','Wed':'Wednesday','Thu':'Thursday','Fri':'Friday','Sat':'Saturday','Sun':'Sunday'}\n\tvald = {'B':'breakfast', 'D':'dinner','L':'lunch'}\n\tlst = []\n\tdct = {}\n\tfor i in menuResponse:\n\t lst.append([days[i['MENUDAY']],{vald[i['MENUTYPE']]:i['MENU'].split('$')}])\n\tfor i in lst:\n\t dct[i[0]] = dct.get(i[0],[])\n\t dct[i[0]] = dct[i[0]] + [i[1]] \n\treturn dct\n\ndef getbreakfastItems():\n \n breakfastItems = foodmenuAPIRequest()\n breakfastItems = breakfastItems[dayofweek][0]['breakfast']\n\n return breakfastItems\n\ndef getlunchItems():\n \n lunchItems = foodmenuAPIRequest()\n lunchItems = lunchItems[dayofweek][2]['lunch']\n\n return lunchItems\n\n# def getsnacksItems():\n \n# snacksItems = foodmenuAPIRequest()\n# snacksItems = snacksItems[dayofweek][3]['snacks']\n\n# return snacksItems\n\ndef getdinnerItems():\n \n dinnerItems = foodmenuAPIRequest()\n dinnerItems = dinnerItems[dayofweek][1]['dinner']\n\n return dinnerItems\n\n\ndef postrating(request):\n if request.method == \"POST\" and request.is_ajax():\n response = json.loads(request.body.decode('utf-8'))\n response = {str(key):str(value) for key, value in response.items()}\n rating = response.get('rating')\n item_name = response.get('item')\n\n idtoken = request.session['uid']\n\n # to get account info\n a = authorisation.get_account_info(idtoken)\n a = a[\"users\"][0]['localId']\n\n ratingID = get_random_string(8).lower()\n \n data = { \n \"date_submitted\": currentDate,\n \"item_name\": item_name,\n \"rating\": rating\n }\n # print(data)\n db.child('ratings').child(a).child(ratingID).set(data)\n return JsonResponse({\"success\": 1}, status = 200)\n return JsonResponse({\"success\": 0}, status = 400)\n\ndef signin(request):\n \"\"\" takes in email and password by the user\"\"\"\n return render(request, 'signin.html')\n\ndef index(request):\n\n breakfastItems = getbreakfastItems()\n lunchItems = getlunchItems()\n # snacksItems = 
getsnacksItems()\n dinnerItems = getdinnerItems()\n \n return render(request, 'index.html', {'breakfastItems':breakfastItems, 'lunchItems':lunchItems, 'dinnerItems':dinnerItems})\n\n\ndef postsign(request):\n \"\"\"stores user email and password. redirects if it matches with entries in firebase, o/w error\"\"\"\n email = request.POST.get('email')\n passw = request.POST.get('pass')\n try:\n # user variable used for lo gin with email and pwd check teminal for user id/token\n # utilises authorisation variable\n user = authorisation.sign_in_with_email_and_password(email,passw)\n except:\n return render(request, \"signin.html\", {'msg':\"Invalid credentials\"})\n # sign in allowed only for users authenticated in firebase\n print(user) #prints everything, for specifics use user('attribute')\n # add user session\n session_id = user['idToken']\n request.session['uid'] = str(session_id) #str representation imp\n\n breakfastItems = getbreakfastItems()\n lunchItems = getlunchItems()\n # snacksItems = getsnacksItems()\n dinnerItems = getdinnerItems()\n breakfastItems = [slugify(item) for item in breakfastItems]\n lunchItems = [slugify(item) for item in lunchItems]\n # snacksItems = [slugify(item) for item in snacksItems]\n dinnerItems = [slugify(item) for item in dinnerItems]\n\n return render(request,\"welcome.html\", {\"e\":email, 'breakfastItems':breakfastItems, 'lunchItems':lunchItems, 'dinnerItems':dinnerItems})\n\ndef logout(request):\n \"\"\"logout from current user session, utilises auth from django.contrib [ensure db rules]\"\"\"\n auth.logout(request)\n return render(request, \"signin.html\")\n\ndef signup(request):\n \"\"\" redirects to signup page\"\"\"\n return render(request, \"signup.html\")\n\ndef postsignup(request):\n \"\"\" create account, fetches unique userid, gets userdata, constructs database\"\"\"\n name = request.POST.get('name')\n email = request.POST.get('email')\n passw = request.POST.get('pass')\n # creates useraccount\n try:\n user = authorisation.create_user_with_email_and_password(email, passw)\n # grabs userdata\n uid = user['localId']\n data ={'name': name, 'status': '1'} # 1 means disabled, handled by the admin\n # database constructor(pushes data), make sure database rules are laid\n db.child(\"users\").child(uid).child(\"details\").set(data)\n\n except:\n return render(request, \"signup.html\",{\"msg\": \"Unable to create account, please try again!\"})\n\n return render(request, \"signin.html\")\n# NOTE: This function will return EMAIL_EXIST error in case a username is already taken.\n\ndef create(request):\n \"\"\"redirects to create.html, creates a new report\"\"\"\n return render(request, \"create.html\")\n\n# Pushes user data into firebase\n\ndef check(request):\n \"\"\" Grabs data from firebase using ID and grabs user data\"\"\"\n \n\n idtoken = request.session['uid']\n\n # to get account info\n a = authorisation.get_account_info(idtoken)\n a = a[\"users\"] # grabs user key form dict\n a = a[0] # grabs 1st list entry\n a = a['localId'] # grabs value from key stored inside 1st list entry stored inside user key\n # again we get a = localID of user\n\n # to retrieve time stamps we use shallow function (see docs)\n timestamps = db.child('users').child(a).child('reports').shallow().get().val()\n ## print(timestamps) # prints a dict, to get list we need to convert\n print(\"timestamps dict: \" + str(timestamps)) # check\n\n # appends timestamps to the list\n timelist= []\n for i in timestamps:\n timelist.append(i)\n timelist.sort(reverse=True) # sorts in reverse 
order\n\n print(\"timestamps list: \" + str(timelist)) # check\n\n # retrieves child data from list entries (work/progess)\n work = []\n for i in timelist:\n \"\"\" does not employ shallow as we don't want keys\"\"\"\n wor = db.child('users').child(a).child('reports').child(i).child('work').get().val()\n work.append(wor)\n\n print(\"work: \" + str(work)) # check\n\n # timestamps conversion into date format and appending dates to a list\n date=[]\n for i in timelist:\n i = float(i) # need to convert it into flow before passing\n dat = datetime.datetime.fromtimestamp(i).strftime('%H:%M %d-%m-%Y')\n date.append(dat)\n\n print(\"date: \" + str(date)) # check\n\n # combine 3 lists via zip Function\n comb_list = zip(timelist, date, work)\n print(\"Combine List: \"+ str(comb_list)) # check new list\n\n name = db.child('users').child(a).child('details').child('name').get().val()\n print(name)\n return render(request, \"check.html\", {'comb_list':comb_list, 'e': name})\n #\n\ndef postcheck(request):\n \n # fetches z from check (see url)\n time = request.GET.get('z')\n idtoken = request.session['uid']\n # to get account info\n a = authorisation.get_account_info(idtoken)\n a = a[\"users\"] # grabs user key form dict\n a = a[0] # grabs 1st list entry\n a = a['localId'] # grabs value from key stored inside 1st list entry stored inside user key\n # again we get a = localID of user\n\n\n work = db.child(\"users\").child(a).child('reports').child(time).child('work').get().val()\n progress = db.child(\"users\").child(a).child('reports').child(time).child('progress').get().val()\n i = float(time)\n date = datetime.datetime.fromtimestamp(i).strftime(\"%H:%M %d-%m-%Y\")\n name = db.child('users').child(a).child('details').child('name').get().val()\n\n return render(request, \"postcheck.html\", {'w':work, 'p':progress, 'd':date, 'e':name})\n"
}
] | 1 |
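The foodmenuAPIRequest helper above folds the API's MENUDAY/MENUTYPE/MENU rows into a dict keyed by weekday. A sketch of the shape the get*Items helpers then index into, assuming the API returns the breakfast, dinner and lunch rows in that order for each day (dish names invented):

    dct = {
        'Monday': [
            {'breakfast': ['idli', 'tea']},
            {'dinner': ['dal', 'rice']},
            {'lunch': ['roti', 'sabzi']},
        ],
        # ... one entry per weekday
    }
    print(dct['Monday'][0]['breakfast'])   # what getbreakfastItems() returns on a Monday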
mcat-ee/django-database-files | https://github.com/mcat-ee/django-database-files | dd690ba62a85d102f5d4909b303cd6a762d7a20d | 45a2ed6698382a302c907541435f90e20b614889 | d8f49ecc75b920bbd419663f2861cea63844e251 | refs/heads/master | 2021-01-13T00:33:39.805018 | 2017-02-13T01:21:32 | 2017-02-13T01:21:32 | 81,771,296 | 0 | 0 | null | 2017-02-13T01:15:57 | 2016-09-16T04:17:08 | 2017-02-07T01:16:06 | null | [
{
"alpha_fraction": 0.521049439907074,
"alphanum_fraction": 0.521049439907074,
"avg_line_length": 23.83333396911621,
"blob_id": "fcc4f09f9e5df2c0d70ce65e6395cb7e54728691",
"content_id": "1c7c0b6feb55a563d54c145f4749a5efe7178630",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1639,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 66,
"path": "/database_files/migrations/0001_initial.py",
"repo_name": "mcat-ee/django-database-files",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\nfrom database_files.models import *\n\nclass Migration(migrations.Migration):\n initial = True\n \n dependencies = [\n ]\n \n operations = [\n migrations.CreateModel(\n name=\"File\",\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True,serialise=False,verbose_name=\"FILE\")),\n ('content', models.TextField()),\n ('size', models.IntegerField()),\n objects\n ]\n )\n ]\n \n \n \n '''\n content = models.TextField()\n size = models.IntegerField()\n \n objects = FileManager()\n '''\n \n \n '''\n def forwards(self, orm):\n \n # Adding model 'File'\n db.create_table('database_files_file', (\n ('id', orm['database_files.File:id']),\n ('content', orm['database_files.File:content']),\n ('size', orm['database_files.File:size']),\n ))\n db.send_create_signal('database_files', ['File'])\n \n \n \n def backwards(self, orm):\n \n # Deleting model 'File'\n db.delete_table('database_files_file')\n \n \n \n models = {\n 'database_files.file': {\n 'content': ('django.db.models.fields.TextField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'size': ('django.db.models.fields.IntegerField', [], {})\n }\n }\n \n complete_apps = ['database_files']\n'''\n"
}
] | 1 |
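The migration above mirrors a model with a text payload and a size column, matching the field list in the commented-out blocks. A sketch of the corresponding models.py, assuming the FileManager referenced in those comments exists elsewhere in the app (its real body is not shown in this record):

    from django.db import models

    class FileManager(models.Manager):
        pass  # placeholder; the real manager lives in database_files.models

    class File(models.Model):
        content = models.TextField()    # stored file payload
        size = models.IntegerField()    # payload size in bytes

        objects = FileManager()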
valeriobasile/storkl | https://github.com/valeriobasile/storkl | 42fa62b1c9ed0050e30f34e71d3a0983ad96d5c2 | 5e8718194d84eb0d925906aad287de63cbb7ea8f | 23876d5e13df25c510327a25a89381e40dd650c8 | refs/heads/master | 2016-09-11T04:01:33.729345 | 2013-11-13T18:51:19 | 2013-11-13T18:51:19 | 2,217,097 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7374301552772522,
"alphanum_fraction": 0.7374301552772522,
"avg_line_length": 26.538461685180664,
"blob_id": "662eeabbd4f0a0d31104ebbcef325c30c8f5f776",
"content_id": "e1b2133bbb1c75c3dd4c7f1c634d550365e8e277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 26,
"path": "/app/__init__.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext import restful\nfrom sqlalchemy import create_engine\nimport os\nfrom sqlalchemy.ext.declarative import declarative_base\n\napp = Flask(__name__)\napi = restful.Api(app)\n\n# create database\nbasedir = os.path.abspath(os.path.dirname(__file__))\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'storkl.db')\napp.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\ndb = SQLAlchemy(app)\n\n# let's try dataset\nimport dataset\nDATASET_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'storkl.ds')\nds = dataset.connect(DATASET_DATABASE_URI)\n\n\nfrom app import views, models\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.5209923386573792,
"alphanum_fraction": 0.5381679534912109,
"avg_line_length": 26.068965911865234,
"blob_id": "7520edff96a699b27147507605234707a1e9decc",
"content_id": "5acf1009473049ca414200bcbf1b50fc9be20edb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1572,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 58,
"path": "/db_create_test_data.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "from app import db, models\nfrom datetime import datetime\n\n# create the database\ndb.create_all()\n\n# empty the db\nfor user in models.User.query.all():\n db.session.delete(user)\n \nfor project in models.Project.query.all():\n db.session.delete(project)\n\nfor task in models.Task.query.all():\n db.session.delete(task)\n\ndb.session.commit()\n\nu1 = models.User(username='john', \n email='[email protected]')\ndb.session.add(u1)\n\nu2 = models.User(username='mary', \n email='[email protected]',\n trusted=[u1])\ndb.session.add(u2)\n\np1 = models.Project(id=1, \n title='Hyperlamp', \n owner_id='mary', \n description='A lamp shaped like an hypercube.', \n created=datetime.utcnow())\ndb.session.add(p1)\n\nt1 = models.Task(id=1,\n project_id=1, \n name='Buy wooden sticks', \n description='go to Gamma and buy a few meters of thin cut wood.',\n users=[u1])\ndb.session.add(t1)\n\nt2 = models.Task(id=2,\n project_id=1, \n name='Buy paper', \n description='go to the store and buy a few square meters of multi-color paper.',\n users=[u1, u2],\n dependencies=[t1])\ndb.session.add(t2)\n\nt3 = models.Task(id=3,\n project_id=1, \n name='Build structure', \n description='put together wood and paper.',\n users=[u1],\n dependencies=[t1, t2])\ndb.session.add(t3)\n\ndb.session.commit()\n\n\n"
},
{
"alpha_fraction": 0.6564885377883911,
"alphanum_fraction": 0.6564885377883911,
"avg_line_length": 25.200000762939453,
"blob_id": "d2c3e03d183a42e6b03f4a81bc47ce93c6016175",
"content_id": "bf7df2eb26e33ec1f6c5992965f12d8b7e245e13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/app/utils.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "def flatten(list_of_lists):\n return [val for subl in list_of_lists for val in subl]\n \ndef unique(l):\n return list(set(l))\n"
},
{
"alpha_fraction": 0.5282608866691589,
"alphanum_fraction": 0.636956512928009,
"avg_line_length": 20.809524536132812,
"blob_id": "6f651bc335039da65bbf682db775abc72e3cfddf",
"content_id": "764d2050d62cef459a54d1b619ad9ed4043f1b20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 21,
"path": "/test_requests.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "import requests\n\nr = requests.get('http://127.0.0.1:5000/u/valerio')\nres = r.json()\nprint res\n\nr = requests.post('http://127.0.0.1:5000/u/new', data={'username' : 'valerio', 'email' : '[email protected]'})\nres = r.json()\nprint res\n\nr = requests.get('http://127.0.0.1:5000/u/valerio')\nres = r.json()\nprint res\n\nr = requests.delete('http://127.0.0.1:5000/u/valerio')\nres = r.json()\nprint res\n\nr = requests.get('http://127.0.0.1:5000/u/valerio')\nres = r.json()\nprint res\n\n\n"
},
{
"alpha_fraction": 0.6085256338119507,
"alphanum_fraction": 0.6143918633460999,
"avg_line_length": 32.149349212646484,
"blob_id": "71cf7961a4ee4dbcd8d1f989d6918892211d31ed",
"content_id": "fb6b012d4b21745817205bfe1d7ca0e4d413fa89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5114,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 154,
"path": "/app/views.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "from app import db, app, models, api\nfrom utils import *\nfrom flask import make_response, jsonify\nfrom flask.ext import restful\nfrom flask.ext.restful import abort, reqparse\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import UnmappedInstanceError\n\n### User ###\nclass User(restful.Resource):\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('username')\n self.parser.add_argument('email')\n\n def get(self, username):\n user = models.User.query.get(username)\n if user:\n return jsonify(user.serialize())\n else:\n abort(404, message=\"User {} doesn't exist\".format(username))\n \n def post(self, username):\n args = self.parser.parse_args()\n try:\n new_user = models.User(username=args['username'], \n email=args['email'])\n db.session.add(new_user)\n db.session.commit()\n return 201\n except IntegrityError:\n abort(400, message=\"User {} already exists\".format(args['username']))\n \n def delete(self, username):\n args = self.parser.parse_args()\n try:\n user = models.User.query.get(username)\n db.session.delete(user)\n db.session.commit()\n return 201\n except UnmappedInstanceError:\n abort(400, message=\"User {} does not exist\".format('username'))\n \napi.add_resource(User, '/u/<string:username>')\n\n\n### User - owns - Project ###\nclass Ownership(restful.Resource):\n def get(self, username):\n projects = models.Project.query.filter_by(owner_id=username).all()\n return jsonify({ 'projects' : [p.serialize() for p in projects] })\n\napi.add_resource(Ownership, '/u/<string:username>/owned')\n\n\n### User - is in task comprised by - Project ###\nclass UserInvolvement(restful.Resource):\n def get(self, username):\n user = models.User.query.get(username)\n return jsonify({ 'projects' : [p.serialize() for p in user.involved()] })\n \napi.add_resource(UserInvolvement, '/u/<string:username>/involved')\n\n\n### User - Task ###\nclass Assignment(restful.Resource):\n def get(self, username):\n user = models.User.query.get(username)\n return jsonify({ 'tasks' : [t.serialize() for t in user.tasks] })\n\napi.add_resource(Assignment, '/u/<string:username>/tasks')\n\n\n### User - User ###\nclass Trust(restful.Resource):\n def get(self, username):\n user = models.User.query.get(username)\n return jsonify({ 'users' : [u.serialize() for u in user.trusted] })\n\napi.add_resource(Trust, '/u/<string:username>/trusted')\n\n\n### User - User ###\n# every user involved in projects in which User is involved (minus himself)\nclass Association(restful.Resource):\n def get(self, username):\n user = models.User.query.get(username)\n associates = unique(flatten(p.involved() for p in user.involved()))\n \n associates.remove(user)\n return jsonify({ 'users' : [u.serialize() for u in associates] })\napi.add_resource(Association, '/u/<string:username>/associated')\n\n\n### Project ###\nclass Project(restful.Resource):\n def get(self, project_id):\n project = models.Project.query.get(project_id)\n if not project:\n abort(404, message=\"Project {} doesn't exist\".format(project_id))\n\n return jsonify(project.serialize())\n \napi.add_resource(Project, '/p/<int:project_id>')\n\n\n### Project - is in task comprised by - Project ###\nclass ProjectInvolvement(restful.Resource):\n def get(self, project_id):\n project = models.Project.query.get(project_id)\n return jsonify({ 'users' : [u.serialize() for u in project.involved()] })\napi.add_resource(ProjectInvolvement, '/p/<int:project_id>/involved')\n\n\n### Project - Task ###\nclass 
ProjectTasks(restful.Resource):\n def get(self, project_id):\n project = models.Project.query.get(project_id)\n return jsonify({ 'tasks' : [t.serialize() for t in project.tasks] })\n\napi.add_resource(ProjectTasks, '/p/<int:project_id>/tasks')\n\n\n### Task ###\nclass Task(restful.Resource):\n def get(self, task_id):\n task = models.Task.query.get(task_id)\n if not task:\n abort(404, message=\"Task {} doesn't exist\".format(task_id))\n\n return jsonify(task.serialize())\n \napi.add_resource(Task, '/t/<int:task_id>')\n\n\nclass Dependency(restful.Resource):\n def get(self, task_id):\n task = models.Task.query.get(task_id)\n if not task:\n abort(404, message=\"Task {} doesn't exist\".format(task_id))\n\n return jsonify({'dependency': \n {'dependencies' : \n [t.serialize() for t in task.dependencies], \n 'dependents' : \n [t.serialize() for t in task.dependents] }\n })\n \napi.add_resource(Dependency, '/t/<int:task_id>/dep')\n\n# error handling\[email protected](404)\ndef not_found(error):\n return make_response(jsonify( { 'error': 'Not found' } ), 404)\n \n\n\n\n\n"
},
{
"alpha_fraction": 0.6301546096801758,
"alphanum_fraction": 0.631872832775116,
"avg_line_length": 19.592920303344727,
"blob_id": "0273e2cbdad2f05bcc08601b4e756d5aae6b73e9",
"content_id": "c6ce7573361704ab40230d454eaf8b67987d3968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2328,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 113,
"path": "/README.md",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "Storkl\n======\nSimple Time ORganizing Kit (\"L\" is for fashion).\n\nStorkl is a platform for project and task management, designed to be simple and inter-operable. \n\nTask color code:\n----------------\n- no deadline: grey;\n- yes deadline but user not assigned to task: blue;\n- deadline farther than CLOSE: green;\n- deadline between CLOSE and URGENT; yellow;\n- deadline closer than URGENT: red;\n- past deadline: purple;\n\ne.g. CLOSE = 3 days, URGENT = 1 day\n\n# Glossary\n\n- _involvement_: a **user** is involved in a **project** if she owns \nit or she has at least one **task** from the project assigned\n- _ownership_: a **user** _owns_ every **project(( she creates. \n**Projects** can only have one owner.\n\n# API\n\nThe core of Storkl's data model consists of three entities: \n**User**, **Project**, **Task**. They have statuses that change based \non who is the (logged in) user looking at them. \n\nHere _STORKL_ is the base url of the Storkl installation.\n\n## User\n\n### GET\n\n> _STORKL_/u/${username}\n\nretrieves information about user ${username}\n\n> _STORKL_/u/${username}/projects\n\nlists projects owned by user ${username}\n\n> _STORKL_/u/${username}/involved\n\nlists projects in which user ${username} is involved\n\n> _STORKL_/u/${username}/tasks\n\nlists tasks assigned to user ${username}\n\n### POST\n\n> _STORKL_/u/new\n\ncreates a new user\n\n| argument | type | example |\n| -------- | -------------- | ----- |\n| username | string | _valerio_ |\n| email | string | [email protected]_ |\n| password | SHA-1 checksum | _229fe88b25ae8307601bf6c9c050bf02755b7e26_ |\n\n\nTask\n----\n\n> _STORKL_/t/${task_id}\n\nretrieves information about task ${task_id}\n\n> _STORKL_/t/${task_id}/users\n\nlists users assigned to task ${task_id}\n\n### POST\n\n> _STORKL_/t/new\n\ncreates a new task\n\n| argument | type | example |\n| ----------- | ------- | ----------- |\n| name | string | _buy paint_ |\n| project\\_id | integer | _1_ |\n\n> _STORKL_/t/${task_id}/u/${username}/add\n\nassign task ${task_id} to user ${username}\n\n> _STORKL_/t/${task_id}/u/${username}/del\n\nremove assignment of task ${task_id} from user ${username}\n\nProject\n-------\n\n### GET\n\n> _STORKL_/p/${project_id}\n\nretrieves information about project ${project_id}\n\n> _STORKL_/p/${project_id}/involved\n\nlists users involved in pproject ${project_id}\n\n### POST\n\n> _STORKL_/t/new\n\ncreates a new project\n\n"
},
{
"alpha_fraction": 0.5778425931930542,
"alphanum_fraction": 0.5827988386154175,
"avg_line_length": 39.22352981567383,
"blob_id": "5bd18c9bf05e7da78177f81d96a20d65b7fd403d",
"content_id": "6a4a04b72dba6426cc1566355a2b7154eb4109af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3430,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 85,
"path": "/app/models.py",
"repo_name": "valeriobasile/storkl",
"src_encoding": "UTF-8",
"text": "from app import db\n\nfrom app.utils import *\n\nassignment = db.Table('assignment',\n db.Column('user', db.String(64), db.ForeignKey('user.username')),\n db.Column('task', db.Integer, db.ForeignKey('task.id'))\n)\n\n\ntrust = db.Table('trust',\n db.Column('trustee', db.String(64), db.ForeignKey('user.username'), primary_key=True),\n db.Column('trusted', db.String(64), db.ForeignKey('user.username'), primary_key=True)\n)\n\ndependency = db.Table('dependency',\n db.Column('master', db.Integer, db.ForeignKey('task.id')),\n db.Column('slave', db.Integer, db.ForeignKey('task.id'))\n)\n\nclass User(db.Model):\n username = db.Column(db.String(64), index = True, primary_key = True)\n email = db.Column(db.String(120), index = True, unique = True)\n projects = db.relationship('Project', backref = 'owner', lazy = 'dynamic')\n tasks = db.relationship('Task', \n secondary=assignment, \n backref=db.backref('user', \n lazy='dynamic'))\n trusted = db.relationship('User', \n secondary=trust, \n backref=db.backref('trustees'), \n lazy='dynamic',\n primaryjoin=username==trust.c.trustee,\n secondaryjoin=username==trust.c.trusted)\n \n def involved(self):\n return list(set([task.project for task in self.tasks]))\n \n def serialize(self):\n serialized = {'username' : self.username, \n 'email' : self.email}\n return serialized\n\n\nclass Project(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n title = db.Column(db.String(64))\n owner_id = db.Column(db.String(64), db.ForeignKey('user.username'))\n description = db.Column(db.Text())\n created = db.Column(db.DateTime())\n tasks = db.relationship('Task', backref = 'project', lazy = 'dynamic')\n \n def involved(self):\n return unique(flatten([task.users for task in self.tasks]))\n \n return list(set([\n val for subl in [\n task.users for task in self.tasks\n ] for val in subl\n ]))\n \n def serialize(self):\n user = User.query.get(self.owner_id)\n serialized = {'title' : self.title, \n 'owner' : user.serialize(), \n 'description' : self.description, \n 'created' : self.created}\n return serialized\n\n\nclass Task(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n project_id = db.Column(db.Integer, db.ForeignKey('project.id'))\n name = db.Column(db.String(64))\n description = db.Column(db.Text())\n users = db.relationship('User', secondary=assignment, backref=db.backref('task', lazy='dynamic'))\n dependencies = db.relationship('Task', secondary=dependency, primaryjoin=dependency.c.slave==id, secondaryjoin=dependency.c.master==id, backref='dependent')\n dependents = db.relationship('Task', secondary=dependency, primaryjoin=dependency.c.master==id, secondaryjoin=dependency.c.slave==id, backref='dependency')\n\n def serialize(self):\n project = Project.query.get(self.project_id)\n serialized = {'name' : self.name, \n 'project' : project.serialize(), \n 'description' : self.description}\n return serialized\n\n \n\n"
}
] | 7 |
tengku-kargo/pair-programming-tengku | https://github.com/tengku-kargo/pair-programming-tengku | d65398d14970102c4460b79453118f730a9f9436 | b8ab68623d1de6aefafddc2957c4afea576bbdf0 | af8236648def713a5b138e69e080078bca0e85e3 | refs/heads/master | 2020-11-23T21:12:21.037412 | 2019-12-13T11:26:41 | 2019-12-13T11:26:41 | 227,822,492 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7821100950241089,
"alphanum_fraction": 0.786697268486023,
"avg_line_length": 42.70000076293945,
"blob_id": "522255c1a11642319cc9190b24ba1efc8cada8f6",
"content_id": "a63dcd34d89f4c2e416e5bccc6bb5479ce6f5651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 10,
"path": "/README.md",
"repo_name": "tengku-kargo/pair-programming-tengku",
"src_encoding": "UTF-8",
"text": "Evaluation:\n- Do automation for graphql and postgresql\n- Give parameter to read error to make sure the result is correct (if and else)\n- Connect to database via Python Client (pyscopg2)\n- Know more about Python 3\n\nThings That can be Improved:\n- Reducing time to test BE ticket with automation\n- Automate email and phone generation (random email & phone generator)\n- Create script with schedule(datetime) to make sure the quality of code"
},
{
"alpha_fraction": 0.5934455394744873,
"alphanum_fraction": 0.6111603379249573,
"avg_line_length": 18.482759475708008,
"blob_id": "6107ca4ce0ae35f7d62275ff2470872d97728237",
"content_id": "d3fbbbea2d3c48b86a85e9128cf81491477df653",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 58,
"path": "/automated.py",
"repo_name": "tengku-kargo/pair-programming-tengku",
"src_encoding": "UTF-8",
"text": "# importing the requests library \nimport requests \nimport psycopg2\n\n# api-endpoint \nURL = \"GRAPHQL_URL\"\n\nemail = \"[email protected]\"\nphone = \"+62812288291240\"\n\nquery = '''\nmutation {\n signUpCompanyTest(input: {\n email: \"''' + email + '''\"\n companyName: \"PT. Testing Programming\"\n password: \"DATABASE_PW\"\n phoneNumber: \"''' + phone + '''\"\n role: \"transporter\"\n }) {\n body\n statusCode\n }\n}\n'''\n\nr = requests.post(URL, json={'query': query})\nif r.status_code == 200:\n print('Request Success!')\n data = r.json()\n if \"errors\" in data:\n print(data['errors'][0]['message'])\n else: \n print(data['data'])\nelse:\n print('Request Error!')\n\nhost=\"DATABASE_HOST\"\ndbname=\"DATABASE_NAME\"\nuser=\"DATABASE_USER\"\npassword=\"DATABASE_PASSWORD\"\nconn = psycopg2.connect(host=host, database=dbname, user=user, password=password)\ncur = conn.cursor()\n\nquery = \"\"\"\nselect * from users\nwhere phone_number = '\"\"\" + phone + \"\"\"'\nand email = '\"\"\" + email + \"\"\"';\n\"\"\"\n\ncur.execute(query)\n\nquery = \" delete from users where phone_number = '\" + phone + \"';\"\n\ncur.execute(query)\nconn.commit()\n\ncur.close()\nconn.close()"
}
] | 2 |
igniteflow/polymorph | https://github.com/igniteflow/polymorph | fbadd778bbd057470156e128656e2fb98dcfe21a | e781d665d958cf1ab83e8cd83bf0c105244a60c9 | e00a9440caa80c4260c34daf38629a61caccb410 | refs/heads/master | 2021-01-17T17:41:03.398567 | 2017-03-07T12:19:23 | 2017-03-07T12:19:23 | 84,131,471 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46930232644081116,
"alphanum_fraction": 0.47348836064338684,
"avg_line_length": 21.87234115600586,
"blob_id": "0b64d4ed051000248f990f04460734887612aa87",
"content_id": "a9a14d714080b73ff1a170712e357aac5a4a68fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2150,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 94,
"path": "/polymorph/tools.py",
"repo_name": "igniteflow/polymorph",
"src_encoding": "UTF-8",
"text": "import yaml\n\n\nclass RowTools(object):\n \"\"\"\n transform a Python object to csv friendly rows\n\n Example:\n\n {\n 'foo': 'bar',\n 'cars': ['one', 'two'],\n 'fruit': [\n {'apple': 'green'},\n {'banana': 'yellow'},\n ]\n }\n\n Becomes rows:\n\n [\n ('foo', 'bar'),\n ('cars.0', 'one'),\n ('cars.1', 'two'),\n ('fruit.0.apple', 'green'),\n ('fruit.1.banana', 'yellow'),\n ]\n \"\"\"\n rows = None\n keys = None\n\n def _str(self, data):\n if self.rows is None:\n self.rows = []\n\n identifier = '.'.join([str(i) for i in self.keys])\n self.rows.append((identifier, data))\n self.keys.pop()\n\n def _list(self, data):\n items = []\n for i, item in enumerate(data):\n if i > 0 and self.keys[-1] == (i - 1):\n # remove the index from the previous iteration\n self.keys.pop()\n items.append(self.recurse(item, key=i))\n return items\n\n def _dict(self, data):\n # assumes keys can only be strings\n for k, v in data.items():\n self.recurse(v, key=k)\n\n def recurse(self, data, key=None):\n if self.keys is None:\n self.keys = []\n\n if key is not None:\n self.keys.append(key)\n\n if isinstance(data, list):\n _data = self._list(data)\n self.keys.pop()\n return _data\n elif isinstance(data, dict):\n return self._dict(data)\n elif isinstance(data, (str, unicode)):\n self._str(data)\n\n def to_rows(self, data):\n self.recurse(data)\n return self.rows\n\n def rows_to_data(self, rows):\n # TODO\n pass\n\n\nclass YamlToCsv(object):\n def load_from_file(self, path):\n with open(path) as f:\n return yaml.load(f)\n\n def write_to_file(self, path, data):\n with open(path, 'w+') as f:\n f.write(yaml.dump(data))\n\n def to_rows(self, data):\n \"\"\"\n csv will have two columns:\n (1) identifier\n (2) value\n \"\"\"\n pass\n"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6201298832893372,
"avg_line_length": 24.66666603088379,
"blob_id": "d35660deaa13a5b6ccc46a5aca28096c8054dbe3",
"content_id": "1b6b02b300348f370bb7dcaa65d6747907801ec0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 12,
"path": "/setup.py",
"repo_name": "igniteflow/polymorph",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(name='polymorph',\n version='0.1',\n description='Python tooling to tranform data',\n url='https://github.com/igniteflow/polymorph',\n author='Phil Tysoe',\n author_email='[email protected]',\n license='MIT',\n packages=['polymorph'],\n zip_safe=False\n)\n"
},
{
"alpha_fraction": 0.5812973976135254,
"alphanum_fraction": 0.5846672058105469,
"avg_line_length": 20.981481552124023,
"blob_id": "f539998386189a9c65fae24be14ef51eb6218ea8",
"content_id": "3c1d7e0d0d6053a2db37173d4d3b2b2d1ed14d92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1187,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 54,
"path": "/polymorph/tests/test_tools.py",
"repo_name": "igniteflow/polymorph",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom polymorph.tools import YamlToCsv, RowTools\n\n\nTEST_DATA_DIR = './polymorph/tests/test_data/'\n\ndef get_test_file_path(filename):\n return '{}{}'.format(TEST_DATA_DIR, filename)\n\n\ndef test_load_from_file():\n yaml_to_csv = YamlToCsv()\n path = get_test_file_path('simple_example.yaml')\n assert yaml_to_csv.load_from_file(path) == {'foo': 'bar'}\n\n\ndef test_write_to_file():\n yaml_to_csv = YamlToCsv()\n path = get_test_file_path('output.yaml')\n yaml_to_csv.write_to_file(path, {'foo': 'bar'})\n\n with open(path) as f:\n assert f.read() == '{foo: bar}\\n'\n\n # should probably mock open instead of actually creating a file\n os.remove(path)\n\n\nDATA = {\n 'foo': 'bar',\n 'cars': ['one', 'two'],\n 'fruit': [\n {'apple': 'green'},\n {'banana': 'yellow'},\n ]\n}\n\nROWS = [\n ('foo', 'bar'),\n ('cars.0', 'one'),\n ('cars.1', 'two'),\n ('fruit.0.apple', 'green'),\n ('fruit.1.banana', 'yellow'),\n]\n\ndef test_to_rows():\n row_tools = RowTools()\n assert sorted(row_tools.to_rows(DATA)) == sorted(ROWS)\n\n\ndef test_rows_to_data():\n row_tools = RowTools()\n assert sorted(row_tools.rows_to_data(ROWS)) == sorted(DATA)\n"
},
{
"alpha_fraction": 0.6737967729568481,
"alphanum_fraction": 0.6737967729568481,
"avg_line_length": 10,
"blob_id": "8b24eac846f42270f94119c6a3b27547cf506f4c",
"content_id": "50fb7821e80cd2172809a380bcf1d5e101a563a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 17,
"path": "/README.md",
"repo_name": "igniteflow/polymorph",
"src_encoding": "UTF-8",
"text": "# Polymorph\n\nA set of tools for transforming data\n\n***WIP***\n\n## Install deps\n\n`pip install -r requirements.txt`\n\n## Run the tests\n\n`pytest`\n\n### With coverage\n\n`py.test --cov=polymorph`\n"
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 7.333333492279053,
"blob_id": "10b553c662a6aa1af4f47952441560cc84c63838",
"content_id": "662e64708b19b704379d43d6bef5fabe5d851544",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 25,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "igniteflow/polymorph",
"src_encoding": "UTF-8",
"text": "pytest\npytest-cov\npyyaml\n"
}
] | 5 |
jellyhappy/taobao_sdk | https://github.com/jellyhappy/taobao_sdk | 1eca2fc260dcbba0c6e3ae4b8f3692470a2eb42f | 436cfb5edbeb8c9dae29fec2b987b6f3003dbb03 | c8eb4276c3bd62a2a4bff582877e136fcf609470 | refs/heads/master | 2022-03-29T21:03:24.014577 | 2019-12-29T14:54:05 | 2019-12-29T14:54:05 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6142649054527283,
"alphanum_fraction": 0.671033501625061,
"avg_line_length": 33.29999923706055,
"blob_id": "76b83e3a1883afcdc82073ea5efc9d4c128eac87",
"content_id": "fc0a7abcd33e7ff7d5e8e663cfcf811c7ffdcc56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 687,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 20,
"path": "/taobao/tests/test_trade.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nimport unittest\nfrom taobao.api import TaoBao\nfrom taobao.exception import TradeError\n\n\nclass TestTrade(unittest.TestCase):\n\n def test_trade_sold_get(self):\n tb = TaoBao(\"1023173103\", \"sandbox5e4645d3045c1dd3365afe74c\", \"12345\", sanbox=True)\n tb._get_session_key()\n # self.assertRaises(TradeError,tb.tradeapi.trade_sold_get,\"tid,type,status,payment,orders,rx_audit_status\")\n tb.tradeapi.trade_sold_get(\"tid,type,status,payment,orders,rx_audit_status\")\n \n r, data = tb.tradeapi.trade_sold_get(\n \"tid,type,status,payment,orders,rx_audit_status\", \"2019-02-01\")\n print(r, data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.546329915523529,
"alphanum_fraction": 0.5539068579673767,
"avg_line_length": 27.408071517944336,
"blob_id": "669ddbd6848c1b4a5c76ef0be53a77cebaabec83",
"content_id": "a45578fb49863362ec33ccfd002ddb462d199ac4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8559,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 223,
"path": "/taobao/trade/trade.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "# 交易类接口\n\nimport inspect\nfrom taobao.comm import Common\nfrom taobao.exception import TradeError\n\nSTATUS = {\n \"WAIT_BUYER_PAY\": \"等待买家付款\",\n \"WAIT_SELLER_SEND_GOODS\": \"等待卖家发货\",\n \"SELLER_CONSIGNED_PART\": \"卖家部分发货\",\n \"WAIT_BUYER_CONFIRM_GOODS\": \"等待买家确认收货\",\n \"TRADE_BUYER_SIGNED\": \"买家已签收(货到付款专用)\",\n \"TRADE_FINISHED\": \"交易成功\",\n \"TRADE_CLOSED\": \"交易关闭\",\n \"TRADE_CLOSED_BY_TAOBAO\": \"交易被淘宝关闭\",\n \"TRADE_NO_CREATE_PAY\": \"没有创建外部交易(支付宝交易)\",\n \"WAIT_PRE_AUTH_CONFIRM\": \"余额宝0元购合约中\",\n \"PAY_PENDING\": \"外卡支付付款确认中\",\n \"ALL_WAIT_PAY\": \"所有买家未付款的交易(包含:WAIT_BUYER_PAY、TRADE_NO_CREATE_PAY)\",\n \"ALL_CLOSED\": \"所有关闭的交易(包含:TRADE_CLOSED、TRADE_CLOSED_BY_TAOBAO)\",\n \"PAID_FORBID_CONSIGN\": \"该状态代表订单已付款但是处于禁止发货状态\"\n}\n\nDEFAULT_TYPE = \"guarantee_trade,auto_delivery,ec,cod,step\"\nOPTION_TYPE = [\n \"fixed\", # 一口价\n \"auction\", # 拍卖\n \"guarantee_trade\", # 一口价、拍卖\n \"step\", # 分阶段\n \"independent_simple_trade\", # 旺店入门版\n \"independent_shop_trade\", # 旺店标准版\n \"auto_delivery\", # 自动发货\n \"ec\", # 直冲\n \"cod\", # 货到付款\n \"game_equipment\", # 游戏装备\n \"shopex_trade\", # SHopex\n \"netcn_trade\", # 万网交易\n \"external_trade\", # 同一外部交易\n \"instant_trade \", # 即时到账\n \"b2c_cod\", # 大商家货到付款\n \"hotel_trade\", # 酒店类型\n \"super_market_trade\", # 商超交易\n \"super_market_cod_trade\", # 商超货到付款\n \"taohua\", # 淘花网\n \"waimai\", # 外卖\n \"o2o_offlinetrade\", # O2O交易\n \"nopaid\", # 即时到帐/趣味猜交易类型\n \"eticket\", # 电子凭证\n \"tmall_i18n\", # 天猫国际\n \"nopaid\", # 无付款交易\n \"insurance_plus\", # 保险\n \"finance\", # 基金\n \"pre_auth_type\", # 预授权交易\n \"lazada\", # lazada\n]\n\nTRADE_TYPE = {\n \"default\": DEFAULT_TYPE,\n \"all\": \",\".join(OPTION_TYPE)\n}\n\nREFUND_STATUS = {\n \"WAIT_SELLER_AGREE\": \"买家已经申请退款\",\n \"WAIT_BUYER_RETURN_GOODS\": \"卖家已经同意退款,等待买家退货\",\n \"WAIT_SELLER_CONFIRM_GOODS\": \"买家已经退货,等待卖家确认收货\",\n \"SELLER_REFUSE_BUYER\": \"卖家拒绝退款\",\n \"CLOSED\": \"退款关闭\",\n \"SUCCESS\": \"退款成功\"\n}\n\n\nclass TradeApi(Common):\n\n def trade_sold_get(self, fields, start_created=None, end_created=None, status=None, buyer_nick=None,\n type=DEFAULT_TYPE, ext_type=None, rate_status=None,\n tag=None, page_no=None, page_size=None, use_has_next=None):\n \"\"\"\n 查询卖家已卖出的交易数据(根据创建时间)\n 参数:\n feilds: 需要返回的字段列表,逗号分隔\n start_created: 交易开始时间(三个月内)\n end_created: 交易结束时间\n status:交易状态\n buyer_nick: 买家昵称\n type: 交易类型(默认查询5种类型)\n ext_type:扩展类型\n rate_status: 评价状态\n tag: 交易自定义分组\n page_no: 页码\n page_size: 每页条数\n use_has_next: 是否启用has_next的分页方式\n \"\"\"\n\n data = {\n \"fields\": fields\n }\n\n frame = inspect.currentframe()\n args, _, _, values = inspect.getargvalues(frame)\n for ar in args:\n if values[ar]:\n data[ar] = values[ar]\n\n return self.post(\"taobao.trades.sold.get\", data)\n\n def trade_get(self, fields, tid):\n \"\"\"\n 获取单笔交易的部分信息\n 参数:\n fields: 要返回的字段列表,逗号分隔\n tid: 交易编号\n \"\"\"\n data = {\n \"fields\": fields,\n \"tid\": tid\n }\n\n return self.post(\"taobao.trade.get\", data)\n\n def trade_fullinfo_get(self, fields, tid):\n \"\"\"\n 获取单笔交易的详细信息\n 1. 只有在交易成功的状态下才能取到交易佣金,其它状态下取到的都是零或空值 \n 2. 只有单笔订单的情况下Trade数据结构中才包含商品相关的信息 \n 3. 获取到的Order中的payment字段在单笔子订单时包含物流费用,多笔子订单时不包含物流费用 \n 4. 
获取红包优惠金额可以使用字段 coupon_fee \n \"\"\"\n\n data = {\n \"fields\": fields,\n \"tid\": tid\n }\n\n return self.post(\"taobao.trade.fullinfo.get\", data)\n\n def trade_memo_add(self, tid, memo, flag=0):\n \"\"\"\n 对一笔交易添加备注\n 参数:\n tid: 交易编号\n memo: 交易备注,最大长度: 1000个字节\n flag: 交易备注旗帜,可选值为:0(灰色), 1(红色), 2(黄色), 3(绿色), 4(蓝色), 5(粉红色),默认值为0\n \"\"\"\n\n data = {\n \"tid\": tid,\n \"memo\": memo,\n \"flag\": flag\n }\n\n return self.post(\"taobao.trade.memo.add\", data)\n\n def trade_memo_update(self, tid, memo=None, flag=0, reset=False):\n \"\"\"\n 修改交易备注\n 参数:\n tid: 交易编号\n memo: 卖家交易备注。最大长度: 1000个字节\n flag: \t卖家交易备注旗帜,可选值为:0(灰色), 1(红色), 2(黄色), 3(绿色), 4(蓝色), 5(粉红色),默认值为0\n reset: 是否对memo的值置空若为true,则不管传入的memo字段的值是否为空,都将会对已有的memo值清空,慎用;若用false,则会根据memo是否为空来修改memo的值:若memo为空则忽略对已有memo字段的修改,若memo非空,则使用新传入的memo覆盖已有的memo的值\n \"\"\"\n data = {\n \"tid\": tid,\n \"memo\": memo,\n \"flag\": flag,\n \"reset\": reset\n }\n\n return self.post(\"taobao.trade.memo.update\", data)\n\n def refunds_receive_get(self, fields, status=None, buyer_nick=None, type=None, start_modified=None,\n end_modified=None, page_no=1, page_size=40, use_has_next=False):\n \"\"\"\n 查询卖家收到的退款列表\n 参数:\n fields: 需要返回的字段\n status: 退款状态,默认查询所有退款状态的数据,除了默认值外每次只能查询一种状态\n buyer_nick: 卖家昵称\n type: 交易类型列表,一次查询多种类型可用半角逗号分隔,默认同时查guarantee_trade, auto_delivery这两种类型的数据\n start_modified: 查询修改时间开始。格式: yyyy-MM-dd HH:mm:ss\n end_modified: 查询修改时间结束。格式: yyyy-MM-dd HH:mm:ss\n page_no: 页码。取值范围:大于零的整数; 默认值:1\n page_size: 每页条数。取值范围:大于零的整数; 默认值:40;最大值:100\n use_has_next: 是否启用has_next的分页方式,如果指定true,则返回的结果中不包含总记录数,但是会新增一个是否存在下一页的的字段,通过此种方式获取增量退款,接口调用成功率在原有的基础上有所提升\n \"\"\"\n data = {\n \"fields\": fields,\n \"status\": status,\n \"buyer_nick\": buyer_nick,\n \"type\": type,\n \"start_modified\": start_modified,\n \"end_modified\": end_modified,\n \"page_no\": page_no,\n \"page_size\": page_size,\n \"use_has_next\": use_has_next\n }\n\n return self.post(\"taobao.refunds.receive.get\", data)\n\n def trade_amount_get(self, fields, tid):\n \"\"\"\n 交易账务查询\n 1. 只供卖家使用,买家不可使用 \n 2. 可查询所有的状态的交易,但不同状态时交易的相关数据可能会有不同\n \"\"\"\n data = {\n \"fields\": fields,\n \"tid\": tid\n }\n\n return self.post(\"taobao.trade.amount.get\", data)\n\n def trade_close(self, tid, close_reason):\n \"\"\"\n 卖家关闭一笔交易\n 关闭一笔订单,可以是主订单或子订单。当订单从创建到关闭时间小于10s的时候,会报“CLOSE_TRADE_TOO_FAST”错误。\n \"\"\"\n data = {\n \"tid\": tid,\n \"close_reason\": close_reason\n }\n\n return self.post(\"taobao.trade.close\", data)\n"
},
{
"alpha_fraction": 0.5258883237838745,
"alphanum_fraction": 0.5258883237838745,
"avg_line_length": 24.205127716064453,
"blob_id": "6dc847805c741abcb836d4c4a0416073adea1c27",
"content_id": "4cfa6921a70576d9b427da8d699b53f524617e16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 39,
"path": "/taobao/model/base.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\n\nclass ErrorResponse(object):\n\n request_id = None\n code = None\n msg = None\n sub_code = None\n sub_msg = None\n\n\nclass PositiveResponse(object):\n pass\n\n\nclass BaseResponse(object):\n \"\"\"响应基类\"\"\"\n\n def __init__(self, data=None):\n \"\"\"\n 初始化\n data:请求返回的实体\n \"\"\"\n if data.get(\"error_response\", None):\n self.positive = False\n self.error_response = ErrorResponse()\n self.construct_obj(\n self.error_response, data.get(\"error_response\"))\n else:\n self.positive = True\n\n @staticmethod\n def construct_obj(instance, data):\n \"\"\"构造返回实体\"\"\"\n if isinstance(data, dict):\n for key, val in data.items():\n if isinstance(val, dict):\n setattr(instance, key, PositiveResponse())\n BaseResponse.construct_obj(getattr(instance, key), val)\n else:\n setattr(instance, key, val)\n"
},
{
"alpha_fraction": 0.6195899844169617,
"alphanum_fraction": 0.6195899844169617,
"avg_line_length": 35.5,
"blob_id": "659041651d0e6c4aa11b266fb915ce7b93ba5a19",
"content_id": "c45539adc0ed00468fc386a40cc39c1e4ac9e95d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 12,
"path": "/taobao/model/trade.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nfrom .base import BaseResponse, PositiveResponse\n\n\nclass SoldGetResponse(BaseResponse):\n \"\"\"卖出的交易数据\"\"\"\n\n def __init__(self, data):\n super(SoldGetResponse, self).__init__(data)\n if data.get(\"trades_sold_get_response\", None):\n self.trades_sold_get_response = PositiveResponse()\n self.construct_obj(self.trades_sold_get_response,\n data.get(\"trades_sold_get_response\"))\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 20,
"blob_id": "fdbd0b104c11c7760b57de740df3d9228c577afc",
"content_id": "fbd7463ccd21af76fec9613f537bc3b077ac4b0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/taobao/trade/__init__.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "from .trade import *"
},
{
"alpha_fraction": 0.47478991746902466,
"alphanum_fraction": 0.5472689270973206,
"avg_line_length": 26.171428680419922,
"blob_id": "c1f5c370fa158876d0e0d23878629ac332d64d65",
"content_id": "d7b1ad784f653f101b569b19279666457ef72822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 972,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 35,
"path": "/taobao/tests/test_sign.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nimport unittest\nfrom taobao.api import TaoBao\nfrom taobao.exception import SignError\n\n\nclass TestComm(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.taobao = TaoBao(\"12345678\", \"helloworld\", \"abcd\", sanbox=True)\n\n def test_sign_error(self):\n \"\"\"验签错误\"\"\"\n data = \"123\"\n self.assertRaises(SignError, self.taobao.comm.sign, data)\n\n def test_sign(self):\n \"\"\"验证签名算法\"\"\"\n data = {\n \"method\": \"taobao.item.seller.get\",\n \"app_key\": \"12345678\",\n \"session\": \"test\",\n \"timestamp\": \"2016-01-01 12:00:00\",\n \"format\": \"json\",\n \"v\": \"2.0\",\n \"sign_method\": \"md5\",\n \"fields\":\"num_iid,title,nick,price,num\",\n \"num_iid\":\"11223344\"\n }\n r = self.taobao.comm.sign(data,type=\"md5\")\n self.assertEqual(r,\"66987CB115214E59E6EC978214934FB8\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.5365044474601746,
"alphanum_fraction": 0.540099561214447,
"avg_line_length": 29.369747161865234,
"blob_id": "861a2ebefd1bbe020e67edf19c135b564afc86ac",
"content_id": "d4be8d233c848ea0b0581514105293f302551b00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4004,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 119,
"path": "/taobao/comm/common.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\n\nfrom taobao.exception import SignError, SignTypeError, ValidateError, PostError\nfrom hashlib import md5\nimport hmac\nimport traceback\nfrom datetime import datetime\nimport json\nimport requests\nfrom taobao.model.base import BaseResponse\n\n\nclass Common(object):\n \"\"\"公共请求参数\"\"\"\n\n def __get__(self, instance, owner):\n self._secret = instance._secret\n self._key = instance._key\n self._session = instance._session\n self._url = instance._url\n return self\n\n def sign(self, data, type=\"hmac\"):\n \"\"\"\n 签名算法\n data: 待签名的数据字典\n type: 签名算法类型,可选值:hmac和md5\n \"\"\"\n if not isinstance(data, dict):\n raise SignError(\"签名数据格式错误\")\n\n if type not in (\"hmac\", \"md5\"):\n raise SignTypeError(\"签名算法错误\")\n\n try:\n # 按字母序排序\n keys = sorted([key for key, val in data.items() if not (\n key == \"sign\" or isinstance(val, bytes))])\n # 拼接字符串\n strs = \"\".join(f\"{key}{data[key]}\" for key in keys if data[key])\n if type == \"md5\":\n return md5(f\"{self._secret}{strs}{self._secret}\".encode(\"utf-8\")).hexdigest().upper()\n else:\n return hmac.new(strs).hexdigest().upper()\n except Exception as err:\n raise SignError(f\"数据签名异常:{traceback.format_exc()}\")\n\n def _get_comm_args(self, method, target_app_key=None, sign_method=\"md5\", format=\"json\", version=\"2.0\", partner_id=None, simplify=False):\n \"\"\"\n 获取公共请求参数\n method: 方法名\n target_app_key: 被调用的目标AppKey,仅当被调用的API为第三方ISV提供时有效\n sign_method: 签名算法,默认md5\n format: 返回格式,默认json\n version: API协议版本,默认2\n partner_id:合作伙伴标识\n simplify: 是否采用精简JSON返回格式,仅当format=json时有效,默认值为:False\n \"\"\"\n\n data = {\n \"method\": method,\n \"app_key\": self._key,\n \"session\": self._session,\n \"timestamp\": datetime.strftime(datetime.now(), \"%Y-%m-%d %H:%M:%S\"),\n \"format\": format,\n \"v\": version,\n \"sign_method\": sign_method\n }\n\n if target_app_key:\n data['target_app_key'] = target_app_key\n if partner_id:\n data['partner_id'] = partner_id\n if simplify:\n data['simplify'] = simplify\n\n return data\n\n def _parse_comm_response(self, method, data):\n \"\"\"\n 公共响应\n data: 淘宝返回的结果\n \"\"\"\n \n if \"taobao.\" in method:\n method = method.split(\"taobao.\")[1].replace(\".\", \"_\")\n\n # return BaseResponse(method, data=data)\n\n if data.get(\"error_response\", False):\n return False, data[\"error_response\"]\n\n if data.get(f\"{method}_response\", False):\n return True, data[f\"{method}_response\"]\n\n def post(self, method, data):\n \"\"\"\n 提交请求\n method: 请求方法\n data: 接口参数\n return: 返回结果 \n \"\"\"\n if not isinstance(data, dict):\n raise ValidateError(\"数据格式错误\")\n\n try:\n # 组织请求数据\n comm_data = self._get_comm_args(method)\n data.update(comm_data)\n data['sign'] = self.sign(data, type=data['sign_method'])\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded;charset=utf-8\"\n }\n\n response = requests.post(\n self._url, data=data, headers=headers).json()\n # return response\n return self._parse_comm_response(method, response)\n except Exception as err:\n raise PostError(f\"接口:{method} 调用失败:{traceback.format_exc()}\")\n"
},
{
"alpha_fraction": 0.6908517479896545,
"alphanum_fraction": 0.6908517479896545,
"avg_line_length": 10.740740776062012,
"blob_id": "1360e35b55f37a4e28dff3e18a90b7c9a6248641",
"content_id": "566d12678126a4bc95f298c8a3f181b420c42b11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 27,
"path": "/README.md",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "# Taobao SDK\n\n第三方淘宝SDK\n\n## Usage\n\n使用方法:\n\n```python\nfrom taobao.api import TaoBao\n\ntaobao = TaoBao(key, secret, session)\ntaobao.tradeapi.trade_sold_get(...)\n```\n\n## Api\n\n### 交易类\n\n* trade_sold_get:查询卖家已卖出的交易数据\n* trade_fullinfo_get: 获取单笔交易的详细信息\n* trade_amount_get: 交易账务查询\n* trade_memo_update: 修改交易备注\n\n### 菜鸟配货\n\n* 商品信息查询\n"
},
{
"alpha_fraction": 0.4883720874786377,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 35.5,
"blob_id": "d2ad82bb43bf657d77973843dc99d1073ea9ddd6",
"content_id": "52c65a523b691824a59041462dbb9454f7708cc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 731,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 20,
"path": "/taobao/tests/test_cainiao.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nimport unittest\nfrom taobao.api import TaoBao\nimport json\n\n\nclass TestCaiNiao(unittest.TestCase):\n\n def test_wlb_wms_sku_get(self):\n tb = TaoBao(\"23229023\", \"497eb6372b6f152059a65015775838c8\", \"6100b28fe461f6883cf4040163de9c53a0f618ecea5c5582999166418\")\n # print(tb.tradeapi.trade_sold_get(\"orders\"))\n res = tb.cainiao.wlb_wms_sku_get(item_id=\"10732069\",owner_user_id=1)\n print(json.dumps(res))\n\n def test_wlb_wms_inventory_query(self):\n tb = TaoBao(\"23229023\", \"497eb6372b6f152059a65015775838c8\", \"6100b28fe461f6883cf4040163de9c53a0f618ecea5c5582999166418\")\n res = tb.cainiao.wlb_wms_inventory_query()\n print(json.dumps(res))\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.614503800868988,
"alphanum_fraction": 0.614503800868988,
"avg_line_length": 13.38888931274414,
"blob_id": "3946ded1356a9637bd14f415edd9c48aa8f95553",
"content_id": "5529c8f5b8c9eb3201624fef049edb9fbafac9e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 18,
"path": "/taobao/exception/exceptions.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\n\nclass SignError(Exception):\n \"\"\"签名异常\"\"\"\n pass\n\nclass SignTypeError(Exception):\n pass\n\nclass ValidateError(Exception):\n \"\"\"验证错误\"\"\"\n pass\n\nclass PostError(Exception):\n \"\"\"请求失败\"\"\"\n pass\n\nclass TradeError(Exception):\n \"\"\"交易类错误\"\"\"\n pass\n\n"
},
{
"alpha_fraction": 0.6352657079696655,
"alphanum_fraction": 0.6352657079696655,
"avg_line_length": 33.41666793823242,
"blob_id": "c0e63af3d830284df35f347723d7975410ccf301",
"content_id": "5b85655d464d300f4a98babfd8625539921b8d1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 12,
"path": "/taobao/model/user.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nfrom .base import BaseResponse\n\n\nclass Subscribe(BaseResponse):\n \"\"\"appstore应用订购\"\"\"\n\n def __init__(self, data):\n super(Subscribe, self).__init__(data)\n if data.get(\"appstore_subscibe_get_response\", None):\n self.appstore_subscibe_get_response = object()\n self.construct_obj(\n self.appstore_subscibe_get_response, data.get(\"appstore_subscibe_get_response\"))\n"
},
{
"alpha_fraction": 0.6511628031730652,
"alphanum_fraction": 0.6795865893363953,
"avg_line_length": 24.733333587646484,
"blob_id": "581c94f2299338f54d4cdef6794fb933395d796c",
"content_id": "6995dcc7c4f66ed3347ae50dcff5673798d41a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/taobao/tests/test_user.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nimport unittest\nfrom taobao.api import TaoBao\nfrom taobao.exception import SignError\n\n\nclass TestUser(unittest.TestCase):\n\n def test_appstore_subscibe_get(self):\n taobao = TaoBao(\"12345678\", \"helloworld\", \"abcd\", sanbox=True)\n r, data = taobao.userapi.appstore_subscibe_get(\"test123\")\n self.assertEqual(r, False)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 22,
"blob_id": "6e4dcbc5981912adedccd5689fed4d7694461076",
"content_id": "5ad42dc507542972033e604df9f8355fd70970f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/taobao/cainiao/__init__.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "from .cainiao import *"
},
{
"alpha_fraction": 0.5409326553344727,
"alphanum_fraction": 0.5409326553344727,
"avg_line_length": 25.054054260253906,
"blob_id": "1339fd2386fb3992a541b3578fd3f1e18e52ab8d",
"content_id": "dad6213097f828e73b2a92ba4b1dddeb967b42c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 999,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 37,
"path": "/taobao/api.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nfrom taobao.user import UserApi\nfrom taobao.trade import TradeApi\nfrom taobao.cainiao import CaiNiaoApi\nfrom taobao.comm import Common\nimport requests\n\n\nclass TaoBao(object):\n \"\"\"淘宝SDK\"\"\"\n\n def __init__(self, key, secret, session, ssl=False, sanbox=False):\n \"\"\"\n 参数列表:\n key:\n secret:\n session: \n ssl: 是否使用HTTPS\n sandbox: 是否沙箱环境\n \"\"\"\n self._key = key\n self._secret = secret\n self._session = session\n if ssl:\n if sanbox:\n self._url = \"https://gw.api.tbsandbox.com/router/rest\"\n else:\n self._url = \"https://eco.taobao.com/router/rest\"\n else:\n if sanbox:\n self._url = \"http://gw.api.tbsandbox.com/router/rest\"\n else:\n self._url = \"http://gw.api.taobao.com/router/rest\"\n\n comm = Common()\n userapi = UserApi()\n tradeapi = TradeApi()\n cainiao = CaiNiaoApi()\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 22,
"blob_id": "79dfcd18eb371f2efdc69bda8b43275645d71868",
"content_id": "3c3397248608fe70fb08862139a72a2e9589320e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/taobao/user/__init__.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nfrom .userapi import *"
},
{
"alpha_fraction": 0.49059560894966125,
"alphanum_fraction": 0.4976488947868347,
"avg_line_length": 27.35555648803711,
"blob_id": "769e2f523a6342752f8924bb7879a953c93432f2",
"content_id": "a9dc3afbc569e060eec150950115e2de953b1cea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1314,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 45,
"path": "/taobao/cainiao/cainiao.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# @Time : 2019-12-29\n# @Author : Kevin Kong ([email protected])\n\n# 菜鸟配货仓API\n\nimport inspect\nfrom taobao.comm import Common\n\n\nclass CaiNiaoApi(Common):\n\n def wlb_wms_sku_get(self, item_code=None, item_id=None, owner_user_id=None):\n \"\"\"\n 商品信息查询\n \"\"\"\n data = {\n \"item_code\": item_code,\n \"item_id\": item_id,\n \"owner_user_id\": owner_user_id\n }\n\n return self.post(\"taobao.wlb.wms.sku.get\", data)\n\n def wlb_wms_inventory_query(self, item_id=None, store_code=None, inventory_type=None,\n type=None, batch_code=None, produce_date=None, due_date=None,\n channel_code=None, page_no=None,\n page_size=None):\n \"\"\"\n 菜鸟商品库存查询 \n \"\"\"\n data = {\n \"item_id\": item_id,\n \"store_code\": store_code,\n \"inventory_type\": inventory_type,\n \"type\": type,\n \"batch_code\": batch_code,\n \"produce_date\": produce_date,\n \"due_date\": due_date,\n \"channel_code\": channel_code,\n \"page_no\": page_no,\n \"page_size\": page_size\n }\n\n return self.post(\"taobao.wlb.wms.inventory.query\", data)\n"
},
{
"alpha_fraction": 0.5231788158416748,
"alphanum_fraction": 0.5231788158416748,
"avg_line_length": 16.705883026123047,
"blob_id": "cb38093d03cd5b87076133c621a552b11a16bd3f",
"content_id": "31e31e62ae154b225cd008846e2bdd502da3fa6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 17,
"path": "/taobao/user/userapi.py",
"repo_name": "jellyhappy/taobao_sdk",
"src_encoding": "UTF-8",
"text": "\nfrom taobao.comm import Common\n\n\nclass UserApi(Common):\n \"\"\"用户接口\"\"\"\n\n def appstore_subscibe_get(self, nick):\n \"\"\"\n 查询appstore应用订购关系\n nick: 用户昵称\n \"\"\"\n\n data = {\n \"nick\": nick\n }\n\n return self.post(\"taobao.appstore.subscribe.get\", data)\n"
}
] | 17 |
JefferyKu/RSScrawler | https://github.com/JefferyKu/RSScrawler | 3ab68f9f2f86143dda5915630bbecc98b2964ec4 | 4a34e5d0f6be7ed4af193b51297b1f89370c8978 | eefa01d259630224a08d68b813477e8e2ac5e151 | refs/heads/master | 2021-04-15T07:16:43.134799 | 2018-03-16T08:14:12 | 2018-03-16T08:14:12 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6179383993148804,
"alphanum_fraction": 0.6265060305595398,
"avg_line_length": 33.90654373168945,
"blob_id": "edbd43e9f676f4d7a7b62c681c44854f254baadd",
"content_id": "16deef3e6e68c90eaaa866643600edae5bc1d4ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3736,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 107,
"path": "/notifiers.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n# Enthält Code von:\n# https://github.com/Gutz-Pilz/pyLoad-stuff/blob/master/SJ.py\n\nimport base64\nimport logging\nimport urllib\nimport urllib2\nfrom rssconfig import RssConfig\nimport re\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nlog_info = logging.info\nlog_error = logging.error\nlog_debug = logging.debug\n\ndef api_request_cutter(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]\n\ndef notify(added_items):\n notifications = RssConfig('Notifications')\n homeassistant_settings = notifications.get(\"homeassistant\").split(',')\n pushbullet_token = notifications.get(\"pushbullet\")\n pushover_settings = notifications.get(\"pushover\").split(',')\n items = []\n for item in added_items:\n item = re.sub(r' - <a href.*<\\/a>', '', item)\n items.append(item)\n if len(items) > 0:\n cut_items = list(api_request_cutter(items, 5))\n if len(notifications.get(\"homeassistant\")) > 0:\n for cut_item in cut_items:\n homassistant_url = homeassistant_settings[0]\n homeassistant_password = homeassistant_settings[1]\n Homeassistant(cut_item, homassistant_url, homeassistant_password)\n if len(notifications.get(\"pushbullet\")) > 0:\n Pushbullet(items, pushbullet_token)\n if len(notifications.get('pushover')) > 0:\n for cut_item in cut_items:\n pushover_user = pushover_settings[0]\n pushover_token = pushover_settings[1]\n Pushover(cut_item, pushover_user, pushover_token)\n\ndef Homeassistant(items, homassistant_url, homeassistant_password):\n data = urllib.urlencode({\n 'title': 'RSScrawler:',\n 'body': \"\\n\\n\".join(items)\n })\n try:\n req = urllib2.Request(homassistant_url, data)\n req.add_header('X-HA-Access', homeassistant_password)\n req.add_header('Content-Type', 'application/json')\n response = urllib2.urlopen(req)\n except urllib2.HTTPError:\n log_debug('FEHLER - Konnte Home Assistant API nicht erreichen')\n return False\n res = json.load(response)\n if res['sender_name']:\n log_debug('Home Assistant Erfolgreich versendet')\n else:\n log_debug('FEHLER - Konnte nicht an Home Assistant Senden') \n \ndef Pushbullet(items, token):\n data = urllib.urlencode({\n 'type': 'note',\n 'title': 'RSScrawler:',\n 'body': \"\\n\\n\".join(items)\n })\n auth = base64.encodestring('%s:' %token).replace('\\n', '')\n try:\n req = urllib2.Request('https://api.pushbullet.com/v2/pushes', data)\n req.add_header('Authorization', 'Basic %s' % auth)\n response = urllib2.urlopen(req)\n except urllib2.HTTPError:\n log_debug('FEHLER - Konnte Pushbullet API nicht erreichen')\n return False\n res = json.load(response)\n if res['sender_name']:\n log_debug('Pushbullet Erfolgreich versendet')\n else:\n log_debug('FEHLER - Konnte nicht an Pushbullet Senden')\n\ndef Pushover(items, pushover_user, pushover_token):\n data = urllib.urlencode({\n 'user': pushover_user,\n 'token': pushover_token,\n 'title': 'RSScrawler',\n 'message': \"\\n\\n\".join(items)\n })\n try:\n req = urllib2.Request('https://api.pushover.net/1/messages.json', data)\n response = urllib2.urlopen(req)\n except urllib2.HTTPError:\n log_debug('FEHLER - Konnte Pushover API nicht erreichen')\n return False\n res = json.load(response)\n if res['status'] == 1:\n log_debug('Pushover Erfolgreich versendet')\n else:\n log_debug('FEHLER - Konnte nicht an Pushover Senden')\n"
},
{
"alpha_fraction": 0.6044742465019226,
"alphanum_fraction": 0.6158836483955383,
"avg_line_length": 61.95774459838867,
"blob_id": "ae8931675d232d89f294acb181f6fe8c4d3dfc98",
"content_id": "e4a7e5032e7b2bcbdeb3d6957466bee9b7f620ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4477,
"license_type": "permissive",
"max_line_length": 881,
"num_lines": 71,
"path": "/files.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport errno\nimport logging\nimport os\nimport sys\nimport rssdb\n\ndef check():\n lists_nonregex = [ \"MB_3D\", \"MB_Filme\", \"MB_Staffeln\", \"SJ_Serien\", \"YT_Channels\"]\n lists_regex = [\"MB_Regex\", \"SJ_Serien_Regex\", \"SJ_Staffeln_Regex\"]\n\n for nrlist in lists_nonregex:\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + nrlist + '.txt'), 'r+') as f:\n content = f.read()\n f.seek(0)\n f.truncate()\n if content == '':\n content = 'XXXXXXXXXX'\n content = \"\".join([s for s in content.strip().splitlines(True) if s.strip()])\n f.write(content.replace('.', ' ').replace(';', '').replace(',', '').replace('Ä', 'Ae').replace('ä', 'ae').replace('Ö', 'Oe').replace('ö', 'oe').replace('Ü', 'Ue').replace('ü', 'ue').replace('ß', 'ss').replace('(', '').replace(')', '').replace('*', '').replace('|', '').replace('\\\\', '').replace('/', '').replace('?', '').replace('!', '').replace(':', '').replace(' ', ' '))\n\n for rlist in lists_regex:\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + rlist + '.txt'), 'r+') as f:\n content = f.read()\n f.seek(0)\n f.truncate()\n if content == '':\n content = 'XXXXXXXXXX'\n content = \"\".join([s for s in content.strip().splitlines(True) if s.strip()])\n f.write(content)\n\ndef _mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n logging.error(\"Kann Pfad nicht anlegen: %s\" % path)\n raise\n\ndef startup():\n if not os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen')):\n _mkdir_p(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen'))\n if not os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads')):\n _mkdir_p(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads'))\n if not os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen')):\n _mkdir_p(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen'))\n lists = [ \"MB_3D\", \"MB_Filme\", \"MB_Staffeln\", \"SJ_Serien\", \"MB_Regex\", \"SJ_Serien_Regex\", \"SJ_Staffeln_Regex\", \"YT_Channels\"]\n for l in lists:\n if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + l + '.txt')):\n open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/MB_Filme.txt'), \"a\").close()\n placeholder = open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + l + '.txt'), 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads/MB_Downloads.db')):\n if rssdb.merge_old():\n os.remove(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads/MB_Downloads.db'))\n os.remove(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads/SJ_Downloads.db'))\n os.remove(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads/YT_Downloads.db'))\n else:\n logging.error(\"Kann alte Downloads-Datenbanken nicht verbinden!\")\n\ndef einsteller(einstellungen, version, jdpfad, port):\n open(einstellungen, \"a\").close()\n einsteller = open(einstellungen, 'w')\n einsteller.write('# RSScrawler.ini (Stand: RSScrawler ' + version + ')\\n\\n[RSScrawler]\\njdownloader = ' + jdpfad + '\\nport = ' + port + '\\nprefix = \\ninterval = 10\\nenglish = False\\nhoster = Share-Online\\n\\n[MB]\\nquality = 720p\\nignore = 
cam,subbed,xvid,dvdr,untouched,remux,avc,pal,md,ac3md,mic,xxx,hou,h-ou\\nhistorical = False\\nregex = False\\ncutoff = False\\ncrawl3d = False\\nenforcedl = False\\ncrawlseasons = True\\nseasonsquality = 720p\\nseasonpacks = False\\nseasonssource = web-dl.*-(tvs|4sj)|webrip.*-(tvs|4sj)|webhd.*-(tvs|4sj)|netflix.*-(tvs|4sj)|amazon.*-(tvs|4sj)|itunes.*-(tvs|4sj)|bluray|bd|bdrip\\nimdbyear = 2010\\nimdb = 0.0\\n\\n[SJ]\\nquality = 720p\\nrejectlist = XviD,Subbed,HDTV\\nregex = False\\n\\n[YT]\\nyoutube = False\\nmaxvideos = 10\\nignore = \\n\\n[Notifications]\\nhomeassistant = \\n\\npushbullet = \\npushover = \\n\\n[Crawljobs]\\nautostart = True\\nsubdir = True\\n')\n einsteller.close()\n"
},
{
"alpha_fraction": 0.4686228930950165,
"alphanum_fraction": 0.487427294254303,
"avg_line_length": 43.92534255981445,
"blob_id": "180a0b7415046ece4dd3da3eb02978365ac305f0",
"content_id": "eeae697fdd133594909e406b4d594014cef8f9a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22867,
"license_type": "permissive",
"max_line_length": 499,
"num_lines": 509,
"path": "/search.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport common\nfrom notifiers import notify\nfrom rssconfig import RssConfig\nfrom rssdb import RssDb\nfrom url import getURL\nfrom url import postURL\n\nfrom bs4 import BeautifulSoup as bs\nfrom HTMLParser import HTMLParser\nimport feedparser\nimport json\nimport logging\nimport re\nimport os\nimport sys\nimport time\nimport StringIO\n\ndef get(title):\n config = RssConfig('MB')\n quality = config.get('quality')\n query = title.replace(\".\", \" \").replace(\" \", \"+\")\n mb = getURL('aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZw=='.decode('base64') + '/search/' + query + \"+\" + quality + '/feed/rss2/')\n mb = re.findall(r'<title>(.*?)<\\/title>\\n.*?<link>(.*?)<\\/link>', mb)\n\n unrated = []\n for result in mb:\n if not result[1].endswith(\"-MB\") and not result[1].endswith(\".MB\"):\n unrated.append([rate(result[0]), result[1].replace(\"/\", \"+\"), result[0]])\n\n rated = sorted(unrated, reverse=True)\n\n results = {}\n i = 0\n for result in rated:\n res = {\"link\": result[1], \"title\": result[2]}\n results[\"result\" + str(i)] = res\n i += 1\n mb = results\n\n results = {}\n i = 0\n sj = postURL(\"aHR0cDovL3Nlcmllbmp1bmtpZXMub3JnL21lZGlhL2FqYXgvc2VhcmNoL3NlYXJjaC5waHA=\".decode('base64'), data={'string': \"'\" + query + \"'\"})\n try:\n sj = json.loads(sj)\n except:\n sj = []\n for result in sj:\n res = {\"id\": result[0], \"title\": html_to_str(result[1])}\n results[\"result\" + str(i)] = res\n i += 1\n sj = results\n return mb, sj\n\ndef rate(title):\n score = 0\n if \".bluray.\" in title.lower():\n score += 7\n if \".bd.\" in title.lower():\n score += 7\n if \".bdrip.\" in title.lower():\n score += 7\n if re.match(r'.*\\-(4SJ|TVS)', title):\n score += 4\n if \".dl.\" in title.lower():\n score += 2\n if re.match(r'.*\\.(DTS|DD\\+*51|DD\\+*71|AC3\\.5\\.*1)\\..*', title):\n score += 2\n if re.match(r'.*\\.(720|1080|2160)p\\..*', title):\n score += 2\n if \".ml.\" in title.lower():\n score += 1\n if \".dd20.\" in title.lower():\n score += 1\n if \"dubbed.\" in title.lower():\n score -= 1\n if \".synced.\" in title.lower():\n score -= 1\n if \".ac3d.\" in title.lower():\n score -= 1\n if \".dtsd.\" in title.lower():\n score -= 1\n if \".hdtv.\" in title.lower():\n score -= 1\n if \".dtv\" in title.lower():\n score -= 1\n if \".pdtv\" in title.lower():\n score -= 1\n if \"tvrip.\" in title.lower():\n score -= 1\n if \".subbed.\" in title.lower():\n score -= 2\n if \".xvid.\" in title.lower():\n score -= 2\n if \".pal.\" in title.lower():\n score -= 10\n if \"dvd9\" in title.lower():\n score -= 10\n try:\n config = RssConfig('SJ')\n reject = config.get(\"rejectlist\").replace(\",\", \"|\").lower() if len(\n config.get(\"rejectlist\")) > 0 else r\"^unmatchable$\"\n except TypeError:\n reject = r\"^unmatchable$\"\n r = re.search(reject, title.lower())\n if r:\n score -= 5\n if \".subpack.\" in title.lower():\n score -= 10\n return score\n\ndef html_to_str(unescape):\n return HTMLParser().unescape(unescape)\n\ndef download_dl(title, jdownloaderpath, hoster, staffel, db, config):\n search_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0].replace(\".\", \" \").replace(\" \", \"+\")\n search_url = 
\"aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZy9zZWFyY2gv\".decode('base64') + search_title + \"/feed/rss2/\"\n feedsearch_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0]\n if not '.dl.' in feedsearch_title.lower():\n logging.debug(\"%s - Release ignoriert (nicht zweisprachig, da wahrscheinlich nicht Retail)\" %feedsearch_title)\n return False\n for (key, value, pattern) in dl_search(feedparser.parse(search_url), feedsearch_title):\n download_link = False\n req_page = getURL(value[0])\n soup = bs(req_page, 'lxml')\n download = soup.find(\"div\", {\"id\": \"content\"})\n url_hosters = re.findall(r'href=\"([^\"\\'>]*)\".+?(.+?)<', str(download))\n for url_hoster in url_hosters:\n if not \"bW92aWUtYmxvZy5vcmcv\".decode(\"base64\") in url_hoster[0]:\n if hoster.lower() in url_hoster[1].lower():\n download_link = url_hoster[0]\n\n if download_link:\n notify_array = []\n if \"aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZy8yMDEw\".decode(\"base64\") in download_link:\n logging.debug(\"Fake-Link erkannt!\")\n return False\n elif staffel:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n db.store(\n key,\n 'dl' if config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Staffel] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n elif '.3d.' in key.lower():\n retail = False\n if config.get('cutoff'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n db.store(\n key,\n 'dl' if config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D/Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n else:\n retail = False\n if config.get('cutoff'):\n if config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n db.store(\n key,\n 'dl' if config.get('enforcedl') and '.dl.' 
in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + 'Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n\ndef dl_search(feed, title):\n s = re.sub(r\"[&#\\s/]\", \".\", title).lower()\n for post in feed.entries:\n found = re.search(s, post.title.lower())\n if found:\n yield (post.title, [post.link], title)\n\ndef mb(link, jdownloaderpath):\n link = link.replace(\"+\", \"/\")\n url = getURL(\"aHR0cDovL21vdmllLWJsb2cub3JnLw==\".decode('base64') + link)\n rsscrawler = RssConfig('RSScrawler')\n config = RssConfig('MB')\n hoster = rsscrawler.get('hoster')\n db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n\n soup = bs(url, 'lxml')\n download = soup.find(\"div\", {\"id\": \"content\"})\n key = re.findall(r'Permanent Link: (.*?)\"', str(download)).pop()\n url_hosters = re.findall(r'href=\"([^\"\\'>]*)\".+?(.+?)<', str(download))\n download_link = \"\"\n for url_hoster in url_hosters:\n if not \"bW92aWUtYmxvZy5vcmcv\".decode(\"base64\") in url_hoster[0]:\n if hoster.lower() in url_hoster[1].lower():\n download_link = url_hoster[0]\n\n englisch = False\n if \"*englisch*\" in key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n\n staffel = re.search(r\"s\\d{1,2}(-s\\d{1,2}|-\\d{1,2}|\\.)\", key.lower())\n\n if config.get('enforcedl') and '.dl.' not in key.lower():\n original_language = \"\"\n fail = False\n get_imdb_url = url\n key_regex = r'<title>' + re.escape(key) + r'.*?<\\/title>\\n.*?<link>(?:(?:.*?\\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\\/\\/(?:|www\\.)imdb\\.com\\/title\\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(?!\\d(?:\\.|\\,)\\d)(?:.|.*?)<\\/a>'\n imdb_id = re.findall(key_regex, get_imdb_url)\n if len(imdb_id) > 0:\n if not imdb_id[0]:\n fail = True\n else:\n imdb_id = imdb_id[0]\n else:\n fail = True\n if fail:\n search_title = re.findall(r\"(.*?)(?:\\.(?:(?:19|20)\\d{2})|\\.German|\\.\\d{3,4}p|\\.S(?:\\d{1,3})\\.)\", key)[0].replace(\".\", \"+\")\n search_url = \"http://www.imdb.com/find?q=\" + search_title\n search_page = getURL(search_url)\n search_results = re.findall(r'<td class=\"result_text\"> <a href=\"\\/title\\/(tt[0-9]{7,9})\\/\\?ref_=fn_al_tt_\\d\" >(.*?)<\\/a>.*? 
\\((\\d{4})\\)..(.{9})', search_page)\n total_results = len(search_results)\n if total_results == 0:\n download_imdb = \"\"\n elif staffel:\n imdb_id = search_results[0][0]\n else:\n no_series = False\n while total_results > 0:\n attempt = 0\n for result in search_results:\n if result[3] == \"TV Series\":\n no_series = False\n total_results -= 1\n attempt += 1\n else:\n no_series = True\n imdb_id = search_results[attempt][0]\n total_results = 0\n break\n if no_series is False:\n logging.debug(\"%s - Keine passende Film-IMDB-Seite gefunden\" % key)\n if not imdb_id:\n if not download_dl(key, jdownloaderpath, hoster, staffel, db, config):\n logging.debug(\"%s - Kein zweisprachiges Release gefunden.\" % key)\n else:\n if isinstance(imdb_id, list):\n imdb_id = imdb_id.pop()\n imdb_url = \"http://www.imdb.com/title/\" + imdb_id\n details = getURL(imdb_url)\n if not details:\n logging.debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n if original_language == \"German\":\n logging.debug(\"%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!\" % key)\n else:\n if not download_dl(key, jdownloaderpath, hoster, staffel, db, config) and not englisch:\n logging.debug(\"%s - Kein zweisprachiges Release gefunden! Breche ab.\" % key)\n\n if download_link:\n notify_array = []\n if staffel:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n db.store(\n key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\"),\n 'notdl' if config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Suche/Staffel] - ' + key.replace(\".COMPLETE.\", \".\") + ' - [<a href=\"' + download_link + '\" target=\"_blank\">Link</a>]'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n elif '.3d.' in key.lower():\n retail = False\n if config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n db.store(\n key,\n 'notdl' if config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Suche/Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n else:\n retail = False\n if config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n db.store(\n key,\n 'notdl' if config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Suche/Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n else:\n return False\n\ndef sj(id, jdownloaderpath):\n url = getURL(\"aHR0cDovL3Nlcmllbmp1bmtpZXMub3JnLz9jYXQ9\".decode('base64') + str(id))\n season_pool = re.findall(r'<h2>Staffeln:(.*?)<h2>Feeds', url).pop()\n season_links = re.findall(r'href=\"(.{1,125})\">.{1,90}(Staffel|Season).*?(\\d{1,2}-?\\d{1,2}|\\d{1,2})', season_pool)\n title = html_to_str(re.findall(r'>(.{1,90}?) &#', season_pool).pop())\n\n rsscrawler = RssConfig('RSScrawler')\n\n if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Serien.txt')):\n file = open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Serien.txt'))\n output = StringIO.StringIO()\n for line in file.readlines():\n output.write(line.replace(\"XXXXXXXXXX\",\"\"))\n liste = output.getvalue()\n if not title in liste:\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Serien.txt'), 'wb') as f:\n liste = liste + \"\\n\" + title\n f.write(liste.encode('utf-8'))\n\n staffeln = []\n staffel_nr = []\n seasons = []\n\n for s in season_links:\n if \"staffel\" in s[1].lower():\n staffeln.append([s[2], s[0]])\n if \"-\" in s[2]:\n split = s[2].split(\"-\")\n split = range(int(split[0]), int(split[1]) + 1)\n for nr in split:\n staffel_nr.append(str(nr))\n else:\n staffel_nr.append(s[2])\n else:\n seasons.append([s[2], s[0]])\n\n if rsscrawler.get(\"english\"):\n for se in seasons:\n if not se[0] in staffel_nr:\n staffeln.append(se)\n\n to_dl = []\n for s in staffeln:\n if \"-\" in s[0]:\n split = s[0].split(\"-\")\n split = range(int(split[0]), int(split[1]) + 1)\n for i in split:\n to_dl.append([str(i), s[1]])\n else:\n to_dl.append([s[0], s[1]])\n\n found_seasons = {}\n for dl in to_dl:\n if len(dl[0]) is 1:\n sXX = \"S0\" + str(dl[0])\n else:\n sXX = \"S\" + str(dl[0])\n link = dl[1]\n if sXX not in found_seasons:\n found_seasons[sXX] = link\n\n for sXX, link in found_seasons.items():\n config = RssConfig('SJ')\n quality = config.get('quality')\n url = getURL(link)\n pakete = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'\\..*?' + quality + r'.*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n folgen = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'E\\d{1,3}.*?' + quality + r'.*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n lq_pakete = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'\\..*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n lq_folgen = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'E\\d{1,3}.*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n\n if not pakete and not folgen and not lq_pakete and not lq_folgen:\n sXX = sXX.replace(\"S0\", \"S\")\n pakete = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'\\..*?' + quality + r'.*?)<.*?\\n.*?href=\"(.*?)\".*? 
\\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n folgen = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'E\\d{1,3}.*?' + quality + r'.*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n lq_pakete = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'\\..*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n lq_folgen = re.findall(re.compile(r'<p><strong>(.*?\\.' + sXX + r'E\\d{1,3}.*?)<.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<(?:.*?\\n.*?href=\"(.*?)\".*? \\| (.*)<|)'), url)\n\n best_matching_links = []\n\n if pakete:\n links = []\n for x in pakete:\n title = x[0]\n score = rate(title)\n hoster = [[x[2], x[1]], [x[4], x[3]]]\n links.append([score, title, hoster])\n highest_score = sorted(links, reverse=True)[0][0]\n for l in links:\n if l[0] == highest_score:\n for hoster in l[2]:\n best_matching_links.append([l[1], hoster[0], hoster[1]])\n elif folgen:\n links = []\n for x in folgen:\n title = x[0]\n score = rate(title)\n hoster = [[x[2], x[1]], [x[4], x[3]]]\n links.append([score, title, hoster])\n highest_score = sorted(links, reverse=True)[0][0]\n for l in links:\n if l[0] == highest_score:\n for hoster in l[2]:\n best_matching_links.append([l[1], hoster[0], hoster[1]])\n elif lq_pakete:\n links = []\n for x in lq_pakete:\n title = x[0]\n score = rate(title)\n hoster = [[x[2], x[1]], [x[4], x[3]]]\n links.append([score, title, hoster])\n highest_score = sorted(links, reverse=True)[0][0]\n for l in links:\n if l[0] == highest_score:\n for hoster in l[2]:\n best_matching_links.append([l[1], hoster[0], hoster[1]])\n elif lq_folgen:\n links = []\n for x in lq_folgen:\n title = x[0]\n score = rate(title)\n hoster = [[x[2], x[1]], [x[4], x[3]]]\n links.append([score, title, hoster])\n highest_score = sorted(links, reverse=True)[0][0]\n for l in links:\n if l[0] == highest_score:\n for hoster in l[2]:\n best_matching_links.append([l[1], hoster[0], hoster[1]])\n\n notify_array = []\n for link in best_matching_links:\n dl_title = link[0].replace(\"Staffelpack \", \"\").replace(\"Staffelpack.\", \"\")\n dl_hoster = link[1]\n dl_link = link[2]\n rsscrawler = RssConfig('RSScrawler')\n hoster = rsscrawler.get('hoster')\n db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n\n if hoster.lower() in dl_hoster.lower():\n common.write_crawljob_file(dl_title, dl_title, dl_link, jdownloaderpath + \"/folderwatch\", \"RSScrawler\")\n db.store(dl_title, 'added')\n log_entry = '[Suche/Serie] - ' + dl_title + ' - <a href=\"' + dl_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + dl_title + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n logging.info(log_entry)\n notify_array.append(log_entry)\n notify(notify_array)\n return True\n"
},
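The `rate()` helper in the file above is the heart of the ranking: it scans a lowercased release name for quality tags and sums fixed weights, so BluRay/DL/high-resolution names float to the top while subbed, XviD, or DVD sources sink. Below is a condensed, self-contained sketch of that scoring idea; the weight table is a small illustrative subset of the rules in `rate()`, not the full list.

```python
# Condensed illustration of the keyword-scoring scheme used by rate() above.
# The weights below mirror a subset of the file's rules; they are not complete.
import re

WEIGHTS = [
    (r"\.bluray\.", 7),            # disc sources score highest
    (r"\.dl\.", 2),                # dual-language releases are preferred
    (r"\.(720|1080|2160)p\.", 2),  # any HD/UHD resolution tag
    (r"\.hdtv\.", -1),             # TV captures are penalized
    (r"\.subbed\.", -2),
    (r"\.xvid\.", -2),
    (r"dvd9", -10),
]

def rate_title(title):
    """Sum the weight of every pattern found in the lowercased title."""
    t = title.lower()
    return sum(w for pattern, w in WEIGHTS if re.search(pattern, t))

releases = [
    "Some.Movie.2016.German.DL.1080p.BluRay.x264-GRP",
    "Some.Movie.2016.German.Subbed.HDTV.XviD-GRP",
]
# Descending sort by score reproduces the sorted(unrated, reverse=True) step in get().
for r in sorted(releases, key=rate_title, reverse=True):
    print(rate_title(r), r)
```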
{
"alpha_fraction": 0.6764968633651733,
"alphanum_fraction": 0.6976467370986938,
"avg_line_length": 47.65217208862305,
"blob_id": "b5ab127e1e631ef9524bd8af36778424c0f13504",
"content_id": "d6025be3bdd94f31cf830b545a74c58d451128c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3598,
"license_type": "permissive",
"max_line_length": 310,
"num_lines": 69,
"path": "/setup.sh",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# RSScrawler\n# Projekt von https://github.com/rix1337\n# Enthält Code von:\n# https://github.com/rix1337/RSScrawler/issues/88#issuecomment-251078409\n# https://github.com/rix1337/RSScrawler/issues/7#issuecomment-271187968\n\nVERSION=\"v.4.0.7\"\necho \"┌────────────────────────────────────────────────────────┐\"\necho \" Programminfo: RSScrawler $VERSION von RiX\"\necho \" Projektseite: https://github.com/rix1337/RSScrawler\"\necho \"└────────────────────────────────────────────────────────┘\"\necho \"Hinweise im Wiki: https://github.com/rix1337/RSScrawler/wiki\"\necho \"Bitte Plattform wählen:\"\nOPTIONS=\"Ubuntu/Debian Synology Update Beenden\"\nselect opt in $OPTIONS; do\n if [ \"$opt\" = \"Beenden\" ]; then\n exit\n elif [ \"$opt\" = \"Ubuntu/Debian\" ]; then\n apt-get update\n apt-get --yes --force-yes install git python2.7 python-setuptools python-dev nodejs libxml2-dev libxslt-dev\n easy_install pip\n pip install --upgrade pip virtualenv virtualenvwrapper\n clear\n read -rp \"Wohin soll RSScrawler installiert werden? Das Verzeichnis RSScrawler wird automatisch erstellt! Pfad ohne / am Ende: \" rsspath\n read -rp \"Wo ist der JDownloader installiert? Pfad ohne / am Ende: \" jdpath\n read -rp \"Auf welchem Port soll das Webinterface erreichbar sein? Port: \" rssport\n mkdir -p $rsspath/\n cd $rsspath/\n git clone https://github.com/rix1337/RSScrawler.git\n cd RSScrawler\n pip install -r requirements.txt\n git remote add rss https://github.com/rix1337/RSScrawler.git\n clear\n echo \"Der Webserver sollte nie ohne adequate Absicherung im Internet freigegeben werden. Dazu empfiehlt sich ein Reverse-Proxy bspw. über nginx mit Letsencrypt (automatisches, kostenloses HTTPs-Zertifikat), HTTPauth (Passwortschutz - Nur sicher über HTTPs!) und fail2ban (limitiert falsche Logins pro IP).\"\n python RSScrawler.py --port=$rssport --jd-pfad=\"$jdpath\" &\n exit\n elif [ \"$opt\" = \"Synology\" ]; then\n echo \"Es müssen Git, Python 2.7, JDownloader 2 und Java 8 installiert sein (optional auch node.js)!\"\n read -rsp $'Durch Tastendruck bestätigen...\\n' -n 1 key\n cd /volume1/@appstore/PythonModule/usr/lib/python2.7/site-packages/\n python easy_install.py pip\n pip install --upgrade pip virtualenv virtualenvwrapper\n cd /volume1/@appstore/\n git clone https://github.com/rix1337/RSScrawler.git\n cd RSScrawler\n chmod +x * /volume1/@appstore/RSScrawler\n pip install -r requirements.txt\n clear\n read -rp \"Wo ist der JDownloader installiert? Pfad ohne / am Ende: \" jdpath\n read -rp \"Auf welchem Port soll das Webinterface erreichbar sein? Port: \" rssport\n clear\n echo \"Der Webserver sollte nie ohne adequate Absicherung im Internet freigegeben werden. Dazu empfiehlt sich ein Reverse-Proxy bspw. über nginx mit Letsencrypt (automatisches, kostenloses HTTPs-Zertifikat), HTTPauth (Passwortschutz - Nur sicher über HTTPs!) und fail2ban (limitiert falsche Logins pro IP).\"\n python RSScrawler.py --port=$rssport --jd-pfad=\"$jdpath\" &\n exit\n elif [ \"$opt\" = \"Update\" ]; then\n read -rp \"Wo ist RSScrawler installiert? Pfad ohne / am Ende: \" rsspath\n cd $rsspath/\n pip install -U -r requirements.txt\n git fetch --all\n git reset --hard origin/master\n git pull origin master\n exit\n else\n clear\n echo \"Bitte eine vorhandene Option wählen\"\n exit\n fi\ndone\n"
},
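The "Update" branch of setup.sh boils down to four commands: refresh the Python dependencies, then fetch, hard-reset the working tree onto origin/master, and pull. Below is a hedged sketch of the same sequence driven from Python, for cases where the update needs scripting outside of bash; `RSS_PATH` is a placeholder, not a path taken from the repo.

```python
# Sketch of the update sequence from the "Update" branch of setup.sh,
# driven via subprocess instead of bash. RSS_PATH is a hypothetical path.
import subprocess

RSS_PATH = "/opt/RSScrawler"  # placeholder install directory

def update_rsscrawler(path):
    # Same order as the shell script: upgrade requirements first, then
    # discard local changes and sync onto origin/master.
    subprocess.check_call(["pip", "install", "-U", "-r", "requirements.txt"], cwd=path)
    subprocess.check_call(["git", "fetch", "--all"], cwd=path)
    subprocess.check_call(["git", "reset", "--hard", "origin/master"], cwd=path)
    subprocess.check_call(["git", "pull", "origin", "master"], cwd=path)

if __name__ == "__main__":
    update_rsscrawler(RSS_PATH)
```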
{
"alpha_fraction": 0.6074181199073792,
"alphanum_fraction": 0.615606963634491,
"avg_line_length": 34.7931022644043,
"blob_id": "3bfd07f849202725f80969427212f7795e564744",
"content_id": "bb9d1d85c11ce47eaa4a5c761994c469326a0ed4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2076,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 58,
"path": "/rssdb.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport sqlite3\nimport os\nimport sys\n\ndef get_first(iterable):\t\t\n return iterable and list(iterable[:1]).pop() or None\n\ndef merge_old():\n def connect(file):\n return sqlite3.connect(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Downloads/' + file + '.db'), check_same_thread=False)\n\n def read(connection):\n return connection.execute(\"SELECT key, value FROM 'data'\")\n\n conn_old1 = connect('MB_Downloads')\n conn_old2 = connect('SJ_Downloads')\n conn_old3 = connect('YT_Downloads')\n conn_new = connect('Downloads')\n\n if not conn_new.execute(\"SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'rsscrawler';\").fetchall():\n conn_new.execute(\"CREATE TABLE 'rsscrawler' (key, value)\")\n conn_new.commit()\n\n res_old = []\n res_old.append(read(conn_old1))\n res_old.append(read(conn_old2))\n res_old.append(read(conn_old3))\n\n for res in res_old:\n for key, value in res:\n conn_new.execute(\"INSERT INTO '%s' VALUES ('%s', '%s')\" %('rsscrawler', key, value.lower().replace('downloaded', 'added')))\n conn_new.commit()\n\n return True\n\nclass RssDb(object):\n def __init__(self, file):\n self._conn = sqlite3.connect(file, check_same_thread=False)\n self._table = 'rsscrawler'\n if not self._conn.execute(\"SELECT sql FROM sqlite_master WHERE type = 'table' AND name = '%s';\" % self._table).fetchall():\n self._conn.execute('''CREATE TABLE %s (key, value)''' % self._table)\n self._conn.commit()\n\n def retrieve(self, key):\n res = self._conn.execute(\"SELECT value FROM %s WHERE key='%s'\" %(self._table, key)).fetchone()\n return res[0] if res else None\n\n def store(self, key, value):\n self._conn.execute(\"INSERT INTO '%s' VALUES ('%s', '%s')\" %(self._table, key, value))\n self._conn.commit()\n\n def delete(self, key):\n self._conn.execute(\"DELETE FROM %s WHERE key='%s'\" %(self._table, key))\n self._conn.commit()\n"
},
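Worth noting: RssDb builds its SQL with `%` string interpolation, so a key containing a single quote would break the statement. The sketch below shows the same three operations with sqlite3 parameter binding instead; `SafeRssDb` is an illustrative name for this alternative, not code from the project.

```python
# Illustrative variant of RssDb's store/retrieve/delete using sqlite3
# parameter binding, so titles containing quotes cannot break the SQL.
import sqlite3

class SafeRssDb(object):
    def __init__(self, file):
        self._conn = sqlite3.connect(file, check_same_thread=False)
        self._conn.execute("CREATE TABLE IF NOT EXISTS rsscrawler (key, value)")
        self._conn.commit()

    def retrieve(self, key):
        res = self._conn.execute(
            "SELECT value FROM rsscrawler WHERE key=?", (key,)).fetchone()
        return res[0] if res else None

    def store(self, key, value):
        self._conn.execute("INSERT INTO rsscrawler VALUES (?, ?)", (key, value))
        self._conn.commit()

    def delete(self, key):
        self._conn.execute("DELETE FROM rsscrawler WHERE key=?", (key,))
        self._conn.commit()

# Quick self-check against an in-memory database.
db = SafeRssDb(":memory:")
db.store("Some.Movie.2016.German.DL.1080p", "added")
assert db.retrieve("Some.Movie.2016.German.DL.1080p") == "added"
```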
{
"alpha_fraction": 0.7253760099411011,
"alphanum_fraction": 0.7360504865646362,
"avg_line_length": 43.739131927490234,
"blob_id": "0527d4f998a6f35a7d07d03bb21880f01e69b4f1",
"content_id": "b7f16b809bca9945f3757d610ec626a5e8e56238",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2066,
"license_type": "permissive",
"max_line_length": 299,
"num_lines": 46,
"path": "/README.md",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# RSScrawler\n\nRSScrawler durchsucht vordefinierte Seiten nach Titeln und reicht Links an JDownloader weiter.\n\n[](https://gitter.im/RSScrawler/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n\n## Credits\n\nDie Suchfunktionen basieren auf pyLoad-Erweiterungen von:\n\n[zapp-brannigan](https://github.com/zapp-brannigan/)\n\n[Gutz-Pilz](https://github.com/Gutz-Pilz/pyLoad-stuff/blob/master/SJ.py)\n\n## Vorraussetzungen\n* [Python 2.7](https://www.python.org/downloads/)\n* [Java 8](http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html)\n* [JDownloader 2](http://www.jdownloader.org/jdownloader2)\n* [Optional, aber empfohlen: node.js](https://nodejs.org/en/)\n* [Zusatzpakete](https://github.com/rix1337/RSScrawler/blob/master/requirements.txt)\n\n## Installation\n\nHinweise zur Installation und Einrichtung finden sich im [Wiki](https://github.com/rix1337/RSScrawler/wiki)!\n\n## Sicherheitshinweis\n\nDer Webserver sollte nie ohne adequate Absicherung im Internet freigegeben werden. Dazu empfiehlt sich ein Reverse-Proxy bspw. über nginx mit Letsencrypt (automatisches, kostenloses HTTPs-Zertifikat), HTTPauth (Passwortschutz - Nur sicher über HTTPs!) und fail2ban (limitiert falsche Logins pro IP).\n\n## RSScrawler starten\n\n```python RSScrawler.py``` führt RSScrawler aus\n\n## Startparameter:\n\n ```--testlauf``` Einmalige Ausführung von RSScrawler\n \n ```--docker``` Sperre Pfad und Port auf Docker-Standardwerte (um falsche Einstellungen zu vermeiden)\n\n ```--port=<PORT>``` Legt den Port des Webservers fest\n \n ```--jd-pfad=\"<JDPFAD>\"``` Legt den Pfad von JDownloader fest um nicht die RSScrawler.ini direkt bearbeiten zu müssen\n\n ```--log-level=<LOGLEVEL>``` Legt fest, wie genau geloggt wird (CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET )\n \n ```--ersatzblogs``` Erweitert die Suche allgemeiner Blogs auf weitere Seiten (langsamer)\n \n"
},
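The flags documented above map onto the docopt usage string in RSScrawler.py further down. Below is a minimal sketch of what that parser hands back, using a usage string trimmed and lightly normalized from the real one: option values arrive as strings, unset flags as False or None.

```python
# Minimal docopt sketch for the documented flags. The usage string is a
# trimmed version of the one in RSScrawler.py, kept here for illustration.
from docopt import docopt

USAGE = """RSScrawler.

Usage:
  RSScrawler.py [--testlauf] [--docker] [--port=<PORT>] [--jd-pfad=<JDPFAD>] [--log-level=<LOGLEVEL>]
"""

args = docopt(USAGE, argv=["--port=9090", "--log-level=DEBUG"])
print(args["--port"])       # '9090'
print(args["--log-level"])  # 'DEBUG'
print(args["--testlauf"])   # False
print(args["--jd-pfad"])    # None
```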
{
"alpha_fraction": 0.7714776396751404,
"alphanum_fraction": 0.7714776396751404,
"avg_line_length": 31.33333396911621,
"blob_id": "b4ce10d669e6fe593c5b1f477adebb69330b285d",
"content_id": "3ef76d140bde1f6026ecd0b88f87a9970c7024c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 588,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 18,
"path": "/ISSUE_TEMPLATE.md",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# [RSScrawler] Fehlermeldung\n\nHier keine Kontaktaufnahme, dafür gibt es:\nhttps://gitter.im/RSScrawler/Lobby\n\n# Zwingend erforderlich\n\n- **\"Einstellungen\"-Ordner als Zip** (Paranoide können die Downloads.db weglassen)\n\n- **Fehlermeldung aus dem Log** (ggf. DEBUG aktivieren!)\n\n- **Genutzte Version** (kein Support für alte Releases)\n\n# Fehlerbeschreibung:\n\nHier ausführlich das aufgetretene Problem beschreiben. Muss reproduzierbar sein (\"Geht nicht\" reicht nicht). Nur Fehler des aktuellen Releases werden berücksichtigt.\n\n**Hier keine Links posten. Die Seiten heißen MB bzw. SJ!**\n"
},
{
"alpha_fraction": 0.471647173166275,
"alphanum_fraction": 0.477347731590271,
"avg_line_length": 36.03333282470703,
"blob_id": "a55ffccad6a424ba6fc8c257212848f715ccf009",
"content_id": "c0bc08a222af9b7cd82cf2b8c903de0cbe2ee449",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3333,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 90,
"path": "/rssconfig.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport ConfigParser\nimport logging\nimport os, sys\n\n\nclass RssConfig(object):\n _CONFIG_FILES = [os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/RSScrawler.ini')]\n _DEFAULT_CONFIG = {\n 'RSScrawler': [\n (\"jdownloader\", \"str\", \"\", \"\"),\n (\"port\", \"int\", \"\", \"9090\"),\n (\"prefix\", \"str\", \"\", \"\"),\n (\"interval\", \"int\", \"\", \"\"),\n (\"english\", \"bool\", \"\", \"\"),\n (\"hoster\", \"str\", \"\", \"\")\n ],\n 'MB': [\n (\"quality\", \"str\", \"\", \"\"),\n (\"ignore\",\"str\",\"\",\"\"),\n (\"historical\",\"bool\",\"\",\"\"),\n (\"regex\",\"bool\",\"\", \"\"),\n (\"cutoff\",\"bool\",\"\", \"\"),\n (\"crawl3d\",\"bool\",\"\",\"\"),\n (\"enforcedl\", \"bool\", \"\", \"\"),\n (\"crawlseasons\", \"bool\", \"\", \"\"),\n (\"seasonsquality\", \"str\", \"\", \"\"),\n (\"seasonpacks\", \"bool\", \"\", \"\"),\n (\"seasonssource\", \"str\", \"\", \"\"),\n (\"imdbyear\", \"str\", \"\", \"\"),\n (\"imdb\", \"str\", \"\", \"\")\n ],\n 'SJ': [\n (\"quality\", \"str\", \"\", \"\"),\n (\"rejectlist\", \"str\", \"\", \"\"),\n (\"regex\",\"bool\",\"\", \"\")\n ],\n 'YT': [\n (\"youtube\",\"bool\",\"\",\"\"),\n (\"maxvideos\",\"int\",\"\",\"\"),\n (\"ignore\",\"str\",\"\",\"\")\n ],\n 'Notifications': [\n (\"homeassistant\",\"str\",\"\",\"\"),\n (\"pushbullet\",\"str\",\"\",\"\"),\n (\"pushover\",\"str\",\"\",\"\")\n ],\n 'Crawljobs': [\n (\"autostart\",\"bool\",\"\",\"\"),\n (\"subdir\",\"bool\",\"\",\"\")\n ]\n }\n __config__ = []\n\n def __init__(self, section):\n self._section = section\n self._config = ConfigParser.RawConfigParser()\n try:\n self._config.read(self._CONFIG_FILES)\n self._config.has_section(self._section) or self._set_default_config(self._section)\n self.__config__ = self._read_config(self._section)\n except ConfigParser.DuplicateSectionError:\n logging.error('Doppelte Sektion in der Konfigurationsdatei.')\n raise\n except ConfigParser.Error:\n logging.error('Ein unbekannter Fehler in der Konfigurationsdatei ist aufgetreten.')\n raise\n\n def _set_default_config(self, section):\n self._config.add_section(section)\n for (key,key_type,comment,value) in self._DEFAULT_CONFIG[section]:\n self._config.set(section,key,value)\n with open(self._CONFIG_FILES[::-1].pop(), 'wb') as configfile:\n self._config.write(configfile)\n\n def _read_config(self, section):\n return [(key, '', '', self._config.get(section,key)) for key in self._config.options(section)]\n\n def _get_from_config(self, scope, key):\n res = [param[3] for param in scope if param[0] == key]\n if [param for param in self._DEFAULT_CONFIG[self._section] if param[0] == key and param[1] == 'bool']:\n return True if len(res) and res[0].strip('\\'\"').lower() == 'true' else False\n else:\n return res[0].strip('\\'\"') if len(res) > 0 else False\n\n def get(self, key):\n return self._get_from_config(self.__config__, key) or self._get_from_config(self._DEFAULT_CONFIG[self._section], key)\n"
},
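Reading the class above: every `get()` first consults the ini, then falls back to the `_DEFAULT_CONFIG` table, and keys declared as `bool` are coerced to True/False. A short usage sketch follows, assuming an `Einstellungen/` directory exists next to the running script (the class writes a missing section's defaults into `Einstellungen/RSScrawler.ini` on first use).

```python
# Usage sketch for RssConfig. Assumes an Einstellungen/ directory next to
# the script; a missing section is created with its defaults on first use.
from rssconfig import RssConfig

rss = RssConfig('RSScrawler')
print(rss.get('port'))     # '9090' via _DEFAULT_CONFIG when the ini has no value
print(rss.get('english'))  # bool-typed keys always come back as True or False

mb = RssConfig('MB')
print(mb.get('quality'))   # '' unless configured, so callers must expect a falsy string
```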
{
"alpha_fraction": 0.43959903717041016,
"alphanum_fraction": 0.4505622982978821,
"avg_line_length": 53.508113861083984,
"blob_id": "88dc30aca1428d4ec59a0b3b9b6401731cd0ba3c",
"content_id": "51d6d817e02bc8437b84f4c0fa08eb777f366924",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131264,
"license_type": "permissive",
"max_line_length": 520,
"num_lines": 2403,
"path": "/RSScrawler.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n# Enthält Code von:\n# https://github.com/zapp-brannigan/\n# https://github.com/Gutz-Pilz/pyLoad-stuff/blob/master/SJ.py\n# Beschreibung:\n# RSScrawler erstellt .crawljobs für den JDownloader.\n\n\"\"\"RSScrawler.\n\nUsage:\n RSScrawler.py [--testlauf]\n [--ersatzblogs]\n [--docker]\n [--port=<PORT>]\n [--jd-pfad=\"<JDPATH>\"]\n [--log-level=<LOGLEVEL>]\n\nOptions:\n --testlauf Einmalige Ausführung von RSScrawler\n --ersatzblogs Erweitert die Suche um weitere Blogs um Ausfälle zu überbrücken.\n --docker Sperre Pfad und Port auf Docker-Standardwerte (um falsche Einstellungen zu vermeiden)\n --port=<PORT> Legt den Port des Webservers fest\n --jd-pfad=\"<JDPFAD>\" Legt den Pfad von JDownloader fest um nicht die RSScrawler.ini direkt bearbeiten zu müssen\n --log-level=<LOGLEVEL> Legt fest, wie genau geloggt wird (CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET )\n\"\"\"\n\nimport version\nversion = version.getVersion()\n\nfrom docopt import docopt\nimport feedparser\nimport re\nimport urllib2\nimport codecs\nfrom bs4 import BeautifulSoup as bs\nimport cfscrape\nimport time\nimport sys\nimport signal\nimport socket\nimport logging\nimport os\nfrom multiprocessing import Process\n\nfrom rssconfig import RssConfig\nfrom rssdb import RssDb\nfrom notifiers import notify\nfrom url import getURL\nimport common\nimport files\nfrom web import start\n\n\ndef web_server(port, docker, jd):\n start(port, docker, jd)\n\ndef crawler(jdpath, rssc):\n global added_items\n added_items = []\n global jdownloaderpath\n jdownloaderpath = jdpath\n global rsscrawler\n rsscrawler = rssc\n log_debug = logging.debug\n\n search_pool = [\n YT(),\n SJ(filename='SJ_Serien', internal_name='SJ'),\n SJ(filename='SJ_Serien_Regex', internal_name='SJ'),\n SJ(filename='SJ_Staffeln_Regex', internal_name='SJ'),\n SJ(filename='MB_Staffeln', internal_name='MB'),\n MB(filename='MB_Regex'),\n MB(filename='MB_Filme'),\n MB(filename='MB_Staffeln'),\n MB(filename='MB_3D')\n ]\n erweiterter_pool = [\n HW(filename='MB_Regex'),\n HW(filename='MB_Filme'),\n HW(filename='MB_Staffeln'),\n HW(filename='MB_3D'),\n HA(filename='MB_Regex'),\n HA(filename='MB_Filme'),\n HA(filename='MB_Staffeln'),\n HA(filename='MB_3D')\n ]\n arguments = docopt(__doc__, version='RSScrawler')\n if not arguments['--testlauf']:\n while True:\n try:\n start_time = time.time()\n log_debug(\"--------Alle Suchfunktion gestartet.--------\")\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Alle Suchfunktion gestartet.\")\n for task in search_pool:\n task.periodical_task()\n log_debug(\"-----------Suchfunktion ausgeführt!-----------\")\n if arguments['--ersatzblogs']:\n for task in erweiterter_pool:\n task.periodical_task()\n log_debug(\"---------Ersatz-Suchfunktion ausgeführt!---------\")\n end_time = time.time()\n total_time = end_time - start_time\n total_unit = \" Sekunden\"\n if total_time > 60:\n total_time = total_time / 60\n total_unit = \" Minuten\"\n total_time = str(round(total_time, 1)) + total_unit\n notify(added_items)\n log_debug(\"-----Alle Suchfunktion ausgeführt (Dauer: \" + total_time + \")!-----\")\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Alle Suchfunktion ausgeführt (Dauer: \" + total_time + \")!\")\n added_items = []\n time.sleep(int(rsscrawler.get('interval')) * 60)\n log_debug(\"-------------Wartezeit verstrichen-------------\")\n except Exception as e:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Fehler im Suchlauf: \" + str(e))\n else:\n try:\n 
start_time = time.time()\n log_debug(\"--------Testlauf gestartet.--------\")\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Testlauf gestartet.\")\n for task in search_pool:\n task.periodical_task()\n log_debug(\"-----------Suchfunktion ausgeführt!-----------\")\n for task in erweiterter_pool:\n task.periodical_task()\n log_debug(\"---------Ersatz-Suchfunktion ausgeführt!---------\")\n end_time = time.time()\n total_time = end_time - start_time\n total_unit = \" Sekunden\"\n if total_time > 60:\n total_time = total_time / 60\n total_unit = \" Minuten\"\n total_time = str(round(total_time, 1)) + total_unit\n notify(added_items)\n log_debug(\"---Testlauf ausgeführt (inkl. Ersatz-Suchfunktionen, Dauer: \" + total_time + \")!---\")\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Testlauf ausgeführt (Dauer: \" + total_time + \")!\")\n except Exception as e:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" - Fehler im Suchlauf: \" + str(e))\n\nclass YT():\n _INTERNAL_NAME='YT'\n\n def __init__(self):\n self.config = RssConfig(self._INTERNAL_NAME)\n self.log_info = logging.info\n self.log_error = logging.error\n self.log_debug = logging.debug\n self.db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n self.youtube = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/YT_Channels.txt')\n self.dictWithNamesAndLinks = {}\n\n def readInput(self, file):\n if not os.path.isfile(file):\n open(file, \"a\").close()\n placeholder = open(file, 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n try:\n f = codecs.open(file, \"rb\")\n return f.read().splitlines()\n except:\n self.log_error(\"Liste nicht gefunden!\")\n\n def periodical_task(self):\n if not self.config.get('youtube'):\n self.log_debug(\"Suche für YouTube deaktiviert!\")\n return\n channels = []\n links = []\n videos = []\n download_link = \"\"\n self.allInfos = self.readInput(self.youtube)\n\n for xline in self.allInfos:\n if len(xline) > 0 and not xline.startswith(\"#\"):\n if xline.startswith(\"XXXXXXXXXX\") or self.config.get(\"youtube\") is False:\n self.log_debug(\"Liste enthält Platzhalter. Stoppe Suche für YouTube!\")\n return\n channels.append(xline)\n\n for channel in channels:\n if 'list=' in channel:\n id_cutter = channel.rfind('list=') + 5\n channel = channel[id_cutter:]\n url = 'https://www.youtube.com/playlist?list=' + channel\n response = getURL(url)\n else:\n url = 'https://www.youtube.com/user/' + channel + '/videos'\n urlc = 'https://www.youtube.com/channel/' + channel + '/videos'\n cnotfound = False\n try:\n response = getURL(url)\n except urllib2.HTTPError:\n try:\n response = getURL(urlc)\n except urllib2.HTTPError:\n cnotfound = True\n if cnotfound:\n self.log_debug(\"YouTube-Kanal: \" + channel + \" nicht gefunden!\")\n return\n\n links = re.findall(r'VideoRenderer\":{\"videoId\":\"(.*?)\",\".*?simpleText\":\"(.*?)\"}', response)\n \n maxvideos = int(self.config.get(\"maxvideos\"))\n if maxvideos < 1:\n self.log_debug(\"Anzahl zu suchender YouTube-Videos (\" + str(maxvideos) +\") zu gering. Suche stattdessen 1 Video!\")\n maxvideos = 1\n elif maxvideos > 50:\n self.log_debug(\"Anzahl zu suchender YouTube-Videos (\" + str(maxvideos) +\") zu hoch. 
Suche stattdessen maximal 50 Videos!\")\n maxvideos = 50\n\n for link in links[:maxvideos]:\n if len(link[0]) > 10:\n videos.append([link[0].encode('ascii', 'replace'), link[1], channel])\n\n for video in videos:\n channel = video[2]\n video_title = video[1].replace(\"&\", \"&\").replace(\">\", \">\").replace(\"<\", \"<\").replace('"', '\"').replace(\"'\", \"'\").replace(\"\\u0026\", \"&\")\n video = video[0]\n download_link = 'https://www.youtube.com/watch?v=' + video\n if download_link:\n if self.db.retrieve(video) == 'added':\n self.log_debug(\"[%s] - YouTube-Video ignoriert (bereits gefunden)\" % video)\n else:\n ignore = \"|\".join([\"%s\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n ignorevideo = re.search(ignore,video_title.lower())\n if ignorevideo:\n self.log_debug(video_title + \" (\" + channel + \") \" + \"[\" + video + \"] - YouTube-Video ignoriert (basierend auf ignore-Einstellung)\")\n continue\n common.write_crawljob_file(\n video,\n \"YouTube/\" + channel,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\t\t\n video,\t\t\n 'added'\n )\n log_entry = '[YouTube] - ' + video_title + ' (' + channel + ') - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + video + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\nclass SJ():\n def __init__(self, filename, internal_name):\n self._INTERNAL_NAME = internal_name\n self.config = RssConfig(self._INTERNAL_NAME)\n self.log_info = logging.info\n self.log_error = logging.error\n self.log_debug = logging.debug\n self.filename = filename\n self.db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n self.search_list = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/{}.txt'.format(self.filename))\n self.empty_list = False\n if self.filename == 'SJ_Staffeln_Regex':\n self.level = 3\n elif self.filename == 'MB_Staffeln':\n self.seasonssource = self.config.get('seasonssource').lower()\n self.level = 2\n elif self.filename == 'SJ_Serien_Regex':\n self.level = 1\n else:\n self.level = 0\n\n def periodical_task(self):\n if self.filename == \"MB_Staffeln\" or self.filename == \"SJ_Staffeln_Regex\":\n feed = feedparser.parse('aHR0cDovL3Nlcmllbmp1bmtpZXMub3JnL3htbC9mZWVkcy9zdGFmZmVsbi54bWw='.decode('base64'))\n else:\n feed = feedparser.parse('aHR0cDovL3Nlcmllbmp1bmtpZXMub3JnL3htbC9mZWVkcy9lcGlzb2Rlbi54bWw='.decode('base64'))\n \n self.pattern = \"|\".join(self.getSeriesList(self.search_list, self.level)).lower()\n\n if self.filename == 'SJ_Serien_Regex':\n if not self.config.get('regex'):\n self.log_debug(\"Suche für SJ-Regex deaktiviert!\")\n return\n elif self.filename == 'SJ_Staffeln_Regex':\n if not self.config.get('regex'):\n self.log_debug(\"Suche für SJ-Regex deaktiviert!\")\n return\n elif self.filename == 'MB_Staffeln':\n if not self.config.get('crawlseasons'):\n self.log_debug(\"Suche für SJ-Staffeln deaktiviert!\")\n return\n if self.empty_list:\n return\n try:\n reject = self.config.get(\"rejectlist\").replace(\",\", \"|\").lower() if len(\n self.config.get(\"rejectlist\")) > 0 else r\"^unmatchable$\"\n except TypeError:\n reject = r\"^unmatchable$\"\n self.quality = self.config.get(\"quality\")\n self.hoster = rsscrawler.get(\"hoster\")\n\n for post in 
feed.entries:\n if not post.link:\n continue\n\n link = post.link\n title = post.title\n\n if self.filename == 'SJ_Serien_Regex':\n if self.config.get(\"regex\"):\n if '[DEUTSCH]' in title:\n language_ok = 1\n elif rsscrawler.get('english'):\n language_ok = 2\n else:\n language_ok = 0\n if language_ok:\n m = re.search(self.pattern, title.lower())\n if not m and not \"720p\" in title and not \"1080p\" in title and not \"2160p\" in title:\n m = re.search(self.pattern.replace(\"480p\", \".\"), title.lower())\n self.quality = \"480p\"\n if m:\n if \"720p\" in title.lower(): self.quality = \"720p\"\n if \"1080p\" in title.lower(): self.quality = \"1080p\"\n if \"2160p\" in title.lower(): self.quality = \"2160p\"\n m = re.search(reject, title.lower())\n if m:\n self.log_debug(title + \" - Release durch Regex gefunden (trotz rejectlist-Einstellung)\")\n title = re.sub(r'\\[.*\\] ', '', post.title)\n self.range_checkr(link, title, language_ok)\n else:\n self.log_debug(\"%s - Englische Releases deaktiviert\" % title)\n\n else:\n continue\n elif self.filename == 'SJ_Staffeln_Regex':\n if self.config.get(\"regex\"):\n if '[DEUTSCH]' in title:\n language_ok = 1\n elif rsscrawler.get('english'):\n language_ok = 2\n else:\n language_ok = 0\n if language_ok:\n m = re.search(self.pattern, title.lower())\n if not m and not \"720p\" in title and not \"1080p\" in title and not \"2160p\" in title:\n m = re.search(self.pattern.replace(\"480p\", \".\"), title.lower())\n self.quality = \"480p\"\n if m:\n if \"720p\" in title.lower(): self.quality = \"720p\"\n if \"1080p\" in title.lower(): self.quality = \"1080p\"\n if \"2160p\" in title.lower(): self.quality = \"2160p\"\n m = re.search(reject, title.lower())\n if m:\n self.log_debug(title + \" - Release durch Regex gefunden (trotz rejectlist-Einstellung)\")\n title = re.sub(r'\\[.*\\] ', '', post.title)\n self.range_checkr(link, title, language_ok)\n else:\n self.log_debug(\"%s - Englische Releases deaktiviert\" % title)\n\n else:\n continue\n else:\n if self.config.get(\"quality\") != '480p':\n m = re.search(self.pattern, title.lower())\n if m:\n if '[DEUTSCH]' in title:\n language_ok = 1\n elif rsscrawler.get('english'):\n language_ok = 2\n else:\n language_ok = 0\n if language_ok:\n mm = re.search(self.quality, title.lower())\n if mm:\n mmm = re.search(reject, title.lower())\n if mmm:\n self.log_debug(\n title + \" - Release ignoriert (basierend auf rejectlist-Einstellung)\")\n continue\n title = re.sub(r'\\[.*\\] ', '', post.title)\n self.range_checkr(link, title, language_ok)\n else:\n self.log_debug(\"%s - Englische Releases deaktiviert\" % title)\n\n else:\n m = re.search(self.pattern, title.lower())\n if m:\n if '[DEUTSCH]' in title:\n language_ok = 1\n elif rsscrawler.get('english'):\n language_ok = 2\n else:\n language_ok = 0\n if language_ok:\n if \"720p\" in title.lower() or \"1080p\" in title.lower() or \"2160p\" in title.lower():\n continue\n mm = re.search(reject, title.lower())\n if mm:\n self.log_debug(title + \" Release ignoriert (basierend auf rejectlist-Einstellung)\")\n continue\n title = re.sub(r'\\[.*\\] ', '', post.title)\n self.range_checkr(link, title, language_ok)\n else:\n self.log_debug(\"%s - Englische Releases deaktiviert\" % title)\n\n def range_checkr(self, link, title, language_ok):\n englisch = False\n if language_ok == 2:\n englisch = True\n if self.filename == 'MB_Staffeln':\n season = re.search(r\"\\.s\\d\", title.lower())\n if not season:\n self.log_debug(title + \" - Release ist keine Staffel\")\n return\n if not 
self.config.get(\"seasonpacks\"):\n staffelpack = re.search(r\"s\\d.*(-|\\.).*s\\d\", title.lower())\n if staffelpack:\n self.log_debug(\"%s - Release ignoriert (Staffelpaket)\" % title)\n return\n pattern = re.match(r\".*S\\d{1,2}E\\d{1,2}-(?:S\\d{1,2}E|E)\\d{1,2}.*\", title)\n if pattern:\n range0 = re.sub(r\".*S\\d{1,2}E(\\d{1,2}-(?:S\\d{1,2}E|E)\\d{1,2}).*\", r\"\\1\", title)\n number1 = re.sub(r\"(\\d{1,2})-(?:S\\d{1,2}E|E)\\d{1,2}\", r\"\\1\", range0)\n number2 = re.sub(r\"\\d{1,2}-(?:S\\d{1,2}E|E)(\\d{1,2})\", r\"\\1\", range0)\n title_cut = re.findall(r\"(.*S\\d{1,2}E)(\\d{1,2}-(?:S\\d{1,2}E|E)\\d{1,2})(.*)\", title)\n check = title_cut[0][1]\n if \"E\" in check:\n check = re.sub(r\"(S\\d{1,2}E|E)\", \"\", check)\n title_cut = [(title_cut[0][0], check, title_cut[0][2])]\n try:\n for count in range(int(number1), (int(number2) + 1)):\n NR = re.match(r\"\\d{1,2}\", str(count))\n if NR:\n title1 = title_cut[0][0] + str(count) + \".*\" + title_cut[0][-1]\n self.range_parse(link, title1, englisch)\n else:\n title1 = title_cut[0][0] + \"0\" + str(count) + \".*\" + title_cut[0][-1]\n self.range_parse(link, title1, englisch)\n except ValueError as e:\n logging.error(\"Fehler in Variablenwert: %s\" % e.message)\n else:\n self.parse_download(link, title, englisch)\n\n def range_parse(self, series_url, search_title, englisch):\n req_page = getURL(series_url)\n soup = bs(req_page, 'lxml')\n try:\n titles = soup.findAll(text=re.compile(search_title))\n for title in titles:\n if self.quality != '480p' and self.quality in title:\n self.parse_download(series_url, title, englisch)\n if self.quality == '480p' and not (('.720p.' in title) or ('.1080p.' in title) or ('.2160p.' in title)):\n self.parse_download(series_url, title, englisch)\n except re.error as e:\n self.log_error('Konstantenfehler: %s' % e)\n\n def parse_download(self, series_url, search_title, englisch):\n req_page = getURL(series_url)\n soup = bs(req_page, 'lxml')\n escape_brackets = search_title.replace(\"(\", \".*\").replace(\")\", \".*\").replace(\"+\", \".*\")\n title = soup.find(text=re.compile(escape_brackets))\n if title:\n valid = False\n if self.filename == 'MB_Staffeln':\n valid = re.search(self.seasonssource, title.lower())\n else:\n valid = True\n if valid:\n url_hosters = re.findall(r'<a href=\"([^\"\\'>]*)\".+?\\| (.+?)<', str(title.parent.parent))\n for url_hoster in url_hosters:\n if self.hoster.lower() in url_hoster[1]:\n self.send_package(title, url_hoster[0], englisch)\n else:\n self.log_debug(title + \" - Release hat falsche Quelle\")\n\n def send_package(self, title, link, englisch_info):\n englisch = \"\"\n if englisch_info:\n englisch = \"/Englisch\"\n if self.filename == 'SJ_Serien_Regex':\n link_placeholder = '[Episode/RegEx' + englisch + '] - '\n elif self.filename == 'SJ_Serien':\n link_placeholder = '[Episode' + englisch + '] - '\n elif self.filename == 'SJ_Staffeln_Regex':\n link_placeholder = '[Staffel/RegEx' + englisch + '] - '\n else:\n link_placeholder = '[Staffel' + englisch + '] - '\n try:\n storage = self.db.retrieve(title)\n except Exception as e:\n self.log_debug(\"Fehler bei Datenbankzugriff: %s, Grund: %s\" % (e, title))\n if storage == 'added':\n self.log_debug(title + \" - Release ignoriert (bereits gefunden)\")\n else:\n common.write_crawljob_file(title, title, link, jdownloaderpath + \"/folderwatch\", \"RSScrawler\")\n self.db.store(title, 'added')\n log_entry = link_placeholder + title + ' - <a href=\"' + link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> 
<a href=\"#log\" ng-click=\"resetTitle('' + title + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\n def getSeriesList(self, file, type):\n loginfo = \"\"\n if type == 1:\n loginfo = \" (RegEx)\"\n elif type == 2:\n loginfo = \" (Staffeln)\"\n elif type == 3:\n loginfo = \" (Staffeln/RegEx)\"\n\n if not os.path.isfile(file):\n open(file, \"a\").close()\n placeholder = open(file, 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n try:\n titles = []\n f = codecs.open(file, \"rb\", \"utf-8\")\n for title in f.read().splitlines():\n if len(title) == 0:\n continue\n title = title.replace(\" \", \".\")\n titles.append(title)\n f.close()\n if titles[0] == \"XXXXXXXXXX\":\n self.log_debug(\"Liste enthält Platzhalter. Stoppe Suche für Serien!\" + loginfo)\n if type == 1:\n self.empty_list = True\n elif type == 2:\n self.empty_list = True\n else:\n self.empty_list = True\n return titles\n except UnicodeError:\n self.log_error(\"ANGEHALTEN, ungültiges Zeichen in Serien\" + loginfo + \"Liste!\")\n except IOError:\n self.log_error(\"ANGEHALTEN, Serien\" + loginfo + \"-Liste nicht gefunden!\")\n except Exception, e:\n self.log_error(\"Unbekannter Fehler: %s\" % e)\n\nclass MB():\n _INTERNAL_NAME = 'MB'\n FEED_URL = \"aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZy9mZWVkLw==\".decode('base64')\n SUBSTITUTE = r\"[&#\\s/]\"\n\n def __init__(self, filename):\n rsscrawler = RssConfig('RSScrawler')\n self.config = RssConfig(self._INTERNAL_NAME)\n self.log_info = logging.info\n self.log_error = logging.error\n self.log_debug = logging.debug\n self.filename = filename\n self.db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n self.search_list = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/{}.txt'.format(self.filename))\n self.hoster = rsscrawler.get(\"hoster\")\n self.dictWithNamesAndLinks = {}\n self.empty_list = False\n\n def readInput(self, file):\n if not os.path.isfile(file):\n open(file, \"a\").close()\n placeholder = open(file, 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n try:\n f = codecs.open(file, \"rb\")\n return f.read().splitlines()\n except:\n self.log_error(\"Liste nicht gefunden!\")\n\n def getPatterns(self,patterns, **kwargs):\n if patterns == [\"XXXXXXXXXX\"]:\n self.log_debug(\"Liste enthält Platzhalter. 
Stoppe Suche für Filme!\")\n self.empty_list = True\n if kwargs:\n return {line: (kwargs['quality'], kwargs['rg'], kwargs['sf']) for line in patterns}\n return {x: (x) for x in patterns}\n\n def searchLinks(self, feed):\n if self.empty_list:\n return\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n\n for key in self.allInfos:\n s = re.sub(self.SUBSTITUTE, \".\", \"^\" + key).lower()\n for post in feed.entries:\n found = re.search(s, post.title.lower())\n if found:\n found = re.search(ignore, post.title.lower())\n if found:\n self.log_debug(\"%s - Release ignoriert (basierend auf ignore-Einstellung)\" % post.title)\n continue\n ss = self.allInfos[key][0].lower()\n if self.filename == 'MB_Filme':\n if ss == \"480p\":\n if \"720p\" in post.title.lower() or \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower() or \"2160p\" in post.title.lower():\n continue\n found = True\n else:\n found = re.search(ss, post.title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n elif self.filename == 'MB_3D':\n if '.3d.' in post.title.lower():\n if self.config.get('crawl3d') and (\n \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower()):\n found = True\n else:\n continue\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n\n elif self.filename == 'MB_Staffeln':\n validsource = re.search(self.config.get(\"seasonssource\"), post.title.lower())\n if not validsource:\n self.log_debug(post.title + \" - Release hat falsche Quelle\")\n continue\n if not \".complete.\" in post.title.lower():\n self.log_debug(post.title + \" - Staffel noch nicht komplett\")\n continue\n season = re.search(r\"\\.s\\d\", post.title.lower())\n if not season:\n self.log_debug(post.title + \" - Release ist keine Staffel\")\n continue\n if not self.config.get(\"seasonpacks\"):\n staffelpack = re.search(r\"s\\d.*(-|\\.).*s\\d\", post.title.lower())\n if staffelpack:\n self.log_debug(\"%s - Release ignoriert (Staffelpaket)\" % post.title)\n continue\n ss = self.allInfos[key][0].lower()\n\n if ss == \"480p\":\n if \"720p\" in post.title.lower() or \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower() or \"2160p\" in post.title.lower():\n continue\n found = True\n else:\n found = re.search(ss, post.title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert 
(Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n else:\n yield (post.title, [post.link], key)\n\n def download_dl(self, title):\n search_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0].replace(\".\", \" \").replace(\" \", \"+\")\n search_url = \"aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZy9zZWFyY2gv\".decode('base64') + search_title + \"/feed/rss2/\"\n feedsearch_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0]\n if not '.dl.' in feedsearch_title.lower():\n self.log_debug(\"%s - Release ignoriert (nicht zweisprachig, da wahrscheinlich nicht Retail)\" %feedsearch_title)\n return False\n for (key, value, pattern) in self.dl_search(feedparser.parse(search_url), feedsearch_title):\n download_link = self._get_download_links(value[0])\n if download_link:\n if \"aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZy8yMDEw\".decode(\"base64\") in download_link:\n self.log_debug(\"Fake-Link erkannt!\")\n return False\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'dl':\n self.log_debug(\"%s - zweisprachiges Release ignoriert (bereits gefunden)\" % key)\n return True\n elif self.filename == 'MB_Filme':\n retail = False\n if self.config.get('cutoff'):\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + 'Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_3D':\n retail = False\n if self.config.get('cutoff'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' 
in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D/Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_Regex':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\t\t\n key,\t\t\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Staffel] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n\n def dl_search(self, feed, title):\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n\n s = re.sub(self.SUBSTITUTE, \".\", title).lower()\n for post in feed.entries:\n found = re.search(s, post.title.lower())\n if found:\n found = re.search(ignore, post.title.lower())\n if found:\n self.log_debug(\n \"%s - zweisprachiges Release ignoriert (basierend auf ignore-Einstellung)\" % post.title)\n continue\n yield (post.title, [post.link], title)\n\n def imdb_search(self, imdb):\n imdbchecked = re.findall(r'<title>(.*?)<\\/title>\\n.*?<link>(.*)<\\/link>(?:(?:.*?\\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\\/\\/(?:|www\\.)imdb\\.com\\/title\\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(\\d(?:\\.|\\,)\\d)(?:.|.*?)<\\/a>', getURL(self.FEED_URL))\n for item in imdbchecked:\n download_title = item[0]\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n found = re.search(ignore, download_title.lower())\n if found:\n self.log_debug(\"%s - Release ignoriert (basierend auf ignore-Einstellung)\" % download_title)\n continue\n season = re.search(r'\\.S(\\d{1,3})\\.', download_title)\n if season:\n self.log_debug(\"%s - Release ignoriert (IMDB sucht nur Filme)\" % download_title)\n continue\n\n year_in_title = re.findall(r\"\\.((?:19|20)\\d{2})\\.\", download_title)\n years_in_title = len(year_in_title)\n if years_in_title > 0:\n title_year = year_in_title[years_in_title - 1]\n else:\n title_year = \"\"\n\n download_page = self._get_download_links(item[1])\n\n if len(item[2]) > 0:\n download_imdb = \"http://www.imdb.com/title/\" + item[2]\n else:\n try:\n 
search_title = re.findall(r\"(.*?)(?:\\.(?:(?:19|20)\\d{2})|\\.German|\\.\\d{3,4}p|\\.S(?:\\d{1,3})\\.)\", download_title)[0].replace(\".\", \"+\").replace(\"ae\", \"ä\").replace(\"oe\", \"ö\").replace(\"ue\", \"ü\").replace(\"Ae\", \"Ä\").replace(\"Oe\", \"Ö\").replace(\"Ue\", \"Ü\")\n except:\n break\n search_url = \"http://www.imdb.com/find?q=\" + search_title\n search_page = getURL(search_url)\n search_results = re.findall(r'<td class=\"result_text\"> <a href=\"\\/title\\/(tt[0-9]{7,9})\\/\\?ref_=fn_al_tt_\\d\" >(.*?)<\\/a>.*? \\((\\d{4})\\)..(.{9})', search_page)\n no_series = False\n total_results = len(search_results)\n if total_results == 0:\n download_imdb = \"\"\n else:\n while total_results > 0:\n attempt = 0\n for result in search_results:\n if result[3] == \"TV Series\":\n no_series = False\n total_results -= 1\n attempt += 1\n else:\n no_series = True\n download_imdb = \"http://www.imdb.com/title/\" + search_results[attempt][0]\n title_year = search_results[attempt][2]\n total_results = 0\n break\n if no_series is False:\n self.log_debug(\"%s - Keine passende Film-IMDB-Seite gefunden\" % download_title)\n\n download_score = float(item[3].replace(\",\", \".\"))\n score = str(download_score)\n\n details = \"\"\n min_year = self.config.get(\"imdbyear\")\n if min_year:\n if len(title_year) > 0:\n if title_year < min_year:\n self.log_debug(\"%s - Release ignoriert (Film zu alt)\" % download_title)\n continue\n elif len(download_imdb) > 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Fehler bei Aufruf der IMDB-Seite\" % download_title)\n continue\n title_year = re.findall(r\"<title>(?:.*) \\(((?:19|20)\\d{2})\\) - IMDb<\\/title>\", details)\n if not title_year:\n self.log_debug(\"%s - Erscheinungsjahr nicht ermittelbar\" % download_title)\n continue\n else:\n title_year = title_year[0]\n if title_year < min_year:\n self.log_debug(\"%s - Release ignoriert (Film zu alt)\" % download_title)\n continue\n\n if len(download_imdb) > 0:\n if len(details) == 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Release ignoriert (Film zu alt)\" % download_title)\n continue\n vote_count = re.findall(r'ratingCount\">(.*?)<\\/span>', details)\n if not vote_count:\n self.log_debug(\"%s - Wertungsanzahl nicht ermittelbar\" % download_title)\n continue\n else:\n vote_count = vote_count[0].replace(\".\", \"\").replace(\",\", \"\")\n if int(vote_count) < 1500:\n self.log_debug(download_title + \" - Release ignoriert (Weniger als 1500 IMDB-Votes: \" + vote_count + \")\")\n continue\n\n if download_score > imdb:\n ss = self.config.get('quality')\n if '.3d.' 
not in download_title.lower():\n if ss == \"480p\":\n if \"720p\" in download_title.lower() or \"1080p\" in download_title.lower() or \"1080i\" in download_title.lower() or \"2160p\" in download_title.lower():\n continue\n found = True\n else:\n found = re.search(ss, download_title.lower())\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', download_title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % download_title)\n continue\n self.download_imdb(download_title, download_page, score, download_imdb, details)\n else:\n self.log_debug(\"%s - Release ignoriert (falsche Aufloesung)\" % download_title)\n else:\n if not self.config.get('crawl3d'):\n self.log_debug(\"%s - Release ignoriert (3D-Suche deaktiviert)\" % download_title)\n return\n if self.config.get('crawl3d') and (\"1080p\" in download_title.lower() or \"1080i\" in download_title.lower()):\n found = True\n else:\n continue\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', download_title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % download_title)\n continue\n self.download_imdb(download_title, download_page, score, download_imdb, details)\n\n def download_imdb(self, key, download_link, score, download_imdb, details):\n if download_link:\n if \"bW92aWUtYmxvZy5vcmcvMjAxMC8=\".decode(\"base64\") in download_link:\n self.log_debug(\"Fake-Link erkannt!\")\n return\n else:\n englisch = False\n if \"*englisch*\" in key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n if not rsscrawler.get('english'):\n self.log_debug(\"%s - Englische Releases deaktiviert\" % key)\n return\n if self.config.get('enforcedl') and '.dl.' not in key.lower():\n original_language = \"\"\n if len(details) > 0:\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n else:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n elif len(download_imdb) > 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n else:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n\n if original_language == \"German\":\n self.log_debug(\"%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!\" % key)\n else:\n if not self.download_dl(key) and not englisch:\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden!\" % key)\n return\n\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'notdl' or self.db.retrieve(key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\")) == 'added':\n self.log_debug(\"%s - Release ignoriert (bereits gefunden)\" % key)\n elif '.3d.' not in key.lower():\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[IMDB ' + score + '/Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n else:\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[IMDB ' + score + '/Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\n def _get_download_links(self, url):\n req_page = getURL(url)\n soup = bs(req_page, 'lxml')\n download = soup.find(\"div\", {\"id\": \"content\"})\n url_hosters = re.findall(r'href=\"([^\"\\'>]*)\".+?(.+?)<', str(download))\n for url_hoster in url_hosters:\n if not \"bW92aWUtYmxvZy5vcmcv\".decode(\"base64\") in url_hoster[0]:\n if self.hoster.lower() in url_hoster[1].lower():\n return url_hoster[0]\n\n def periodical_task(self):\n if self.filename == 'MB_Filme':\n try:\n imdb = float(self.config.get('imdb'))\n except:\n imdb = 0.0\n if imdb > 0:\n self.imdb_search(imdb)\n\n if self.empty_list:\n return\n urls = []\n if self.filename == 'MB_Staffeln':\n if not self.config.get('crawlseasons'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list),\n quality=self.config.get('seasonsquality'), rg='.*', sf=('.complete.')\n ).items()}.items()\n )\n )\n elif self.filename == 'MB_Regex':\n if not self.config.get('regex'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list)\n ).items()}.items()\n ) if self.config.get('regex') else []\n )\n else:\n if self.filename == 'MB_3D':\n if not self.config.get('crawl3d'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list), quality=self.config.get('quality'), rg='.*', sf=None\n ).items()}.items()\n )\n )\n if self.filename != 'MB_Regex':\n if self.config.get(\"historical\"):\n for xline in self.allInfos.keys():\n if len(xline) > 0 and not xline.startswith(\"#\"):\n xn = xline.split(\",\")[0].replace(\".\", \" \").replace(\" \", \"+\")\n urls.append('aHR0cDovL3d3dy5tb3ZpZS1ibG9nLm9yZw=='.decode('base64') + '/search/%s/feed/rss2/' % xn)\n else:\n urls.append(self.FEED_URL)\n else:\n urls.append(self.FEED_URL)\n for url in urls:\n for (key, value, pattern) in self.searchLinks(feedparser.parse(url)):\n download_link = 
self._get_download_links(value[0])\n if download_link:\n if \"bW92aWUtYmxvZy5vcmcvMjAxMC8=\".decode(\"base64\") in download_link:\n self.log_debug(\"Fake-Link erkannt!\")\n break\n englisch = False\n if \"*englisch*\" in key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n if not rsscrawler.get('english'):\n self.log_debug(\"%s - Englische Releases deaktiviert\" % key)\n return\n if self.config.get('enforcedl') and '.dl.' not in key.lower():\n original_language = \"\"\n fail = False\n get_imdb_url = getURL(url)\n key_regex = r'<title>' + re.escape(key) + r'.*?<\\/title>\\n.*?<link>(?:(?:.*?\\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\\/\\/(?:|www\\.)imdb\\.com\\/title\\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(?!\\d(?:\\.|\\,)\\d)(?:.|.*?)<\\/a>'\n imdb_id = re.findall(key_regex, get_imdb_url)\n if len(imdb_id) > 0:\n if not imdb_id[0]:\n fail = True\n else:\n imdb_id = imdb_id[0]\n else:\n fail = True\n if fail:\n search_title = re.findall(r\"(.*?)(?:\\.(?:(?:19|20)\\d{2})|\\.German|\\.\\d{3,4}p|\\.S(?:\\d{1,3})\\.)\", key)[0].replace(\".\", \"+\")\n search_url = \"http://www.imdb.com/find?q=\" + search_title\n search_page = getURL(search_url)\n search_results = re.findall(r'<td class=\"result_text\"> <a href=\"\\/title\\/(tt[0-9]{7,9})\\/\\?ref_=fn_al_tt_\\d\" >(.*?)<\\/a>.*? \\((\\d{4})\\)..(.{9})', search_page)\n total_results = len(search_results)\n if total_results == 0:\n download_imdb = \"\"\n elif self.filename == 'MB_Staffeln':\n imdb_id = search_results[0][0]\n else:\n no_series = False\n while total_results > 0:\n attempt = 0\n for result in search_results:\n if result[3] == \"TV Series\":\n no_series = False\n total_results -= 1\n attempt += 1\n else:\n no_series = True\n imdb_id = search_results[attempt][0]\n total_results = 0\n break\n if no_series is False:\n self.log_debug(\"%s - Keine passende Film-IMDB-Seite gefunden\" % key)\n if not imdb_id:\n if not self.download_dl(key):\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden.\" % key)\n else:\n if isinstance(imdb_id, list):\n imdb_id = imdb_id.pop()\n imdb_url = \"http://www.imdb.com/title/\" + imdb_id\n details = getURL(imdb_url)\n if not details:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n if original_language == \"German\":\n self.log_debug(\"%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!\" % key)\n else:\n if not self.download_dl(key) and not englisch:\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden! Breche ab.\" % key)\n break\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'notdl' or self.db.retrieve(key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\")) == 'added':\n self.log_debug(\"%s - Release ignoriert (bereits gefunden)\" % key)\n elif self.filename == 'MB_Filme':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_3D':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_Staffeln':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\"),\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Staffel] - ' + key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\") + ' - [<a href=\"' + download_link + '\" target=\"_blank\">Link</a>]'\n self.log_info(log_entry)\n added_items.append(log_entry)\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\t\t\n key,\t\t\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\nclass HW():\n _INTERNAL_NAME = 'MB'\n FEED_URL = \"aHR0cDovL3d3dy5oZC13b3JsZC5vcmcvZmVlZC8=\".decode('base64')\n SUBSTITUTE = r\"[&#\\s/]\"\n\n def __init__(self, filename):\n rsscrawler = RssConfig('RSScrawler')\n self.config = RssConfig(self._INTERNAL_NAME)\n self.log_info = logging.info\n self.log_error = logging.error\n self.log_debug = logging.debug\n self.filename = filename\n self.db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n self.search_list = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/{}.txt'.format(self.filename))\n self.hoster = rsscrawler.get(\"hoster\")\n self.dictWithNamesAndLinks = {}\n self.empty_list = False\n\n def readInput(self, file):\n if not os.path.isfile(file):\n open(file, \"a\").close()\n placeholder = open(file, 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n try:\n f = codecs.open(file, \"rb\")\n return f.read().splitlines()\n except:\n self.log_error(\"Liste nicht gefunden!\")\n\n def getPatterns(self,patterns, **kwargs):\n if patterns == [\"XXXXXXXXXX\"]:\n self.log_debug(\"Liste enthält Platzhalter. Stoppe Suche für Filme!\")\n self.empty_list = True\n if kwargs:\n return {line: (kwargs['quality'], kwargs['rg'], kwargs['sf']) for line in patterns}\n return {x: (x) for x in patterns}\n\n def searchLinks(self, feed):\n if self.empty_list:\n return\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n\n for key in self.allInfos:\n s = re.sub(self.SUBSTITUTE, \".\", \"^\" + key).lower()\n for post in feed.entries:\n found = re.search(s, post.title.lower())\n if found:\n found = re.search(ignore, post.title.lower())\n if found:\n self.log_debug(\"%s - Release ignoriert (basierend auf ignore-Einstellung)\" % post.title)\n continue\n ss = self.allInfos[key][0].lower()\n if self.filename == 'MB_Filme':\n if ss == \"480p\":\n if \"720p\" in post.title.lower() or \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower() or \"2160p\" in post.title.lower():\n continue\n found = True\n else:\n found = re.search(ss, post.title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n elif self.filename == 'MB_3D':\n if '.3d.' 
in post.title.lower():\n if self.config.get('crawl3d') and (\n \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower()):\n found = True\n else:\n continue\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n\n elif self.filename == 'MB_Staffeln':\n validsource = re.search(self.config.get(\"seasonssource\"), post.title.lower())\n if not validsource:\n self.log_debug(post.title + \" - Release hat falsche Quelle\")\n continue\n if not \".complete.\" in post.title.lower():\n self.log_debug(post.title + \" - Staffel noch nicht komplett\")\n continue\n season = re.search(r\"\\.s\\d\", post.title.lower())\n if not season:\n self.log_debug(post.title + \" - Release ist keine Staffel\")\n continue\n if not self.config.get(\"seasonpacks\"):\n staffelpack = re.search(r\"s\\d.*(-|\\.).*s\\d\", post.title.lower())\n if staffelpack:\n self.log_debug(\"%s - Release ignoriert (Staffelpaket)\" % post.title)\n continue\n ss = self.allInfos[key][0].lower()\n\n if ss == \"480p\":\n if \"720p\" in post.title.lower() or \"1080p\" in post.title.lower() or \"1080i\" in post.title.lower() or \"2160p\" in post.title.lower():\n continue\n found = True\n else:\n found = re.search(ss, post.title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, post.title.lower())\n\n if self.allInfos[key][2]:\n found = all([word in post.title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', post.title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % post.title)\n continue\n yield (post.title, [post.link], key)\n else:\n yield (post.title, [post.link], key)\n\n def download_dl(self, title):\n search_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0].replace(\".\", \" \").replace(\" \", \"+\")\n search_url = \"aHR0cDovL2hkLXdvcmxkLm9yZy9zZWFyY2gv\".decode('base64') + search_title + \"/feed/rss2/\"\n feedsearch_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0]\n if not '.dl.' 
in feedsearch_title.lower():\n self.log_debug(\"%s - Release ignoriert (nicht zweisprachig, da wahrscheinlich nicht Retail)\" %feedsearch_title)\n return False\n for (key, value, pattern) in self.dl_search(feedparser.parse(search_url), feedsearch_title):\n download_link = self._get_download_links(value[0])\n if download_link:\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'dl':\n self.log_debug(\"%s - zweisprachiges Release ignoriert (bereits gefunden)\" % key)\n return True\n elif self.filename == 'MB_Filme':\n retail = False\n if self.config.get('cutoff'):\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + 'Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_3D':\n retail = False\n if self.config.get('cutoff'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D/Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_Regex':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\t\t\n key,\t\t\n 'dl' if self.config.get('enforcedl') and '.dl.' 
in key.lower() else 'added'\n )\n log_entry = '[Staffel] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n\n def dl_search(self, feed, title):\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n\n s = re.sub(self.SUBSTITUTE, \".\", title).lower()\n for post in feed.entries:\n found = re.search(s, post.title.lower())\n if found:\n found = re.search(ignore, post.title.lower())\n if found:\n self.log_debug(\n \"%s - zweisprachiges Release ignoriert (basierend auf ignore-Einstellung)\" % post.title)\n continue\n yield (post.title, [post.link], title)\n\n def imdb_search(self, imdb):\n imdbchecked = re.findall(r'<title>(.*?)<\\/title>\\n.*?<link>(.*)<\\/link>(?:(?:.*?\\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\\/\\/(?:|www\\.)imdb\\.com\\/title\\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(\\d(?:\\.|\\,)\\d)(?:.|.*?)<\\/a>', getURL(self.FEED_URL))\n for item in imdbchecked:\n download_title = item[0]\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n found = re.search(ignore, download_title.lower())\n if found:\n self.log_debug(\"%s - Release ignoriert (basierend auf ignore-Einstellung)\" % download_title)\n continue\n season = re.search(r'\\.S(\\d{1,3})\\.', download_title)\n if season:\n self.log_debug(\"%s - Release ignoriert (IMDB sucht nur Filme)\" % download_title)\n continue\n\n year_in_title = re.findall(r\"\\.((?:19|20)\\d{2})\\.\", download_title)\n years_in_title = len(year_in_title)\n if years_in_title > 0:\n title_year = year_in_title[years_in_title - 1]\n else:\n title_year = \"\"\n\n download_page = self._get_download_links(item[1])\n\n if len(item[2]) > 0:\n download_imdb = \"http://www.imdb.com/title/\" + item[2]\n else:\n try:\n search_title = re.findall(r\"(.*?)(?:\\.(?:(?:19|20)\\d{2})|\\.German|\\.\\d{3,4}p|\\.S(?:\\d{1,3})\\.)\", download_title)[0].replace(\".\", \"+\").replace(\"ae\", \"ä\").replace(\"oe\", \"ö\").replace(\"ue\", \"ü\").replace(\"Ae\", \"Ä\").replace(\"Oe\", \"Ö\").replace(\"Ue\", \"Ü\")\n except:\n break\n search_url = \"http://www.imdb.com/find?q=\" + search_title\n search_page = getURL(search_url)\n search_results = re.findall(r'<td class=\"result_text\"> <a href=\"\\/title\\/(tt[0-9]{7,9})\\/\\?ref_=fn_al_tt_\\d\" >(.*?)<\\/a>.*? 
\\((\\d{4})\\)..(.{9})', search_page)\n no_series = False\n total_results = len(search_results)\n if total_results == 0:\n download_imdb = \"\"\n else:\n while total_results > 0:\n attempt = 0\n for result in search_results:\n if result[3] == \"TV Series\":\n no_series = False\n total_results -= 1\n attempt += 1\n else:\n no_series = True\n download_imdb = \"http://www.imdb.com/title/\" + search_results[attempt][0]\n title_year = search_results[attempt][2]\n total_results = 0\n break\n if no_series is False:\n self.log_debug(\"%s - Keine passende Film-IMDB-Seite gefunden\" % download_title)\n\n download_score = float(item[3].replace(\",\", \".\"))\n score = str(download_score)\n\n details = \"\"\n min_year = self.config.get(\"imdbyear\")\n if min_year:\n if len(title_year) > 0:\n if title_year < min_year:\n self.log_debug(\"%s - Release ignoriert (Film zu alt)\" % download_title)\n continue\n elif len(download_imdb) > 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Fehler bei Aufruf der IMDB-Seite\" % download_title)\n continue\n title_year = re.findall(r\"<title>(?:.*) \\(((?:19|20)\\d{2})\\) - IMDb<\\/title>\", details)\n if not title_year:\n self.log_debug(\"%s - Erscheinungsjahr nicht ermittelbar\" % download_title)\n continue\n else:\n title_year = title_year[0]\n if title_year < min_year:\n self.log_debug(\"%s - Release ignoriert (Film zu alt)\" % download_title)\n continue\n\n if len(download_imdb) > 0:\n if len(details) == 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Fehler bei Aufruf der IMDB-Seite\" % download_title)\n continue\n vote_count = re.findall(r'ratingCount\">(.*?)<\\/span>', details)\n if not vote_count:\n self.log_debug(\"%s - Wertungsanzahl nicht ermittelbar\" % download_title)\n continue\n else:\n vote_count = vote_count[0].replace(\".\", \"\").replace(\",\", \"\")\n if int(vote_count) < 1500:\n self.log_debug(download_title + \" - Release ignoriert (Weniger als 1500 IMDB-Votes: \" + vote_count + \")\")\n continue\n\n if download_score > imdb:\n ss = self.config.get('quality')\n if '.3d.' 
not in download_title.lower():\n if ss == \"480p\":\n if \"720p\" in download_title.lower() or \"1080p\" in download_title.lower() or \"1080i\" in download_title.lower() or \"2160p\" in download_title.lower():\n continue\n found = True\n else:\n found = re.search(ss, download_title.lower())\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', download_title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % download_title)\n continue\n self.download_imdb(download_title, download_page, score, download_imdb, details)\n else:\n self.log_debug(\"%s - Release ignoriert (falsche Aufloesung)\" % download_title)\n else:\n if not self.config.get('crawl3d'):\n self.log_debug(\"%s - Release ignoriert (3D-Suche deaktiviert)\" % download_title)\n return\n if self.config.get('crawl3d') and (\"1080p\" in download_title.lower() or \"1080i\" in download_title.lower()):\n found = True\n else:\n continue\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', download_title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % download_title)\n continue\n self.download_imdb(download_title, download_page, score, download_imdb, details)\n\n def download_imdb(self, key, download_link, score, download_imdb, details):\n if download_link:\n if \"bW92aWUtYmxvZy5vcmcvMjAxMC8=\".decode(\"base64\") in download_link:\n self.log_debug(\"Fake-Link erkannt!\")\n return\n else:\n englisch = False\n if \"*englisch*\" in key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n if not rsscrawler.get('english'):\n self.log_debug(\"%s - Englische Releases deaktiviert\" % key)\n return\n if self.config.get('enforcedl') and '.dl.' not in key.lower():\n original_language = \"\"\n if len(details) > 0:\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n else:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n elif len(download_imdb) > 0:\n details = getURL(download_imdb)\n if not details:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n else:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n\n if original_language == \"German\":\n self.log_debug(\"%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!\" % key)\n else:\n if not self.download_dl(key) and not englisch:\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden!\" % key)\n return\n\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'notdl' or self.db.retrieve(key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\")) == 'added':\n self.log_debug(\"%s - Release ignoriert (bereits gefunden)\" % key)\n elif '.3d.' not in key.lower():\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[IMDB ' + score + '/Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n else:\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[IMDB ' + score + '/Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\n def _get_download_links(self, url):\n req_page = getURL(url)\n soup = bs(req_page, 'lxml')\n download = soup.find(\"div\", {\"id\": \"content\"})\n url_hosters = re.findall(r'href=\"([^\"\\'>]*)\".+?(.+?)<', str(download))\n for url_hoster in url_hosters:\n if self.hoster.lower() in url_hoster[1].lower():\n return url_hoster[0]\n\n def periodical_task(self):\n if self.filename == 'MB_Filme':\n try:\n imdb = float(self.config.get('imdb'))\n except:\n imdb = 0.0\n if imdb > 0:\n self.imdb_search(imdb)\n\n if self.empty_list:\n return\n urls = []\n if self.filename == 'MB_Staffeln':\n if not self.config.get('crawlseasons'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list),\n quality=self.config.get('seasonsquality'), rg='.*', sf=('.complete.')\n ).items()}.items()\n )\n )\n elif self.filename == 'MB_Regex':\n if not self.config.get('regex'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list)\n ).items()}.items()\n ) if self.config.get('regex') else []\n )\n else:\n if self.filename == 'MB_3D':\n if not self.config.get('crawl3d'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list), quality=self.config.get('quality'), rg='.*', sf=None\n ).items()}.items()\n )\n )\n if self.filename != 'MB_Regex':\n if self.config.get(\"historical\"):\n for xline in self.allInfos.keys():\n if len(xline) > 0 and not xline.startswith(\"#\"):\n xn = xline.split(\",\")[0].replace(\".\", \" \").replace(\" \", \"+\")\n urls.append('aHR0cDovL2hkLXdvcmxkLm9yZw=='.decode('base64') + '/search/%s/feed/rss2/' % xn)\n else:\n urls.append(self.FEED_URL)\n else:\n urls.append(self.FEED_URL)\n for url in urls:\n for (key, value, pattern) in self.searchLinks(feedparser.parse(url)):\n download_link = self._get_download_links(value[0])\n if download_link:\n englisch = False\n if \"*englisch*\" in 
key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n if not rsscrawler.get('english'):\n self.log_debug(\"%s - Englische Releases deaktiviert\" % key)\n return\n if self.config.get('enforcedl') and '.dl.' not in key.lower():\n original_language = \"\"\n fail = False\n get_imdb_url = getURL(url)\n key_regex = r'<title>' + re.escape(key) + r'.*?<\\/title>\\n.*?<link>(?:(?:.*?\\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\\/\\/(?:|www\\.)imdb\\.com\\/title\\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(?!\\d(?:\\.|\\,)\\d)(?:.|.*?)<\\/a>'\n imdb_id = re.findall(key_regex, get_imdb_url)\n if len(imdb_id) > 0:\n if not imdb_id[0]:\n fail = True\n else:\n imdb_id = imdb_id[0]\n else:\n fail = True\n if fail:\n search_title = re.findall(r\"(.*?)(?:\\.(?:(?:19|20)\\d{2})|\\.German|\\.\\d{3,4}p|\\.S(?:\\d{1,3})\\.)\", key)[0].replace(\".\", \"+\")\n search_url = \"http://www.imdb.com/find?q=\" + search_title\n search_page = getURL(search_url)\n search_results = re.findall(r'<td class=\"result_text\"> <a href=\"\\/title\\/(tt[0-9]{7,9})\\/\\?ref_=fn_al_tt_\\d\" >(.*?)<\\/a>.*? \\((\\d{4})\\)..(.{9})', search_page)\n total_results = len(search_results)\n if total_results == 0:\n download_imdb = \"\"\n elif self.filename == 'MB_Staffeln':\n imdb_id = search_results[0][0]\n else:\n no_series = False\n while total_results > 0:\n attempt = 0\n for result in search_results:\n if result[3] == \"TV Series\":\n no_series = False\n total_results -= 1\n attempt += 1\n else:\n no_series = True\n imdb_id = search_results[attempt][0]\n total_results = 0\n break\n if no_series is False:\n self.log_debug(\"%s - Keine passende Film-IMDB-Seite gefunden\" % key)\n if not imdb_id:\n if not self.download_dl(key):\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden.\" % key)\n else:\n if isinstance(imdb_id, list):\n imdb_id = imdb_id.pop()\n imdb_url = \"http://www.imdb.com/title/\" + imdb_id\n details = getURL(imdb_url)\n if not details:\n self.log_debug(\"%s - Originalsprache nicht ermittelbar\" % key)\n original_language = re.findall(r\"Language:<\\/h4>\\n.*?\\n.*?url'>(.*?)<\\/a>\", details)\n if original_language:\n original_language = original_language[0]\n if original_language == \"German\":\n self.log_debug(\"%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!\" % key)\n else:\n if not self.download_dl(key) and not englisch:\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden! Breche ab.\" % key)\n break\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'notdl' or self.db.retrieve(key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\")) == 'added':\n self.log_debug(\"%s - Release ignoriert (bereits gefunden)\" % key)\n elif self.filename == 'MB_Filme':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_3D':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_Staffeln':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\"),\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Staffel] - ' + key.replace(\".COMPLETE.\", \".\") + ' - [<a href=\"' + download_link + '\" target=\"_blank\">Link</a>]'\n self.log_info(log_entry)\n added_items.append(log_entry)\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n \nclass HA():\n _INTERNAL_NAME = 'MB'\n FEED_URL = \"aHR0cDovL3d3dy5oZC1hcmVhLm9yZy9pbmRleC5waHA=\".decode('base64')\n SUBSTITUTE = r\"[&#\\s/]\"\n\n def __init__(self, filename):\n rsscrawler = RssConfig('RSScrawler')\n self.config = RssConfig(self._INTERNAL_NAME)\n self.log_info = logging.info\n self.log_error = logging.error\n self.log_debug = logging.debug\n self.filename = filename\n self.db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n self.search_list = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/{}.txt'.format(self.filename))\n self._hosters_pattern = rsscrawler.get('hoster').replace(',', '|')\n self.dictWithNamesAndLinks = {}\n self.empty_list = False\n\n def readInput(self, file):\n if not os.path.isfile(file):\n open(file, \"a\").close()\n placeholder = open(file, 'w')\n placeholder.write('XXXXXXXXXX')\n placeholder.close()\n try:\n f = codecs.open(file, \"rb\")\n return f.read().splitlines()\n except:\n self.log_error(\"Liste nicht gefunden!\")\n\n def getPatterns(self,patterns, **kwargs):\n if patterns == [\"XXXXXXXXXX\"]:\n self.log_debug(\"Liste enthält Platzhalter. Stoppe Suche für Filme!\")\n self.empty_list = True\n if kwargs:\n return {line: (kwargs['quality'], kwargs['rg'], kwargs['sf']) for line in patterns}\n return {x: (x) for x in patterns}\n\n def searchLinks(self, feed):\n if self.empty_list:\n return\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n\n for key in self.allInfos:\n if not key.replace(\" \", \"+\") in feed and not self.filename == 'MB_Regex':\n continue\n req_page = getURL(feed)\n if not req_page:\n self.log_debug(\"Ungueltiger Link bei Seitenaufruf\")\n continue\n soup = bs(req_page, 'lxml')\n content = soup.find(\"div\", {\"id\" : \"content\"})\n if \"index.php\" in feed.lower():\n titles = content.findAll(\"div\", {\"id\" : \"title\"})\n else:\n titles = content.findAll(\"a\")\n for title in titles:\n try:\n hda = re.findall(r'href=\"(.*?)\" title=\"(.*?)\">', str(title))[0]\n except:\n self.log_debug(\"Ungueltiger Link bei Suche nach Titel\")\n url = hda[0]\n title = hda[1]\n s = re.sub(self.SUBSTITUTE, \".\", \"^\" + key).lower()\n found = re.search(s, title.lower())\n if found:\n found = re.search(ignore, title.lower())\n if found:\n self.log_debug(\"%s - Release ignoriert (basierend auf ignore-Einstellung)\" % title)\n continue\n ss = self.allInfos[key][0].lower()\n if self.filename == 'MB_Filme':\n if ss == \"480p\":\n if \"720p\" in title.lower() or \"1080p\" in title.lower() or \"1080i\" in title.lower():\n continue\n found = True\n else:\n found = re.search(ss, title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, title.lower())\n if self.allInfos[key][2]:\n found = all([word in title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % title)\n 
continue\n link = self._get_download_links(url)\n yield (title, link, key)\n elif self.filename == 'MB_3D':\n if '.3d.' in title.lower():\n if self.config.get('crawl3d') and (\n \"1080p\" in title.lower() or \"1080i\" in title.lower()):\n found = True\n else:\n continue\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, title.lower())\n if self.allInfos[key][2]:\n found = all([word in title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % title)\n continue\n link = self._get_download_links(url)\n yield (title, link, key)\n\n elif self.filename == 'MB_Staffeln':\n validsource = re.search(self.config.get(\"seasonssource\"), title.lower())\n if not validsource:\n self.log_debug(title + \" - Release hat falsche Quelle\")\n continue\n if not \".complete.\" in title.lower():\n self.log_debug(title + \" - Staffel noch nicht komplett\")\n continue\n season = re.search(r\"\\.s\\d\", title.lower())\n if not season:\n self.log_debug(title + \" - Release ist keine Staffel\")\n continue\n if not self.config.get(\"seasonpacks\"):\n staffelpack = re.search(r\"s\\d.*(-|\\.).*s\\d\", title.lower())\n if staffelpack:\n self.log_debug(\"%s - Release ignoriert (Staffelpaket)\" % title)\n continue\n ss = self.allInfos[key][0].lower()\n\n if ss == \"480p\":\n if \"720p\" in title.lower() or \"1080p\" in title.lower() or \"1080i\" in title.lower():\n continue\n found = True\n else:\n found = re.search(ss, title.lower())\n if found:\n sss = r\"[\\.-]+\" + self.allInfos[key][1].lower()\n found = re.search(sss, title.lower())\n\n if self.allInfos[key][2]:\n found = all([word in title.lower() for word in self.allInfos[key][2]])\n if found:\n episode = re.search(r'([\\w\\.\\s]*s\\d{1,2}e\\d{1,2})[\\w\\.\\s]*', title.lower())\n if episode:\n self.log_debug(\"%s - Release ignoriert (Serienepisode)\" % title)\n continue\n link = self._get_download_links(url)\n yield (title, link, key)\n else:\n link = self._get_download_links(url)\n yield (title, link, key)\n\n def download_dl(self, title):\n search_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0].replace(\".\", \" \").replace(\" \", \"+\")\n search_url = \"aHR0cDovL3d3dy5oZC1hcmVhLm9yZy8/cz1zZWFyY2gmcT0=\".decode('base64') + search_title\n feedsearch_title = title.replace(\".German.720p.\", \".German.DL.1080p.\").replace(\".German.DTS.720p.\", \".German.DTS.DL.1080p.\").replace(\".German.AC3.720p.\", \".German.AC3.DL.1080p.\").replace(\".German.AC3LD.720p.\", \".German.AC3LD.DL.1080p.\").replace(\".German.AC3.Dubbed.720p.\", \".German.AC3.Dubbed.DL.1080p.\").split('.x264-', 1)[0].split('.h264-', 1)[0]\n if not '.dl.' 
in feedsearch_title.lower():\n self.log_debug(\"%s - Release ignoriert (nicht zweisprachig, da wahrscheinlich nicht Retail)\" %feedsearch_title)\n return False\n for (key, download_link, pattern) in self.dl_search(search_url, feedsearch_title):\n if download_link:\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'dl':\n self.log_debug(\"%s - zweisprachiges Release ignoriert (bereits gefunden)\" % key)\n return True\n elif self.filename == 'MB_Filme':\n retail = False\n if self.config.get('cutoff'):\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + 'Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_3D':\n retail = False\n if self.config.get('cutoff'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/3Dcrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D/Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n elif self.filename == 'MB_Regex':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'dl' if self.config.get('enforcedl') and '.dl.' in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler/Remux\"\n )\n self.db.store(\t\t\n key,\t\t\n 'dl' if self.config.get('enforcedl') and '.dl.' 
in key.lower() else 'added'\n )\n log_entry = '[Staffel] - <b>Zweisprachig</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n return True\n\n def dl_search(self, feed, title):\n ignore = \"|\".join(\n [r\"\\.%s(\\.|-)\" % p for p in self.config.get(\"ignore\").lower().split(',')]) if self.config.get(\"ignore\") else r\"^unmatchable$\"\n req_page = getURL(feed)\n soup = bs(req_page, 'lxml')\n content = soup.find(\"div\", {\"id\" : \"content\"})\n try:\n found = content.findAll(\"a\")[0]\n except:\n return\n hda = re.findall(r'href=\"(.*?)\" title=\"(.*?)\">', str(found))[0]\n url = hda[0]\n title = hda[1]\n link = getURL(url)\n dl_soup = bs(link, 'lxml')\n dl_links = re.findall(r'href=\"(http:\\/\\/filecrypt.cc.*?|https:\\/\\/www.filecrypt.cc.*?)\" target=\"_blank\">(.*?)<\\/a>', str(dl_soup))\n for link in dl_links:\n url = link[0]\n if self._hosters_pattern.lower().replace(\" \", \"-\") in link[1].lower().replace(\" \", \"-\"):\n s = re.sub(self.SUBSTITUTE, \".\", title).lower()\n found = re.search(s, title.lower())\n if found:\n found = re.search(ignore, title.lower())\n if found:\n self.log_debug(\n \"%s - zweisprachiges Release ignoriert (basierend auf ignore-Einstellung)\" % title)\n continue\n yield (title, url, title)\n\n def _get_download_links(self, url):\n link = getURL(url)\n dl_soup = bs(link, 'lxml')\n dl_links = re.findall(r'inline.*?display:inline;\"><a href=\"(.*?)\" target=\"_blank\">(.*?)<\\/a>', str(dl_soup))\n for link in dl_links:\n url = link[0]\n if self._hosters_pattern.lower().replace(\" \", \"-\") in link[1].lower().replace(\" \", \"-\"):\n return url\n\n def periodical_task(self):\n urls = []\n if self.filename == 'MB_Staffeln':\n if not self.config.get('crawlseasons'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list),\n quality=self.config.get('seasonsquality'), rg='.*', sf=('.complete.')\n ).items()}.items()\n )\n )\n elif self.filename == 'MB_Regex':\n if not self.config.get('regex'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list)\n ).items()}.items()\n ) if self.config.get('regex') else []\n )\n else:\n if self.filename == 'MB_3D':\n if not self.config.get('crawl3d'):\n return\n self.allInfos = dict(\n set({key: value for (key, value) in self.getPatterns(\n self.readInput(self.search_list), quality=self.config.get('quality'), rg='.*', sf=None\n ).items()}.items()\n )\n )\n if self.filename != 'MB_Regex':\n if self.config.get(\"historical\"):\n for xline in self.allInfos.keys():\n if len(xline) > 0 and not xline.startswith(\"#\"):\n title = xline.split(\",\")[0].replace(\" \", \".\")\n search_title = title.replace(\".\", \" \").replace(\" \", \"+\")\n urls.append(\"aHR0cDovL3d3dy5oZC1hcmVhLm9yZy8/cz1zZWFyY2gmcT0=\".decode('base64') + search_title)\n else:\n urls.append(self.FEED_URL)\n else:\n urls.append(self.FEED_URL)\n\n for url in urls:\n for (key, download_link, pattern) in self.searchLinks(url):\n if download_link:\n englisch = False\n if \"*englisch*\" in key.lower():\n key = key.replace('*ENGLISCH*', '').replace(\"*Englisch*\", \"\")\n englisch = True\n if not rsscrawler.get('english'):\n self.log_debug(\"%s - Englische 
Releases deaktiviert\" % key)\n return\n if self.config.get('enforcedl') and '.dl.' not in key.lower():\n if not self.download_dl(key):\n self.log_debug(\"%s - Kein zweisprachiges Release gefunden\" % key)\n if self.db.retrieve(key) == 'added' or self.db.retrieve(key) == 'notdl' or self.db.retrieve(key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\")) == 'added':\n self.log_debug(\"%s - Release ignoriert (bereits gefunden)\" % key)\n elif self.filename == 'MB_Filme':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '1'):\n retail = True\n else:\n if common.cutoff(key, '0'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Film] - ' + ('<b>Englisch</b> - ' if englisch and not retail else \"\") + ('<b>Englisch/Retail</b> - ' if englisch and retail else \"\") + ('<b>Retail</b> - ' if not englisch and retail else \"\") + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_3D':\n retail = False\n if (self.config.get('enforcedl') and '.dl.' in key.lower()) or not self.config.get(\n 'enforcedl'):\n if self.config.get('cutoff') and '.COMPLETE.' not in key.lower():\n if self.config.get('enforcedl'):\n if common.cutoff(key, '2'):\n retail = True\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key,\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Film] - <b>' + ('Retail/' if retail else \"\") + '3D</b> - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n elif self.filename == 'MB_Staffeln':\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\n key.replace(\".COMPLETE\", \"\").replace(\".Complete\", \"\"),\n 'notdl' if self.config.get('enforcedl') and '.dl.' not in key.lower() else 'added'\n )\n log_entry = '[Staffel] - ' + key.replace(\".COMPLETE.\", \".\") + ' - [<a href=\"' + download_link + '\" target=\"_blank\">Link</a>]'\n self.log_info(log_entry)\n added_items.append(log_entry)\n else:\n common.write_crawljob_file(\n key,\n key,\n download_link,\n jdownloaderpath + \"/folderwatch\",\n \"RSScrawler\"\n )\n self.db.store(\t\t\n key,\t\t\n 'notdl' if self.config.get('enforcedl') and '.dl.' 
not in key.lower() else 'added'\n )\n log_entry = '[Film/Serie/RegEx] - ' + key + ' - <a href=\"' + download_link + '\" target=\"_blank\" title=\"Link öffnen\"><i class=\"fas fa-link\"></i></a> <a href=\"#log\" ng-click=\"resetTitle('' + key + '')\" title=\"Download für nächsten Suchlauf zurücksetzen\"><i class=\"fas fa-undo\"></i></a>'\n self.log_info(log_entry)\n added_items.append(log_entry)\n\nif __name__ == \"__main__\":\n arguments = docopt(__doc__, version='RSScrawler')\n\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n logging.basicConfig(\n filename=os.path.join(os.path.dirname(sys.argv[0]), 'RSScrawler.log'), format='%(asctime)s - %(message)s', level=logging.__dict__[arguments['--log-level']] if arguments['--log-level'] in logging.__dict__ else logging.INFO\n )\n console = logging.StreamHandler()\n console.setLevel(logging.__dict__[arguments['--log-level']] if arguments['--log-level'] in logging.__dict__ else logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n print(\"┌────────────────────────────────────────────────────────┐\")\n print(\" Programminfo: RSScrawler \" + version + \" von RiX\")\n print(\" Projektseite: https://github.com/rix1337/RSScrawler\")\n print(\"└────────────────────────────────────────────────────────┘\")\n \n files.startup()\n\n einstellungen = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/RSScrawler.ini')\n if not arguments['--jd-pfad']:\n if not os.path.exists(einstellungen):\n if arguments['--port']:\n files.einsteller(einstellungen, version, \"Muss unbedingt vergeben werden!\", arguments['--port'])\n else:\n files.einsteller(einstellungen, version, \"Muss unbedingt vergeben werden!\", \"9090\")\n print('Der Ordner \"Einstellungen\" wurde erstellt.')\n print('Der Pfad des JDownloaders muss jetzt unbedingt in der RSScrawler.ini hinterlegt werden.')\n print('Die Einstellungen und Listen sind beim nächsten Start im Webinterface anpassbar.')\n print('Viel Spass! Beende RSScrawler!')\n sys.exit(0)\n else:\n if not os.path.exists(einstellungen):\n if arguments['--port']:\n files.einsteller(einstellungen, version, arguments['--jd-pfad'], arguments['--port'])\n else:\n files.einsteller(einstellungen, version, arguments['--jd-pfad'], \"9090\")\n print('Der Ordner \"Einstellungen\" wurde erstellt.')\n print('Die Einstellungen und Listen sind jetzt im Webinterface anpassbar.')\n \n configfile = os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/RSScrawler.ini')\n if not 'port' in open(configfile).read() and not 'prefix' in open(configfile).read() :\n print \"Veraltete Konfigurationsdatei erkannt. 
Ergänze neue Einstellungen!\"\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/RSScrawler.ini'), 'r+') as f:\n content = f.read()\n f.seek(0)\n f.truncate()\n f.write(content.replace('[RSScrawler]\\n', '[RSScrawler]\\nport = 9090\\nprefix =\\n'))\n\n rsscrawler = RssConfig('RSScrawler')\n\n if arguments['--jd-pfad']:\n jdownloaderpath = arguments['--jd-pfad']\n else:\n jdownloaderpath = rsscrawler.get(\"jdownloader\")\n if arguments['--docker']:\n jdownloaderpath = '/jd2'\n jdownloaderpath = jdownloaderpath.replace(\"\\\\\", \"/\")\n jdownloaderpath = jdownloaderpath[:-1] if jdownloaderpath.endswith('/') else jdownloaderpath\n\n if arguments['--docker']:\n print('Docker-Modus: JDownloader-Pfad und Port können nur per Docker-Run angepasst werden!')\n \n if jdownloaderpath == 'Muss unbedingt vergeben werden!':\n print('Der Pfad des JDownloaders muss unbedingt in der RSScrawler.ini hinterlegt werden.')\n print('Weiterhin sollten die Listen entsprechend der README.md gefüllt werden!')\n print('Beende RSScrawler...')\n sys.exit(0)\n \n print('Nutze das \"folderwatch\" Unterverzeichnis von \"' + jdownloaderpath + '\" für Crawljobs')\n \n if not os.path.exists(jdownloaderpath):\n print('Der Pfad des JDownloaders existiert nicht.')\n print('Beende RSScrawler...')\n sys.exit(0)\n\n if not os.path.exists(jdownloaderpath + \"/folderwatch\"):\n print('Der Pfad des JDownloaders enthält nicht das \"folderwatch\" Unterverzeichnis. Sicher, dass der Pfad stimmt?')\n print('Beende RSScrawler...')\n sys.exit(0)\n\n if arguments['--port']:\n port = int(arguments['--port'])\n else:\n port = port = int(rsscrawler.get(\"port\"))\n docker = False\n if arguments['--docker']:\n port = int('9090')\n docker = True\n \n if rsscrawler.get(\"prefix\"):\n prefix = '/' + rsscrawler.get(\"prefix\")\n else:\n prefix = ''\n print('Der Webserver ist erreichbar unter http://' + common.checkIp() +':' + str(port) + prefix)\n\n p = Process(target=web_server, args=(port, docker, jdownloaderpath))\n p.start()\n \n files.check()\n \n c = Process(target=crawler, args=(jdownloaderpath, rsscrawler,))\n c.start()\n\n print('Drücke [Strg] + [C] zum Beenden')\n \n def signal_handler(signal, frame):\n print('Beende RSScrawler...')\n p.terminate()\n c.terminate()\n sys.exit(0)\n signal.signal(signal.SIGINT, signal_handler)\n\n if not arguments['--testlauf']:\n try:\n while True:\n signal.pause()\n except AttributeError:\n while True:\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5786738991737366,
"alphanum_fraction": 0.6052795648574829,
"avg_line_length": 49.64210510253906,
"blob_id": "d5ee64ac4ab93c9a572717562d94c60eb62d89a6",
"content_id": "d1d522bcb42481f47bf9adce475eb51680790f14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4812,
"license_type": "permissive",
"max_line_length": 479,
"num_lines": 95,
"path": "/common.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n# Enthält Code von:\n# https://github.com/bharnett/Infringer/blob/master/LinkRetrieve.py\n\nimport files\nimport logging\nimport os\nimport re\nimport socket\nimport sys\nfrom rssconfig import RssConfig\n\nlog_info = logging.info\nlog_error = logging.error\nlog_debug = logging.debug\n \ndef write_crawljob_file(package_name, folder_name, link_text, crawljob_dir, subdir):\n crawljob_file = crawljob_dir + '/%s.crawljob' % unicode(re.sub('[^\\w\\s\\.-]', '', package_name.replace(' ', '')).strip().lower())\n crawljobs = RssConfig('Crawljobs')\n autostart = crawljobs.get(\"autostart\")\n usesubdir = crawljobs.get(\"subdir\")\n if not usesubdir:\n subdir = \"\"\n if autostart:\n autostart = \"TRUE\"\n else:\n autostart = \"FALSE\"\n try:\n file = open(crawljob_file, 'w')\n file.write('enabled=TRUE\\n')\n file.write('autoStart=' + autostart + '\\n')\n file.write('extractPasswords=[\"' + \"bW92aWUtYmxvZy5vcmc=\".decode('base64') + '\",\"' + \"c2VyaWVuanVua2llcy5vcmc=\".decode('base64') + '\",\"' + \"aGQtYXJlYS5vcmc=\".decode('base64') + '\",\"' + \"aGQtd29ybGQub3Jn\".decode('base64') + '\",\"' + \"d2FyZXotd29ybGQub3Jn\".decode('base64') + '\"]\\n')\n file.write('downloadPassword=' + \"c2VyaWVuanVua2llcy5vcmc=\".decode('base64') + '\\n')\n file.write('extractAfterDownload=TRUE\\n')\n file.write('forcedStart=' + autostart + '\\n')\n file.write('autoConfirm=' + autostart + '\\n')\n if not subdir == \"\":\n file.write('downloadFolder=' + subdir + \"/\" + '%s\\n' % folder_name)\n if subdir == \"RSScrawler/Remux\":\n file.write('priority=Lower\\n')\n else:\n file.write('downloadFolder=' + '%s\\n' % folder_name)\n file.write('packageName=%s\\n' % package_name.replace(' ', ''))\n file.write('text=%s\\n' % link_text)\n file.close()\n return True\n except UnicodeEncodeError as e:\n file.close()\n log_error(\"Beim Schreibversuch des Crawljobs: %s FEHLER: %s\" %(crawljob_file, e.message))\n if os.path.isfile(crawljob_file):\n log_info(\"Entferne defekten Crawljob: %s\" % crawljob_file)\n os.remove(crawljob_file)\n return False\n\ndef checkIp():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 0))\n IP = s.getsockname()[0]\n except:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\ndef entfernen(retailtitel, identifier):\n def capitalize(line):\n line = line.rstrip()\n return ' '.join(s[0].upper() + s[1:] for s in line.split(' '))\n simplified = retailtitel.replace(\".\", \" \")\n retail = re.sub(r'(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU)(|.)\\d{4}(|.)(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS|.DTS-HD)(|.DL)(|.AC3|.DTS).(2160|1080|720)p.(UHD.|Ultra.HD.|)(HDDVD|BluRay)(|.HDR)(|.AVC|.AVC.REMUX|.x264|.x265)(|.REPACK|.RERiP)-.*', \"\", simplified)\n retailyear = re.sub(r'(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS|.DTS-HD)(|.DL)(|.AC3|.DTS|.DTS-HD).(2160|1080|720)p.(UHD.|Ultra.HD.|)(HDDVD|BluRay)(|.HDR)(|.AVC|.AVC.REMUX|.x264|.x265)(|.REPACK|.RERiP)-.*', \"\", simplified)\n if identifier == '2':\n liste = \"MB_3D\"\n else:\n liste = \"MB_Filme\"\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'r') as l:\n 
content = []\n for line in l:\n content.append(re.sub(r'^(' + re.escape(retailyear) + '|' + re.escape(retail)+ '|' + re.escape(retailyear.lower()) + '|' + re.escape(retail.lower()) + '|' + re.escape(retailyear.upper()) + '|' + re.escape(retail.upper()) + '|' + re.escape(capitalize(retailyear)) + '|' + re.escape(capitalize(retail)) + ')', '', line))\n l.close()\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'w') as w:\n w.write(''.join(content))\n files.check()\n log_debug(retail + \" durch Cutoff aus \" + liste + \" entfernt.\")\n\ndef cutoff(key, identifier):\n retailfinder = re.search(\"(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS|.DTS-HD)(|.DL)(|.AC3|.DTS|.DTS-HD).(2160|1080|720)p.(UHD.|Ultra.HD.|)(HDDVD|BluRay)(|.HDR)(|.AVC|.AVC.REMUX|.x264|.x265)(|.REPACK|.RERiP)-.*\",key)\n if retailfinder:\n entfernen(key, identifier)\n return True\n else:\n return False\n"
},
{
"alpha_fraction": 0.45647990703582764,
"alphanum_fraction": 0.46560731530189514,
"avg_line_length": 41.592506408691406,
"blob_id": "e0eaae28e4a77a746e299c12ab18a4648eab4250",
"content_id": "9a358f1df787aeba706acd0b7fa3295203d1f08e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18187,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 427,
"path": "/web.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, send_from_directory, render_template, jsonify\n\nfrom rssconfig import RssConfig\nfrom rssdb import RssDb\nimport search\nimport files\nimport version\n\nimport StringIO\nimport os\nimport re\nimport sys\n\nimport logging\n\napp = Flask(__name__, static_url_path='/web', template_folder='web')\n\nif not os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen')):\n prefix = \"\"\nelse:\n general = RssConfig('RSScrawler')\n if general.get(\"prefix\"):\n prefix = '/' + general.get(\"prefix\")\n else:\n prefix = \"\"\n\ndef to_int(i):\n i = i.strip().replace(\"None\", \"\")\n return int(i) if i else \"\"\n\ndef to_float(i):\n i = i.strip().replace(\"None\", \"\")\n return float(i) if i else \"\"\n\ndef to_str(i):\n return '' if i is None else str(i)\n\[email protected](prefix + '/<path:path>')\ndef send_html(path):\n return send_from_directory('web', path)\n\[email protected](prefix + '/')\ndef index():\n return render_template('index.html')\n\[email protected](prefix + \"/api/all/\", methods=['GET'])\ndef get_all():\n if request.method == 'GET':\n general = RssConfig('RSScrawler')\n alerts = RssConfig('Notifications')\n crawljobs = RssConfig('Crawljobs')\n mb = RssConfig('MB')\n sj = RssConfig('SJ')\n yt = RssConfig('YT')\n ver = version.getVersion()\n if version.updateCheck()[0]:\n updateready = True\n updateversion = version.updateCheck()[1]\n print('Update steht bereit (' + updateversion +')! Weitere Informationen unter https://github.com/rix1337/RSScrawler/releases/latest')\n else:\n updateready = False\n log = ''\n logfile = os.path.join(os.path.dirname(sys.argv[0]), 'RSScrawler.log')\n if os.path.isfile(logfile):\n logfile = open(os.path.join(logfile))\n output = StringIO.StringIO()\n for line in reversed(logfile.readlines()):\n output.write(\"<p>\" + line.replace(\"\\n\",\"</p>\"))\n log = output.getvalue()\n return jsonify(\n {\n \"version\": {\n \"ver\": ver,\n \"update_ready\": updateready,\n \"docker\": docker,\n },\n \"log\": log,\n \"lists\": {\n \"mb\": {\n \"filme\": getListe('MB_Filme'),\n \"filme3d\": getListe('MB_3D'),\n \"regex\": getListe('MB_Regex'),\n },\n \"sj\": {\n \"serien\": getListe('SJ_Serien'),\n \"regex\": getListe('SJ_Serien_Regex'),\n \"staffeln_regex\": getListe('SJ_Staffeln_Regex'),\n },\n \"mbsj\": {\n \"staffeln\": getListe('MB_Staffeln'),\n },\n \"yt\": {\n \"kanaele_playlisten\": getListe('YT_Channels'),\n },\n },\n \"settings\": {\n \"general\": {\n \"pfad\": general.get(\"jdownloader\"),\n \"port\": to_int(general.get(\"port\")),\n \"prefix\": general.get(\"prefix\"),\n \"interval\": to_int(general.get(\"interval\")),\n \"english\": bool(general.get(\"english\")),\n \"hoster\": general.get(\"hoster\"),\n },\n \"alerts\": {\n \"homeassistant\": alerts.get(\"homeassistant\"),\n \"pushbullet\": alerts.get(\"pushbullet\"),\n \"pushover\": alerts.get(\"pushover\"),\n },\n \"crawljobs\": {\n \"autostart\": bool(crawljobs.get(\"autostart\")),\n \"subdir\": bool(crawljobs.get(\"subdir\")),\n },\n \"mb\": {\n \"quality\": mb.get(\"quality\"),\n \"ignore\": mb.get(\"ignore\"),\n \"regex\": bool(mb.get(\"regex\")),\n \"imdb_score\": to_float(mb.get(\"imdb\")),\n \"imdb_year\": to_int(mb.get(\"imdbyear\")),\n \"historical\": bool(mb.get(\"historical\")),\n \"force_dl\": bool(mb.get(\"enforcedl\")),\n \"cutoff\": bool(mb.get(\"cutoff\")),\n \"crawl_3d\": bool(mb.get(\"crawl3d\")),\n },\n \"sj\": {\n \"quality\": sj.get(\"quality\"),\n \"ignore\": sj.get(\"rejectlist\"),\n \"regex\": 
bool(sj.get(\"regex\")),\n },\n \"mbsj\": {\n \"enabled\": bool(mb.get(\"crawlseasons\")),\n \"quality\": mb.get(\"seasonsquality\"),\n \"packs\": bool(mb.get(\"seasonpacks\")),\n \"source\": mb.get(\"seasonssource\"),\n },\n \"yt\": {\n \"enabled\": bool(yt.get(\"youtube\")),\n \"max\": to_int(yt.get(\"maxvideos\")),\n \"ignore\": yt.get(\"ignore\"),\n }\n }\n }\n )\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/log/\", methods=['GET', 'DELETE'])\ndef get_delete_log():\n if request.method == 'GET':\n log = ''\n logfile = os.path.join(os.path.dirname(sys.argv[0]), 'RSScrawler.log')\n if os.path.isfile(logfile):\n logfile = open(os.path.join(logfile))\n output = StringIO.StringIO()\n for line in reversed(logfile.readlines()):\n output.write(\"<p>\" + line.replace(\"\\n\",\"</p>\"))\n log = output.getvalue()\n return jsonify(\n {\n \"log\": log,\n }\n )\n if request.method == 'DELETE':\n open(os.path.join(os.path.dirname(sys.argv[0]), 'RSScrawler.log'), 'w').close()\n return \"Success\", 200\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/settings/\", methods=['GET', 'POST'])\ndef get_post_settings():\n if request.method == 'GET':\n general = RssConfig('RSScrawler')\n alerts = RssConfig('Notifications')\n crawljobs = RssConfig('Crawljobs')\n mb = RssConfig('MB')\n sj = RssConfig('SJ')\n yt = RssConfig('YT')\n return jsonify(\n {\n \"settings\": {\n \"general\": {\n \"pfad\": general.get(\"jdownloader\"),\n \"port\": to_int(general.get(\"port\")),\n \"prefix\": general.get(\"prefix\"),\n \"interval\": to_int(general.get(\"interval\")),\n \"english\": bool(general.get(\"english\")),\n \"hoster\": general.get(\"hoster\"),\n },\n \"alerts\": {\n \"homeassistant\": alerts.get(\"homeassistant\"),\n \"pushbullet\": alerts.get(\"pushbullet\"),\n \"pushover\": alerts.get(\"pushover\"),\n },\n \"crawljobs\": {\n \"autostart\": bool(crawljobs.get(\"autostart\")),\n \"subdir\": bool(crawljobs.get(\"subdir\")),\n },\n \"mb\": {\n \"quality\": mb.get(\"quality\"),\n \"ignore\": mb.get(\"ignore\"),\n \"regex\": bool(mb.get(\"regex\")),\n \"imdb_score\": to_float(mb.get(\"imdb\")),\n \"imdb_year\": to_int(mb.get(\"imdbyear\")),\n \"historical\": bool(mb.get(\"historical\")),\n \"force_dl\": bool(mb.get(\"enforcedl\")),\n \"cutoff\": bool(mb.get(\"cutoff\")),\n \"crawl_3d\": bool(mb.get(\"crawl3d\")),\n },\n \"sj\": {\n \"quality\": sj.get(\"quality\"),\n \"ignore\": sj.get(\"rejectlist\"),\n \"regex\": bool(sj.get(\"regex\")),\n },\n \"mbsj\": {\n \"enabled\": bool(mb.get(\"crawlseasons\")),\n \"quality\": mb.get(\"seasonsquality\"),\n \"packs\": bool(mb.get(\"seasonpacks\")),\n \"source\": mb.get(\"seasonssource\"),\n },\n \"yt\": {\n \"enabled\": bool(yt.get(\"youtube\")),\n \"max\": to_int(yt.get(\"maxvideos\")),\n \"ignore\": yt.get(\"ignore\"),\n }\n }\n }\n )\n if request.method == 'POST':\n data = request.json\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/RSScrawler.ini'), 'wb') as f:\n f.write('# RSScrawler.ini (Stand: RSScrawler ' + version.getVersion() + ')\\n')\n f.write(\"\\n[RSScrawler]\\n\")\n f.write(\"jdownloader = \" + to_str(data['general']['pfad']).encode('utf-8') + \"\\n\")\n f.write(\"port = \" + to_str(data['general']['port']).encode('utf-8') + \"\\n\")\n f.write(\"prefix = \" + to_str(data['general']['prefix']).encode('utf-8').lower() + \"\\n\")\n interval = to_str(data['general']['interval']).encode('utf-8')\n if to_int(interval) < 3:\n interval = '3'\n f.write(\"interval = \" + interval + \"\\n\")\n f.write(\"english 
= \" + to_str(data['general']['english']).encode('utf-8') + \"\\n\")\n f.write(\"hoster = \" + to_str(data['general']['hoster']).encode('utf-8') + \"\\n\")\n f.write(\"\\n[MB]\\n\")\n f.write(\"quality = \" + to_str(data['mb']['quality']).encode('utf-8') + \"\\n\")\n f.write(\"ignore = \" + to_str(data['mb']['ignore']).encode('utf-8').lower() + \"\\n\")\n f.write(\"historical = \" + to_str(data['mb']['historical']).encode('utf-8') + \"\\n\")\n f.write(\"regex = \" + to_str(data['mb']['regex']).encode('utf-8') + \"\\n\")\n f.write(\"cutoff = \" + to_str(data['mb']['cutoff']).encode('utf-8') + \"\\n\")\n f.write(\"crawl3d = \" + to_str(data['mb']['crawl_3d']).encode('utf-8') + \"\\n\")\n f.write(\"enforcedl = \" + to_str(data['mb']['force_dl']).encode('utf-8') + \"\\n\")\n f.write(\"crawlseasons = \" + to_str(data['mbsj']['enabled']).encode('utf-8') + \"\\n\")\n f.write(\"seasonsquality = \" + to_str(data['mbsj']['quality']).encode('utf-8') + \"\\n\")\n f.write(\"seasonpacks = \" + to_str(data['mbsj']['packs']).encode('utf-8') + \"\\n\")\n f.write(\"seasonssource = \" + to_str(data['mbsj']['source']).encode('utf-8').lower() + \"\\n\")\n f.write(\"imdbyear = \" + to_str(data['mb']['imdb_year']).encode('utf-8') + \"\\n\")\n imdb = to_str(data['mb']['imdb_score']).encode('utf-8')\n if re.match('[^0-9]', imdb):\n imdb = 0.0\n elif imdb == '':\n imdb = 0.0\n else:\n imdb = round(float(to_str(data['mb']['imdb_score']).encode('utf-8').replace(\",\", \".\")), 1)\n if imdb > 10:\n imdb = 10.0\n f.write(\"imdb = \" + to_str(imdb) + \"\\n\")\n f.write(\"\\n[SJ]\\n\")\n f.write(\"quality = \" + to_str(data['sj']['quality']).encode('utf-8') + \"\\n\")\n f.write(\"rejectlist = \" + to_str(data['sj']['ignore']).encode('utf-8').lower() + \"\\n\")\n f.write(\"regex = \" + to_str(data['sj']['regex']).encode('utf-8') + \"\\n\")\n f.write(\"\\n[YT]\\n\")\n f.write(\"youtube = \" + to_str(data['yt']['enabled']).encode('utf-8') + \"\\n\")\n maxvideos = to_str(data['yt']['max']).encode('utf-8')\n if maxvideos == \"\":\n maxvideos = \"10\"\n if to_int(maxvideos) < 1:\n f.write(\"maxvideos = 1\\n\")\n elif to_int(maxvideos) > 50:\n f.write(\"maxvideos = 50\\n\")\n else:\n f.write(\"maxvideos = \" + to_str(maxvideos) + \"\\n\")\n f.write(\"ignore = \" + to_str(data['yt']['ignore']).encode('utf-8') + \"\\n\")\n f.write(\"\\n[Notifications]\\n\")\n f.write(\"homeassistant = \" + to_str(data['alerts']['homeassistant']).encode('utf-8') + \"\\n\")\n f.write(\"pushbullet = \" + to_str(data['alerts']['pushbullet']).encode('utf-8') + \"\\n\")\n f.write(\"pushover = \" + to_str(data['alerts']['pushover']).encode('utf-8') + \"\\n\")\n f.write(\"\\n[Crawljobs]\\n\")\n f.write(\"autostart = \" + to_str(data['crawljobs']['autostart']).encode('utf-8') + \"\\n\")\n f.write(\"subdir = \" + to_str(data['crawljobs']['subdir']).encode('utf-8') + \"\\n\")\n files.check()\n return \"Success\", 201\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/version/\", methods=['GET'])\ndef get_version():\n if request.method == 'GET':\n ver = version.getVersion()\n if version.updateCheck()[0]:\n updateready = True\n updateversion = version.updateCheck()[1]\n print('Update steht bereit (' + updateversion +')! 
Weitere Informationen unter https://github.com/rix1337/RSScrawler/releases/latest')\n else:\n updateready = False\n return jsonify(\n {\n \"version\": {\n \"ver\": ver,\n \"update_ready\": updateready,\n \"docker\": docker,\n }\n }\n )\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/delete/<title>\", methods=['DELETE'])\ndef delete_title(title):\n if request.method == 'DELETE':\n db = RssDb(os.path.join(os.path.dirname(sys.argv[0]), \"Einstellungen/Downloads/Downloads.db\"))\n db.delete(title)\n return \"Success\", 200\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/search/<title>\", methods=['GET'])\ndef search_title(title):\n if request.method == 'GET':\n results = search.get(title)\n return jsonify(\n {\n \"results\": {\n \"mb\": results[0],\n \"sj\": results[1]\n }\n }\n ), 200\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/download_mb/<permalink>\", methods=['POST'])\ndef download_mb(permalink):\n if request.method == 'POST':\n if search.mb(permalink, jdpath):\n return \"Success\", 200\n else:\n return \"Failed\", 400\n else:\n return \"Failed\", 40\n\[email protected](prefix + \"/api/download_sj/<id>\", methods=['POST'])\ndef download_sj(id):\n if request.method == 'POST':\n if search.sj(id, jdpath):\n return \"Success\", 200\n else:\n return \"Failed\", 400\n else:\n return \"Failed\", 405\n\[email protected](prefix + \"/api/lists/\", methods=['GET', 'POST'])\ndef get_post_lists():\n if request.method == 'GET':\n return jsonify(\n {\n \"lists\": {\n \"mb\": {\n \"filme\": getListe('MB_Filme'),\n \"filme3d\": getListe('MB_3D'),\n \"regex\": getListe('MB_Regex'),\n },\n \"sj\": {\n \"serien\": getListe('SJ_Serien'),\n \"regex\": getListe('SJ_Serien_Regex'),\n \"staffeln_regex\": getListe('SJ_Staffeln_Regex'),\n },\n \"mbsj\": {\n \"staffeln\": getListe('MB_Staffeln'),\n },\n \"yt\": {\n \"kanaele_playlisten\": getListe('YT_Channels'),\n },\n },\n }\n )\n if request.method == 'POST':\n data = request.json\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/MB_Filme.txt'), 'wb') as f:\n f.write(data['mb']['filme'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/MB_3D.txt'), 'wb') as f:\n f.write(data['mb']['filme3d'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/MB_Staffeln.txt'), 'wb') as f:\n f.write(data['mbsj']['staffeln'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/MB_Regex.txt'), 'wb') as f:\n f.write(data['mb']['regex'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Serien.txt'), 'wb') as f:\n f.write(data['sj']['serien'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Serien_Regex.txt'), 'wb') as f:\n f.write(data['sj']['regex'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/SJ_Staffeln_Regex.txt'), 'wb') as f:\n f.write(data['sj']['staffeln_regex'].encode('utf-8'))\n with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/YT_Channels.txt'), 'wb') as f:\n f.write(data['yt']['kanaele_playlisten'].encode('utf-8'))\n files.check()\n return \"Success\", 201\n else:\n return \"Failed\", 405\n\ndef getListe(liste):\n if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt')):\n return \"Liste nicht gefunden\"\n else:\n file = 
open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'))\n output = StringIO.StringIO()\n for line in file.readlines():\n output.write(line.replace(\"XXXXXXXXXX\",\"\"))\n return output.getvalue()\n\ndef start(port, docker_arg, jd):\n global docker\n docker = docker_arg\n global jdpath\n jdpath = jd\n if version.updateCheck()[0]:\n updateversion = version.updateCheck()[1]\n print('Update steht bereit (' + updateversion +')! Weitere Informationen unter https://github.com/rix1337/RSScrawler/releases/latest')\n logger = logging.getLogger('werkzeug')\n logger.setLevel(logging.ERROR)\n app.run(host='0.0.0.0', port=port, threaded=True)\n"
},
{
"alpha_fraction": 0.5249663591384888,
"alphanum_fraction": 0.5368893146514893,
"avg_line_length": 32.199405670166016,
"blob_id": "ea4d01fa9a11a958101a924cf83985819366da16",
"content_id": "ef0f9a8c53de79b89e602fc0c0e86d3eac73d22d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 11170,
"license_type": "permissive",
"max_line_length": 224,
"num_lines": 336,
"path": "/web/js/rsscrawler.js",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "app = angular.module('crwlApp', [])\n .directive('bindHtmlCompile', function($compile) {\n return {\n restrict: \"A\",\n scope: {\n bindHtmlCompile: \"=\"\n },\n link: function(scope, elem) {\n scope.$watch(\"bindHtmlCompile\", function(newVal) {\n elem.html('');\n var newElem = angular.element(newVal);\n var compileNewElem = $compile(newElem)(scope.$parent);\n elem.append(compileNewElem);\n });\n }\n };\n});\n\napp.controller('crwlCtrl', function($scope, $http, $timeout){\n $(function () {\n $('[data-toggle=\"tooltip\"]').tooltip()\n })\n\n $scope.results = [\n {\n mb: {\n link: \"Link\",\n title: \"Title\"\n },\n sj: {\n id: \"Link\",\n title: \"Title\",\n }\n }\n ]\n\n $scope.bools = [\n {value: true, label: 'Aktiviert'},\n {value: false, label: 'Deaktiviert'},\n ];\n\n $scope.hosters = [\n {value: 'Uploaded', label: 'Uploaded'},\n {value: 'Share-Online', label: 'Share-Online'},\n ];\n\n $scope.resolutions = [\n {value: '480p', label: '480p (SD)'},\n {value: '720p', label: '720p (HD)'},\n {value: '1080p', label: '1080p (Full-HD)'},\n {value: '2160p', label: '2160p (4K)'},\n ];\n\n $scope.sources = [\n {value: 'hdtv|hdtvrip|tvrip', label: 'HDTV'},\n {value: 'web-dl|webrip|webhd|netflix*|amazon*|itunes*', label: 'WEB'},\n {value: 'hdtv|hdtvrip|tvrip|web-dl|webrip|webhd|netflix*|amazon*|itunes*', label: 'HDTV/WEB'},\n {value: 'bluray|bd|bdrip', label: 'BluRay'},\n {value: 'web-dl|webrip|webhd|netflix*|amazon*|itunes*|bluray|bd|bdrip', label: 'Web/BluRay'},\n {value: 'hdtv|hdtvrip|tvrip|web-dl|webrip|webhd|netflix*|amazon*|itunes*|bluray|bd|bdrip', label: 'HDTV/WEB/BluRay'},\n {value: 'web-dl.*-(tvs|4sj)|webrip.*-(tvs|4sj)|webhd.*-(tvs|4sj)|netflix.*-(tvs|4sj)|amazon.*-(tvs|4sj)|itunes.*-(tvs|4sj)|bluray|bd|bdrip', label: 'BluRay/WebRetail (TVS/4SJ)'},\n ];\n\n $scope.init = getAll();\n\n $scope.showSearch = function() {\n showSearch();\n };\n\n $scope.deleteLog = function() {\n deleteLog();\n };\n\n $scope.searchNow = function() {\n searchNow();\n };\n\n $scope.downloadMB = function(link) {\n downloadMB(link);\n };\n\n $scope.downloadSJ = function(id) {\n downloadSJ(id);\n };\n\n $scope.resetTitle = function(title) {\n resetTitle(title);\n };\n\n $scope.saveLists = function() {\n setLists();\n };\n\n $scope.saveSettings = function() {\n setSettings();\n };\n\n function getAll() {\n $http.get('api/all/')\n .then(function(res){\n $scope.version = res.data.version.ver;\n $(\"#headtitle\").html('Projekt von <a href=\"https://github.com/rix1337/RSScrawler/commits\" target=\"_blank\">RiX</a> ' + $scope.version + '<span id=\"updateready\" style=\"display: none;\"> - Update verfügbar!</span>');\n console.log('Dies ist der RSScrawler ' + $scope.version + ' von https://github.com/rix1337');\n $scope.update = res.data.version.update_ready;\n $scope.docker = res.data.version.docker;\n if ($scope.docker) {\n $(\".docker\").prop( \"disabled\", true );\n }\n year = (new Date).getFullYear();\n $(\"#year\").attr(\"max\", year);\n if ($scope.update) {\n $(\"#updateready\").show();\n scrollingTitle(\"RSScrawler - Update verfügbar! - \");\n console.log('Update steht bereit! Weitere Informationen unter https://github.com/rix1337/RSScrawler/releases/latest');\n showInfo('Update steht bereit! 
Weitere Informationen unter <a href=\"https://github.com/rix1337/RSScrawler/releases/latest\" target=\"_blank\">github.com</a>.');\n }\n $scope.log = res.data.log;\n $scope.settings = res.data.settings;\n $scope.lists = res.data.lists;\n console.log('Alles abgerufen!');\n }, function (res) {\n console.log('Konnte nichts abrufen!');\n showDanger('Konnte nichts abrufen!');\n });\n };\n \n function getLogOnly() {\n $http.get('api/log/')\n .then(function(res){\n $scope.log = res.data.log;\n console.log('Log abgerufen!');\n }, function (res) {\n console.log('Konnte Log nicht abrufen!');\n showDanger('Konnte Log nicht abrufen!');\n });\n };\n\n function getSettingsOnly() {\n $http.get('api/settings/')\n .then(function(res){\n $scope.settings = res.data.settings;\n console.log('Einstellungen abgerufen!');\n year = (new Date).getFullYear();\n $(\"#year\").attr(\"max\", year);\n }, function (res) {\n console.log('Konnte Einstellungen nicht abrufen!');\n showDanger('Konnte Einstellungen nicht abrufen!');\n });\n };\n\n function getListsOnly() {\n $http.get('api/lists/')\n .then(function(res){\n $scope.lists = res.data.lists;\n console.log('Listen abgerufen!');\n }, function (res) {\n console.log('Konnte Listen nicht abrufen!');\n showDanger('Konnte Listen nicht abrufen!');\n });\n };\n\n function getVersionOnly() {\n $http.get('api/version/')\n .then(function(res){\n $scope.version = res.data.version.ver;\n $scope.update = res.data.version.update_ready;\n if ($scope.update) {\n $(\"#updateready\").show();\n scrollingTitle(\"RSScrawler - Update verfügbar! - \");\n console.log('Update steht bereit! Weitere Informationen unter https://github.com/rix1337/RSScrawler/releases/latest');\n showInfo('Update steht bereit! Weitere Informationen unter <a href=\"https://github.com/rix1337/RSScrawler/releases/latest\" target=\"_blank\">github.com</a>.');\n }\n console.log('Version abgerufen!');\n }, function (res) {\n console.log('Konnte Version nicht abrufen!');\n showDanger('Konnte Version nicht abrufen!');\n });\n };\n\n function setLists() {\n spinLists();\n $http.post('api/lists/', $scope.lists , 'application/json')\n .then(function(res){\n console.log('Listen gespeichert! Änderungen werden im nächsten Suchlauf berücksichtigt.');\n showSuccess('Listen gespeichert! Änderungen werden im nächsten Suchlauf berücksichtigt.');\n getListsOnly();\n }, function (res) {\n console.log('Konnte Listen nicht speichern!');\n showDanger('Konnte Listen nicht speichern!');\n });\n };\n\n function setSettings() {\n spinSettings();\n $http.post('api/settings/', $scope.settings, 'application/json')\n .then(function(res){\n console.log('Einstellungen gespeichert! Einige Änderungen erfordern einen Neustart.');\n showSuccess('Einstellungen gespeichert! 
Einige Änderungen erfordern einen Neustart.');\n getSettingsOnly();\n }, function (res) {\n $('#headingTwoOne').addClass('show');\n console.log('Konnte Einstellungen nicht speichern!');\n showDanger('Konnte Einstellungen nicht speichern!');\n });\n };\n\n function downloadMB(link) {\n $http.post('api/download_mb/' + link)\n .then(function(res){\n console.log('Download gestartet!');\n showSuccess('Download gestartet!');\n }, function (res) {\n console.log('Konnte Download nicht starten!');\n showDanger('Konnte Download nicht starten!');\n });\n };\n\n function downloadSJ(id) {\n $http.post('api/download_sj/' + id)\n .then(function(res){\n console.log('Download gestartet!');\n showSuccess('Download gestartet!');\n }, function (res) {\n console.log('Konnte Download nicht starten!');\n showDanger('Konnte Download nicht starten!');\n });\n };\n\n function deleteLog() {\n spinLog();\n $http.delete('api/log/')\n .then(function(res){\n console.log('Log geleert!');\n showSuccess('Log geleert!');\n getLogOnly();\n }, function (res) {\n console.log('Konnte Log nicht leeren!');\n showDanger('Konnte Log nicht leeren!');\n });\n };\n\n function searchNow() {\n spinSearch();\n title = $scope.search\n $http.get('api/search/' + title)\n .then(function(res){\n $scope.results = res.data.results;\n $(\".results\").show();\n $(\".search\").hide();\n console.log('Nach ' + title + ' gesucht!');\n getLogOnly();\n getListsOnly();\n }, function (res) {\n console.log('Konnte ' + title + ' nicht suchen!');\n showDanger('Konnte ' + title + ' nicht suchen!');\n });\n };\n\n function showSearch() {\n $('.results').hide();\n $('.search').show();\n };\n\n function resetTitle(title) {\n $http.delete('api/delete/' + title)\n .then(function(res){\n console.log('Download von ' + title + ' zurückgesetzt!');\n showSuccess('Download von ' + title + ' zurückgesetzt!');\n getLogOnly();\n }, function (res) {\n console.log('Konnte Download von ' + title + ' nicht zurück setzen!');\n showDanger('Konnte Download von ' + title + ' nicht zurück setzen!');\n });\n };\n\n function scrollingTitle(titleText) {\n document.title = titleText;\n setTimeout(function () {\n scrollingTitle(titleText.substr(1) + titleText.substr(0, 1));\n }, 200);\n };\n\n function showSuccess(message) {\n $(\".alert-success\").html(message)\n $(\".alert-success\").fadeTo(3000, 500).slideUp(500, function(){\n $(\".alert-success\").slideUp(500);\n });\n };\n\n function showInfo(message) {\n $(\".alert-info\").html(message)\n $(\".alert-info\").fadeTo(10000, 500).slideUp(500, function(){\n $(\".alert-info\").slideUp(500);\n });\n };\n\n function showDanger(message) {\n $(\".alert-danger\").html(message)\n $(\".alert-danger\").fadeTo(5000, 500).slideUp(500, function(){\n $(\".alert-danger\").slideUp(500);\n });\n };\n function spinSearch() {\n $(\"#spinner-search\").fadeIn().delay(1000).fadeOut();\n };\n\n function spinLog() {\n $(\"#spinner-log\").fadeIn().delay(1000).fadeOut();\n };\n\n function spinLists() {\n $(\"#spinner-lists\").fadeIn().delay(1000).fadeOut();\n };\n\n function spinSettings() {\n $(\"#spinner-settings\").fadeIn().delay(1000).fadeOut();\n };\n\n $scope.updateLog = function(){\n $timeout(function() {\n getLogOnly();\n $scope.updateLog();\n }, 10000)\n };\n\n $scope.updateLog();\n\n $scope.updateChecker = function(){\n $timeout(function() {\n getVersionOnly();\n $scope.updateChecker();\n }, 300000)\n };\n\n $scope.updateChecker();\n});\n"
},
{
"alpha_fraction": 0.5828970074653625,
"alphanum_fraction": 0.6195462346076965,
"avg_line_length": 26.285715103149414,
"blob_id": "9a19a83880ba93e0dbc77de222be73f062c8caae",
"content_id": "f8e4ead8d17c7f51a413a9b84cea16457e3daa0c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "permissive",
"max_line_length": 184,
"num_lines": 21,
"path": "/version.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport re\nimport urllib2\n\n\ndef getVersion():\n return \"v.4.0.7\"\n\ndef updateCheck():\n localversion = getVersion()\n try:\n onlineversion = re.search(r'return \"(v\\.\\d{1,2}\\.\\d{1,2}\\.\\d{1,2})\"', urllib2.urlopen('https://raw.githubusercontent.com/rix1337/RSScrawler/master/version.py').read()).group(1)\n if localversion == onlineversion:\n return (False, localversion)\n else:\n return (True, onlineversion)\n except:\n return (False, \"Error\")\n"
},
{
"alpha_fraction": 0.5361701846122742,
"alphanum_fraction": 0.5609928965568542,
"avg_line_length": 30.33333396911621,
"blob_id": "cecddba95c4935a29d207d9b079980b28d487f69",
"content_id": "5f0e2b6d164ec3f1f9a4b45d5e3e82fe23b3189f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1410,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 45,
"path": "/url.py",
"repo_name": "JefferyKu/RSScrawler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# RSScrawler\n# Projekt von https://github.com/rix1337\n\nimport cfscrape\n\nfrom rssconfig import RssConfig\n\ndef getURL(url):\n proxy = RssConfig('RSScrawler').get('proxy')\n if proxy:\n proxies = []\n if proxy.startswith('http://'):\n proxies[0] = proxy[:4]\n proxies[1] = proxy\n elif proxy.startswith('https://'):\n proxies[0] = proxy[:5]\n proxies[1] = proxy\n elif proxy.startswith('socks5://'):\n proxies[0] = 'http'\n proxies[1] = proxy\n proxies = {proxies[0]: proxies[1]}\n scraper = cfscrape.create_scraper(delay=10, proxies=proxies)\n else:\n scraper = cfscrape.create_scraper(delay=10)\n return scraper.get(url).content\n\ndef postURL(url, data):\n proxy = RssConfig('RSScrawler').get('proxy')\n if proxy:\n proxies = []\n if proxy.startswith('http://'):\n proxies[0] = proxy[:4]\n proxies[1] = proxy\n elif proxy.startswith('https://'):\n proxies[0] = proxy[:5]\n proxies[1] = proxy\n elif proxy.startswith('socks5://'):\n proxies[0] = 'http'\n proxies[1] = proxy\n proxies = {proxies[0]: proxies[1]}\n scraper = cfscrape.create_scraper(delay=10, proxies=proxies)\n else:\n scraper = cfscrape.create_scraper(delay=10)\n return scraper.post(url, data).content\n"
}
] | 14 |
parth-gudhka/CodeJam-2019-1B | https://github.com/parth-gudhka/CodeJam-2019-1B | 0fc4cbedcf2dff637d49687594a1a604a71cf59d | 0865244937b6309900f8b972d53db6967fb821ea | 0050fdf48ec2758a2a83f7beee66010f91f59f4a | refs/heads/master | 2020-05-17T22:06:57.372245 | 2019-04-29T03:15:35 | 2019-04-29T03:15:35 | 183,991,486 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3895348906517029,
"alphanum_fraction": 0.4011628031730652,
"avg_line_length": 14.176470756530762,
"blob_id": "0e630f42c8af3b8fef331050e89f39ebdcf4090f",
"content_id": "3d998e9ef5d2b0dbd54aebc61c43290eb4e923d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 34,
"path": "/2.py",
"repo_name": "parth-gudhka/CodeJam-2019-1B",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nimport sys\n\n\ndef solve(a, b):\n w = 0\n ready = False\n print(1)\n sys.stdout.flush()\n s = int(input())\n print(s)\n\n # def loop(day):\n # print(day)\n # sys.stdout.flush()\n # s = int(input())\n # print(s)\n # if (s == -1):\n # solve()\n # else:\n # return\n #\n # loop(w + 1)\n\n\ndef main():\n T, W = map(int, input().split())\n print(T, W)\n for _ in range(1, T + 1):\n solve()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.316755473613739,
"alphanum_fraction": 0.33155712485313416,
"avg_line_length": 29.160715103149414,
"blob_id": "b360d069d784eb9defe7af87b4c49061e2fab709",
"content_id": "a86e315d146971797ab6c4d629a4da7e95c4e35b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1689,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 56,
"path": "/1.py",
"repo_name": "parth-gudhka/CodeJam-2019-1B",
"src_encoding": "UTF-8",
"text": "from numpy import zeros\n\n\ndef main():\n t = int(input())\n for i in range(1, t + 1):\n # for i in range(2):\n test_case = input()\n p, q = map(int, test_case.split())\n # people = []\n people = []\n for j in range(p):\n case = input().split()\n people.append(\n {\n \"x\": int(case[0]),\n \"y\": int(case[1]),\n \"dir\": case[2]\n }\n )\n matrix = zeros([q + 1, q + 1])\n for person in people:\n if person[\"dir\"] == \"W\":\n for c in range(person[\"x\"]):\n for d in range(q + 1):\n matrix[c][d] += 1\n elif person[\"dir\"] == \"E\":\n for c in range(person[\"x\"] + 1, q + 1):\n for d in range(q + 1):\n matrix[c][d] += 1\n elif person[\"dir\"] == \"N\":\n for c in range(q + 1):\n for d in range(person[\"y\"] + 1, q + 1):\n matrix[c][d] += 1\n elif person[\"dir\"] == \"S\":\n for c in range(q + 1):\n for d in range(person[\"y\"]):\n matrix[c][d] += 1\n max = 0\n x = 0\n y = 0\n for c in range(q + 1):\n for d in range(q + 1):\n if (matrix[c][d] > max):\n x = c\n y = d\n max = matrix[c][d]\n # print(max, x, y)\n print(matrix)\n print(\"Case #\" + str(i) + \": \" + str(x) + \" \" + str(y))\n # print(people)\n # print(p, q)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.3608815371990204,
"alphanum_fraction": 0.3705234229564667,
"avg_line_length": 24.034482955932617,
"blob_id": "5a2f7be3c826015ddf106923e47961331fb1e10e",
"content_id": "47fb2990ea0c2ce82c4e3a52b1cea8f2f7070bc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 726,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 29,
"path": "/3_temp.py",
"repo_name": "parth-gudhka/CodeJam-2019-1B",
"src_encoding": "UTF-8",
"text": "def main():\n t = int(input())\n for i in range(1, t + 1):\n # for i in range(2):\n test_case = input()\n n, k = map(int, test_case.split())\n # print(k)\n c = list(map(int, input().split()))\n d = list(map(int, input().split()))\n\n num = 0\n\n # Find num here\n for p in range(n):\n for q in range(p, n):\n c_max = max(c[p:q+1])\n d_max = max(d[p:q+1])\n # print(c_max, d_max)\n if (abs(c_max - d_max) <= k):\n # print(p, q, c_max, d_max)\n num += 1\n\n #\n # num = i\n print(\"Case #\" + str(i) + \": \" + str(num))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6140350699424744,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 27.5,
"blob_id": "128f9965a0b72aded291e6f63c57a18f1e0eadb0",
"content_id": "ebb6d14461a94d4b4472f30570fa1d844d12c56e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 2,
"path": "/README.md",
"repo_name": "parth-gudhka/CodeJam-2019-1B",
"src_encoding": "UTF-8",
"text": "# CodeJam-2019-1B\nMy solutions for CodeJam 2019 Round 1B\n"
}
] | 4 |
hybby/portcheck | https://github.com/hybby/portcheck | df735955419ab742e56741a9d8df9974b156afe9 | ba8902157c4f7f7fb65c8c5b03150410702d47a2 | b3589914bdba8903acf8ef3f29fee582ac95c758 | refs/heads/master | 2021-01-19T08:15:40.159957 | 2015-12-16T17:04:26 | 2015-12-16T17:04:26 | 34,843,836 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5832257866859436,
"alphanum_fraction": 0.625806450843811,
"avg_line_length": 26.175437927246094,
"blob_id": "4bb25e3b645fcd04407ceca4425d6096cdf9bb25",
"content_id": "0bf2441523477ce7ecacde4d648f8c3b74e0016f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1550,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 57,
"path": "/README.md",
"repo_name": "hybby/portcheck",
"src_encoding": "UTF-8",
"text": "# portcheck.py\n\n## synopsis\n\njust a python port checker. shouldn't be any crazy deps or anything like that.\n\neveryone's written one of these at some point. right? right.\n\n\n## installation\n\njust clone the repo\n\n git clone [email protected]:hybby/portcheck.git\n\n\n## usage \n\n $ ./portcheck.py --help\n usage: portcheck.py [-h] --host HOST --port PORT [--retries RETRIES]\n [--interval INTERVAL]\n \n a script for checking for testing network ports\n \n optional arguments:\n -h, --help show this help message and exit\n --host HOST hostname or ip address\n --port PORT tcp port number\n --retries RETRIES number of times to retry upon failure\n --interval INTERVAL interval in seconds between retries\n\n## examples\n\ngo ahead and give 'er a spin\n\n # when it works\n $ ./portcheck.py --host utora05a --port 22\n tcp/22 on utora05a open\n\n # when it doesn't\n $ ./portcheck.py --host utora05a --port 23\n tcp/23 on utora05a closed\n\n # when it doesn't with OpTiOnS\n $ ./portcheck.py --host utora05a --port 23 --retries 10 --interval 5\n setting retries to 10\n setting interval to 5 seconds\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n tcp/23 on utora05a closed\n\n"
},
{
"alpha_fraction": 0.6429272890090942,
"alphanum_fraction": 0.6625736951828003,
"avg_line_length": 25.441558837890625,
"blob_id": "34c386101509a7732bb93a2d3808461e6333dc03",
"content_id": "34f83b29db16e6a90d5e69c8590db9d4d5173dfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2036,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 77,
"path": "/portcheck.py",
"repo_name": "hybby/portcheck",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\nimport os\nimport socket\nimport argparse\nimport time\nfrom types import *\n\n# colors\nclass color:\n blue = '\\033[94m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n normal = '\\033[0m'\n\n# config \nattempt = int(1)\nretries = int(1)\ninterval = int(1)\ntimeout = int(5)\nsuccess = False\n\n# parse our arguments\nparser = argparse.ArgumentParser(description='a script for checking for testing network ports')\nparser.add_argument('--host', type=str, help='hostname or ip address', required=True)\nparser.add_argument('--port', type=int, help='tcp port number', required=True)\nparser.add_argument('--retries', type=int, help='number of times to retry upon failure')\nparser.add_argument('--interval', type=int, help='interval in seconds between retries')\nargs = parser.parse_args()\n\n\n# override our default values if provided\nif args.retries:\n print \"setting retries to %d\" % (args.retries)\n retries = args.retries\n\nif args.interval:\n print \"setting interval to %d seconds\" % (args.interval)\n interval = args.interval\n\n\n# check our datatypes\nassert type(args.host) is StringType, \"host is not a string: %r\" % (args.host)\nassert type(args.port) is IntType, \"port is not an integer: %r\" % (args.port)\nassert type(retries) is IntType, \"retries is not an integer: %r\" % (retries)\nassert type(interval) is IntType, \"interval is not an integer: %r\" % (interval)\n\n# attempt our connection\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.settimeout(timeout)\nresult = sock.connect_ex((args.host,args.port))\n\nwhile attempt <= retries: \n if result == 0:\n success = True\n print(\"%s tcp/%d\" % (args.host, args.port)\n + color.green\n + \"\\topen\"\n + color.normal)\n break\n else:\n time.sleep(interval)\n attempt += 1\n print(\"%s tcp/%d\" % (args.host, args.port)\n + color.red\n + \"\\tclosed\"\n + color.normal)\n\n\n# and exit\nif success: \n sys.exit(0)\nelse:\n sys.exit(1)\n"
}
] | 2 |
jvadebossan/EVA-Editable-Virtual-Assistant | https://github.com/jvadebossan/EVA-Editable-Virtual-Assistant | b608cb670b28d86c769c30d8ed3ffe217063ed57 | 3fbc0ce29e2752818efed89537174daec1cbbecf | 2aaa9f35be51341411fd57957729ef8b976a6289 | refs/heads/master | 2022-12-29T22:17:12.925739 | 2020-10-03T01:05:57 | 2020-10-03T01:05:57 | 289,769,493 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5805009007453918,
"alphanum_fraction": 0.587656557559967,
"avg_line_length": 27.421052932739258,
"blob_id": "e617bd656325d724f5f8f387a86e9ba1f062243c",
"content_id": "d4e7cc17a0ab54ab69a9686b9fa22fa955f812a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1131,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 38,
"path": "/configs.py",
"repo_name": "jvadebossan/EVA-Editable-Virtual-Assistant",
"src_encoding": "UTF-8",
"text": "import datetime\r\n\r\n#padrão ==============================================\r\nversion = ('0.1')\r\ncreator = str('@jvadebossan')\r\ncreator_to_talk = str('@ j v a debossan')\r\n\r\n#variaveis de dia e hora\r\ndia1 = datetime.datetime.now()\r\ndia = ('hoje é dia {} do {} de {}'.format(dia1.day, dia1.month, dia1.year))\r\n\r\nhr1 = datetime.datetime.now()\r\nhr = hr1.strftime(\"%H:%M\")\r\n\r\n\r\n#editaveis ========================================\r\nname_to_talk = ('éva') #fonética\r\nname = ('eva') #nome correto\r\n\r\nplaylist = ('https://open.spotify.com/')\r\nuser_name = ('joão vitor')\r\n\r\n#listas\r\nl_boas_vindas = [\r\n'o que você precisa, {}'.format(user_name),\r\n'seja bem vindo de volta,{}'.format(user_name),\r\n'olá {}'.format(user_name),\r\n'sim'\r\n'pode falar'\r\n]\r\n\r\nl_piadas = [\r\n'o que é um pontinho vermelho no castelo, , , é uma pimenta do reino ',\r\n'o que é um pontinho amarelo na africa, , , é um ieloufante',\r\n'porque a plantinha nao foi atendida no hospital, , , porque só tinha médico de plantão',\r\n'o que o pagodeiro foi fazer na igreja, , , ele foi cantar pa god',\r\n'o que acontece quando chove na inglaterra, , , ela vira inglalama',\r\n]\r\n"
},
{
"alpha_fraction": 0.7988165616989136,
"alphanum_fraction": 0.7988165616989136,
"avg_line_length": 27.16666603088379,
"blob_id": "bae6a59de10c17ecd0552eedb8f59e6a199f701d",
"content_id": "139dda3b235a1767c51796a21324290946a1a6a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 12,
"path": "/README.md",
"repo_name": "jvadebossan/EVA-Editable-Virtual-Assistant",
"src_encoding": "UTF-8",
"text": "# EVA-Editable-Virtual-Assistant\n\nEsse projeto consiste em um assistente virtual por comandos de voz.\nPara poder executar:\n\nRequerimentos: \npip install Pyaudio (arquivo incluído no programa, basta digitar \"Py\" e apertar TAB)\npip install speech_recognition\n\nColoque tudo na mesma pasta, e execute o MAIN\n\n(Irei fazer alterações no futuro)\n"
},
{
"alpha_fraction": 0.4615979492664337,
"alphanum_fraction": 0.47654637694358826,
"avg_line_length": 36.039215087890625,
"blob_id": "a632836cc1972b14a436c4511585483b069c9d72",
"content_id": "069080dc39df9d2f8ad9bd663ac3ef93975909f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3899,
"license_type": "no_license",
"max_line_length": 481,
"num_lines": 102,
"path": "/main.py",
"repo_name": "jvadebossan/EVA-Editable-Virtual-Assistant",
"src_encoding": "UTF-8",
"text": "from configs import *\r\nimport speech_recognition as sr\r\nfrom random import choice\r\nimport pyttsx3\r\nimport datetime\r\nimport sys\r\nfrom time import sleep as wait\r\nimport webbrowser as wb\r\n\r\ndef intro():\r\n print('=============================================================================================')\r\n print('= version: ' + version, ' ' + name, 'assistant' ' made by' + creator, ' =')\r\n print('=============================================================================================')\r\n frase_intro = ('{} assistente, versão {} feito por {}'.format(name_to_talk, version, creator_to_talk) )\r\n say(frase_intro)\r\n start()\r\n\r\ndef restart():\r\n print('.')\r\n wait(0.2)\r\n start()\r\n\r\ndef desligar():\r\n sys.exit()\r\n\r\ndef reboot():\r\n wait(0.2)\r\n intro()\r\n\r\ndef say(tosay):\r\n engine = pyttsx3.init()\r\n engine.say(tosay)\r\n engine.runAndWait()\r\n\r\n\r\ndef start():\r\n while True:\r\n r = sr.Recognizer()\r\n with sr.Microphone() as fonte:\r\n print('ouvindo...')\r\n audio = r.listen(fonte)\r\n textc = r.recognize_google(audio, language='pt-br') \r\n text = textc.lower()\r\n print(text)\r\n try:\r\n engine = pyttsx3.init()\r\n #função boas vindas\r\n if text == str(name): #esse ouve o próprio nome dela e responde com um bom dia ou algo do tipo\r\n msg_boas_vindas = choice(l_boas_vindas)\r\n say(msg_boas_vindas)\r\n \r\n #função tocar música\r\n elif 'playlist' in text:#abre a playlist de muscia do usuário, pre definida em \"configs\"\r\n wb.open(playlist, new=2)\r\n \r\n #função dia\r\n elif 'dia' in text: #fala o dia\r\n print(dia)\r\n say(dia)\r\n\r\n #função horas\r\n elif 'horas' in text: #fala as horas\r\n print(hr)\r\n say(hr)\r\n\r\n\t\t\t #função piadas\r\n elif 'piada' in text: # lança a braba\r\n joke = (choice(l_piadas))\r\n joke = choice(l_piadas)\r\n print (joke)\r\n say(joke)\r\n\r\n #função desligar\r\n elif 'desligar' in text: #desliga o sistema\r\n desligando = str('desligando em 3, 2, 1')\r\n print (desligando)\r\n engine.say(desligando)\r\n engine.runAndWait()\r\n desligar()\r\n\r\n #função reiniciar\r\n elif 'reiniciar' in text: #reinicia o sistema\r\n reiniciando = str('reiniciando em 3, 2, 1')\r\n print (reiniciando)\r\n engine.say(reiniciando)\r\n engine.runAndWait()\r\n reboot()\r\n\r\n elif 'fale' in text:\r\n texto_falar = text.replace('fale', '')\r\n say(texto_falar)\r\n\r\n elif 'pesquis' in text:\r\n site_pesquisar = text.replace('pesquis', '')\r\n say('pesquisando ' + site_pesquisar)\r\n wb.open('https://www.google.com/search?client=opera-gx&hs=5GZ&sxsrf=ALeKk02LWQxX_lhfnlTF6lCi_LYm0x5kqg%3A1601686367378&ei=X8t3X_LeFpPA5OUP0e6-WA&q={}&oq={}&gs_lcp=CgZwc3ktYWIQAzIHCAAQChDLATIECAAQHjoHCCMQ6gIQJzoECCMQJzoFCAAQsQM6CAguELEDEIMBOgIIADoFCC4QsQM6BAgAEAo6BggAEAoQHlD_EVjVH2COImgBcAB4AIABsgKIAdMJkgEHMC41LjAuMZgBAKABAaoBB2d3cy13aXqwAQrAAQE&sclient=psy-ab&ved=0ahUKEwiyiuDXmpfsAhUTILkGHVG3DwsQ4dUDCAw&uact=5'.format(site_pesquisar, site_pesquisar), new=2)\r\n \r\n elif text not in comandos:\r\n restart()\r\n except:\r\n restart()\r\nintro()\r\n"
},
{
"alpha_fraction": 0.5728813409805298,
"alphanum_fraction": 0.6033898591995239,
"avg_line_length": 29.052631378173828,
"blob_id": "064217fc1c6a5db029c58bdc7eef4e1668aca16c",
"content_id": "e744f55413f743fe3c0ea6682ca84ff1dc0a4f5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 19,
"path": "/testes.py",
"repo_name": "jvadebossan/EVA-Editable-Virtual-Assistant",
"src_encoding": "UTF-8",
"text": "import webbrowser\r\na = input('say something: ')\r\n\r\nif ('open') in a:\r\n if ('google') in a:\r\n webbrowser.open('www.google.com.br', new=2)\r\n elif ('youtube') in a:\r\n webbrowser.open('www.youtube.com', new=2)\r\n elif ('kahoot') in a:\r\n webbrowser.open('kahoot.it', new=2)\r\n else:\r\n print('aplicativo não encontrado')\r\n\r\nelif ('pesquisar') in a:\r\n print(a)\r\n webbrowser.open('https://www.google.com/search?q={}&oq={}&aqs=chrome.0.0j46j0l6.1656j1j15&sourceid=chrome&ie=UTF-8'.format(a, a), new=2)\r\n\r\nelse: \r\n print('comando ainda não suportado')\r\n"
}
] | 4 |
hikaruya8/nlp100 | https://github.com/hikaruya8/nlp100 | 2ce951bcd0ef44b568892bd33a94a9d265781aec | 81859e8b00b6278bdcc776c6fb9fc2815626fb1e | 39445abc9423c5bb3bf9a020ca49e9ecf0226f97 | refs/heads/master | 2020-03-27T09:45:47.377240 | 2019-03-10T01:16:57 | 2019-03-10T01:16:57 | 146,370,142 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6213592290878296,
"alphanum_fraction": 0.6504854559898376,
"avg_line_length": 14.923076629638672,
"blob_id": "6bc8da074f1856435520c12564e388a8b3505883",
"content_id": "a543af7680f33ad3a483916725f0f0e8d84be583",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 13,
"path": "/chapter4/nlp32.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 32. 動詞の原形\n# 動詞の原形をすべて抽出せよ\n\nimport nlp30\n\nlines = nlp30.neko_lines()\nverbs = []\nfor line in lines:\n for morpheme in line:\n if morpheme['pos'] == '動詞':\n verbs.append(morpheme['base'])\n\nprint(verbs)"
},
{
"alpha_fraction": 0.5799458026885986,
"alphanum_fraction": 0.6219512224197388,
"avg_line_length": 18.36842155456543,
"blob_id": "adff40f77569c25f3b3e24790455da7e02b9f41c",
"content_id": "b4dc0ff6fdd544b6c1c09052d9db8a4e774bdf82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 946,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 38,
"path": "/chapter1/06.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# \"paraparaparadise\"と\"paragraph\"に含まれる文字bi-gramの集合を,それぞれ, XとYとして求め,XとYの和集合,積集合,差集合を求めよ.さらに,'se'というbi-gramがXおよびYに含まれるかどうかを調べよ.\n\nimport nltk\n\nword1 = \"paraparaparadise\"\nword2 = \"paragraph\"\nX = []\nY = []\n\nbigram1 = nltk.ngrams(word1, 2)\nbigram2 = nltk.ngrams(word2, 2)\n\n\ndef bigram_x(x):\n for b1 in bigram1:\n x.append(b1)\n return x\n\ndef bigram_y(y):\n for b2 in bigram2:\n y.append(b2)\n return y\n\nbigram_x(X)\nbigram_y(Y)\n\nX1 = set(X) #集合は辞書型にしなければならない\nY1 = set(Y)\n\nprint('X:{}'.format(X1))\nprint('Y:{}'.format(Y1))\nprint('和集合:{}'.format(X1 | Y1))\nprint('積集合:{}'.format(X1 & Y1))\nprint('差集合:{}'.format(X1 ^ Y1))\n\n# {1, 3} <= {1, 2, 3}\nprint('seがXに含まれる:{}'.format({('s', 'e')} <= X1))\nprint('seがYに含まれる:{}'.format({('s', 'e')} <= Y1))\n\n\n"
},
{
"alpha_fraction": 0.6772388219833374,
"alphanum_fraction": 0.7108209133148193,
"avg_line_length": 28.72222137451172,
"blob_id": "40d89611a44e7ea3d92824be08ed15ac24bc3ff8",
"content_id": "5f96feee142c4878e0a79ee49ec1fb093d75d9df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 18,
"path": "/chapter2/19.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 19. 各行の1コラム目の文字列の出現頻度を求め,出現頻度の高い順に並べる\n# 各行の1列目の文字列の出現頻度を求め,その高い順に並べて表示せよ.確認にはcut, uniq, sortコマンドを用いよ.\n\nfrom collections import Counter\nimport numpy as np\nimport nltk\n\ndef count_row1():\n f = open('hightemp.txt')\n document = f.read()\n doc_token = nltk.word_tokenize(document)\n doc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n\n unique, counts = np.unique(doc_2d[0:, 0], return_counts=True)\n count_pre = np.asarray((unique, counts)).T\n print(count_pre[count_pre[:,1].argsort(), :][::-1]) #先頭の列でソート+降順\n\ncount_row1()\n\n"
},
{
"alpha_fraction": 0.6307692527770996,
"alphanum_fraction": 0.6692307591438293,
"avg_line_length": 31.5625,
"blob_id": "897498a2420860c5668c39828404a02f301a04e7",
"content_id": "ae22164f762ceafb7196ee02526c92878768d64d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 16,
"path": "/chapter2/13.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 13. col1.txtとcol2.txtをマージ\n# 12で作ったcol1.txtとcol2.txtを結合し,元のファイルの1列目と2列目をタブ区切りで並べたテキストファイルを作成せよ.確認にはpasteコマンドを用いよ.\n\n# import glob\n# import subprocess\n\n# for f in glob.glob(\"col*.txt\"):\n# subprocess.call(\"cat \"+f+\" >> OutFile.txt\", shell=True)\n# 上は羅列だけになってる\n\nwith open(\"col1.txt\") as col1_file, \\\n open(\"col2.txt\") as col2_file, \\\n open(\"merge.txt\", mode=\"w\") as out_file:\n\n for col1_line, col2_line in zip(col1_file, col2_file):\n out_file.write(col1_line.rstrip() + '\\t' + col2_line.rstrip() + '\\n')"
},
{
"alpha_fraction": 0.6384040117263794,
"alphanum_fraction": 0.6807979941368103,
"avg_line_length": 27.64285659790039,
"blob_id": "dd9e1e178b2e2cc8858a0f861aa1f7695e54723c",
"content_id": "bd1d1324d95d8f1e2a73eaa1254e4a498f00355e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 14,
"path": "/chapter1/04.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "import nltk\n\nelements = 'Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.'\nwords = nltk.word_tokenize(elements) #単語分割\nnum_first_only = (1, 5, 6, 7, 8, 9, 15, 16, 19)\nresult = {}\n\nfor num, word in enumerate(words, 1):\n if num in num_first_only:\n result[word[0:1]] = num\n else:\n result[word[0:2]] = num\n\nprint(result, num)\n"
},
{
"alpha_fraction": 0.6393897533416748,
"alphanum_fraction": 0.694868266582489,
"avg_line_length": 21.40625,
"blob_id": "1c80ecbe5195c72959ed020a1959798bc7e642ad",
"content_id": "62ccdc685ab917db0ac7d8e37f9e6277f1d82fba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 943,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 32,
"path": "/chapter2/12.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 12. 1列目をcol1.txtに,2列目をcol2.txtに保存\n# 各行の1列目だけを抜き出したものをcol1.txtに,2列目だけを抜き出したものをcol2.txtとしてファイルに保存せよ.確認にはcutコマンドを用いよ.\n\nimport nltk\nimport numpy as np #numpyで2次元配列にする\n\nf = open('hightemp.txt')\ndocument = f.read()\ndoc_token = nltk.word_tokenize(document)\ndoc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n\ncol1 = doc_2d[:,0] #各行の1列目だけを抜き出したもの\ncol2 = doc_2d[:,1] #各行の1列目だけを抜き出したもの\n\n\npath_w = '/Users/yamadahikaru/Projects/ML_Projects_Python/nlp100/chapter2/row_n.txt'\n\n\nl1 = col1\nl2 = col2\n\nwith open(path_w1, mode='w') as f:\n f.write('\\n'.join(l1))\n\nwith open(path_w1) as f:\n print(f.read())\n\nwith open(path_w2, mode='w') as g:\n g.write('\\n'.join(l2))\n\nwith open(path_w2) as g:\n print(g.read())\n\n\n\n\n"
},
{
"alpha_fraction": 0.6203389763832092,
"alphanum_fraction": 0.6440678238868713,
"avg_line_length": 21.769229888916016,
"blob_id": "7eb1c39c892c17180010713920372b263343a653",
"content_id": "5ecc4990f46f7536b427ca6633f29515f26a5d0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 13,
"path": "/chapter4/nlp33.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 33. サ変名詞\n# サ変接続の名詞をすべて抽出せよ.\n\nimport nlp30\n\nlines = nlp30.neko_lines()\nnoun_sahen = []\nfor line in lines:\n for morpheme in line:\n if (morpheme['pos'] == '名詞' and morpheme['pos1'] == 'サ変接続') and not morpheme['surface'] == '——':\n noun_sahen.append(morpheme['surface'])\n\nprint(noun_sahen)"
},
{
"alpha_fraction": 0.5211864113807678,
"alphanum_fraction": 0.5720338821411133,
"avg_line_length": 17.230770111083984,
"blob_id": "bfc9ddcef62c19f226c839e2733e0c5a50f7b0d3",
"content_id": "79561634df5adc0c6b148fd05e39a8880127cd08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 13,
"path": "/chapter3/nlp22.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 22. カテゴリ名の抽出\n# 記事のカテゴリ名を(行単位ではなく名前で)抽出せよ.\n\nimport nlp20\n\ndata_uk = nlp20.read_uk('text').splitlines()\nfor d in data_uk:\n if 'Category' in d:\n l = d.find('|')\n if not l == -1:\n print(d[11:l])\n else:\n print(d[11:-2])"
},
{
"alpha_fraction": 0.6553288102149963,
"alphanum_fraction": 0.6575963497161865,
"avg_line_length": 23.5,
"blob_id": "173d3f1b00ae73b91e1f06289557e2c82d2c4630",
"content_id": "0a38f6d72a120fc738102063036a32b56f4c78f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 555,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 18,
"path": "/chapter1/05.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 与えられたシーケンス(文字列やリストなど)からn-gramを作る関数を作成せよ.この関数を用い,\"I am an NLPer\"という文から単語bi-gram,文字bi-gramを得よ.\nimport nltk\nwords_bi = []\nchar_bi = []\n\ndef ngram(words, n):\n tokens = nltk.word_tokenize(words)\n bigram = nltk.ngrams(tokens, n)\n for bi in bigram:\n words_bi.append(bi)\n for b in bi:\n char = nltk.ngrams(b, n)\n for c in char:\n char_bi.append(c)\n return words_bi, char_bi\n\nwords = \"I am an NLPer\"\nprint(ngram(words, 2))\n"
},
{
"alpha_fraction": 0.6477987170219421,
"alphanum_fraction": 0.6855345964431763,
"avg_line_length": 16.55555534362793,
"blob_id": "154384fe643079e55b1195f00775df5257396ede",
"content_id": "3b8bf271f65d24287a1f7bf02af6a5beb73bd018",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/chapter3/nlp21.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 21. カテゴリ名を含む行を抽出\n# 記事中でカテゴリ名を宣言している行を抽出せよ.\n\nimport nlp20\n\ndata_uk = nlp20.read_uk('text').splitlines()\nfor d in data_uk:\n if 'Category' in d:\n print(d)\n\n"
},
{
"alpha_fraction": 0.6872586607933044,
"alphanum_fraction": 0.7065637111663818,
"avg_line_length": 20.66666603088379,
"blob_id": "d9509f50192cf107b6d87ac447acb1e187a9addd",
"content_id": "e8a121c3f82381bd7091f64974372901cfd0d62e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 12,
"path": "/chapter2/11.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 11. タブをスペースに置換\n# タブ1文字につきスペース1文字に置換せよ.確認にはsedコマンド,trコマンド,もしくはexpandコマンドを用いよ.\ndocument = open('hightemp.txt')\n\ndef tab_space(document):\n d_list = ''\n for doc in document:\n d = doc.expandtabs(1)\n d_list += d\n return d_list\n\nprint(tab_space(document))"
},
{
"alpha_fraction": 0.7411083579063416,
"alphanum_fraction": 0.7832919955253601,
"avg_line_length": 35.6363639831543,
"blob_id": "847bed8114450906338d55fc7300f59f26c00dcb",
"content_id": "02db5b8f22ca379b6381e27e5b4862cf6e2e498c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1727,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 33,
"path": "/README.md",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "git管理をクライントで管理するのからターミナル操作でするようにしたら、git cloneできなくなってしまったので、続きをこちらにしました。\nhttps://github.com/hikaruya8/nlp100_next\n\n# Natural Language Processing 100 questions\n\nPractice Natural Language Processing by 100 questions of Naoaki Okazaki, Inui-Okazaki Laboratory in Tohoku University (http://www.cl.ecei.tohoku.ac.jp/).\n\nI sometimes use API like NLTK, so it may not be followed basic.\n\n\n\n乾・鈴木研究室の言語処理100本ノックをやっていきます。\n[NLTK](https://www.nltk.org/)などのライブラリもけっこう使ってるので基本とは少し違うかもしれません。\n\nまたわからないときには@segavvyさんの[素人の言語処理100本ノック:まとめ](https://qiita.com/segavvy/items/fb50ba8097d59475f760)を参考にさせていただいています。\n\nNLP 100 quetions [言語処理100本ノック](http://www.cl.ecei.tohoku.ac.jp/nlp100/#data)\n\n### Quotation, Reference, Requirements\n#### chapter2:\n出典: [気象庁ホームページ 歴代全国ランキング>観測史上の順位>最高気温の高い方から](http://www.data.jma.go.jp/obd/stats/etrn/view/rankall.php?prec_no=&block_no=&year=&month=&day=&view=)\nを加工して利用\n\n#### chapter3:\n[Wikipediaの記事をJSON形式で書き出したもの](http://www.cl.ecei.tohoku.ac.jp/nlp100/data/jawiki-country.json.gz)\nをデータとして使用\n\n#### chapter4:\n夏目漱石の小説『吾輩は猫である』の文章 [neko.txt](http://www.cl.ecei.tohoku.ac.jp/nlp100/data/neko.txt)\n使用\nrequirment:\n[MeCab](http://taku910.github.io/mecab/#usage-tools),\n[matplotlib](https://matplotlib.org/)\n"
},
{
"alpha_fraction": 0.6103664040565491,
"alphanum_fraction": 0.6327077746391296,
"avg_line_length": 26.292682647705078,
"blob_id": "7696ee92573621b4719ca654c3ca82b09078f14a",
"content_id": "068712f2cfadc01496cffb868fe3e590080e7fcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1379,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 41,
"path": "/chapter2/16.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 16. ファイルをN分割する\n# 自然数Nをコマンドライン引数などの手段で受け取り,入力のファイルを行単位でN分割せよ.同様の処理をsplitコマンドで実現せよ.\n\n\n# coding: utf-8 素人の言語処理100本ノック模範解答\nimport math\n\nfname = 'hightemp.txt'\nn = int(input('N--> '))\n\nwith open(fname) as data_file:\n lines = data_file.readlines()\n\ncount = len(lines)\nunit = math.ceil(count / n) # 1ファイル当たりの行数\n\nfor i, offset in enumerate(range(0, count, unit), 1):\n with open('child_{:02d}.txt'.format(i), mode='w') as out_file:\n for line in lines[offset:offset + unit]:\n out_file.write(line)\n\n# import nltk\n# import numpy as np #numpyで2次元配列にする\n# def split_n_file(n):\n# f = open('hightemp.txt')\n# document = f.read()\n# doc_token = nltk.word_tokenize(document)\n# doc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n\n# path_w = '/Users/yamadahikaru/Projects/ML_Projects_Python/nlp100/chapter2/split_n.txt'\n# for r in row_n:\n# r = doc_2d[(int(n),:)] #1~n行目を抜き出したもの\n\n# for l in row_n:\n# with open(path_w, mode='w') as f:\n# f.write(' '.join(l))\n# with open(path_w) as f:\n# print(f.read())\n\n# n = input(\"自然数(半角数字)を入力してください: \")\n# split_n_file(n)\n"
},
{
"alpha_fraction": 0.6435483694076538,
"alphanum_fraction": 0.6693548560142517,
"avg_line_length": 23.84000015258789,
"blob_id": "c2ede13e6649e848e9a0ce11bbdc40dc3151b0aa",
"content_id": "6bbb60021e2bae384a232acd434d7df46bc2f210",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 25,
"path": "/chapter2/15.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 15. 末尾のN行を出力\n# 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち末尾のN行だけを表示せよ.確認にはtailコマンドを用いよ\n\nimport nltk\nimport numpy as np #numpyで2次元配列にする\n\ndef make_col_n(n):\n f = open('hightemp.txt')\n document = f.read()\n doc_token = nltk.word_tokenize(document)\n doc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n\n row_n = doc_2d[(-int(n)):,:] #1~n行目を抜き出したもの\n\n\n path_w = '/Users/yamadahikaru/Projects/ML_Projects_Python/nlp100/chapter2/row_n.txt'\n\n for l in row_n:\n with open(path_w, mode='w') as f:\n f.write(' '.join(l))\n with open(path_w) as f:\n print(f.read())\n\nn = input(\"自然数(半角数字)を入力してください: \")\nmake_col_n(n)"
},
{
"alpha_fraction": 0.6289682388305664,
"alphanum_fraction": 0.6507936716079712,
"avg_line_length": 14.78125,
"blob_id": "5862db664773057f0d792a212b98d1ddcd0497c3",
"content_id": "f6b259ecb61527af3ad7987b125c38aa56202651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 32,
"path": "/chapter1/08.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 08. 暗号文\n# 与えられた文字列の各文字を,以下の仕様で変換する関数cipherを実装せよ.\n\n# 英小文字ならば(219 - 文字コード)の文字に置換\n# その他の文字はそのまま出力\n# この関数を用い,英語のメッセージを暗号化・復号化せよ\n\ndef cipher(target):\n result = ''\n \n for t in target:\n if t.islower():\n result += chr(219 - ord(t))\n\n else:\n result += t\n return result\n\n# 対象文字列の入力\ntarget = input('文字列を入力してください--> ')\n\n# 暗号化\nresult = cipher(target)\nprint('暗号化:' + result)\n\n# 復号化\nresult2 = cipher(result)\nprint('復号化:' + result2)\n\n# 復号化で元に戻っているかチェック\nif result2 != target:\n print('元に戻っていない!?')"
},
{
"alpha_fraction": 0.5307855606079102,
"alphanum_fraction": 0.5605095624923706,
"avg_line_length": 23.842105865478516,
"blob_id": "c5dcada41b8a3612e60715803c78d226edb60e0a",
"content_id": "38c11ab84b9210b1724fbc88492decfea90a718c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 19,
"path": "/chapter4/nlp34.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 34. 「AのB」\n# 2つの名詞が「の」で連結されている名詞句を抽出せよ.\n\nimport nlp30\n\nlines = nlp30.neko_lines()\nnouns_a_no_b = []\nfor line in lines:\n if len(line) > 2:\n for i in range(1, len(line) - 1):\n if line[i]['surface'] == 'の' \\\n and line[i-1]['pos'] == '名詞' \\\n and line[i+1]['pos'] == '名詞':\n nouns_a_no_b.append(line[i-1]['surface']+'の'+line[i+1]['surface'])\na_no_b = set(nouns_a_no_b)\n\n\nsorted_a_no_b = sorted(a_no_b, key=nouns_a_no_b.index)\nprint(sorted_a_no_b)"
},
{
"alpha_fraction": 0.6013400554656982,
"alphanum_fraction": 0.6197655200958252,
"avg_line_length": 21.9743595123291,
"blob_id": "b0aca328da8c1edc224b9a2b1777429ff1cbf3a9",
"content_id": "39f976d5ba10458f95e0eb74afb911ddc846fd21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2527,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 78,
"path": "/chapter5/nlp44.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "import from44\nfrom from44 import Chunk\nimport pydot\n\nfname = 'neko.txt.tmp'\nfname_parsed = 'neko.txt.cabocha.tmp'\n\ndef graph_from_edges_ex(edge_list, directed=False):\n '''pydot_ng.graph_from_edges()のノード識別子への対応版\n\n graph_from_edges()のedge_listで指定するタプルは\n 識別子とグラフ表示時のラベルが同一のため、\n ラベルが同じだが実体が異なるノードを表現することができない。\n 例えば文の係り受けをグラフにする際、文の中に同じ単語が\n 複数出てくると、それらのノードが同一視されて接続されてしまう。\n\n この関数ではedge_listとして次の書式のタプルを受け取り、\n ラベルが同一でも識別子が異なるノードは別ものとして扱う。\n\n edge_list = [((識別子1,ラベル1),(識別子2,ラベル2)), ...]\n\n 識別子はノードを識別するためのもので表示されない。\n ラベルは表示用で、同じでも識別子が異なれば別のノードになる。\n\n なお、オリジナルの関数にあるnode_prefixは未実装。\n\n 戻り値:\n pydot.Dotオブジェクト\n '''\n\n if directed:\n graph = pydot.Dot(graph_type='digraph')\n\n else:\n graph = pydot.Dot(graph_type='graph')\n\n for edge in edge_list:\n\n id1 = str(edge[0][0])\n label1 = str(edge[0][1])\n id2 = str(edge[1][0])\n label2 = str(edge[1][1])\n\n # ノード追加\n graph.add_node(pydot.Node(id1, label=label1))\n graph.add_node(pydot.Node(id2, label=label2))\n\n # エッジ追加\n graph.add_edge(pydot.Edge(id1, id2))\n\n return graph\n\n\n# 対象文字列を入力してもらい、そのままfnameに保存\nwith open(fname, mode='w') as out_file:\n out_file.write(input('文字列を入力してください--> '))\n\n# 係り受け解析\nfrom44.parse_neko()\n\n# 1文ずつリスト作成\nfor chunks in from44.neko_lines():\n\n # 係り先があるものを列挙\n edges = []\n for i, chunk in enumerate(chunks):\n if chunk.dst != -1:\n\n # 記号を除いた表層形をチェック、空なら除外\n src = chunk.normalized_surface()\n dst = chunks[chunk.dst].normalized_surface()\n if src != '' and dst != '':\n edges.append(((i, src), (chunk.dst, dst)))\n\n # 描画\n if len(edges) > 0:\n graph = graph_from_edges_ex(edges, directed=True)\n graph.write_png('result.png')"
},
{
"alpha_fraction": 0.6172248721122742,
"alphanum_fraction": 0.6459330320358276,
"avg_line_length": 15.076923370361328,
"blob_id": "fbb490edcb305c4dd4c13d938a2d05da0afd35d0",
"content_id": "65fc79e05f41d52488dae38670e0ef7b9508264c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 247,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 13,
"path": "/chapter4/nlp31.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 31. 動詞\n# 動詞の表層形をすべて抽出せよ.\n\nimport nlp30\n\nlines = nlp30.neko_lines()\nverbs = []\nfor line in lines:\n for morpheme in line:\n if morpheme['pos'] == '動詞':\n verbs.append(morpheme['surface'])\n\nprint(verbs)\n"
},
{
"alpha_fraction": 0.6606170535087585,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 19.370370864868164,
"blob_id": "84b017e1d4cba81369bc8b2ff9989d59170e6d94",
"content_id": "eb0bd9cd1cd8106ec3b146d207a4ec3cc31b110e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 27,
"path": "/chapter4/nlp37.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 37. 頻度上位10語\n# 出現頻度が高い10語とその出現頻度をグラフ(例えば棒グラフなど)で表示せよ.\n\nimport nlp36\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.family'] = 'AppleGothic'\n\nline_common_words = nlp36.line_common_words()\n\nten_common_words = []\nfor i, line in enumerate(line_common_words):\n if i < 10:\n ten_common_words.append(line)\n else:\n break\n\n\n# x, y = map(list, zip(*ten_common_words))\nx = np.array(ten_common_words)[0:,0]\ny = np.array(ten_common_words)[0:,1]\nprint(x)\nprint(y)\n\nplt.bar(range(len(x)), y, tick_label=x, align=\"center\")\n\nplt.show()\n\n"
},
{
"alpha_fraction": 0.6141414046287537,
"alphanum_fraction": 0.6303030252456665,
"avg_line_length": 19.66666603088379,
"blob_id": "52cc8b7d57fce8ffdfbe337c32505f66aeec6f71",
"content_id": "952f36642f47d090b6c46ca09db710d1b2e5efe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 24,
"path": "/chapter4/nlp35.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 35. 名詞の連接\n# 名詞の連接(連続して出現する名詞)を最長一致で抽出せよ\n\nimport nlp30\n\nlines = nlp30.neko_lines()\n\nlist_series_noun = []\n\nfor line in lines:\n nouns = []\n for morpheme in line:\n if morpheme['pos'] == '名詞':\n nouns.append(morpheme['surface'])\n else:\n if len(nouns) > 1:\n list_series_noun.append(\"\".join(nouns))\n nouns = []\n if len(nouns) > 1:\n list_series_noun.append(\"\".join(nouns))\n\nseries_noun = set(list_series_noun)\n\nprint(sorted(series_noun, key=list_series_noun.index))"
},
{
"alpha_fraction": 0.6220472455024719,
"alphanum_fraction": 0.6456692814826965,
"avg_line_length": 13.882352828979492,
"blob_id": "815099451f0c7b228a3e9d36945de08056614311",
"content_id": "474cfa195e760105ae875b7c981881c73f692100",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 17,
"path": "/chapter3/nlp24.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 24. ファイル参照の抽出\n# 記事から参照されているメディアファイルをすべて抜き出せ.\nimport nlp20\nimport re\n\ndata_uk = nlp20.read_uk('text')\n\npattern = re.compile(r'''\n (?:File|ファイル)\n :\n (.+?)\n \\|\n ''', re.VERBOSE)\n\nmatchmd = pattern.findall(data_uk)\nfor line in matchmd:\n print(line)\n\n"
},
{
"alpha_fraction": 0.656000018119812,
"alphanum_fraction": 0.671999990940094,
"avg_line_length": 24.066667556762695,
"blob_id": "61670aa811037a9ecfe464448e82bfccdc37fd98",
"content_id": "b484675825c310ba5e4b13849fff029732d0d217",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/chapter3/nlp20.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 20. JSONデータの読み込み\n# Wikipedia記事のJSONファイルを読み込み,「イギリス」に関する記事本文を表示せよ.問題21-29では,ここで抽出した記事本文に対して実行せよ.\n\nimport gzip\nimport json\n\ndef read_uk(read_key):\n with gzip.open('jawiki-country.json.gz', 'rt') as data:\n for line in data:\n data_json = json.loads(line)\n if data_json['title'] == 'イギリス':\n return data_json[read_key]\n break\n\n# print(read_uk('text'))"
},
{
"alpha_fraction": 0.6332767605781555,
"alphanum_fraction": 0.6604413986206055,
"avg_line_length": 21.653846740722656,
"blob_id": "6c5223cc7cb58064654ea87e0ad97bc0ca3f715b",
"content_id": "dae9657314e55dde054e09d3943d5ce96d483c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 845,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/chapter5/nlp42.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 42. 係り元と係り先の文節の表示\n# 係り元の文節と係り先の文節のテキストをタブ区切り形式ですべて抽出せよ.ただし,句読点などの記号は出力しないようにせよ.\n\nimport nlp40\nimport nlp41\nfrom nlp41 import Chunk\n\n#nlp41のChunkクラスに関数normalized_surface()を追加\n\nif __name__ == '__main__':\n nlp40.relate_neko()\n # 1文ずつリスト作成\n for chunks in nlp41.neko_lines():\n for chunk in chunks:\n if chunk.dst != -1:\n src = chunk.normalized_surface()\n dst = chunks[chunk.dst].normalized_surface()\n if src != '' and dst != '':\n print('{}\\t{}'.format(src, dst))\n\n\n\n\n\n#係り先文節インデックス番号(dst),係り元文節インデックス番号のリスト(srcs)を使う\n# def extract_relation(dst, srcs):\n"
},
{
"alpha_fraction": 0.5568493008613586,
"alphanum_fraction": 0.5702054500579834,
"avg_line_length": 23.53781509399414,
"blob_id": "db997bf4dbd14f86a322dc830bcddcd92ef7f2be",
"content_id": "7d05e7deacfb69b0eb5a6bf9cc70f1587bc6d3e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4074,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 119,
"path": "/chapter5/nlp41.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 41. 係り受け解析結果の読み込み(文節・係り受け)\n# 40に加えて,文節を表すクラスChunkを実装せよ.このクラスは形態素(Morphオブジェクト)のリスト(morphs),\n# 係り先文節インデックス番号(dst),係り元文節インデックス番号のリスト(srcs)をメンバ変数に持つこととする.\n# さらに,入力テキストのCaboChaの解析結果を読み込み,1文をChunkオブジェクトのリストとして表現し,\n# 8文目の文節の文字列と係り先を表示せよ.第5章の残りの問題では,ここで作ったプログラムを活用せよ.\n\nimport nlp40\nimport re\n\nfname = '/Users/yamadahikaru/Projects/ML_Projects_Python/nlp100/chapter4/neko.txt'\nfname_parsed = 'neko.txt.cabocha'\n\nclass Chunk:\n # 形態素(Morphオブジェクト)のリスト(morphs),係り先文節インデックス番号(dst),係り元文節インデックス番号のリスト(srcs)をメンバ変数に持つ\n def __init__(self):\n #初期化\n self.morphs = []\n self.srcs = []\n self.dst = -1\n\n def __str__(self):\n '''オブジェクトの文字列表現'''\n surface = ''\n for morph in self.morphs:\n surface += morph.surface\n return '{}\\tsrcs{}\\tdst[{}]'.format(surface, self.srcs, self.dst)\n\n def normalized_surface(self):\n #句読点などの記号を出力しないようにする\n result = ''\n for morph in self.morphs:\n if morph in self.morphs:\n if morph.pos != '記号':\n result += morph.surface\n return result\n def chk_pos(self, pos):\n '''指定した品詞(pos)を含むかチェックする\n\n 戻り値:\n 品詞(pos)を含む場合はTrue\n '''\n for morph in self.morphs:\n if morph.pos == pos:\n return True\n return False\n\n\ndef neko_lines():\n # 「吾輩は猫である」の係り受け解析結果のジェネレータ\n # 「吾輩は猫である」の係り受け解析結果を順次読み込んで、\n # 1文ずつChunkクラスのリストを返す\n\n # 戻り値:\n # 1文のChunkクラスのリスト\n\n with open(fname_parsed) as file_parsed:\n chunks = dict()\n idx = -1\n\n for line in file_parsed:\n if line == 'EOS\\n':\n\n if len(chunks) > 0:\n sorted_tuple = sorted(chunks.items(), key=lambda x:x[0])\n yield list(zip(*sorted_tuple))[1]\n chunks.clear()\n\n else:\n yield []\n\n # 先頭が*の行は係り受け解析結果なので、Chunkを作成\n elif line[0] == '*':\n\n # Chunkのインデックス番号と係り先のインデックス番号取得\n cols = line.split(' ')\n idx = int(cols[1])\n dst = int(re.search(r'(.*?)D', cols[2]).group(1))\n\n # Chunkを生成(なければ)し、係り先のインデックス番号セット\n if idx not in chunks:\n chunks[idx] = Chunk()\n chunks[idx].dst = dst\n\n # 係り先のChunkを生成(なければ)し、係り元インデックス番号追加\n if dst != -1:\n if dst not in chunks:\n chunks[dst] = Chunk()\n chunks[dst].srcs.append(idx)\n\n # それ以外の行は形態素解析結果なので、Morphを作りChunkに追加\n else:\n\n # 表層形はtab区切り、それ以外は','区切りでバラす\n cols = line.split('\\t')\n res_cols = cols[1].split(',')\n\n # Morph作成、リストに追加\n chunks[idx].morphs.append(\n nlp40.Morph(\n cols[0], # surface\n res_cols[6], # base\n res_cols[0], # pos\n res_cols[1] # pos1\n )\n )\n\n raise StopIteration\n\n\nif __name__ == '__main__':\n nlp40.relate_neko()\n\n # 1文ずつリスト作成\n for i, chunks in enumerate(neko_lines(), 1):\n # 8文目を表示\n if i == 8:\n for j, chunk in enumerate(chunks):\n print('[{}]{}'.format(j, chunk))\n break\n"
},
{
"alpha_fraction": 0.5824176073074341,
"alphanum_fraction": 0.6140109896659851,
"avg_line_length": 21.78125,
"blob_id": "77758fb82c036ab5a2fcafb85b42a391461545f2",
"content_id": "f9e075d95ba6dac5cdae749c13a76bb7ec467731",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 872,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 32,
"path": "/chapter2/17.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 17. 1列目の文字列の異なり\n# 1列目の文字列の種類(異なる文字列の集合)を求めよ.確認にはsort, uniqコマンドを用いよ.\n\nimport nltk\nimport numpy as np #numpyで2次元配列にする\n\ndef diff_str():\n f = open('hightemp.txt')\n document = f.read()\n doc_token = nltk.word_tokenize(document)\n doc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n\n row1 = doc_2d[:,0]\n \n for r in row1:\n if r.isnumeric() == True:\n print(\"数字\")\n elif r.isalpha() == True:\n print(\"英字or日本語\")\n elif r.isalnum() == True:\n print(\"英数字\")\n\n # path_w = '/Users/yamadahikaru/Projects/ML_Projects_Python/nlp100/chapter2/17.txt'\n\n # for l in doc_2d:\n # with open(path_w, mode='w') as f:\n # f.write(' '.join(l))\n # with open(path_w) as f:\n # print(f.read())\n\n\ndiff_str()"
},
{
"alpha_fraction": 0.6809986233711243,
"alphanum_fraction": 0.6976421475410461,
"avg_line_length": 34.95000076293945,
"blob_id": "fc0bf87ffdeaec41f6ac453814740397a66fe1c6",
"content_id": "3e1b23e213faf08a8ac0411616a69e9e00f39095",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 20,
"path": "/chapter1/09.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 09. Typoglycemia\n# スペースで区切られた単語列に対して,各単語の先頭と末尾の文字は残し,それ以外の文字の順序をランダムに並び替えるプログラムを作成せよ.ただし,長さが4以下の単語は並び替えないこととする.適当な英語の文(例えば\"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\")を与え,その実行結果を確認せよ\n\nimport nltk\nimport random\n\ndef typoglycemia(document):\n result = ''\n doc = document\n doc_tokens = nltk.word_tokenize(doc)\n for t in doc_tokens:\n if len(t) <= 4: #単語の長さが4文字以下の場合そのまま出力\n result += ' ' + t\n else: #単語の長さが5以上ならば先頭と末尾の文字は残し,それ以外の文字の順序をランダムに並び替える\n sr = ''.join(random.sample(t[1:-1], len(t[1:-1])))\n result += ' ' + (t[0] + sr + t[-1])\n return result\n\ndocument = input(\"文字列を入力してください->\")\nprint(typoglycemia(document))\n\n\n"
},
{
"alpha_fraction": 0.6615384817123413,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 16.81818199157715,
"blob_id": "3bf70a6cbdc5d399f493dd5624ea34bb66b1f9f7",
"content_id": "424d6682e71e997df845586077a6791bd69b28f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 11,
"path": "/chapter3/nlp25.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 25. テンプレートの抽出\n# 記事中に含まれる「基礎情報」テンプレートのフィールド名と値を抽出し,辞書オブジェクトとして格納せよ.\n\nimport nlp20\nimport re\n\ndata_uk = nlp20.read_uk('text')\nprint(data_uk)\n# for d in data_uk:\n# if \"基礎情報\" in d:\n# print(d)"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6507353186607361,
"avg_line_length": 31.058822631835938,
"blob_id": "eca2bb54f5c1253f8d02234717c72a7978d44538",
"content_id": "ff0d4b0437b206472d3530a34ab27ac34c5b736c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 17,
"path": "/chapter5/nlp43.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 43. 名詞を含む文節が動詞を含む文節に係るものを抽出\n# 名詞を含む文節が,動詞を含む文節に係るとき,これらをタブ区切り形式で抽出せよ.ただし,句読点などの記号は出力しないようにせよ.\nimport nlp40\nimport nlp41\nfrom nlp41 import Chunk\n\nnlp40.relate_neko()\n # 1文ずつリスト作成\nfor chunks in nlp41.neko_lines():\n for chunk in chunks:\n if chunk.dst != -1:\n #かかり元に名詞があるか、係り先に動詞があるかチェック\n if chunk.chk_pos('名詞') and chunks[chunk.dst].chk_pos('名詞'):\n src = chunk.normalized_surface()\n dst = chunks[chunk.dst].normalized_surface()\n if src != '' and dst != '':\n print('{}\\t{}'.format(src, dst))"
},
{
"alpha_fraction": 0.6925287246704102,
"alphanum_fraction": 0.709770143032074,
"avg_line_length": 18.27777862548828,
"blob_id": "559def98192cf99b4ebaf05e3c3dafb1b91b032e",
"content_id": "5afc16d0e410edc603c253137b4d4e5de776c088",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 18,
"path": "/chapter4/nlp36.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 36. 単語の出現頻度\n# 文章中に出現する単語とその出現頻度を求め,出現頻度の高い順に並べよ\n\nimport nlp30\nimport collections\n\ndef line_common_words():\n lines = nlp30.neko_lines()\n words = []\n for line in lines:\n for morpheme in line:\n words.append(morpheme['surface'])\n\n word_count = collections.Counter(words)\n\n common_words = word_count.most_common()\n\n return common_words\n\n"
},
{
"alpha_fraction": 0.5646731853485107,
"alphanum_fraction": 0.5952712297439575,
"avg_line_length": 25.66666603088379,
"blob_id": "e2d879bd71ba7cfe7422f0611f6ed14caf64e4df",
"content_id": "8425611fa02f3ba5b949568a13af20eeadc36a98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1167,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 27,
"path": "/chapter3/nlp23.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 23. セクション構造\n# 記事中に含まれるセクション名とそのレベル(例えば\"== セクション名 ==\"なら1)を表示せよ.\n# セクション名とは問題26で参考情報として紹介されているマークアップ早見表の「見出し」を指しているもよう\nimport nlp20\nimport re\n\ndata_uk = nlp20.read_uk('text')\npattern = re.compile(r'''\n ^ # 行頭\n (={2,}) # キャプチャ対象、2個以上の'='\n \\s* # 余分な0個以上の空白('哲学'や'婚姻'の前後に余分な空白があるので除去)\n (.+?) # キャプチャ対象、任意の文字が1文字以上、非貪欲(以降の条件の巻き込み防止)\n \\s* # 余分な0個以上の空白\n \\1 # 後方参照、1番目のキャプチャ対象と同じ内容\n .* # 任意の文字が0文字以上\n $ # 行末\n ''', re.MULTILINE + re.VERBOSE)\n\n\nresult = pattern.findall(data_uk)\nprint(result)\n\n# 結果表示\nfor line in result:\n level = len(line[0]) - 1 # '='の数-1\n print('{indent}{sect}({level})'.format(\n indent='\\t' * (level - 1), sect=line[1], level=level))"
},
{
"alpha_fraction": 0.5388888716697693,
"alphanum_fraction": 0.6222222447395325,
"avg_line_length": 24.85714340209961,
"blob_id": "dfc1cea0cb42aea758638d93c1f14a125118ca45",
"content_id": "c343c836423e6c85155274667953f706caa821ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 7,
"path": "/chapter1/07.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 07. テンプレートによる文生成\n# 引数x, y, zを受け取り「x時のyはz」という文字列を返す関数を実装せよ.さらに,x=12, y=\"気温\", z=22.4として,実行結果を確認せよ.\n\ndef temp(x, y, z):\n print(\"{0}時の{1}は{2}\".format(x, y, z))\n\ntemp(12, \"気温\", 22.4)"
},
{
"alpha_fraction": 0.5987260937690735,
"alphanum_fraction": 0.6050955653190613,
"avg_line_length": 13.181818008422852,
"blob_id": "90701dbba1f306a4977581f5ff27c8cb6f4d2af5",
"content_id": "3f9ea83cef384e4eaf9c72a3b8e2865968c48691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 11,
"path": "/chapter1/02.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "motion = 'stressed'\nprint(motion[::-1])\n\npolice_car = 'パトカー'\ntaxi = 'タクシー'\n\nresult = ''\nfor a, b in zip(police_car, taxi):\n result += a + b\n\nprint(result)\n\n"
},
{
"alpha_fraction": 0.6905131936073303,
"alphanum_fraction": 0.6905131936073303,
"avg_line_length": 34.66666793823242,
"blob_id": "bddefe06e320f4d11bd155988792d01d4b5ddfe5",
"content_id": "b7469551c56abe8dc31c5854cdc94dfd5b781772",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 18,
"path": "/chapter1/03.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import wordpunct_tokenize\nnltk.download('stopwords')\nstop_words = nltk.corpus.stopwords.words('english')\nsymbol = [\"'\", '\"', ':', ';', '.', ',', '-', '!', '?', \"'s\"]\n\ndocument = 'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'\nlist_of_words = nltk.FreqDist(w.lower() for w in document if w.lower() not in stop_words + symbol)\n\n\nwords = wordpunct_tokenize(document) not in stop_words\n\n# for doc in documents:\n# list_of_words = [i.lower() for i in wordpunct_tokenize(doc) if i.lower() not in stop_words]\n\n\nprint(list_of_words)\n\n"
},
{
"alpha_fraction": 0.6292358636856079,
"alphanum_fraction": 0.6471760869026184,
"avg_line_length": 24.066667556762695,
"blob_id": "d8dda5fc58ef0ad6f4be9db924064a316177f0c3",
"content_id": "c6eeeb83afa84b92bb5dfe12373a9f5afc84aa31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2357,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 60,
"path": "/chapter4/nlp30.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 30. 形態素解析結果の読み込み\n# 形態素解析結果(neko.txt.mecab)を読み込むプログラムを実装せよ.ただし,各形態素は表層形(surface),基本形(base),品詞(pos),品詞細分類1(pos1)をキーとするマッピング型に格納し,1文を形態素(マッピング型)のリストとして表現せよ.第4章の残りの問題では,ここで作ったプログラムを活用せよ.\n\nimport MeCab\nimport sys\n\nfname = 'neko.txt'\nfname_parsed = 'neko.txt.mecab'\n\ndef parse_neko():\n #neko.txtを形態素解析してneko.txt.mecabに取り込む\n with open(fname) as data_file, open(fname_parsed, mode='w') as out_file:\n mecab = MeCab.Tagger()\n out_file.write(mecab.parse(data_file.read()))\n\ndef neko_lines():\n '''「吾輩は猫である」の形態素解析結果のジェネレータ\n 「吾輩は猫である」の形態素解析結果を順次読み込んで、各形態素を\n ・表層形(surface)\n ・基本形(base)\n ・品詞(pos)\n ・品詞細分類1(pos1)\n の4つをキーとする辞書に格納し、1文ずつ、この辞書のリストとして返す\n\n 戻り値:\n 1文の各形態素を辞書化したリスト\n '''\n with open(fname_parsed) as file_parsed:\n morphemes = []\n for line in file_parsed:\n #表層形はtabで区切る、それ以外は','で区切る\n cols = line.split('\\t')\n if len(cols) < 2:\n raise StopIteration #区切りがない場合終了\n res_cols = cols[1].split(',')\n\n #辞書を作成し、辞書に追加 詳細は表層形\\t品詞,品詞細分類1,品詞細分類2,品詞細分類3,活用型,活用形,原形,読み,発音\n #http://taku910.github.io/mecab/#format\n\n morpheme = {\n 'surface': cols[0],\n 'base': res_cols[6],\n 'pos': res_cols[0],\n 'pos1': res_cols[1]\n }\n morphemes.append(morpheme)\n\n # 品詞細分類1が'句点'なら文の終わりと判定\n if res_cols[1] == '句点':\n yield morphemes #大きいデータはreturnで一度に引き渡すのではなく、yeildで少量ずつ読み込む\n morphemes = []\n\n# 形態素解析\nif __name__ == \"__main__\":\n parse_neko()\n\n# 1文ずつ辞書のリストを作成\n lines = neko_lines()\n for line in lines:\n print(line)\n\n"
},
{
"alpha_fraction": 0.6958174705505371,
"alphanum_fraction": 0.7262357473373413,
"avg_line_length": 20.95833396911621,
"blob_id": "b091238de86b12ce6f819e8d60d09650640dd448",
"content_id": "46eb860de73302e5f8b8316885d860a3b4d907d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/chapter4/nlp38.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 38. ヒストグラム\n# 単語の出現頻度のヒストグラム(横軸に出現頻度,縦軸に出現頻度をとる単語の種類数を棒グラフで表したもの)を描け.\n\nimport nlp36\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.family'] = 'AppleGothic'\n\nline_common_words = nlp36.line_common_words()\n\nhundred_common_words = [] #とりあえず100で考えてみる\nfor i, line in enumerate(line_common_words):\n if i < 100:\n hundred_common_words.append(line)\n else:\n break\n\nx = np.array(hundred_common_words)[0:,0]\ny = np.array(hundred_common_words)[0:,1]\nprint(x)\nprint(y)\n\nplt.hist(x, y, tick_label=x)\nplt.show()"
},
{
"alpha_fraction": 0.6721311211585999,
"alphanum_fraction": 0.7189695835113525,
"avg_line_length": 29.571428298950195,
"blob_id": "82b5218620db4e93d6898e4541df250c54944c56",
"content_id": "85fc6311ec6fad8bc7a4a9fb9f18ab6dd6a6cedf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 14,
"path": "/chapter2/18.py",
"repo_name": "hikaruya8/nlp100",
"src_encoding": "UTF-8",
"text": "# 18. 各行を3コラム目の数値の降順にソート\n# 各行を3コラム目の数値の逆順で整列せよ(注意: 各行の内容は変更せずに並び替えよ).確認にはsortコマンドを用いよ(この問題はコマンドで実行した時の結果と合わなくてもよい).\n\nimport nltk\nimport numpy as np #numpyで2次元配列にする\n\ndef sort_row3():\n f = open('hightemp.txt')\n document = f.read()\n doc_token = nltk.word_tokenize(document)\n doc_2d = np.array(doc_token).reshape((24,4)) #24行4列 ndarrayにする\n print(doc_2d[:, doc_2d[2].argsort(axis=0, kind='quicksort')[::-1]][::-1])\n\nsort_row3()"
}
] | 36 |
hondaya14/gaussian-tool | https://github.com/hondaya14/gaussian-tool | d7e59cb75f476c6da8e31a78aa8281a3fd4c22c3 | 85e9d882a2d46375a778a3e8b7c7168253c180ed | 926a328bf42fc806ef82b64a235c9ae76cd31fbe | refs/heads/main | 2023-02-19T16:19:48.438035 | 2021-01-22T11:19:26 | 2021-01-22T11:19:26 | 319,911,014 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 32,
"blob_id": "139d05c8090c4233994e0c293993834f66cbb775",
"content_id": "04bb2971a78c0b5c117d818a0797985ac3e3c7bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 3,
"path": "/README.md",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "## gaussian-tool\n\nTools for calculation by quantum chemistry calculation software Gausian and Eos.\n"
},
{
"alpha_fraction": 0.4009324014186859,
"alphanum_fraction": 0.41025641560554504,
"avg_line_length": 20.450000762939453,
"blob_id": "ee7ec6cde0e8bbbc971a3e76e8bdd023b2c085c5",
"content_id": "27d5a7d3c445520475fc70aad5470ae0d9eba0eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 20,
"path": "/freeze_atom.py",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "import sys\n\nfreeze_atom_list = ['C', 'N', 'O']\nwhile True:\n try:\n input_line = input()\n line = ''\n s = input_line.split()\n if not s:\n print()\n continue\n if s[0] in freeze_atom_list:\n line += s[0] + '\\t' + '-1' + '\\t'\n line += '\\t'.join(s[1:])\n print(line)\n else:\n print(input_line)\n\n except EOFError:\n break\n"
},
{
"alpha_fraction": 0.5402930378913879,
"alphanum_fraction": 0.5494505763053894,
"avg_line_length": 25,
"blob_id": "ec8c937670a40f54b7f93fb6619405ed61c10c0e",
"content_id": "86a150c256a214796aa1e245a1ccd8f425b3f044",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1098,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 42,
"path": "/cube_preprocessor.py",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "# This program is cube file convert to ascii format according to Eos ascii2mrc.\n# python3 cub_preprocessor.py < cubefile > new cubefile\n\n# parse header phase\ntitle = str(input())\n# print(title)\ninput() # 不要行\n\natom_num = int(input().split()[0])\n# print('atomic number ... '+str(atom_num))\n\ndimension = '3'\nx_num = int(input().split()[0])\ny_num = int(input().split()[0])\nz_num = int(input().split()[0])\n\n# cube dimension\nprint(dimension+' '+str(x_num)+' '+str(y_num)+' '+str(z_num))\n\n# skip phase\nfor i in range(0, atom_num):\n input()\n\ncube = [[[[''] for z in range(z_num)] for y in range(y_num)] for x in range(x_num)]\n\nfor i in range(x_num):\n for j in range(y_num):\n lines = ''\n for line in range(z_num // 6 + 1):\n lines += input().replace('\\n', ' ')\n z_column = lines.split()\n for k in range(z_num):\n cube[i][j][k] = z_column[k]\n\n\nfor i in range(z_num):\n for j in range(y_num):\n line = ''\n for k in range(x_num):\n # print(cube[k][j][i], end=' ')\n line += ' ' + cube[k][j][i]\n print(line)\n"
},
{
"alpha_fraction": 0.6942148804664612,
"alphanum_fraction": 0.6942148804664612,
"avg_line_length": 12.44444465637207,
"blob_id": "8650e79b4e4479be27916bca538a1104111f7161",
"content_id": "df0735ec604f615a71e383a657f07c378633989b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 9,
"path": "/counter.py",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "import collections\n\n\nline = input().split()\n\ncounter = collections.Counter(line)\n\nfor c in counter.items():\n print(c)\n"
},
{
"alpha_fraction": 0.561987042427063,
"alphanum_fraction": 0.5766738653182983,
"avg_line_length": 25,
"blob_id": "51685b1a5aacef9c4ccafcdfee8e162f50b96cc5",
"content_id": "93fa646bce225b857d599f8ff841b33bcada82e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2337,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 89,
"path": "/ploter.py",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "import sys\nimport seaborn as sb\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\n\n# voxel params\n# x_number = 130\n# y_number = 100\n# z_number = 76\nx_number = 170\ny_number = 172\nz_number = 176\n\n\n\n# なに軸の何番目か\nsection_axis = sys.argv[1] # x, y, z\n# section_number = sys.argv[2] # 何枚目\nprint('section axis: ' + section_axis)\n# print('section number: ' + section_number)\n\nsection_number = 0\nif section_axis == 'x':\n section_number = x_number\nelif section_axis == 'y':\n section_number = y_number\nelif section_axis == 'z':\n section_number = z_number\n\n\ndef validate_axis(xi, yi, zi, sec_num):\n if section_axis == 'x':\n if xi == sec_num:\n return True\n elif section_axis == 'y':\n if yi == sec_num:\n return True\n elif section_axis == 'z':\n if zi == sec_num:\n return True\n return False\n\n\n# prepare data\ndata_max = 0\ndata_min = 100000000\nvoxel = [[[0] * z_number for i in range(y_number)] for j in range(x_number)]\nfor k in range(z_number):\n for j in range(y_number):\n for i in range(x_number):\n x, y, z, value = map(float, input().split())\n voxel[i][j][k] = value\n data_max = max(data_max, value)\n data_min = min(data_min, value)\n\nsection = np.array([[]])\n\nif section_axis == 'x':\n section = [[0] * z_number for i in range(y_number)]\nelif section_axis == 'y':\n section = np.array([[0] * z_number for i in range(x_number)])\nelif section_axis == 'z':\n section = np.array([[0] * y_number for i in range(x_number)])\n\n\ndef make_plane(xi, yi, zi):\n if section_axis == 'x':\n section[yi][zi] = voxel[xi][yi][zi]\n elif section_axis == 'y':\n section[xi][zi] = voxel[xi][yi][zi]\n elif section_axis == 'z':\n section[xi][yi] = voxel[xi][yi][zi]\n\n\nsave_path = str(section_axis)+'_section/'\nfor sn in range(section_number):\n print('section number: ' + str(sn))\n for i in range(x_number):\n for j in range(y_number):\n for k in range(z_number):\n if validate_axis(i, j, k, sn):\n make_plane(i, j, k)\n plt.figure()\n sb.heatmap(section, cmap='gray', vmax=math.sqrt(data_max), vmin=data_min)\n # plt.show()\n plt.savefig(save_path+section_axis+'_'+str(sn)+'.png')\n plt.clf()\n plt.close()\n\n"
},
{
"alpha_fraction": 0.5606096982955933,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 30.571428298950195,
"blob_id": "67830543de65dc989118589242291873ef4f8aa8",
"content_id": "31c1470ccb20f73e3f716e3706a516ce9ccf06c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4371,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 133,
"path": "/pickup_hkl.py",
"repo_name": "hondaya14/gaussian-tool",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport sys\n\nvoxel_file = sys.argv[1]\nhkl_ref_file = sys.argv[2]\n\n# original mrc size\n# original_mrc_size_x = 131\n# original_mrc_size_y = 101\n# original_mrc_size_z = 76\noriginal_mrc_size_x = 170\noriginal_mrc_size_y = 172\noriginal_mrc_size_z = 176\n\n# cutoff mrc size - even\ncutoff_mrc_size_x = 170\ncutoff_mrc_size_y = 172\ncutoff_mrc_size_z = 176\n\n# voxel length\n# mrc_unit_length = 0.403\nmrc_unit_length = 0.336\n\ncenter = [\n int(cutoff_mrc_size_x / 2),\n int(cutoff_mrc_size_y / 2),\n int(cutoff_mrc_size_z / 2)\n]\n\n\n# read voxel data\nvoxel_data = [[[0] * cutoff_mrc_size_z for i in range(cutoff_mrc_size_y)] for j in range(cutoff_mrc_size_x)]\n\nwith open(voxel_file) as vf:\n vl = vf.readlines()\n cx = 0\n for line in vl:\n try:\n x, y, z, v = map(float, line.split())\n # x, y, z を ボクセルの整数座標に変換\n x = round(x / mrc_unit_length)\n y = round(y / mrc_unit_length)\n z = round(z / mrc_unit_length)\n\n # 逆空間(Å^-1)に変換\n # x = (x - cutoff_mrc_size_x / 2) / (cutoff_mrc_size_x * mrc_unit_length)\n # y = (y - cutoff_mrc_size_y / 2) / (cutoff_mrc_size_y * mrc_unit_length)\n # z = (z - cutoff_mrc_size_z / 2) / (cutoff_mrc_size_z * mrc_unit_length)\n # # print('\\t{0}\\t{1}\\t{2}\\t{3}'.format(x, y, z, v))\n # voxel_data.append([x, y, z, v])\n\n voxel_data[x][y][z] = v\n\n except EOFError:\n break\n\n\n# フーリエボクセル1個の幅\nvoxel_unit_length_x = 1 / (cutoff_mrc_size_x * mrc_unit_length)\nvoxel_unit_length_y = 1 / (cutoff_mrc_size_y * mrc_unit_length)\nvoxel_unit_length_z = 1 / (cutoff_mrc_size_z * mrc_unit_length)\n\n# unit cell parameters\nunit_cell_tv_x = [7.1178, 0, 0]\nunit_cell_tv_y = [0, 9.6265, 0]\nunit_cell_tv_z = [-1.39567, 0, 11.81314]\n\nunit_cell_tv = np.array([\n unit_cell_tv_x,\n unit_cell_tv_y,\n unit_cell_tv_z\n])\n\n# unit cell volume\nunit_cell_volume = np.dot(np.cross(unit_cell_tv_x, unit_cell_tv_y), unit_cell_tv_z)\n\nreciprocal_lattice_vector_a = np.cross(unit_cell_tv_y, unit_cell_tv_z) / unit_cell_volume\nreciprocal_lattice_vector_b = np.cross(unit_cell_tv_z, unit_cell_tv_x) / unit_cell_volume\nreciprocal_lattice_vector_c = np.cross(unit_cell_tv_x, unit_cell_tv_y) / unit_cell_volume\n\nreciprocal_lattice_vector = np.array([\n reciprocal_lattice_vector_a,\n reciprocal_lattice_vector_b,\n reciprocal_lattice_vector_c\n])\n\n# hkl reference\nhkl = []\nwith open(hkl_ref_file) as hrf:\n hl = hrf.readlines()\n for lines in hl:\n h, k, l, *values = lines.split()\n hkl.append([int(h), int(k), int(l)])\n\n\ndef main():\n # hkl.insert(0, [0, 0, 0])\n for e in hkl:\n h, k, l = e[0], e[1], e[2]\n\n # referenceで読み込んだh,k,lに対応するフーリエ空間の座標\n fourier_coord = h * reciprocal_lattice_vector_a + \\\n k * reciprocal_lattice_vector_b + \\\n l * reciprocal_lattice_vector_c\n\n # 特定のhklの点が対応するボクセルの座標()\n target_voxel_x = round(fourier_coord[0] / voxel_unit_length_x) + center[0]\n target_voxel_y = round(fourier_coord[1] / voxel_unit_length_y) + center[1]\n target_voxel_z = round(fourier_coord[2] / voxel_unit_length_z) + center[2]\n\n # 特定のhklの点が対応するボクセルの座標(Å-1)\n # target_voxel_x = round(fourier_coord[0] / voxel_unit_length_x) * voxel_unit_length_x\n # target_voxel_y = round(fourier_coord[1] / voxel_unit_length_y) * voxel_unit_length_y\n # target_voxel_z = round(fourier_coord[2] / voxel_unit_length_z) * voxel_unit_length_z\n\n # value = search_value(target_voxel_x, target_voxel_y, target_voxel_z)\n # print('\\t{0}\\t{1}\\t{2}\\t{3:.6f}'.format(h, k, l, value), flush=True)\n try:\n print(\n '\\t{0}\\t{1}\\t{2}\\t{3:.6f}'.format(h, k, l, 
voxel_data[target_voxel_x][target_voxel_y][target_voxel_z]), flush=True)\n except IndexError:\n print('\\t{0}\\t{1}\\t{2}\\t{3}'.format(h, k, l, \"out of range\"), flush=True)\n\n\ndef search_value(vx, vy, vz):\n for data in voxel_data:\n if np.isclose(data[0], vx) and np.isclose(data[1], vy) and np.isclose(data[2], vz):\n return data[3]\n return 'out range hkl'\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 6 |
DSSAGGAF/Amazont-Tele | https://github.com/DSSAGGAF/Amazont-Tele | 439df474e86d0fc6ed4a573d91e497f1e23fe16f | 28ac71750024ce5894edd8cbe4e8f6b15b9706fc | 7913fb7ef865e7820b1a5def7080dc298c49cff7 | refs/heads/main | 2023-03-06T15:40:58.880434 | 2021-02-16T11:50:17 | 2021-02-16T11:50:17 | 339,381,522 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8018018007278442,
"alphanum_fraction": 0.8018018007278442,
"avg_line_length": 54.5,
"blob_id": "9ce86c16652ce6760a07b108c601c0f6a98a4bb2",
"content_id": "91236556563a300867d3f09a43028ae599290646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 2,
"path": "/README.md",
"repo_name": "DSSAGGAF/Amazont-Tele",
"src_encoding": "UTF-8",
"text": "# Amazont-Tele\n This script will buy automatic staff from link for amazon that the clinet will get on Telegram\n"
},
{
"alpha_fraction": 0.5665504932403564,
"alphanum_fraction": 0.5672473907470703,
"avg_line_length": 30.688888549804688,
"blob_id": "106ff960aa2372153eca98642410cbdf712534ae",
"content_id": "1444051d18ed708e041d1fb4309474b70f12be81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1435,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 45,
"path": "/amazon-telegram.py",
"repo_name": "DSSAGGAF/Amazont-Tele",
"src_encoding": "UTF-8",
"text": "\nimport re\nfrom telethon.sync import TelegramClient \nfrom telethon import TelegramClient,events\nfrom selenium import webdriver \nfrom selenium.webdriver.chrome.options import Options \n\napi_id = ''\napi_hash = ''\n\n# your phone number\nphone = \"\"\nclient = TelegramClient('session', api_id, api_hash) \n \nclient.connect() \n \nif not client.is_user_authorized(): \n \n client.send_code_request(phone) \n \n # signing in the client \n client.sign_in(phone, input('Enter the code: ')) \n\[email protected](events.NewMessage)\nasync def my_event_handler(event):\n channelID = str(event.message.chat_id)\n if \"\"== channelID:\n try:\n link_regex = re.compile('((https?):((//)|(\\\\\\\\))+([\\w\\d:#@%/;$()~_?\\+-=\\\\\\.&](#!)?)*)', re.DOTALL)\n url = re.findall(link_regex, event.message.message)\n for lnk in url:\n \n chrome_options = Options()\n chrome_options.add_argument(\"--user-data-dir=chrome-data\")\n driver = webdriver.Chrome('chromedriver.exe',options=chrome_options)\n driver.get(lnk[0]) \n element = driver.find_element_by_id(\"placeYourOrder\")\n element.click()\n driver.quit()\n print(element)\n print(\"Done \") \n except :\n driver.quit()\n print(\"somthing wrong \") \nclient.start()\nclient.run_until_disconnected()\n \n\n\n\n"
}
] | 2 |
alpden550/algorithms | https://github.com/alpden550/algorithms | c2bc88d0d06a934760f44a13d3dd442ee11b34b2 | 69b7580d56e33842f3db788a636277552a9c56e6 | 3ad8b9bc611565cd58f246b214f7bac96732a492 | refs/heads/master | 2020-04-22T16:42:12.760553 | 2019-02-21T17:24:50 | 2019-02-21T17:24:50 | 170,517,478 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5521472096443176,
"alphanum_fraction": 0.5644171833992004,
"avg_line_length": 26.16666603088379,
"blob_id": "22f9ed9973911ee205b2c254e5dad2616d84e437",
"content_id": "aeeee5cdd1511c6fd4fce42ae8b4061653d3d040",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 12,
"path": "/choice_sort.py",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "# сортировка методом выбора\n# sort choice\n# квадратичное время, O(n2)\n\ndef choice_sort(A: list):\n \"\"\"сортировка списка A выбором\"\"\"\n N = len(A)\n for position in range(0, N-1):\n for k in range(position+1, N):\n if A[k] < A[position]:\n A[k], A[position] = A[position], A[k]\n return A\n"
},
{
"alpha_fraction": 0.4114002585411072,
"alphanum_fraction": 0.42503097653388977,
"avg_line_length": 22.05714225769043,
"blob_id": "d7cb1ef97660ac07991835ae1137c3d29f61810a",
"content_id": "9b22bcefa211417aa88e8042540075ed07cb15d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 878,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 35,
"path": "/merge_sort.py",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "# Сортировка слиянием\n# слияние отсортированных массивов в один\n# Квазилинейное время, O(n log2 n)\n\n\ndef merge_sort(A: list):\n if len(A) > 1:\n middle = len(A) // 2\n left_half = A[:middle]\n right_half = A[middle:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = k = j = 0\n\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n A[k] = left_half[i]\n i += 1\n else:\n A[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n A[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n A[k] = right_half[j]\n j += 1\n k += 1\n return A\n"
},
{
"alpha_fraction": 0.48630136251449585,
"alphanum_fraction": 0.5102739930152893,
"avg_line_length": 23.33333396911621,
"blob_id": "726f9cd9433fa94d18a0c1c55dc81e1b76fb57ff",
"content_id": "e2efd61bb98d24f30c7dd88fe025f3b107e1cf89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 353,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 12,
"path": "/insert_sort.py",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "# сортировка вставками\n# insert sort\n# квадратичное время, O(n2)\n\ndef insert_sort(A: list):\n \"\"\"сортировка списка A вставками\"\"\"\n for top in range(1, len(A)):\n k = top\n while k > 0 and A[k-1] > A[k]:\n A[k], A[k-1] = A[k-1], A[k]\n k -= 1\n return A\n"
},
{
"alpha_fraction": 0.5176848769187927,
"alphanum_fraction": 0.5369774699211121,
"avg_line_length": 24.91666603088379,
"blob_id": "c60ec2e839ccc5c3bd4c37337dca200895f5ccd2",
"content_id": "73b32cd12b6946b0bed32055e2917cf164012347",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/bubble_sort.py",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "# пузырьковая сортировка\n# bubble sort\n# квадратичное время, O(n2)\n\ndef bubble_sort(A: list):\n \"\"\"сортировка списка A методом пузырька\"\"\"\n N = len(A)\n for bypass in range(1, N):\n for k in range(0, N-bypass):\n if A[k] > A[k+1]:\n A[k], A[k+1] = A[k+1], A[k]\n return A\n"
},
{
"alpha_fraction": 0.8082191944122314,
"alphanum_fraction": 0.8219178318977356,
"avg_line_length": 17.25,
"blob_id": "fdeaf2487d657cf8a0f117fdbc7c6ca2e0dbebf7",
"content_id": "5c210a5f53f5bdfa6ef82c4ca7a36dce4a6a127f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 4,
"path": "/README.md",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "# algorithms\nAlgorithms, Python\n\nАлгоритмы и структуры данных на Python3\n"
},
{
"alpha_fraction": 0.504780113697052,
"alphanum_fraction": 0.552581250667572,
"avg_line_length": 26.526315689086914,
"blob_id": "04f21cbd13bf755de83ec15d63abc90e4a8097e0",
"content_id": "73ab502779574e7cb69ecdff7b439d1b20079112",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 19,
"path": "/generate_permutations.py",
"repo_name": "alpden550/algorithms",
"src_encoding": "UTF-8",
"text": "def generate_permutations(N: int, M: int = -1, prefix=None):\n \"\"\" Генерация всех перестановок N-чисел\n в M-позициях\n с префикосм prefix\n \"\"\"\n M = N if M == -1 else M\n prefix = prefix or []\n if M == 0:\n print(*prefix, sep='', end=', ')\n return\n for number in range(1, N+1):\n if number in prefix:\n continue\n prefix.append(number)\n generate_permutations(N, M-1, prefix)\n prefix.pop()\n\ngenerate_permutations(3)\n# 123, 132, 213, 231, 312, 321\n"
}
] | 6 |
ConnorChristie/datacollector | https://github.com/ConnorChristie/datacollector | 87390f89313df0607ce2d759a2dfcb6370d975be | be9df5187cb153502a1afce7104177fdd7e23c9c | 9d5bd11a32f70511ebc46066427e1a41093885a4 | refs/heads/master | 2021-01-18T05:29:12.655355 | 2015-12-20T06:05:18 | 2015-12-20T06:05:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6559349298477173,
"alphanum_fraction": 0.6764227747917175,
"avg_line_length": 24.831932067871094,
"blob_id": "042cbc10aeb2d5db089239f669c136a0d2d2f3e4",
"content_id": "9b15eb161db1982acd213bc7eb20f6cfde189d8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3075,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 119,
"path": "/speedserver.py",
"repo_name": "ConnorChristie/datacollector",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\n# authors: austin hartline, matt manhke, wyatt stark\n\nimport RPi.GPIO as GPIO\nimport math\nimport time\nimport socket\nimport os.path\nimport sys\n\nprint('start time: ' + time.strftime('%x %X'))\n\n# circumference of the wheel (20\") (2*pi*r*1ft/12in*1mi/5280ft)\nCIRCUMF = 2.0 * math.pi * 10.0 / 12.0 / 5280\n\n# array max size\nSPEEDS_MAX_SIZE = 6\n\n# socket / network config for client subscribers\nHOST = ''\nPORT = 12100\ntry:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((HOST, PORT))\nexcept socket.error, msg:\n print(\"socket error! \" + str(msg[0]) + \" \" + msg[1])\n\n# data\ncounter = 0\nspeed = 0.0\nlasttime = time.time()\nprevtime = time.time() - 1\nother_interrupt = False\nspeeds = []\n\n# Log file setup\n# we need the log file to graph the complete set\n# of interrupts to post mortem analyze speed and acceleration\n# also for debugging and electrical system triage\n# if the system becomes desparately slow or we run out of \n# disk space, remove logging\nLOG_FILE_NAME = 'DAQ.log' + time.strftime('%b%d.%H.%M') + '.csv'\n\n# a = append\n# 0 = size 0 buffer, write immediately\nlogfile = open(LOG_FILE_NAME, 'a', 0)\n\n# callback for each interrupt\ndef eventCallback(channel):\n global counter\n global prevtime\n global lasttime\n global logfile\n global other_interrupt\n if other_interrupt:\n counter += 1\n prevtime = lasttime\n lasttime = time.time()\n # basically System.currentTimeMillis()\n logfile.write(str(int(round(lasttime * 1000))) + '\\n')\n other_interrupt = False\n else:\n other_interrupt = True\n\n# GPIO setup\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.add_event_detect(4, GPIO.RISING, callback=eventCallback, bouncetime=33)\n\n# return the AROC between two times in milliseconds as mph\ndef getspeed(prevtime, currenttime):\n # 3600 seconds / hour\n # 2 magnets per cycle means each interrupt implies \n # half a rotation (half circumference) -> divide by 2\n return CIRCUMF / (currenttime - prevtime) * 3600 / 2\n\n\n# now that we have everything else set up, wait for \n# speed requests and calculate on the fly\n# based on interrupts / request time\nwhile True:\n \n # blocking\n # wait for a request for speed\n entered, address = sock.recvfrom(256)\n # immediately record the time of the request\n requesttime = time.time()\n\n entered = entered.strip()\n # register ability to quit\n if entered in [\"Q\", \"q\"]: break\n \n if lasttime - prevtime > requesttime - lasttime:\n\tcurrent_speed = getspeed(prevtime, lasttime)\n\t# else we are decelerating\n else:\n current_speed = getspeed(lasttime, requesttime)\n \n if len(speeds) >= SPEEDS_MAX_SIZE:\n speeds.pop(0)\n\n speeds.append(current_speed)\n \n # average speeds\n print_speed = sum(speeds) / float(len(speeds))\n # send the speed back to the client\n sock.sendto(str(print_speed), address)\n\n\n# Cleanup\n# cute loading style indication of steps to quitting\nprint \"quitting.\"\nsock.close()\nprint \".\"\nlogfile.close()\nprint \".\"\nGPIO.cleanup()\nprint(\"\\ndone.\")\n\n"
},
{
"alpha_fraction": 0.4867057204246521,
"alphanum_fraction": 0.49752140045166016,
"avg_line_length": 24.517240524291992,
"blob_id": "ebac82016421025a543f39a897bb57a2aedb7dc2",
"content_id": "6825572e25f2a57954e825e773e65c134f003dc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2219,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 87,
"path": "/src/Adb.py",
"repo_name": "ConnorChristie/datacollector",
"src_encoding": "UTF-8",
"text": "import threading\nimport socket\nimport time\nimport json\nimport math\n\nTCP_IP = \"localhost\"\nTCP_PORT = 5001\n\ndata = {\"time\": int(time.time())};\n\nspeed_data = {\"data\": \"speed\", \"value\": 0}\nrpm_data = {\"data\": \"rpm\", \"value\": 3500}\ntime_data = {\"data\": \"time\", \"value\": int(time.time())}\n\nlog_file_time = int(time.time())\n\n#Whenever we get a pulse from GPIO, start a new thread and do the calculations and logging\n\ndef obtain_data(): #Have this be the callback from the GPIO pin\n previousTime = -1\n \n while True:\n currentTime = int(round(time.time() * 1000))\n \n if previousTime is not -1:\n timeDifference = (currentTime - previousTime)\n \n data[\"time\"] = timeDifference\n \n previousTime = currentTime\n \n log_data()\n \n time.sleep(0.06)\n \ndef send_data(sock):\n sock.send((json.dumps(data) + \"\\n\").encode(\"utf-8\"))\n \ndef log_data():\n data_str = str(speed_data[\"value\"]) + \",\" + str(rpm_data[\"value\"]) + \",\" + str(time_data[\"value\"])\n \n open((\"LogData-%d.csv\" % log_file_time), \"a\").write(\"\\n\" + data_str)\n\ndef android_connect():\n while True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n while True:\n try:\n print(\"Trying to connect...\")\n \n s.connect((TCP_IP, TCP_PORT))\n \n print(\"Connected\")\n \n break\n except Exception as e:\n print(e)\n \n time.sleep(0.5)\n \n continue\n \n while True:\n try:\n send_data(s)\n \n time.sleep(0.05)\n except Exception as e:\n print(\"Reconnecting\")\n \n break\n \n s.close()\n\nif __name__ == '__main__':\n open((\"LogData-%d.csv\" % log_file_time), \"w\").write(\"Speed,RPM,Timestamp\")\n \n dataThread = threading.Thread(target = obtain_data, args = ())\n dataThread.start()\n \n dataThread = threading.Thread(target = android_connect, args = ())\n dataThread.start()\n \n while True:\n pass"
},
{
"alpha_fraction": 0.7101449370384216,
"alphanum_fraction": 0.7149758338928223,
"avg_line_length": 22,
"blob_id": "5e7e95119c1097d3a62259c3d4d9c566abf4b3c3",
"content_id": "d5680c88013f59cfaee36788b62c94c39e6c1733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 9,
"path": "/Readme.md",
"repo_name": "ConnorChristie/datacollector",
"src_encoding": "UTF-8",
"text": "# Raspberry Pi\n\nThe code in this directory is meant to be run on the Raspberry Pi.\n\n## Collected Data from Vehicle\n- Road speed [mph]\n- Acceleration [mi*hr^-2]\n- Engine speed [rpm]\n- Engine temperature [°F]\n"
},
{
"alpha_fraction": 0.7337948679924011,
"alphanum_fraction": 0.76148521900177,
"avg_line_length": 52,
"blob_id": "1417818bf7120670effe46170d4fd1eff5ce2b5b",
"content_id": "80f31ad111ef2586dfd7cadc8b576d0ee73cab15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1589,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 30,
"path": "/README.md",
"repo_name": "ConnorChristie/datacollector",
"src_encoding": "UTF-8",
"text": "# SuperMileage RPi\nThe Raspberry Pi script for the SuperMileage car. Sends things such as speed and RPM to the HUD.\n\n### About\n- This is the raspberry pi script\n- This sends data to the android app via TCP over ADB\n\n### Requirements\n- Python v3.0 or higher\n- PyDev plugin for Eclipse (http://pydev.org/updates)\n- ADB for RPi (http://forum.xda-developers.com/showthread.php?t=1924492, http://forum.xda-developers.com/attachment.php?attachmentid=1392336&d=1349930509)\n\n### How to Clone\n#### Installing the plugins\n1. Install the PyDev plugin in eclipse: http://pydev.org/updates\n2. Install EGit into eclipse: http://download.eclipse.org/egit/updates\n\n#### Cloning the project\n1. Create a new PyDev project with the name SM-RPi and all the default selections\n2. Create a new file in the project and name it with your name\n4. Right click on the project > Team > Share Project...\n5. Click \"Use or create repository in parent folder or project\"\n6. Click on the project and click Create Repository and click Finish\n7. Right click on the project > Team > Commit...\n8. Add a commit message and ONLY select the file with your name, then click Commit and Push\n9. Click New Remote, name it master and put the URI as: https://github.com/MSOE-Supermileage/SM-RPi.git and click Finish\n10. Click Next and it should give you an error: non-fast-forward, thats OK! Click Finish\n11. Right click on the project > Team > Pull, it should successfully pull the current code from the repo\n12. Now delete your file with your name and Commit and Push!\n13. You are now all set up to begin pushing commits to the repo!"
}
] | 4 |
jjlk/gammapy-extra | https://github.com/jjlk/gammapy-extra | fc2c4d39071565115b6bf63b287945b365a1ee71 | 4d59e4401df585d3499fc5276eb19a6bf337fef5 | 202cd5bcec880de707ae03b7436b1829883d05a0 | refs/heads/master | 2020-12-25T20:20:29.149960 | 2016-07-10T09:59:45 | 2016-07-10T09:59:45 | 63,338,595 | 0 | 0 | null | 2016-07-14T13:24:29 | 2016-05-30T08:35:20 | 2016-07-10T09:59:58 | null | [
{
"alpha_fraction": 0.7320964932441711,
"alphanum_fraction": 0.7521336078643799,
"avg_line_length": 28.58241844177246,
"blob_id": "e524f824ec900b19fb411229f7f36146d3094c83",
"content_id": "3dc80694b1591941dfb1c91baacb099ab8b59ddb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2695,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 91,
"path": "/datasets/fermi_2fhl/fermi_skyimages.py",
"repo_name": "jjlk/gammapy-extra",
"src_encoding": "UTF-8",
"text": "\"\"\"\nLittle script to produce Fermi 2FHL sky images \n\"\"\"\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\n\nfrom scipy.ndimage import convolve\nfrom astropy.convolution import Tophat2DKernel, Ring2DKernel, Gaussian2DKernel\nfrom gammapy.data import EventList\nfrom gammapy.image import SkyMap, SkyMapCollection\nfrom gammapy.background import IterativeKernelBackgroundEstimator as IKBE\nfrom gammapy.background import GammaImages\nfrom gammapy.detect import compute_lima_map\t\n\ndef gaussian_smooth(skyimage, width):\n\tkernel = Gaussian2DKernel(width, mode='oversample').array\n\tskyimage.data = convolve(skyimage.data, kernel)\n\treturn skyimage\n\n\ndef counts_skyimage_2fhl(**kwargs):\n\tlog.info('Computing counts map.')\n\tevents = EventList.read('2fhl_events.fits.gz')\n\tcounts = SkyMap.empty('Counts', **kwargs)\n\tcounts.fill(events)\n\treturn counts\n\n\ndef background_skyimage_2fhl(counts):\n\tlog.info('Computing background map.')\n\timages = GammaImages(counts.data, header=counts.wcs.to_header())\n\n\tsource_kernel = Tophat2DKernel(5)\n\tsource_kernel.normalize('peak')\n\n\tbackground_kernel = Ring2DKernel(20, 20)\n\tbackground_kernel.normalize('peak')\n\n\tikbe = IKBE(\n\t images=images,\n\t source_kernel=source_kernel.array,\n\t background_kernel=background_kernel.array,\n\t significance_threshold=5,\n\t mask_dilation_radius=3,\n\t)\n\n\tmask_data, background_data = ikbe.run()\n\n\tmask = SkyMap.empty_like(counts)\n\tmask.data = mask_data\n\n\tbackground = SkyMap.empty_like(counts)\n\tbackground.data = background_data\n\treturn mask, background\n\n\ndef skyimages_2fhl(**kwargs):\n\t# Counts\n\tskyimages = SkyMapCollection()\n\tskyimages.counts = counts_skyimage_2fhl(**kwargs)\n\tskyimages['counts_smoothed_0.25'] = gaussian_smooth(skyimages.counts.copy(), 2.5)\n\n\t# Background & Exclusion\n\texclusion, background = background_skyimage_2fhl(skyimages.counts)\n\tskyimages.exlusion = exclusion\n\tskyimages.background = background\n\n\t# Significance\n\tlog.info('Computing counts map.')\n\ttophat = Tophat2DKernel(5)\n\ttophat.normalize('peak')\n\tresult = compute_lima_map(skyimages.counts, skyimages.background, tophat)\n\tskyimages['significance_0.5'] = result['significance']\n\treturn skyimages\n\t\n\nif __name__ == '__main__':\n\t# Galactic center\n\tfilename = 'fermi_2fhl_gc.fits.gz'\n\tkwargs = dict(nxpix=320, nypix=180, binsz=0.1)\n\tgc_skyimages = skyimages_2fhl(**kwargs)\n\tlog.info('Writing {}'.format(filename))\n\tgc_skyimages.write(filename, clobber=True)\n\n\t# Vela region\n\tfilename = 'fermi_2fhl_vela.fits.gz'\n\tkwargs = dict(nxpix=320, nypix=180, binsz=0.1, xref=266, yref=-1.2)\n\tvela_skyimages = skyimages_2fhl(**kwargs)\n\tlog.info('Writing {}'.format(filename))\n\tvela_skyimages.write(filename, clobber=True)\n\n\n\n"
}
] | 1 |
DeshDSingh/SMS-SPAM-Detection | https://github.com/DeshDSingh/SMS-SPAM-Detection | 00aca9749f879c8108b0768a33b72876685b1342 | fd9f3021aa2b723e4ee586052f9fec17ef337f9d | f18def2cc2956f09b29bd41dcdccbae81590f4fe | refs/heads/master | 2020-04-11T11:47:59.584975 | 2018-12-14T09:46:08 | 2018-12-14T09:46:08 | 161,759,873 | 0 | 1 | null | 2018-12-14T09:10:24 | 2018-12-14T09:10:27 | 2018-12-14T09:18:21 | null | [
{
"alpha_fraction": 0.7911522388458252,
"alphanum_fraction": 0.7928669452667236,
"avg_line_length": 82.31428527832031,
"blob_id": "559636eba676bab081d160c43064cea141e67451",
"content_id": "de9a2275c8489dc3fcfe84998a524f8d488bd228",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5851,
"license_type": "no_license",
"max_line_length": 684,
"num_lines": 70,
"path": "/README.md",
"repo_name": "DeshDSingh/SMS-SPAM-Detection",
"src_encoding": "UTF-8",
"text": "# SMS-SPAM-Detection\nMachine Learning Project for Text Classification\n\n# 1.\tINTRODUCTION\nIn today’s world there is a big need of analyzing the data as it has lot of information that goes un-noticed. In order to detect such pattern, the Machine Learning techniques are used.\nIn this project, we are going to work with a dataset which has the data of SMS which are collected to classify them into SPAM or HAM. So, if a message is sent by a real user then it should be tagged as Ham or if it is by a machine for advertisement purpose then it should be tagged as SPAM.\n# 1.1\tPURPOSE\nIn daily life, we receive lot of messages from different sources and those messages might be arriving from a machine just for the advertisement purpose. The mobile user gets irritated because of such messages and may even ignore a real message from a known person. In order to avoid such cases, we are going to classify these SMS messages into two categories as SPAM or HAM.\n# 1.2\tSCOPE\nThe project is made on Anaconda Jupyter Notebook. This application is an easy to use application as the user need to run the complete program and the user will get the accuracy of each model. \nSince, there is no un-labelled data, we are going to divide the data in Train and Test dataset. Test dataset is used for validating our model performance.\nAlso, in case in future we have some new dataset which is not labelled then we can classify them too. \n# 1.3\tOVERVIEW\nThis document will give you an easy walk over through the application and act as a guide with easy steps to use and maintain the application. Detailed overview of each feature and design is covered below in the System Overview. This application does not involve any database, but this is a future aspect of this application in case there is a need to store the labelled data, the flow of application is explained with data flow in use case diagram under System Architecture section. In the end, a visual look and feel of this application with the flow of application is shown. This application act as a perfect medium to classify the SMS messages with text data using NLP techniques. \n\n# 2.\tSYSTEM OVERVIEW\nThe project involves the text data which cannot be classified by basic modeling techniques and hence we are using NLP techniques. Natural Language Processing helps us to bridge the gap of text data with numerical data which is needed to run by the machine.\nIt also helps us to neutralize the dirty text data into a simpler form. We are removing Stop words punctuations, commonly appearing terms using TF-iDF (Term Frequency inverse Document Frequency).\nThe major steps that are involved in order to classify the data are as follows. Each step is described with Code Snippet.\n# Importing Libraries:\nFirst thing we are going to do is to import the important libraries. Since we are working on Text data, we have imported NLTK for text analysis. Also, for data visualization, we have used matplotlib library.\n# Read Dataset: \nNext thing we need to do is to input the dataset we need to work on. The data is located in our current working directory which is Downloads in this case. The command dataframe.head() help us in checking the first 5 lines of our dataframe.\n \n# Remove NA values:\nThe biggest problem in any dataset is the NA values which needs to be handled very carefully. 
Since they are not going to add any meaning to our classification, we need to remove all the NA fields.\n \n# Describe the dataset and the label column:\nThe data should be understood correctly and hence, we are going to describe the whole dataset and also the label column.\n \nFigure 4: Describing the Dataset and the Label Set\nThis image clearly shows that our data has only two different kind of labels and also the count of unique values.\n# For Visualization – Adding new column:\nIn order to visualize our dataset, we need to add one column which shows the total number of words in that particular feature field.\n \n# Basic Visualization of dataset:\nLet’s do a simple visualization of data before we start with machine learning.\n \n# Machine Learning Steps – Basic Text pre-processing steps:\nThis step is our first important step and we are first going to do some text pre-processing in order to clean the dataset before we input it to our models.\n\nSo, we have converted all the text into lower case and then we removed the punctuations from our dataset. Finally, we removed the stop words form our dataset.\n\n# Stemming each term using Porter Stemmer:\nIn order to make sure that our model predict the data better, we are going to stem our words into its stem form.\n\n# Split the dataset into train and test:\nSince, we do not have any train and test dataset split already, we are going to split the dataset into train and test. The test dataset is used for validating our models.\n\n# Training Models Naïve Bayes:\nLet’s start training our models. The first model we are going to use is Naïve Bayes.\nIn the last line, we have also shown the predicted values.\n\n# Decision Tree:\nSimilarly, we are going to train our data with Decision Tree.\n \n# Random Forest:\nAlso, we are going to use Random Forest as generally it gives higher accuracy then other models.\n \n# Support Vector Machine:\nSVMs are always considered as good model for text classification. So, let’s train the SVM too.\n \n# Classification Report for all the models:\nAs we are done with all the models we wanted to use, let’s check the classification report of these models.\n \n# Finally Checking the Accuracy Score:\nFinally let’s check their accuracy score in order to check which model performed best amongst all.\nSo, we can see that SVM and Naïve Bayes has given the best accuracy although there is no much difference with other models.\n\nThis is a simple implementation of Machine Learning to classify the text data. The code is attached below for more details.\n"
},
{
"alpha_fraction": 0.7076605558395386,
"alphanum_fraction": 0.7217440605163574,
"avg_line_length": 33.12378692626953,
"blob_id": "030c181f32b6c124509950c49aff680220847b9f",
"content_id": "efcbc9dda428266a604f5d742c5720e742016d1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14063,
"license_type": "no_license",
"max_line_length": 581,
"num_lines": 412,
"path": "/SMS SPAM DETECTION.py",
"repo_name": "DeshDSingh/SMS-SPAM-Detection",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # SPAM SMS DETECTION\n\n# **Input data files are available in the current working directory**\n# **By running this files all the cells, we will be able to find the results of spam detection with Natural language processing and performance of different models**\n# **Natural Language Processing is basically consists of combining machine learning techniques with text, and using math and statistics to get that text in a format that the machine learning algorithms can understand!**\n# \n# **NLTK should be installed, along with downloading the corpus for stopwords. Download using nltk.download()**\n# \n# **Now, we are going to see NLP techniques to classify SMS into Ham or Spam using different Machine Learning Models.**\n\n# In[46]:\n\n\n# importing Libraries\n\nimport numpy as np # For numerical analysis\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt # for plotting graphs\nimport nltk # for text processing\nimport os # for system based operations\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n#print(os.listdir(\"../\"))\nprint(os.getcwd()) # to chech the current working directory\n# if needed results can be saved too.\n\n\n# ## Lets first get the Data in a Dataframe\n# The dataset downloaded file contains a collection of more than 5 thousand SMS phone messages.\n# \n\n# In[2]:\n\n\nspam_test_dataframe = pd.read_csv(os.getcwd() + '/sms_spam.csv',names= ['label', 'feature']) # read csv as we have a csv file to read\n\nspam_test_dataframe.head() # to print first 5 rows of data frame\n\n\n# As we can see that first row contains header, we need to remove them first.\n\n# In[3]:\n\n\nspam_test_dataframe.dropna()\nspam_test_dataframe=spam_test_dataframe.iloc[1:]\n\n\n# In[4]:\n\n\nspam_test_dataframe.isnull().values.any()\n\n\n# In[5]:\n\n\nspam_test_dataframe.head()\n\n\n# ## Data Analysis\n# \n# First we will analyse the data which we have received and then we will do machine learning on it.\n\n# In[6]:\n\n\nspam_test_dataframe.describe() # this describe how our dataset looks like\n\n\n# In[7]:\n\n\nspam_test_dataframe.groupby('label').describe() #this describe our lablel column\n\n\n# As suspected length of message could bre really useful to identify the spam or a ham sms.\n# \n# Let us add another column called length of feature which will have how much does the message length is.\n\n# In[8]:\n\n\nspam_test_dataframe['length'] = spam_test_dataframe['feature'].apply(len)\nspam_test_dataframe.head()\n\n\n# ### Data Visualization\n# Lets Analyse the data before we do some machine learning\n\n# In[9]:\n\n\nspam_test_dataframe['length'].plot(bins=100, kind='hist') \n\n\n# In[11]:\n\n\nspam_test_dataframe.length.describe()\n\n\n# So the message with longest length is of 910 characters\n\n# In[12]:\n\n\nspam_test_dataframe[spam_test_dataframe['length'] == 910]['feature'].iloc[0]\n\n\n# So this SMS message was sent by one person to other personally so it is ham. 
But this does not help much in Ham Spam identification\n\n# In[13]:\n\n\nspam_test_dataframe.hist(column='length', by='label', bins=50,figsize=(12,4))\n\n\n# Our data is text data so first it should be in a vector format which is then input to machine learning model.\n# In this section we'll convert the raw messages (sequence of characters) into vectors (sequences of numbers).\n# \n# Step1: Do some preprocessing like removing punctuation, stop words etc.\n# Step2: Do some advance text processing like converting to bag of words, N-gram etc.\n# Step3: Machine Learning model fit adn transform\n# Step4: Model accuracy check.\n# Let's first start with with step 1 and then rest will follow.\n\n# # Machine Learning Step\n\n# In[14]:\n\n\nimport string\nfrom nltk.corpus import stopwords\n\n\n# In[15]:\n\n\n# text pre-processing\nspam_test_dataframe['feature'] = spam_test_dataframe['feature'].str.replace('[^\\w\\s]','')\nspam_test_dataframe['feature'] = spam_test_dataframe['feature'].apply(lambda x: \" \".join(x.lower() for x in x.split()))\nstop = stopwords.words('english')\nspam_test_dataframe['feature'] = spam_test_dataframe['feature'].apply(lambda x: \" \".join(x for x in x.split() if x not in stop))\n\n\n# In[16]:\n\n\n# Check to make sure its working\nspam_test_dataframe['feature'].head()\n\n\n# Lets do something even better beside above techniques, lets shorten the terms to their stem form.\n\n# In[17]:\n\n\nfrom nltk.stem import PorterStemmer\nst = PorterStemmer()\n\n\n# In[18]:\n\n\nspam_test_dataframe['feature'] = spam_test_dataframe['feature'][:5].apply(lambda x: \" \".join([st.stem(word) for word in x.split()]))\n\n\n# Each vector will have as many dimensions as there are unique words in the SMS corpus. We will first use SciKit Learn's **CountVectorizer**. This model will convert a collection of text documents to a matrix of token counts.\n# \n# We can imagine this as a 2-Dimensional matrix. Where the 1-dimension is the entire vocabulary (1 row per word) and the other dimension are the actual documents, in this case a column per text message. \n# \n# For example:\n# \n# <table border = “1“>\n# <tr>\n# <th></th> <th>Message 1</th> <th>Message 2</th> <th>...</th> <th>Message N</th> \n# </tr>\n# <tr>\n# <td><b>Word 1 Count</b></td><td>0</td><td>1</td><td>...</td><td>0</td>\n# </tr>\n# <tr>\n# <td><b>Word 2 Count</b></td><td>0</td><td>0</td><td>...</td><td>0</td>\n# </tr>\n# <tr>\n# <td><b>...</b></td> <td>1</td><td>2</td><td>...</td><td>0</td>\n# </tr>\n# <tr>\n# <td><b>Word N Count</b></td> <td>0</td><td>1</td><td>...</td><td>1</td>\n# </tr>\n# </table>\n# \n# \n# Since there are so many messages, we can expect a lot of zero counts for the presence of that word in that document. 
Because of this, SciKit Learn will output a [Sparse Matrix](https://en.wikipedia.org/wiki/Sparse_matrix).\n\n# In[19]:\n\n\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn import decomposition, ensemble\nfrom sklearn.utils import shuffle\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cross_validation import KFold, cross_val_score\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# we will use pipelines to make the steps short. \n# The pipeline will do the vectorization, Term frequcz transformation and model fitting together.\n\n# After the counting, the term weighting and normalization can be done with [TF-IDF](http://en.wikipedia.org/wiki/Tf%E2%80%93idf), using scikit-learn's `TfidfTransformer`.\n# \n# ____\n# ### So what is TF-IDF?\n# TF-IDF stands for *term frequency-inverse document frequency*, and the tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query.\n# \n# One of the simplest ranking functions is computed by summing the tf-idf for each query term; many more sophisticated ranking functions are variants of this simple model.\n# \n# Typically, the tf-idf weight is composed by two terms: the first computes the normalized Term Frequency (TF), aka. the number of times a word appears in a document, divided by the total number of words in that document; the second term is the Inverse Document Frequency (IDF), computed as the logarithm of the number of the documents in the corpus divided by the number of documents where the specific term appears.\n# \n# **TF: Term Frequency**, which measures how frequently a term occurs in a document. Since every document is different in length, it is possible that a term would appear much more times in long documents than shorter ones. Thus, the term frequency is often divided by the document length (aka. the total number of terms in the document) as a way of normalization: \n# \n# *TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document).*\n# \n# **IDF: Inverse Document Frequency**, which measures how important a term is. While computing TF, all terms are considered equally important. However it is known that certain terms, such as \"is\", \"of\", and \"that\", may appear a lot of times but have little importance. 
Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following: \n# \n# *IDF(t) = log_e(Total number of documents / Number of documents with term t in it).*\n# \n# See below for a simple example.\n# \n# **Example:**\n# \n# Consider a document containing 100 words wherein the word cat appears 3 times. \n# \n# The term frequency (i.e., tf) for cat is then (3 / 100) = 0.03. Now, assume we have 10 million documents and the word cat appears in one thousand of these. Then, the inverse document frequency (i.e., idf) is calculated as log(10,000,000 / 1,000) = 4. Thus, the Tf-idf weight is the product of these quantities: 0.03 * 4 = 0.12.\n# ____\n# \n# Let's go ahead and see how we can do this in SciKit Learn:\n# To transform the entire bag-of-words corpus into TF-IDF corpus at once:\n\n# # Train Test Split\n\n# In[22]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(spam_test_dataframe['feature'].values.astype('U'), spam_test_dataframe['label'], test_size=0.2, random_state=1)\n\nprint(len(X_train), len(X_test), len(y_train) + len(y_test))\n\n\n# The test size is 20% of the entire dataset (1112 messages out of total 5559), and the training is the rest (4447 out of 5559). Note the default split would have been 30/70.\n# \n# ## Creating a Data Pipeline\n# \n# Let's run our model again and then predict off the test set. We will use SciKit Learn's [pipeline](http://scikit-learn.org/stable/modules/pipeline.html) capabilities to store a pipeline of workflow. This will allow us to set up all the transformations that we will do to the data for future use. Let's see an example of how it works:\n\n# ## Training a model\n# \n# With messages represented as vectors, we can finally train our spam/ham classifier. Now we can actually use almost any sort of classification algorithms. 
For a [variety of reasons](http://www.inf.ed.ac.uk/teaching/courses/inf2b/learnnotes/inf2b-learn-note07-2up.pdf), the Naive Bayes classifier algorithm is a good choice.\n# \n# Using scikit-learn here, choosing the Naive Bayes, Decision Tree, Random Forest, Support Vector Machine classifiers to start with:\n# In the end we will compare the accuracy of each model.\n# \n\n# # Naive Bayes\n\n# In[32]:\n\n\n# Pipelining \ntext_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()), ('clf', MultinomialNB()),])\ntext_clf = text_clf.fit(X_train, y_train)\n# using GridSearch CV\nparameters = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False), 'clf__alpha': (1e-2, 1e-3),}\ngs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)\ngs_clf = gs_clf.fit(X_train, y_train)\ngs_clf.best_score_\ngs_clf.best_params_\npredicted_nb = gs_clf.predict(X_test)\nprint(predicted_nb)\n\n\n# Lets build other models too as promised earlier.\n# \n# # Decision Tree\n\n# In[25]:\n\n\n# Decisiton Tree Pipelining \ndt = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\n ('clf-dt', DecisionTreeClassifier(criterion = \"gini\", splitter=\"best\",\n max_depth=20, random_state = 42)),])\n_ = dt.fit(X_train, y_train)\n\npredicted_dt = dt.predict(X_test) \nprint(predicted_dt)\n\n\n# # Random Forest\n\n# In[26]:\n\n\n# Pipelining \nrf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\n ('clf-rf', RandomForestClassifier(n_estimators = 100, max_depth=5, random_state = 42)),])\n_ = rf.fit(X_train, y_train)\n\npredicted_rf = rf.predict(X_test) \nprint(predicted_rf) \n\n\n# # Support Vector Machine\n\n# In[27]:\n\n\n# using SVM\ntext_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\n ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42)),])\n_ = text_clf_svm.fit(X_train, y_train)\npredicted_svm = text_clf_svm.predict(X_test)\nprint(predicted_svm)\n\n\n# # Classification Report\n# Lets develop the classification report for all above models\n\n# In[29]:\n\n\nfrom sklearn.metrics import classification_report\ntarget_names = ['Features', 'Labels']\n\n\n# In[33]:\n\n\nprint(classification_report(y_test, predicted_nb, target_names=target_names))\n\n\n# In[34]:\n\n\nprint(classification_report(y_test, predicted_dt, target_names=target_names))\n\n\n# In[35]:\n\n\nprint(classification_report(y_test, predicted_rf, target_names=target_names))\n\n\n# In[36]:\n\n\nprint(classification_report(y_test, predicted_svm, target_names=target_names))\n\n\n# # Accuracy Score\n\n# In[41]:\n\n\nprecision_nb = accuracy_score(y_test, predicted_nb)\nprint(\"Naive Bayes Accuracy Score: \", precision_nb)\n\n\n# In[42]:\n\n\nprecision_dt = accuracy_score(y_test, predicted_dt)\nprint(\"Decision Tree Accuracy Score: \", precision_dt)\n\n\n# In[43]:\n\n\nprecision_rf = accuracy_score(y_test, predicted_rf)\nprint(\"Random Forest Accuracy Score: \", precision_dt)\n\n\n# In[44]:\n\n\nprecision_svm = accuracy_score(y_test, predicted_svm)\nprint(\"Support Vector Machine Accuracy Score: \", precision_dt)\n\n\n# In[45]:\n\n\nhighest = max(precision_nb, precision_dt, precision_rf, precision_svm)\nprint(\"the the highest accuracy is: \", highest)\n\n\n# **So the our model predicted very well on the dataset with an accuracy about 86%. If we fine tune our model, our accuracy could increase.**\n"
}
] | 2 |
ttrankle/CarMax | https://github.com/ttrankle/CarMax | 7cd3b728798d2c391aed51d221801d5d985659a8 | 6908856a514a899a38412d65f752e3d7e20141cd | 94f799efb9901fc7804ef5b708106bd593d026e4 | refs/heads/main | 2023-02-15T01:00:08.697167 | 2021-01-15T15:15:41 | 2021-01-15T15:15:41 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5663959980010986,
"alphanum_fraction": 0.6299259662628174,
"avg_line_length": 20.85378646850586,
"blob_id": "036d7dde40719afcc56916c4c0a2802038a4a6d9",
"content_id": "fb75aedcdf1d90e80ada8de1729f18dd6d8904d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8374,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 383,
"path": "/Carmax.py",
"repo_name": "ttrankle/CarMax",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Carmax Analytics Competition \n\n# In[172]:\n\n\nimport pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport seaborn as sns\nimport numpy as np\n\n\n# In[3]:\n\n\ndf = pd.read_csv(\"C:/Users/msado/Documents/CarMax.csv\") \ndf2 = pd.read_excel(\"C:/Users/msado/Documents/national_M2019_dl.xlsx\")\n\n\n# In[173]:\n\n\nprint(df.dtypes)\n\n\n# ## Create Unique variables \n\n# In[203]:\n\n\njobs = df2['occ_title']\nmedian_salary = df2['a_median']\nannual_bottom = df2['a_pct10']\ncust_income = df['customer_income']\ncust_age = df['customer_age']\nvehicle_cost = df['purchase_price']\ndistance = df['customer_distance_to_dealer']\n\n\n# ## Cleaning up distance to dealership\n\n# In[207]:\n\n\ndistance_range = set()\nfor num in distance:\n if num == '?':\n continue\n parts1 = num.split(\"-\")\n if len(parts1) == 2:\n distance_range.add((int(parts1[0]))\n#distance_range= list(distance_range)\ndistance_range.sort()\ndistance_range\n\n\n\n'''\nnew_purchase_prices = set()\nfor num in vehicle_cost:\n if num == '?':\n continue\n parts1 = num.split(\"-\")\n if len(parts1) == 2:\n new_purchase_prices.add((int(parts1[0]),int(parts1[1])))\n else:\n new_purchase_prices.add((int(parts1[0][:-1])))\nnew_purchase_prices= list(new_purchase_prices)\nnew_purchase_prices.sort()\n# Removing age ranges above 100's as it could mean fraud \nnew_purchase_prices\n'''\n\n\n# In[ ]:\n\n\nnew_prices = []\n\nfor i in range(len(vehicle_cost)):\n if vehicle_cost[i] == '?':\n y = random.randint(0,len(new_purchase_prices)- 1)\n new_prices.append( '%d - %d'%(new_purchase_prices[y][0],new_purchase_prices[y][1]) )\n else:\n new_prices.append(vehicle_cost[i])\n \ndf['new_prices']= new_prices\n\n\n# In[ ]:\n\n\nfig, ax = plt.subplots()\nax.hist(df['new_prices'], color = 'g',bins = 40)\n\nfig.suptitle('Count of Vehicle Purchase Ranges')\nfig.set_size_inches(15,8)\n\nax.xaxis.set_label_text('Vehicle Price Ranges')\nax.xaxis.set_tick_params(which = 'both', top = False, bottom = True, labelbottom = True) # Turn top x axis tick marks off \n\nax.yaxis.set_label_text('Total Vehicle Price Range Totals')\nax.yaxis.set_tick_params(which = 'both', right = False, left = True, labelleft = True) # Turn right y axis tick marks off\n\n\nplt.style.use('ggplot')\nax.set_xlim(0,10)\nax.set_ylim(0,90000)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.grid(False)\n\n\n# In[ ]:\n\n\n\n\n\n# ## Cleaning up Vehicle Purchase Price \n\n# In[ ]:\n\n\nCar_purchase = np.random.choice(1, 18, p =[0.025598948,\n0.284308491,\n0.332151267,\n0.186417103,\n0.09084676,\n0.042894395,\n0.020431389,\n0.008601359,\n0.004425724,\n0.001910789,\n0.001048124,\n0.000584477,\n0.000382158,\n0.000179839,\n0.000112399,\n3.93398E-05,\n4.77697E-05,\n1.12399E-05,\n8.42995E-06,\n]) \n\n\n# In[198]:\n\n\nnew_purchase_prices = set()\nfor num in vehicle_cost:\n if num == '?':\n continue\n parts1 = num.split(\"-\")\n if len(parts1) == 2:\n new_purchase_prices.add((int(parts1[0]),int(parts1[1])))\n else:\n new_purchase_prices.add((int(parts1[0][:-1])))\nnew_purchase_prices= list(new_purchase_prices)\nnew_purchase_prices.sort()\n# Removing age ranges above 100's as it could mean fraud \nnew_purchase_prices\n\n\n# In[179]:\n\n\nnew_prices = []\n\nfor i in range(len(vehicle_cost)):\n if vehicle_cost[i] == '?':\n y = random.randint(0,len(new_purchase_prices)- 1)\n new_prices.append( '%d - 
%d'%(new_purchase_prices[y][0],new_purchase_prices[y][1]) )\n else:\n new_prices.append(vehicle_cost[i])\n \ndf['new_prices']= new_prices\n\n\n# In[211]:\n\n\nfig, ax = plt.subplots()\nax.hist(df['new_prices'], color = 'g',bins = 40)\n\nfig.suptitle('Count of Vehicle Purchase Ranges')\nfig.set_size_inches(15,8)\n\nax.xaxis.set_label_text('Vehicle Price Ranges')\nax.xaxis.set_tick_params(which = 'both', top = False, bottom = True, labelbottom = True) # Turn top x axis tick marks off \n\nax.yaxis.set_label_text('Total Vehicle Price Range Totals')\nax.yaxis.set_tick_params(which = 'both', right = False, left = True, labelleft = True) # Turn right y axis tick marks off\n\n\nplt.style.use('ggplot')\nax.set_xlim(0,10)\nax.set_ylim(0,130000)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.grid(False)\n\n\n# ## Clean up the customer ages\n\n# In[124]:\n\n\nage_ranges = set()\nfor num in cust_age:\n if num == '?':\n continue\n parts1 = num.split(\"-\")\n if len(parts1) == 2:\n age_ranges.add((int(parts1[0]),int(parts1[1])))\n else:\n age_ranges.add((int(parts1[0][:-1]),110))\nage_ranges= list(age_ranges)\nage_ranges.sort()\n# Removing age ranges above 100's as it could mean fraud \ndel age_ranges[9]\nage_ranges\n\n\n# In[177]:\n\n\nnew_ages = []\n\nfor i in range(len(cust_age)):\n if cust_age[i] == '?':\n y = random.randint(0,len(age_ranges)- 1)\n new_ages.append( '%d - %d'%(age_ranges[y][0],age_ranges[y][1]) )\n else:\n new_ages.append(cust_age[i])\n \ndf['new_ages']= new_ages\n\n\n# In[171]:\n\n\nfig, ax = plt.subplots()\nax.hist(df['new_ages'], color = 'g',bins = 20)\n\nfig.suptitle('Count of Customer ages')\nfig.set_size_inches(11,5)\n\nax.xaxis.set_label_text('Customer Age Ranges')\nax.xaxis.set_tick_params(which = 'both', top = False, bottom = True, labelbottom = True) # Turn top x axis tick marks off \n\nax.yaxis.set_label_text('Total Age Range Totals')\nax.yaxis.set_tick_params(which = 'both', right = False, left = True, labelleft = True) # Turn right y axis tick marks off\n\n\nplt.style.use('ggplot')\nax.set_xlim(0,10)\nax.set_ylim(0,90000)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.grid(False)\n\n\n# ## Want to rearrange the data so that income doesn't include dashes\n\n# In[129]:\n\n\nincome_ranges = set()\nfor val in cust_income:\n if val == '?':\n continue \n parts = val.split('-')\n #print(parts)\n if len(parts) == 2:\n income_ranges.add((int(parts[0]),int(parts[1])))\n else:\n income_ranges.add((int(parts[0][:-1]),1000001))\nincome_ranges= list(income_ranges)\n\nincome_ranges.sort()\nincome_ranges\n\n\n# ### Give values to the \"?\" in the data set\n\n# In[216]:\n\n\nlen(probs)\n\n\n# In[217]:\n\n\n# Using choice() method \nprobs = [0.091754385,0.2272012,0.216433344,0.11958165,0.083268235,0.044451126,0.020959665,0.021260334,0.010551487,0.004639282,0.030645678,0.129253612]\n\nnew_income = []\n\nfor i in range(len(cust_income)):\n if cust_income[i] == '?':\n #x = random.randint(0,len(income_ranges)- 1)\n pick1 = np.random.choice(12, 45998, p = probs) \n new_income.append( '%d - %d'%(income_ranges[pick1][0],income_ranges[pick1][1]) )\n else:\n new_income.append(cust_income[i])\n \ndf['new_income']= new_income\n\n\n# ### Plot Distributions\n\n# In[161]:\n\n\nfig, ax = plt.subplots()\nax.hist(df['new_income'], color = 'Gold',bins = 23)\n\nfig.suptitle('End of Year test Score Frequency Histogram')\nfig.set_size_inches(20,10)\n\nax.xaxis.set_label_text('Customer Income Ranges')\nax.xaxis.set_tick_params(which = 'both', top 
= False, bottom = True, labelbottom = True) # Turn top x axis tick marks off \n\nax.yaxis.set_label_text('Total Income Range Totals')\nax.yaxis.set_tick_params(which = 'both', right = False, left = True, labelleft = True) # Turn right y axis tick marks off\nabline =True \nax.set_ylim(0,100000)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.grid(False)\n\n\n# ## Incoprating BLS jobs data with customer income\n\n# In[219]:\n\n\nv1 = {}\nfor i in range(len(annual_bottom)):\n try:\n bottom = int(annual_bottom[i])\n if bottom < 21000:\n if v1.get(20000) is None:\n v1[20000] = [ jobs[i] ]\n else:\n v1[20000].append(jobs[i])\n except: \n pass\n \nfor i in range(len(median_salary)):\n for low,high in income_ranges:\n try:\n salary = int(median_salary[i])\n if salary >= low and salary <= high:\n if v1.get(high) is None:\n v1[high] = [ jobs[i] ]\n else:\n v1[high].append( jobs[i] )\n except: \n pass\n\nv1\n\n\n# ## See who pursued financing \n\n# In[163]:\n\n\nfin = []\nfor i in range(len(df)):\n if df['vehicle_financing'][i] == 1:\n fin.append([df['new_ages'][i],df['vehicle_financing'][i],df['new_income'][i],df['purchase_make'][i]])\nfin\n\n\n# In[ ]:\n\n\n\n\n"
},
{
"alpha_fraction": 0.7517730593681335,
"alphanum_fraction": 0.7517730593681335,
"avg_line_length": 27.200000762939453,
"blob_id": "a7d95df3b3bd4ed7cdef6cc6e7201ef8211a8af1",
"content_id": "0bf511d725eb3d7331c2a3ccb653fd5e52c40410",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 5,
"path": "/README.md",
"repo_name": "ttrankle/CarMax",
"src_encoding": "UTF-8",
"text": "# CarMax Fun\nFun times with friends\n\n## GenderPopularityOfCars.ipynb\nSee which car is most popular with a specific gender aka 'M','F' or 'U'\n"
},
{
"alpha_fraction": 0.5330188870429993,
"alphanum_fraction": 0.5597484111785889,
"avg_line_length": 22.538461685180664,
"blob_id": "123af03715f74b965e8d53bdc9036c8bda2c8fe7",
"content_id": "a315389ecc00341b2fcd9b0887c3f0a36cc7c1b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 26,
"path": "/car.py",
"repo_name": "ttrankle/CarMax",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 11 10:34:25 2020\r\n\r\n@author: msado\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\n\r\ndata = pd.read_csv(\"C:/Users/msado/Documents/CarMax.csv\") \r\n\r\nprint(data)\r\ndata.head()\r\n\r\n\r\ndata.groupby('insert_num')['subsequent_purchases'].head().sort_values(ascending = False)\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nsns.set_style('dark')\r\nmatplotlib inline \r\n\r\nplt.figure(figsize=(8,6)) \r\nplt.rcParams['patch.force_edgecolor'] = True \r\nratings_mean_count['rating_counts'].hist(bins=50)"
}
] | 3 |
sarinmadhur/git-demo | https://github.com/sarinmadhur/git-demo | c6c4b93ff0d280c4a5d5c9b5d3f9ab197dc45dce | b1f5a73f44ba0cdfb917b08283b7b4ce3f78afe9 | 5423818b7b2f97979713f2a187b78fab83a9d2c1 | refs/heads/master | 2020-07-17T07:31:11.670652 | 2019-09-03T02:55:35 | 2019-09-03T02:55:35 | 205,975,034 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7743589878082275,
"alphanum_fraction": 0.7743589878082275,
"avg_line_length": 11.133333206176758,
"blob_id": "3f80365a94fb5c305e926bed47340040488499e9",
"content_id": "7208bd12853487f53a0ffaee8ba6e6839f43342e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 15,
"path": "/Readme.md",
"repo_name": "sarinmadhur/git-demo",
"src_encoding": "UTF-8",
"text": "#Demo Git Repository \r\n\r\nThis is my first commit.\r\n\r\n## Heading example \r\nioioioio ioioioioioi\r\n\r\nklklkllklkllka\r\nkkkaklkalkla\r\nklaklaklkla\r\n\r\nklkllklllkllkkl\r\n\r\nkjjkkjkjkjkkjkjkjkjk\r\nklklklklll"
},
{
"alpha_fraction": 0.8275862336158752,
"alphanum_fraction": 0.8275862336158752,
"avg_line_length": 9.800000190734863,
"blob_id": "d272c9ddbe155c038c33a4a4c540d176812e8444",
"content_id": "23bcee1dc3fd8bc4f017b95e674ac66348d546ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 5,
"path": "/web/index.py",
"repo_name": "sarinmadhur/git-demo",
"src_encoding": "UTF-8",
"text": "jakjakakakjka\r\n\r\njkjkjkjkjkjkkjk\r\n\r\nlaklalallsljlsjls new "
}
] | 2 |
mkousathanas/myrepo | https://github.com/mkousathanas/myrepo | aa975f6050240e1530ba22bc46e202399e231d2c | 63868918a83e1d0fc4979860d1cb7d8d3f3d9725 | d7070c47cd9ce93f24016c8f9423653c50e5ee64 | refs/heads/master | 2021-05-11T13:24:33.880765 | 2018-01-16T13:27:01 | 2018-01-16T13:27:01 | 117,679,515 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5998030304908752,
"alphanum_fraction": 0.6027576923370361,
"avg_line_length": 31.414894104003906,
"blob_id": "2de6ab54bf872815685e2ce3926bb2df9613e04c",
"content_id": "5238975b94f55246a8b19cdca17a889dc0aaf5cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3433,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 94,
"path": "/Flask-System/app.py",
"repo_name": "mkousathanas/myrepo",
"src_encoding": "UTF-8",
"text": "\"\"\"Καλούνται οι παρακάτω λειτουργίες για την εκτέλεση της εφαρμογής\"\"\"\nfrom flask import Flask, render_template, request, redirect, url_for, g, session, flash\nimport sqlite3 as sql\nimport sqlite3\nimport hashlib\n\napp = Flask(__name__)\n\"\"\"Μυστικό κλειδί για session\"\"\"\napp.secret_key = 'sic@*#^shfido8d5sad8#^&'\n\"\"\"Συνάρτηση για το Password hashing\"\"\"\ndef check_password(hashed_password, user_password):\n return hashed_password == hashlib.md5(user_password.encode()).hexdigest()\n\"\"\"Παρακάτω γίνεται η επιβεβαίωση των πεδίων username & Password\"\"\"\ndef validate(username, password):\n con = sqlite3.connect('static/user.db')\n completion = False\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT * FROM Users\")\n rows = cur.fetchall()\n for row in rows:\n dbUser = row[0]\n dbPass = row[1]\n if dbUser==username:\n completion=check_password(dbPass, password)\n return completion\n\n\"\"\"Συνάρτηση για την φόρμα login\"\"\"\[email protected]('/', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n completion = validate(username, password)\n if completion ==False:\n error = 'Eσφαλμένα στοιχεία. Παρακαλώ προσπαθήστε ξανά.'\n else:\n return redirect(url_for('home'))\n return render_template('login.html', error=error)\n\"\"\"Συνάρτηση για αποσύνδεση απο την εφαρμογή\"\"\"\[email protected]('/logout')\ndef logout():\n session.pop('logged_in', None)\n flash('You were logged out.')\n return redirect(url_for('login'))\n\t\n\[email protected]('/home.html')\ndef home():\n return render_template('home.html')\n\[email protected]('/enternew')\ndef new_student():\n return render_template('register.html')\n\"\"\"Η παρακάτω συνάρτηση είναι υπέυθυνη για την καταχώριση των εγγραφών στη βάση δεδομένων\"\"\"\[email protected]('/addrec',methods = ['POST', 'GET'])\ndef addrec():\n if request.method == 'POST':\n try:\n nm = request.form['nm']\n addr = request.form['add']\n city = request.form['city']\n pin = request.form['pin']\n \n with sql.connect(\"database.db\") as con:\n cur = con.cursor()\n \n cur.execute(\"INSERT INTO users (name,addr,city,pin)VALUES(?,?,?,?)\",(nm,addr,city,pin))\n \n con.commit()\n msg = \"Η εγγραφή προστέθηκε με επιτυχία!\"\n except:\n con.rollback()\n msg = \"Σφάλμα με την καταχώρηση εγγραφής\"\n \n finally:\n return render_template(\"result.html\",msg = msg)\n con.close()\n\"\"\"Για την προβολή εγγραφών απο τη βάση δεδομένων\"\"\"\[email protected]('/list')\ndef list():\n con = sql.connect(\"database.db\")\n con.row_factory = sql.Row\n \n cur = con.cursor()\n cur.execute(\"select * from users\")\n \n rows = cur.fetchall();\n return render_template(\"list.html\",rows = rows)\n \n\nif __name__ == '__main__':\n app.run(debug = True)"
},
{
"alpha_fraction": 0.7517006993293762,
"alphanum_fraction": 0.7585033774375916,
"avg_line_length": 35.875,
"blob_id": "9048371cceb9d7aeaf399e675bd414c791e70902",
"content_id": "f242445b4b38e37d7443b6e6ed987fb3f97cff46",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 8,
"path": "/Flask-System/create.py",
"repo_name": "mkousathanas/myrepo",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\"\"\"Για την έναρξη της σύνδεσης με τη βάση δεδομένων\"\"\"\nconn = sqlite3.connect('database.db')\nprint \"Πετυχημένη σύνδεση με τη βάση δεδομένων\";\n\nconn.execute('CREATE TABLE users (name TEXT, addr TEXT, city TEXT, pin TEXT)')\nprint \"Ο πίνακας δημιουργήθηκε με επιτυχία\";\nconn.close()"
},
{
"alpha_fraction": 0.6964285969734192,
"alphanum_fraction": 0.7244898080825806,
"avg_line_length": 34.727272033691406,
"blob_id": "dbd2b6793a08cd202251f1f5f649a321d31da36a",
"content_id": "6511cd1c5356dd9f3d46e86e6994ff1b6522ef58",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 11,
"path": "/Flask-System/insert.py",
"repo_name": "mkousathanas/myrepo",
"src_encoding": "UTF-8",
"text": "import sqlite3\n#conn = sqlite3.connect('database.db')\n\"\"\"Για την έναρξη της σύνδεσης με τη βάση δεδομένων\"\"\"\nwith sqlite3.connect(\"database.db\") as con:cur = con.cursor()\nprint (\"Πετυχημένη σύνδεση με τη βάση δεδομένων\");\ncur.execute(\"INSERT INTO users (name,addr,city,pin)VALUES ('abc1','aa 20','aaa1','1234a')\")\n\ncon.commit()\nmsg = \"Η εγγραφή προστέθηκε με επιτυχία\"\nprint (msg)\ncon.close()"
},
{
"alpha_fraction": 0.7457627058029175,
"alphanum_fraction": 0.7627118825912476,
"avg_line_length": 19,
"blob_id": "95e40fb2d0d8e547e11d9a9b09aec64ff58818e4",
"content_id": "67d4184b15304737b0faa64c52329c57f34907d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 59,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 3,
"path": "/Flask-System/readme.txt",
"repo_name": "mkousathanas/myrepo",
"src_encoding": "UTF-8",
"text": "Login Credentials:\nUsername--> mike1\npassword--> python"
},
{
"alpha_fraction": 0.7162162065505981,
"alphanum_fraction": 0.7702702879905701,
"avg_line_length": 16.75,
"blob_id": "373d6524441f79dcc361eb9c3891d756f7573cee",
"content_id": "7b4f8a20dd1599172c014dc3e97fbaff19551b3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 4,
"path": "/README.md",
"repo_name": "mkousathanas/myrepo",
"src_encoding": "UTF-8",
"text": "# myrepo\nΑριθμός Μητρώου: 5375\nΤμήμα: Computing\nΑκαδημαϊκό Έτος: Τρίτο\n \n"
}
] | 5 |
aryan-jadon/Kaggle_distracted_drivers | https://github.com/aryan-jadon/Kaggle_distracted_drivers | e5ffd449eaaa4ea30fcf13f7db9abbc60904f05b | 0d182e3d5bc7401686dcbea8fa6da8eb395dc464 | 9b4d4e6f19fcfad6d408d00ea10f36bd18a1119a | refs/heads/master | 2018-12-22T13:28:08.070511 | 2016-09-25T00:20:05 | 2016-09-25T00:20:05 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7020833492279053,
"alphanum_fraction": 0.7113425731658936,
"avg_line_length": 33.82258224487305,
"blob_id": "4fbfec2801073b82945ee456f7442112f70521a4",
"content_id": "1279df7c1cc974d1eb261360054345fa2e190f8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4320,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 124,
"path": "/train_model_vgg.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf \nimport layers as ly\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nfrom layers import Input \n\nflags=tf.app.flags\nFLAGS=flags.FLAGS\n\nclass model:\n\tdef __init__(self,fold,layers):\n\t\tself.fold=fold\n\t\tself.sess=tf.get_default_session()\n\t\t\n\t\tself.x=tf.placeholder(tf.float32,shape=[None,FLAGS.dim,FLAGS.dim,3])\n\t\tself.y_=tf.placeholder(tf.int64,shape=[None])\n\t\t#self.y=inception_bn.inception_BN(self.x,num_classes=FLAGS.num_classes)\n\n\n\t\tself.is_training=tf.placeholder(tf.bool,shape=[])\n\t\tself.global_step=tf.Variable(0)\n\t\tself.global_step_op=self.global_step.assign_add(1)\n\t\tself.checkpoint_path=os.path.join(FLAGS.checkpoint_path,\"model_{}\".format(fold))\n\t\tif not os.path.exists(self.checkpoint_path):\n\t\t\tos.makedirs(self.checkpoint_path)\n\n\t\tself.layers=[Input(self.x)]+layers\n\t\tprev=None\n\t\tfor i,layer in enumerate(self.layers):\n\t\t\tprev=layer.apply(prev,i,self)\n\t\t\tprint prev\n\t\t\t# if \"activation\" not in prev.name:\n\t\t\t# \tprint prev\n\t\t\t# \tprint prev.get_shape()\n\t\tself.y=prev \n\t\tif FLAGS.pre_trained:\n\t\t\tweights=np.load(FLAGS.weight_file)\n\t\t\tkeys=sorted(weights.keys())\n\t\t\tfor i,k in enumerate(keys):\n\t\t\t\tif \"fc8\" not in k:\n\t\t\t\t\tself.sess.run(ly.parameters[i].assign(weights[k]))\n\t\t\tprint \"loaded pre trained weights\"\n\n\t\twith tf.name_scope(\"loss\"):\n\t\t\tloss_op=tf.nn.sparse_softmax_cross_entropy_with_logits(self.y,self.y_)\n\t\t\tself.loss=tf.reduce_mean(loss_op,name=\"loss\")\n\t\t\ttf.scalar_summary(\"loss\",self.loss)\n\t\t\tloss_ema=tf.train.ExponentialMovingAverage(decay=0.9,num_updates=self.global_step)\n\t\t\tloss_ema_op=loss_ema.apply([self.loss])\n\t\t\ttf.scalar_summary(\"loss_ema\",loss_ema.average(self.loss))\n\n\t\twith tf.name_scope(\"Evaluation\"):\n\t\t\tcorrect_prediction=tf.nn.in_top_k(self.y,self.y_,1)\n\t\t\tself.accuracy_op=tf.reduce_mean(tf.cast(correct_prediction,\"float\"))\n\t\t\ttf.scalar_summary(\"accuracy\",self.accuracy_op)\n\n\t\t\taccuracy_ema=tf.train.ExponentialMovingAverage(decay=0.9,num_updates=self.global_step)\n\t\t\taccuracy_ema_op=accuracy_ema.apply([self.accuracy_op])\n\t\t\ttf.scalar_summary(\"accuracy_ema\",accuracy_ema.average(self.accuracy_op))\n\n\n\t\twith tf.control_dependencies([self.global_step_op,accuracy_ema_op,loss_ema_op]):\n\t\t\tself.train_op=tf.train.AdamOptimizer(FLAGS.lr).minimize(self.loss,name=\"train\")\n\n\t\tself.summaries_op=tf.merge_all_summaries()\n\t\t\n\t\tself.sess.run(tf.initialize_all_variables())\n\t\tself.saver=tf.train.Saver(max_to_keep=1)\n\t\tif FLAGS.fine_tune_from_model is not None:\n\t\t\tvariables_to_restore=tf.get_collection(layers.VARIABLES_TO_RESTORE)\n\t\t\trestorer=tf.train.Saver(variables_to_restore)\n\n\t\t\trestorer.restore(self.sess,FLAGS.fine_tune_from_model)\n\n\t\tsummary_path=os.path.join(FLAGS.summary_path,\"summaries_{}\".format(fold))\n\t\tif not os.path.exists(summary_path):\n\t\t\tos.makedirs(summary_path)\n\n\t\tself.summary_writer=tf.train.SummaryWriter(summary_path,self.sess.graph_def)\n\t\tmodel_path=os.path.join(FLAGS.model_path,\"model_{}\".format(fold))\n\t\tif not os.path.exists(model_path):\n\t\t\tos.makedirs(model_path)\n\n\t\ttf.train.write_graph(self.sess.graph_def,model_path,\"model.pb\",as_text=False)\n\n\t\tself.ckpt =tf.train.get_checkpoint_state(self.checkpoint_path)\n\t\tif self.ckpt and self.ckpt.model_checkpoint_path:\n\t\t\tprint \"ckpt 
available\"\n\t\t\tself.saver.restore(self.sess,self.ckpt.model_checkpoint_path)\n\t\telse:\n\t\t\tprint \"ckpt not found\"\n\n\tdef train(self,epoch,x_train,y_train,is_training=True):\n\t\tlength=x_train.shape[0]\n\t\t\n\t\ttotal_loss=[]\n\t\ttotal_acc=[]\n\t\tsummary=None\n\t\t\n\t\tfor start,end in tqdm(zip(range(0,length,FLAGS.batch_size),range(FLAGS.batch_size,length,FLAGS.batch_size))):\n\t\t\t#print \"running {}-{}\".format(start,end)\n\t\t\tbatch_x=x_train[start:end]\n\t\t\tbatch_y=y_train[start:end]\n\t\t\tbatch_x=batch_x.astype(np.float32)\n\t\t\tmean_pixel = [103.939, 116.779, 123.68]\n\t\t\tfor i in range(3):\n\t\t\t\tbatch_x[:,:,:,i]=batch_x[:,:,:,i]-mean_pixel[i]\n\t\t\tbatch_x=batch_x/255.0\n\n\t\t\t_,loss,train_acc,summary=self.sess.run([self.train_op,self.loss,self.accuracy_op,self.summaries_op],\n\t\t\t\t\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\t\t\t\tself.x:batch_x,\n\t\t\t\t\t\t\t\t\t\t\tself.y_:batch_y,\n\t\t\t\t\t\t\t\t\t\t\tself.is_training:is_training\n\t\t\t\t\t\t\t\t\t\t\t})\n\n\t\t\ttotal_loss.append(loss)\n\t\t\ttotal_acc.append(train_acc)\n\t\tif is_training:\n\n\t\t\tself.saver.save(self.sess,self.checkpoint_path+\"/model.ckpt\",global_step=epoch)\n\t\t\tself.summary_writer.add_summary(summary,global_step=epoch)\n\t\treturn np.mean(total_loss),np.mean(total_acc)\n\t\n"
},
{
"alpha_fraction": 0.6719390749931335,
"alphanum_fraction": 0.6947861909866333,
"avg_line_length": 23.724637985229492,
"blob_id": "a05becc012942803571a3de54aac2af1a630ebca",
"content_id": "abe9786ddc07a78821dfe6a3e9477b92b71dcb5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3414,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 138,
"path": "/utils.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \nimport os\nimport time\nimport glob\nimport pdb\nimport cPickle\nimport PIL\nimport joblib\nimport h5py\nfrom tqdm import tqdm\nfrom PIL import Image\nflags=tf.app.flags \nFLAGS=flags.FLAGS \n\nflags.DEFINE_integer(\"dim\",224,\"dim of image after resizing\")\nflags.DEFINE_bool(\"subset\",False,\"subset\")\nflags.DEFINE_string(\"train_path\",\"train.h5\",\"path to save train data\")\nflags.DEFINE_string(\"test_path\",\"test.h5\",\"path to save test data\")\n\nTRAIN_PATH=glob.glob(\"imgs/train/*/*\")\nTEST_PATH=sorted(glob.glob(\"test/*\"))\nprint len(TEST_PATH)\n\ndef load_data_group(test=False):\n\th5=h5py.File(FLAGS.train_path,\"r\")\n\treturn h5\n\t\ndef load_and_save_data(test=None):\n\tfile_path=TEST_PATH if test else TRAIN_PATH\n\tnp.random.shuffle(file_path)\n\tx_train=[]\n\tif not test:\n\n\t\ty_train=[]\n\n\n\tdef resize(img):\n\t\treturn img.resize((FLAGS.dim,FLAGS.dim),PIL.Image.ANTIALIAS)\n\tif FLAGS.subset:\n\t\tfile_path=file_path[:100]\n\tfor img_file in tqdm(file_path):\n\t\t\n\t\timg=Image.open(img_file)\n\t\timg=resize(img)\n\t\t\n\t\tx_train.append(np.array(img))\n\t\tif not test:\n\t\t\tfile_name=img_file.split(\"/\")[-2]\n\t\t\t\n\t\t\tfile_name=int(file_name.replace(\"c\",\"\"))\n\t\t\t\n\t\t\ty_train.append(file_name)\n\t\t\n\tif not test:\n\t\treturn np.array(x_train),np.array(y_train).reshape(len(y_train),1)\n\telse:\n\t\treturn np.array(x_train),None\n\ndef cache_data(data,data_path):\n\tprint \"saving data into {}\".format(data_path)\n\th5f=h5py.File(data_path,\"w\")\n\t#print data[0].shape\n\th5f.create_dataset(\"x_train\",data=data[0])\n\th5f.create_dataset(\"y_train\",data=data[1])\n\th5f.close()\n\ndef load_data(test=False):\n\tif not test:\n\n\t\th5f=h5py.File(FLAGS.train_path,\"r\")\n\t\tx_train=h5f[\"x_train\"]\n\n\t\ty_train=h5f[\"y_train\"]\n\t\t\n\t\treturn x_train,y_train\n\telse:\n\t\tx_test=load_and_save_test()\n\t\treturn x_test\n\ndef load_and_save_test():\n\tfile_path=TEST_PATH\n\tdef resize(img):\n\t\treturn img.resize((FLAGS.dim,FLAGS.dim),PIL.Image.ANTIALIAS)\n\tif FLAGS.subset:\n\t\tfile_path=file_path[:100]\n\t#pdb.set_trace()\n\t#print file_path[0]\n\tx_train=[]\n\tfor img_file in tqdm(file_path):\n\t\t\n\t\timg=Image.open(img_file)\n\t\timg=resize(img)\n\t\t\n\t\tx_train.append(np.array(img))\n\tx_train=np.array(x_train)\n\treturn x_train\n\t\t\nclass load_random_cropped_fliped_data:\n\tdef __init__(self):\n\n\t\tfile_path=TEST_PATH\n\t\tif FLAGS.subset:\n\t\t\tfile_path=file_path[:100]\n\t\tself.file_path=file_path\n\t\tself.start=0\n\t\tself.stop=len(self.file_path)\n\t\tself.index=0\n\n\tdef __next__(self):\n\t\treturn self.next()\n\tdef length(self):\n\t\treturn len(self.file_path)\n\tdef next(self):\n\t\tif 
self.index>=self.length():\n\t\t\tself.index=0\n\t\timg_file=self.file_path[self.index]\n\t\tself.index+=1\n\t\tx_train=[]\n\t\timg=Image.open(img_file)\n\t\timg=img.resize((256,256),PIL.Image.ANTIALIAS)\n\t\timg=np.array(img)\n\t\t\n\t\tc=(img.shape[0]/2,img.shape[1]/2)\n\t\tx,y=c[0]-(FLAGS.dim/2),c[1]-(FLAGS.dim/2)\n\t\timg1=img[:FLAGS.dim,:FLAGS.dim,:]\n\t\timg2=img[-FLAGS.dim:,:FLAGS.dim,:]\n\t\timg3=img[-FLAGS.dim:,-FLAGS.dim:,:]\n\t\timg4=img[:FLAGS.dim,-FLAGS.dim:,:]\n\t\timg5=img[x:x+FLAGS.dim,y:y+FLAGS.dim,:]\n\t\timg6=np.array(Image.fromarray(img1).transpose(Image.FLIP_LEFT_RIGHT))\n\t\timg7=np.array(Image.fromarray(img2).transpose(Image.FLIP_LEFT_RIGHT))\n\t\timg8=np.array(Image.fromarray(img3).transpose(Image.FLIP_LEFT_RIGHT))\n\t\timg9=np.array(Image.fromarray(img4).transpose(Image.FLIP_LEFT_RIGHT))\n\t\timg10=np.array(Image.fromarray(img5).transpose(Image.FLIP_LEFT_RIGHT))\n\t\tx_train.extend([img1,img2,img3,img4,img5,img6,img7,img8,img9,img10])\n\n\t\treturn np.array(x_train)\n\n\n"
},
{
"alpha_fraction": 0.6222222447395325,
"alphanum_fraction": 0.6626262664794922,
"avg_line_length": 25.105262756347656,
"blob_id": "8bc4ec6f7dde6863b7b430c351b52b5bf244216a",
"content_id": "4f38db7fb6641a0f62c0e4ee8246792f9c4a93eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 19,
"path": "/create_sub.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import glob\nimport numpy as np\nimport utils\nimport pandas as pd\nfiles=glob.glob(\"*.csv\")\narr=np.zeros((79726,10))\nprint files\nfor i,f in enumerate(files):\n\tdf=pd.read_csv(f)\n\tdf=df.ix[:,1:]\n\tarr+=df.as_matrix()\n#print arr\ntest_list=sorted([st.split(\"/\")[-1] for st in utils.TEST_PATH])\narr=arr/len(files)\nprint arr\ndf=pd.DataFrame(arr,columns=[['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']])\n\ndf.insert(0,\"img\",pd.Series(test_list,index=df.index))\ndf.to_csv(\"submissionFinal.csv\")"
},
{
"alpha_fraction": 0.5553410649299622,
"alphanum_fraction": 0.5881595611572266,
"avg_line_length": 37.85416793823242,
"blob_id": "9e98a52b17b3a87b19018281a3060ea8c4b5a4f2",
"content_id": "47a2c238aac3edac876311a3fc99924f735d68f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9324,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 240,
"path": "/layers.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import math\nimport tensorflow as tf\n\nfrom tensorflow.python import control_flow_ops\nparameters=[]\nVARIABLES_TO_RESTORE = '_variables_to_restore_'\ndef weight_bias(shape, stddev, bias_init=0.1):\n W = tf.Variable(tf.truncated_normal(shape, stddev=stddev), name='weight')\n b = tf.Variable(tf.constant(bias_init, shape=shape[-1:]), name='bias')\n return W, b\n\n\ndef _two_element_tuple(int_or_list):\n if isinstance(int_or_list,(list,tuple)):\n return int(int_or_list[0]),int(int_or_list[1])\n if isinstance(int_or_list,int):\n return int(int_or_list),int(int_or_list)\n if isinstance(int_or_list,tf.Tensorshape):\n return int_or_list[0],int_or_list[1]\n raise ValueError(\"Must be int or list ot tensor type\")\n\nclass Dropout:\n def __init__(self, keep_prob, name='dropout'):\n self.keep_prob = keep_prob\n self.name = name\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n keep_prob = tf.select(model.is_training, self.keep_prob, 1.0)\n self.h = tf.nn.dropout(x, keep_prob)\n\n return self.h\n\nclass Dense:\n def __init__(self, fan_out, name='dense'):\n self.fan_out = fan_out\n self.name = name\n\n def apply(self, x, index, model):\n global parameters\n with tf.name_scope(self.name):\n input_shape = x.get_shape()\n fan_in = input_shape[-1].value\n stddev = math.sqrt(1.0 / fan_in) # he init\n\n shape = [fan_in, self.fan_out]\n W, b = weight_bias(shape, stddev=stddev, bias_init=0.0)\n parameters+=[W,b]\n self.h = tf.matmul(x, W) + b\n \n return self.h\n\nclass Activation:\n def __init__(self, activation, name='activation'):\n self.name = name\n self.activation = activation\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n self.h = self.activation(x)\n return self.h\n\nclass MaxPool:\n def __init__(self, ksize, strides, padding='SAME', name='max_pool'):\n self.ksize = ksize\n self.strides = strides\n self.padding = padding\n self.name = name\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n if isinstance(self.padding,(int,list)):\n pad_h,pad_w=_two_element_tuple(self.padding)\n x=tf.pad(x,[[0,0],[pad_h,pad_h],[pad_w,pad_w],[0,0]],\"CONSTANT\")\n self.padding=\"VALID\"\n\n self.h = tf.nn.max_pool(x, self.ksize, self.strides, self.padding)\n return self.h\n\nclass GlobalAvgPool:\n def __init__(self, name='global_avg_pool'):\n self.name = name\n\n def apply(self, x, index, model):\n input_shape = x.get_shape().as_list()\n k_w, k_h = input_shape[1], input_shape[2]\n with tf.name_scope(self.name):\n self.h = tf.nn.avg_pool(x, [1, k_w, k_h, 1], [1, 1, 1, 1], 'VALID')\n return self.h\n\nclass AvgPool:\n def __init__(self, ksize, strides, padding='VALID', name='avg_pool'):\n self.ksize = ksize\n self.strides = strides\n self.padding = padding\n self.name = name\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n if isinstance(self.padding,(int,list)):\n pad_h,pad_w=_two_element_tuple(self.padding)\n x=tf.pad(x,[[0,0],[pad_h,pad_h],[pad_w,pad_w],[0,0]],\"CONSTANT\")\n self.padding=\"VALID\"\n self.h = tf.nn.avg_pool(x, self.ksize, self.strides, self.padding)\n return self.h\n\nclass Input:\n def __init__(self, input_placeholder):\n self.h = input_placeholder\n\n def apply(self, x, index, model):\n return self.h\n\n\nclass Conv2D:\n def __init__(self, filter_shape, output_channels, strides, padding='SAME', name='conv2d'):\n self.filter_shape = filter_shape\n self.output_channels = output_channels\n self.strides = strides\n self.padding = padding\n self.name = name\n\n def apply(self, x, index, model):\n global 
parameters\n with tf.name_scope(self.name):\n if isinstance(self.padding,(int,list)):\n pad_h,pad_w=_two_element_tuple(self.padding)\n x=tf.pad(x,[[0,0],[pad_h,pad_h],[pad_w,pad_w],[0,0]],\"CONSTANT\")\n self.padding=\"VALID\"\n input_shape = x.get_shape()\n input_channels = input_shape[-1].value\n\n k_w, k_h = self.filter_shape\n stddev = math.sqrt(2.0 / ((k_w * k_h) * input_channels)) # he init\n\n shape = self.filter_shape + [input_channels, self.output_channels]\n W, b = weight_bias(shape, stddev=stddev, bias_init=0.0)\n parameters+=[W,b]\n self.h = tf.nn.conv2d(x, W, self.strides, self.padding) + b\n \n return self.h\n\nclass Flatten:\n def __init__(self, name='flatten'):\n self.name = name\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n shape = x.get_shape()\n dim = shape[1] * shape[2] * shape[3]\n self.h = tf.reshape(x, [-1, dim.value])\n return self.h\n\nclass ConvFactory:\n def __init__(self,kernel_size,fan_out,strides=[1,1,1,1],padding=\"SAME\",act_type=tf.nn.relu,name=\"confactory\"):\n \n self.conv=Conv2D(kernel_size,fan_out,strides,padding=padding,name=\"conv_%s\"%name)\n self.batchnorm=Conv2DBatchNorm(fan_out)\n self.Activation=Activation(act_type)\n self.act_type=act_type\n def apply(self,x,index,model):\n prev=self.conv.apply(x,index,model)\n prev=self.batchnorm.apply(prev,index,model)\n prev=self.Activation.apply(prev,index,model)\n return prev\n\nclass InceptionFactoryA:\n def __init__(self,num_1x1,num_3x3red,num_3x3,num_d3x3red,num_d3x3,pool,proj,name):\n self.c1x1=ConvFactory([1,1],num_1x1,[1,1,1,1],name=\"%s_1x1\"%name)\n self.c3x3r=ConvFactory([1,1],num_3x3red,name=\"%s_3x3r\"%name)\n self.c3x3=ConvFactory([3,3],num_3x3,padding=[1,1],name=\"%s_3x3\"%name)\n self.cd3x3r=ConvFactory([1,1],num_d3x3red,name=\"%s_d_3x3\"%name)\n self.cd3x3=ConvFactory([3,3],num_d3x3,padding=[1,1],name=\"%s_d_3x3_0\"%name)\n self.cd3x3_1=ConvFactory([3,3],num_d3x3,padding=[1,1],name=\"%s_d_3x3_1\"%name)\n if pool==\"max\":\n\n self.pool=MaxPool(ksize=[1,3,3,1],strides=[1,1,1,1],padding=[1,1],name=\"%s_pool\"%name)\n elif pool==\"avg\":\n self.pool=AvgPool(ksize=[1,3,3,1],strides=[1,1,1,1],padding=[1,1],name=\"%s_pool\"%name)\n self.cproj=ConvFactory([1,1],proj,name=\"%s_proj\"%name)\n def apply(self,x,i,model):\n prev_1=self.c1x1.apply(x,i,model)\n prev_2=self.c3x3r.apply(x,i,model)\n prev_3=self.c3x3.apply(prev_2,i,model)\n prev_4=self.cd3x3r.apply(x,i,model)\n prev_5=self.cd3x3.apply(prev_4,i,model)\n prev_6=self.cd3x3_1.apply(prev_5,i,model)\n prev_7=self.pool.apply(x,i,model)\n prev_8=self.cproj.apply(prev_7,i,model)\n concat=tf.concat(3,[prev_1,prev_3,prev_6,prev_8])\n return concat\n\nclass InceptionFactoryB:\n def __init__(self,num_3x3red,num_3x3,num_d3x3red,num_d3x3,name):\n self.c3x3r=ConvFactory([1,1],num_3x3red,name=\"%s_3x3\"%name)\n self.c3x3=ConvFactory([3,3],num_3x3,padding=[1,1],strides=[1,2,2,1],name=\"%s_3x3_0\"%name)\n self.cd3x3r=ConvFactory([1,1],num_d3x3red,name=\"%s_d_3x3\"%name)\n self.cd3x3=ConvFactory([3,3],num_d3x3,padding=[1,1],strides=[1,1,1,1],name=\"%s_d_3x3_1\"%name)\n self.cd3x3_1=ConvFactory([3,3],num_d3x3,padding=[1,1],strides=[1,2,2,1],name=\"%s_d_3x3_2\"%name)\n self.pool=MaxPool(ksize=[1,3,3,1],strides=[1,2,2,1],padding=[1,1],name=\"pool_%s\"%name)\n\n def apply(self,x,i,model):\n c3x3r=self.c3x3r.apply(x,i,model)\n c3x3=self.c3x3.apply(c3x3r,i,model)\n cd3x3r=self.cd3x3r.apply(x,i,model)\n cd3x3=self.cd3x3.apply(cd3x3r,i,model)\n cd3x3=self.cd3x3_1.apply(cd3x3,i,model)\n #prev_6=self.cd3x3_1.apply(prev_5,i,model)\n 
prev_7=self.pool.apply(x,i,model)\n concat=tf.concat(3,[c3x3,cd3x3,prev_7])\n return concat\n\n\n\nclass Conv2DBatchNorm:\n \n def __init__(self, fan_out, affine=True, name='batch_norm'):\n self.fan_out = fan_out\n self.affine = affine\n self.name = name\n\n def apply(self, x, index, model):\n with tf.name_scope(self.name):\n beta = tf.Variable(tf.constant(0.0, shape=[self.fan_out]), name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[self.fan_out]), name='gamma', trainable=self.affine)\n\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n\n ema = tf.train.ExponentialMovingAverage(decay=0.9)\n ema_apply_op = ema.apply([batch_mean, batch_var])\n ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)\n\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = control_flow_ops.cond(model.is_training, mean_var_with_update, lambda: (ema_mean, ema_var))\n\n self.h = tf.nn.batch_norm_with_global_normalization(x, mean, var, beta, gamma, 1e-3, self.affine)\n return self.h"
},
{
"alpha_fraction": 0.6407679915428162,
"alphanum_fraction": 0.6741431951522827,
"avg_line_length": 29.45355224609375,
"blob_id": "da462a91511c84c8f17d24a605ca147dd6782dd5",
"content_id": "7f89ae1177c6e5156bf97ecf2838fdc2fd87dec0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5573,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 183,
"path": "/test.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \nimport utils\nimport pandas as pd \nimport pdb\nimport re\nimport math\nfrom collections import defaultdict,Counter\nimport glob\nfrom layers import Input\nfrom architectures import Inception_BN\nflags=tf.app.flags\nFLAGS=flags.FLAGS \n\nflags.DEFINE_string(\"checkpoint_dir\",None,\"checkpoints_path\")\nflags.DEFINE_string(\"eval_dir\",\"eval_summaries\",\"summary for eval\")\nflags.DEFINE_integer(\"batch_size\",64,\"batch size\")\nflags.DEFINE_integer(\"num_classes\",10,\"number of classes \")\nflags.DEFINE_string(\"model\",\"vgg\",\"summary for eval\")\n\n\nclass Evaluation:\n\tdef __init__(self,layers,x_test):\n\t\tself.x_test=x_test\n\t\twith tf.Graph().as_default(),tf.Session() as sess:\n\n\t\t\tself.x=tf.placeholder(tf.float32,shape=[None,224,224,3])\n\t\t\tself.is_training=tf.placeholder(tf.bool,shape=[])\n\t\t\tprev=None\n\n\t\t\tlayers=[Input(self.x)]+layers \n\t\t\tfor i,layer in enumerate(layers):\n\t\t\t\tprev=layer.apply(prev,i,self)\n\n\t\t\tself.y=prev \n\t\t\t\n\t\t\t#variable_averages=tf.train.ExponentialMovingAverage(0.999)\n\t\t\tself.saver=tf.train.Saver(max_to_keep=1)\n\t\t\tself.sess=sess\n\tdef evaluate(self):\n\n\t\t\n\t\tckpt=tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\tself.saver.restore(self.sess,ckpt.model_checkpoint_path)\n\n\t\t\tglobal_step=ckpt.model_checkpoint_path.split(\"/\")[-1].split(\"-\")[-1]\n\t\t\tprint \"ckpt available\"\n\t\telse:\n\t\t\tprint \"no ckpt found\"\n\t\t\treturn\n\t\t#pdb.set_trace()\n\t\tlength=x_test.length()\n\t\tpredictions=np.zeros((length,FLAGS.num_classes),dtype=np.float32)\n\t\tfor start in range(0,length):\n\t\t\tprint FLAGS.checkpoint_dir\n\t\t\t#print \"{},{}/{}\".format(FLAGS.checkpoint_dir,start,end)\n\t\t\tx_test_batch=self.x_test.next()\n\t\t\tx_test_batch=x_test_batch.astype(np.float32)\n\t\t\t# mean_pixel = [103.939, 116.779, 123.68]\n\t\t\t# for c in range(3):\n\t\t\t# \tx_test_batch[:,:,:,c]-=mean_pixel[c]\n\t\t\tx_test_batch=x_test_batch/255.0\n\n\t\t\tpredictions_temp=self.sess.run(self.y,feed_dict={\n\t\t\t\tself.x:x_test_batch,\n\t\t\t\tself.is_training:False\n\t\t\t\t})\n\t\t\tpredictions[start,...]=np.mean(predictions_temp,axis=0)\n\t\t#submission(predictions)\n\t\treturn predictions\n\tdef submission(self,checkpoint):\n\t\tFLAGS.checkpoint_dir=checkpoint\n\t\t#TEST_PATH=sorted(glob.glob(\"/media/ashwin/Radhika1TB/Dataset/Statefarm/imgs/test/*\"))\n\n\t\tFLAGS.fold=FLAGS.checkpoint_dir[-1]\n\t\tfilen_name=\"submision_%s_%s.csv\"%(FLAGS.model,FLAGS.fold)\n\t\t#fw=open(\"submision_%s_%s.csv\"%(FLAGS.model,FLAGS.fold),\"w\")\n\n\t\t#fw.write('img,c0,c1,c2,c3,c4,c5,c6,c7,c8,c9\\n')\n\t\tcnt=0\n\t\tpredictions=self.evaluate()\n\t\t#print predictions.shape\n\t\tfor i in range(predictions.shape[0]):\n\t\t\tpredictions[i,:]=[\"%.9f\"%p for p in predictions[i,:]]\n\t\t\t\n\t\ttest_list=sorted([st.split(\"/\")[-1] for st in utils.TEST_PATH])\n\t\t#test_list=test_list[:100]\n\t\t\n\t\tdf=pd.DataFrame(predictions,columns=[['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 
'c9']])\n\t\tdf.insert(0,\"img\",pd.Series(test_list,index=df.index))\n\t\tdf.to_csv(\"submision_%s_%s.csv\"%(FLAGS.model,FLAGS.fold),index=False)\n\t\t\n\n\n\n\n\n\nx_test=utils.load_random_cropped_fliped_data()\n\nlayers=Inception_BN.Inception_BN(FLAGS.num_classes)\nEval=Evaluation(layers,x_test)\n#Eval.submission(\"checkpoints_inception/model_1\")\nEval.submission(\"checkpoints_inception/model_2\")\nEval.submission(\"checkpoints_inception/model_3\")\nEval.submission(\"checkpoints_inception/model_4\")\nEval.submission(\"checkpoints_inception/model_5\")\n\n\n# Eval.submission(\"/media/ashwin/Radhika1TB/checkpoints_vgg/model_2\")\n# Eval.submission(\"/media/ashwin/Radhika1TB/checkpoints_vgg/model_3\")\n# Eval.submission(\"/media/ashwin/Radhika1TB/checkpoints_vgg/model_4\")\n# Eval.submission(\"/media/ashwin/Radhika1TB/checkpoints_vgg/model_5\")\n\n\n\n# import glob\n# import numpy as np\n# import utils\n# import heapq\n# import pandas as pd\n# files=glob.glob(\"*.csv\")\n\n# def create_geom_submission(files):\n\n# \tarr=np.ones((79726,10))\n# \t#print files\n# \tfor i,f in enumerate(files):\n# \t\tdf=pd.read_csv(f)\n# \t\tdf=df.ix[:,1:]\n# \t\tarr*=df.as_matrix()\n# \t#print arr\n# \ttest_list=sorted([st.split(\"/\")[-1] for st in utils.TEST_PATH])\n# \tarr=np.power(arr,1.0/len(files))\n\n# \tdf=pd.DataFrame(arr,columns=[['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']])\n\n# \tdf.insert(0,\"img\",pd.Series(test_list,index=df.index))\n# \tdf.to_csv(\"submissionFinal.csv\")\n\n# def weighted_avg(files,weights=\"uniform\"):\n# \tprint files\n# \ttest_id=pd.read_csv(files[0]).img \n# \tarr=[]\n# \tfor i in files:\n# \t\tarr.append(pd.read_csv(i,usecols=['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']))\n\t\n# \tdf=pd.concat(arr,axis=1)\n# \tdf=df.as_matrix()\n# \tdf=df.tolist()\n# \tresult=[]\n# \tweights=[1.17,1.21,1.17,1.165,1.16,1.51,1.49,1.53,1.51,1.495]\n# \tfor r,row in enumerate(df):\n# \t\tcluster_count=dict()\n# \t\tfor index,cluster in enumerate(row):\n# \t\t\tif cluster in cluster_count:\n# \t\t\t\tcluster_count[cluster]+=weights[index/10]*(1/((index%10)+1.0))\n# \t\t\telse:\n# \t\t\t\tcluster_count[cluster]=weights[index/10]*(1/((index%10)+1.0))\n\n# \t\ttopFive=heapq.nlargest(10,cluster_count,key=cluster_count.get)\n# \t\tresult.append(topFive)\n# #prediction=[\" \".join(str(x[i]) for i in ensembleResult]\n# \tprint result[0]\n# \tresult=np.array(result)\n# \tdf=pd.DataFrame(result,columns=[['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']])\n# \tdf.insert(0,\"img\",pd.Series(test_id,index=df.index))\n# \tdf.to_csv(\"submissionFinal.csv\")\n# #pd.DataFrame(\"id\":test_id,\"[\")\n\n\n\n# \t# pattern=re.compile(r\"(.)*_[w|W](\\d*)_[.]*\")\n# \t# with open(\"submissionFinal.csv\",\"wb\") as f:\n# \t# \tweight_list=[1]*len(files)\n# \t# \tfor i,fi in enumerate(files):\n# \t# \t\tweight=pattern.match(fi)\n# \t# \t\tprint weight \n# \t# \t\tbreak\n\n\n# weighted_avg(files,\"weights\")\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7274376153945923,
"avg_line_length": 31.8358211517334,
"blob_id": "6de7fa0c556266c5d1d4ea5536e64a2b708cb405",
"content_id": "b8b72f6d7b3d7eb48bbcfd592700ed70df120584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2205,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 67,
"path": "/train.py",
"repo_name": "aryan-jadon/Kaggle_distracted_drivers",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf \nimport os\nimport gc \nimport time\nfrom sklearn import cross_validation\nimport utils \nfrom random import shuffle\nimport train_model\nimport numpy as np\nfrom architectures import VGG16,Inception_BN\n\nflags=tf.app.flags\nFLAGS=flags.FLAGS \n\nflags.DEFINE_float(\"lr\",0.0001,\"learning rate\")\nflags.DEFINE_integer(\"batch_size\",64,\"batch size\")\nflags.DEFINE_integer(\"epochs\",20,\"epochs\")\nflags.DEFINE_integer(\"num_classes\",10,\"number of classes\")\nflags.DEFINE_bool(\"pre_trained\",False,\"pre trained model\")\nflags.DEFINE_string(\"checkpoint_path\",\"checkpoints_inception\",\"checkpoint path\")\nflags.DEFINE_string(\"weight_file\",\"vgg16_weights.npz\",\"weights file \")\nflags.DEFINE_string(\"summary_path\",\"summary_path_inception\",\"path to save summary\")\nflags.DEFINE_string(\"model_path\",\"model_path_inception\",\"path to save model\")\nflags.DEFINE_string(\"fine_tune_from_model\",None,\"fine tune from model\")\n\nstart=time.time()\ntrain_group=utils.load_data_group(test=False)\nprint \"Time taken to load data: {} secs\".format(time.time()-start)\n#print x_train.shape\n\n\nx_train=[]\ny_train=[]\nfor i in train_group.items():\n\t#print \n\tx_train.append(train_group.get(i[0])[\"images\"])\n\ty_train.append(train_group.get(i[0])[\"labels\"])\n\n\nindices = [(chunk, row) for chunk, rows in enumerate(x_train) \n for row in range(rows.shape[0])]\n\nshuffle(indices)\nfold=2\n\n\nfor train_index,valid_index in cross_validation.ShuffleSplit(len(indices),n_iter=4,test_size=0.1,random_state=50):\n\tx_traindata=[indices[i] for i in train_index]\n\ty_traindata=[indices[i] for i in valid_index]\n\t#print y_train[x_traindata[0][0]][x_traindata[0][1]]\n\t\n\twith tf.Graph().as_default(),tf.Session() as sess:\n\t\tlayers=Inception_BN.Inception_BN(FLAGS.num_classes)\n\t\tm=train_model.model(fold,layers)\n\n\t\tfor epoch in range(FLAGS.epochs):\n\t\t\tshuffle(x_traindata)\n\n\t\t\ttrain_loss,train_acc=m.train(epoch,x_train,y_train,x_traindata,True)\n\n\t\t\tprint \"Epoch: {} training_loss: {} training_accuracy: {}\".format(epoch,train_loss,train_acc)\n\n\t\t\tshuffle(y_traindata)\n\t\t\tvalid_loss,valid_acc=m.train(epoch,x_train,y_train,y_traindata,False)\n\t\t\tprint \"==> Validaton loss: {},validation accuracy: {}\".format(valid_loss,valid_acc)\n\n\tfold+=1\n\n\n\n\n\n"
}
] | 6 |
BernardTsai/renderer | https://github.com/BernardTsai/renderer | 40afc0eb213ddb640b1509e61457864d1325a570 | 78c9de80f7704999b04f8756919f4ae953581d36 | 5baef892699fe9fc2e4305ee71c75fd607934840 | refs/heads/master | 2021-06-21T19:06:57.490175 | 2021-04-20T18:22:09 | 2021-04-20T18:22:09 | 213,201,451 | 0 | 0 | Apache-2.0 | 2019-10-06T16:19:18 | 2019-10-10T20:01:31 | 2021-04-20T18:22:09 | Python | [
{
"alpha_fraction": 0.5589887499809265,
"alphanum_fraction": 0.5730336904525757,
"avg_line_length": 26.384614944458008,
"blob_id": "7fdd9bfa952f21a1942e8fbc294d90fc4c7dd474",
"content_id": "617308addaa3395bccc0da83781d4befc90d9b74",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 356,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 13,
"path": "/setup.py",
"repo_name": "BernardTsai/renderer",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nfrom distutils.core import setup\n\nsetup(\n name = 'render',\n version = '0.0.0',\n description = 'A script to render yaml with the help of jinja2 templates',\n author = 'Bernard Tsai',\n author_email = '[email protected]',\n url = 'https://github.com/BernardTsai/renderer',\n py_modules = ['render'],\n)\n"
},
{
"alpha_fraction": 0.4923939108848572,
"alphanum_fraction": 0.5034249424934387,
"avg_line_length": 30.6647891998291,
"blob_id": "c7134babaa39af8572023dca03c0903570a44cb1",
"content_id": "c1c389dc8f6e05215d2c75ebc97e5b67f0fc27a7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11241,
"license_type": "permissive",
"max_line_length": 166,
"num_lines": 355,
"path": "/render.py",
"repo_name": "BernardTsai/renderer",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport yaml\nimport logging\nimport jinja2\nimport shutil\nimport pathlib\nimport codecs\nimport re\nimport traceback\n\n#-------------------------------------------------------------------------------\n\nversion = \"0.0.0\" # version of this program\nmodel = {} # the model as parsed from stdin\nschema = \"0.0.0\" # the schema version of the model\nscript_directory = \"\" # the location of the script\ntemplates_directory = \"\" # the location of the templates\ncurrent_directory = \"\" # the location of the execution environment\noutput_directory = \"\" # the location of the generated output\n\n# ------------------------------------------------------------------------------\n# loadModel: loads yaml model\n# ------------------------------------------------------------------------------\ndef loadModel():\n global model\n global schema\n global script_directory\n global templates_directory\n\n # A. read yaml from stdin\n yaml_file = ''\n for line in sys.stdin:\n yaml_file += line\n\n # B. parse yaml into a model\n try:\n model = yaml.safe_load(yaml_file)\n except yaml.YAMLError as exc:\n logging.error('Loading data failed: {}'.format(exc))\n\n if hasattr(exc, 'problem_mark'):\n logging.error(\"Error position: (%s:%s)\" % (exc.problem_mark.line + 1, exc.problem_mark.column + 1))\n exit(1)\n\n except Exception as exc:\n logging.error('Loading data failed: {}'.format(exc))\n exit(1)\n\n # C. derive schema\n if not 'schema' in model:\n logging.error(\"Model does not have a schema attribute\")\n exit(1)\n\n schema = model['schema']\n\n # D. derive templates directory and check if it exists\n templates_directory = os.path.join(script_directory, \"templates\", \"V\" + schema + \"/\")\n\n if not os.path.exists(templates_directory):\n logging.error(\"Schema '\" + schema + \"'is not supported\")\n exit(1)\n\n# ------------------------------------------------------------------------------\n# loadTemplate: loads template from a file and returns a string\n# ------------------------------------------------------------------------------\ndef loadTemplate(filename):\n with codecs.open(str(filename), 'r', 'utf-8') as template_file:\n template = template_file.read()\n\n return template\n\n# ------------------------------------------------------------------------------\n# renderTemplate: apply jinj2 template to model\n# ------------------------------------------------------------------------------\ndef renderTemplate(template_name, template):\n global model\n\n try:\n # initialize the jinja2 environment\n env = jinja2.Environment(\n trim_blocks=True,\n lstrip_blocks=True,\n extensions=[ 'jinja2.ext.loopcontrols', 'jinja2.ext.do' ]\n )\n env.filters[\"fixed\"] = fixed_ips_filter\n env.filters[\"allowed\"] = allowed_ips_filter\n env.filters[\"portmin\"] = port_min_filter\n env.filters[\"portmax\"] = port_max_filter\n\n renderer = env.from_string(template)\n\n # render a view of the model\n view = renderer.render(model)\n\n return view\n\n except jinja2.TemplateSyntaxError as template_error:\n logging.error('Error in template: ' + template_name + '/' + str(template_error.lineno) + ': ' + template_error.message)\n exit(1)\n\n except jinja2.UndefinedError as exc:\n logging.error('Undefined error in template: ' + template_name + ': {}'.format(str(exc)))\n exit(1)\n\n except Exception as exc:\n print(traceback.format_exc())\n logging.error('Failed to render model with template ' + template_name + ': {}'.format(str(exc)))\n exit(1)\n\n# 
------------------------------------------------------------------------------\n# fixed_ips_filter: derives a list of fixed IPs from a string\n# format: \"fixed: IP1, IP2, IP3, ...;\"\n# each IP? should either be an IP-address or a\n# range with the last octet defining the Range\n# e.g. 192.168.178.10-20\n# ------------------------------------------------------------------------------\ndef fixed_ips_filter(str):\n result = []\n\n # extract the fixed ips part of the string\n m = re.search( r'fixed\\:[^;]*', str, re.IGNORECASE)\n\n if not m:\n return result\n\n # get first occurence\n str1 = m[0].strip()\n\n # remove the prefix: \"fixed: \"\n str2 = str1[7:]\n\n # split into substrings\n str3 = str2.split(\",\")\n\n # construct the result\n for str4 in str3:\n # check if we have a range\n if str4.find(\"-\") < 0:\n result.append(str4)\n else:\n str5 = generate_ip_range(str4)\n result.extend(str5)\n\n # completed\n return result\n\n# ------------------------------------------------------------------------------\n# allowed_ips_filter derives a list of allowed IPs from a string\n# format: \"allowed: IP1, IP2, IP3, ...;\"\n# each IP? should either be an IP-address or a\n# range with the last octet defining the Range\n# e.g. 192.168.178.10-20\n# ------------------------------------------------------------------------------\ndef allowed_ips_filter(str):\n result = []\n\n # extract the allowed ips part of the string\n m = re.search( r'allowed:[^;]*', str, re.IGNORECASE)\n\n if not m:\n return result\n\n # get first occurence\n str1 = m[0].strip()\n\n # remove the prefix: \"allowed: \"\n str2 = str1[9:]\n\n # split into substrings\n str3 = str2.split(\",\")\n\n # construct the result\n for str4 in str3:\n # check if we have a range\n if str4.find(\"-\") < 0:\n result.append(str4)\n else:\n str5 = generate_ip_range(str4)\n result.extend(str5)\n\n # completed\n return result\n\n# ------------------------------------------------------------------------------\n# port_min_filter derives a min port number from a string\n# format: \"portmin-portmax|port\"\n# e.g. 8080-8081\n# ------------------------------------------------------------------------------\ndef port_min_filter(str):\n parts = str.split(\"-\")\n\n return (parts[0] if len(parts) == 2 else str)\n\n# ------------------------------------------------------------------------------\n# port_max_filter derives a max port number from a string\n# format: \"portmin-portmax|port\"\n# e.g. 
8080-8081\n# ------------------------------------------------------------------------------\ndef port_max_filter(str):\n parts = str.split(\"-\")\n\n return (parts[1] if len(parts) == 2 else str)\n\n# ------------------------------------------------------------------------------\n# generate_ip_range generates a list of IP addresses as an array\n# ------------------------------------------------------------------------------\ndef generate_ip_range( ip_range ):\n result = []\n\n # split range and determine prefix and range\n pos = ip_range.rfind(\".\")\n prefix = ip_range[:pos]\n rng = ip_range[pos+1:]\n\n # split the range and determine first and last index\n parts = rng.split(\"-\")\n first = int( parts[0] )\n last = int( parts[1] )\n\n # construct the result\n for index in range(first,last+1):\n result.append(prefix + \".\" + str(index) )\n\n # completed\n return result\n\n# ------------------------------------------------------------------------------\n# saveView: save view to a file or a set of files\n# ------------------------------------------------------------------------------\ndef saveView(view, path):\n # check if the view contains special output statement lines:\n # \">> [path] [comments]\\n\" which advise to output the following\n # data to a file location indicated by the [path] argument\n\n block = ''\n file_name = ''\n for line in view.splitlines():\n # determine new filename: \">> [filename] [comments]\"\n match = re.match('>> ([^ ]*)(.*)', line)\n if match:\n # write the existing block\n if block != '':\n write_block(str(path), str(file_name), block)\n\n # reset block\n block = ''\n\n # set new file name\n file_name = match.group(1)\n else:\n if block == '':\n block = line\n else:\n block += '\\n' + line\n\n # write last block\n write_block(str(path), str(file_name), block)\n\n\n# --------------------------------------------------------------------------\n# write_block: output a block\n# --------------------------------------------------------------------------\ndef write_block(path, file_name, block):\n try:\n # determine path\n if file_name == '':\n file_path = os.path.join(path)\n else:\n file_path = str(pathlib.Path(path).parent)\n file_path = os.path.join(file_path, file_name)\n\n # check if the file_path does not point to a directory\n if os.path.isdir(file_path):\n logging.error(\"Destination must not be a directory: {}\".format(file_name))\n exit(1)\n\n # ensure that the directory exists\n dir_path = os.path.dirname(file_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # write block as text file\n with codecs.open(file_path, 'w', 'utf-8') as text_file:\n text_file.write(block)\n\n except IOError as exc:\n snippet_len = 500\n short_block = (block[ :snippet_len ] + '..') if len(block) > snippet_len else block\n logging.error(\"Error while writing to file {}: \\n\\n=== SNIPPET START (first {} chars)=== \\n{}\\n===SNIPPET END===\".format(file_name, snippet_len, short_block))\n exit(1)\n\n#-------------------------------------------------------------------------------\n\ndef main():\n global script_directory\n global current_directory\n global output_directory\n\n # determine path of module\n script_directory = os.path.dirname(os.path.realpath(__file__))\n\n # determine current working directory\n current_directory = os.getcwd()\n\n # define output directory\n output_directory = os.path.join(current_directory, \"output\") + \"/\"\n\n # cleanup output path\n shutil.rmtree(output_directory, ignore_errors=True, onerror=None)\n\n # load model\n 
loadModel()\n\n # load yaml model and render all templates\n try:\n # find every template\n for filename in pathlib.Path(templates_directory).glob('**/*.j2'):\n if filename.is_dir():\n continue\n\n # define template name\n template_name = str(filename).replace( templates_directory, \"\" )\n\n # # load template\n template = loadTemplate(filename)\n\n # render template\n view = renderTemplate(template_name, template)\n\n # remove the \".j2\" extension\n basename = str(filename)[:-3]\n\n # define output path\n output_path = str(basename).replace( templates_directory, output_directory )\n\n # save view\n saveView(view, output_path)\n\n except Exception as exc:\n traceback.print_exc()\n logging.error('Unknown error: {}'.format(exc))\n return 1\n\n return 0\n\n#-------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n\n#-------------------------------------------------------------------------------\n"
},
{
"alpha_fraction": 0.6027088165283203,
"alphanum_fraction": 0.6094807982444763,
"avg_line_length": 35.91666793823242,
"blob_id": "f3281b693437e259970894f0ca6beba4ee7b3c53",
"content_id": "f1eaf496128844f4338be0a48bc11e506e073d29",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 443,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 12,
"path": "/templates/V1.0.1/servers/create_all.sh.j2",
"repo_name": "BernardTsai/renderer",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nSCRIPTPATH=\"$( cd \"$(dirname \"$0\")\" ; pwd -P )\"\n{% for component in components %}{% if component.placement != 'OTHER' %}{% if component.placement != 'ROUTER' %}\n{% if component.max == 1 %}\n$SCRIPTPATH/{{component.name}}/create.yml &\n{% else %}\n{% for index in range(component.max) %}\n$SCRIPTPATH/{{component.name}}/create.yml --extra-vars \"nr={{index+1}}\" &\n{% endfor %}\n{% endif %}\n{% endif %}{% endif %}{% endfor %}\nwait\n"
},
{
"alpha_fraction": 0.40740740299224854,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.5,
"blob_id": "ce025d456239aebba0b9cf3f4f5cd80a1f4031c2",
"content_id": "821921e9c4a6512e12ff050d8b2a8f061723d780",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 27,
"license_type": "permissive",
"max_line_length": 14,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "BernardTsai/renderer",
"src_encoding": "UTF-8",
"text": "Jinja2==2.10.3\nPyYAML==5.4\n"
},
  {
    "alpha_fraction": 0.6826706528663635,
    "alphanum_fraction": 0.6894223690032959,
    "avg_line_length": 32.32500076293945,
    "blob_id": "d2eb216f12dec03d7e7c71916a9395a06274fdc6",
    "content_id": "74580a432c8f960b9714af676ecea1f7bdf8d1dc",
    "detected_licenses": [
      "Apache-2.0"
    ],
    "is_generated": false,
    "is_vendor": false,
    "language": "Markdown",
    "length_bytes": 1333,
    "license_type": "permissive",
    "max_line_length": 178,
    "num_lines": 40,
    "path": "/README.md",
    "repo_name": "BernardTsai/renderer",
    "src_encoding": "UTF-8",
    "text": "renderer\n========\n\nA simple python3 script which renders a yaml model into a set of files (as shown in the diagram below) by:\n\n1. reading a **yaml model** from stdin,\n2. determining the top-level **schema** attribute, \n3. searching for a matching subdirectory in the **templates** subdirectory,\n4. applying all **jinja2 templates** it finds there and\n5. then writing the results into corresponding files (without the .j2 extension) of an **output** directory which is created as a subfolder of the current working directory.\n\n\n````\n templates/<schema>/....\n |\n |\n v\nyaml (schema) ==> render.py ==> ./output/...\n\n````\n\nTo invoke the renderer, issue the following command:\n\n````\n> cat model.yml | ./render.py\n````\n\nThe templates can make use of a special tweak which allows generating several output files from one template by adding the following information into the templates:\n\n````\n...\n>> [relative filename]\n...\n````\n\nWhen the renderer finds such a line in the text, it will concatenate the original output path with the relative filename and output all following lines to the corresponding file. \n\nGenerating multiple output files can be achieved, e.g., by enclosing such a line in a jinja2 loop and deriving the filename from the model. \n\nAuthor: Bernard Tsai ([email protected])\n"
}
] | 5 |
aditya-kandada/democrat | https://github.com/aditya-kandada/democrat | fc850bde286a76086f820f22d01b5069107786d7 | 9ac4eb230b584eb2ea84cc79e2d6cbcc0c049898 | 26604a0c2744abea37742fd99c4f61e8b9d44d52 | refs/heads/master | 2021-01-23T12:33:04.574913 | 2017-06-02T13:55:56 | 2017-06-02T13:55:56 | 93,169,759 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.43820226192474365,
"alphanum_fraction": 0.6516854166984558,
"avg_line_length": 13.833333015441895,
"blob_id": "bea2756b2178a7811d98048ea73124063e7cc29c",
"content_id": "967e52998184b55cb2ad4f44ac1ba8970ce00c7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "aditya-kandada/democrat",
"src_encoding": "UTF-8",
"text": "Django==1.7.4\nuWSGI==2.0.2\nPython >= 2.6.5\ngunicorn==18.0\npsycopg2==2.5.1\nwsgiref==0.1.2\n"
},
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 17.77777862548828,
"blob_id": "49469431cd65bd29d129b8fe26d677e97c3dfb2d",
"content_id": "2763ef40e5190879b425ae9bc79ef5f7c4fb9f89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/polls/urls.py",
"repo_name": "aditya-kandada/democrat",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns\nfrom django.conf.urls import url\n\n\nurlpatterns = patterns('polls.views',\n # Examples:\n url(r'^$', 'index', name='index'),\n\n)"
},
{
"alpha_fraction": 0.6990521550178528,
"alphanum_fraction": 0.741706132888794,
"avg_line_length": 46,
"blob_id": "64957d07cb6035c1b207c37cc5b745259b895ea8",
"content_id": "6f74c32b1626fabc920b927b172c5d53074f2360",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 9,
"path": "/polls/models.py",
"repo_name": "aditya-kandada/democrat",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Candidate(models.Model):\n name = models.CharField(max_length=100)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n description = models.CharField(max_length=250)\n upvote = models.IntegerField(max_length=250, null=True, blank=True)\n downvote = models.IntegerField(max_length=250, null=True, blank=True)"
},
{
"alpha_fraction": 0.7420814633369446,
"alphanum_fraction": 0.7420814633369446,
"avg_line_length": 21.200000762939453,
"blob_id": "95213909741bb51558cf73831bf163529c9fa0c4",
"content_id": "64f79538dafd3a2a960c6faab5d30d002b702274",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/polls/views.py",
"repo_name": "aditya-kandada/democrat",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom polls.models import Candidate\n\n\ndef index(request):\n\n\n candidates = Candidate.objects.all().order_by('name')\n\n return render(request, 'index.html', {'candidates':candidates})"
},
{
"alpha_fraction": 0.7607843279838562,
"alphanum_fraction": 0.7607843279838562,
"avg_line_length": 22.090909957885742,
"blob_id": "5fd3b5f32dbcb75e1c8eb6252462b12ebd53dff9",
"content_id": "f2b4d985c5c3d98aff47d42574ce6dbba6667bbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 11,
"path": "/polls/admin.py",
"repo_name": "aditya-kandada/democrat",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom polls.models import Candidate\n\n\nclass CandidateAdmin(admin.ModelAdmin):\n list_display = ['name', 'description', 'upvote', 'downvote']\n\n\nadmin.site.register(Candidate, CandidateAdmin)\n\n"
}
] | 5 |
turovkv/syntax-analysis | https://github.com/turovkv/syntax-analysis | 04ff9eca7d8eac9d6186099a7127f85665477662 | c502b13d44065e82a74e7c86e2d4b0d159ec59db | 0ab10b61c45ee4d81450175fd1e0ad16782baf16 | refs/heads/master | 2023-01-01T01:06:14.183042 | 2020-10-16T10:09:20 | 2020-10-16T10:09:20 | 300,001,838 | 0 | 0 | null | null | null | null | null | [
  {
    "alpha_fraction": 0.6290322542190552,
    "alphanum_fraction": 0.6290322542190552,
    "avg_line_length": 19,
    "blob_id": "1170c3bf73c19d649798ee6189e72026fd397262",
    "content_id": "e3ceb88b3a0efb7c9feeb58a7cfec9a4a36b0d06",
    "detected_licenses": [],
    "is_generated": false,
    "is_vendor": false,
    "language": "Markdown",
    "length_bytes": 67,
    "license_type": "no_license",
    "max_line_length": 23,
    "num_lines": 3,
    "path": "/6 parser combinators/README.md",
    "repo_name": "turovkv/syntax-analysis",
    "src_encoding": "UTF-8",
    "text": "`parser.py filename` \n__tests__: \n`pytest parser_test.py` \n"
},
{
"alpha_fraction": 0.5600504279136658,
"alphanum_fraction": 0.5605009198188782,
"avg_line_length": 44.679012298583984,
"blob_id": "a6738d2f4dbf07ceac543e9e31a0a0d0174b0f2d",
"content_id": "c193aac30385bb5ba4256571d53bb42f956358de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11099,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 243,
"path": "/6 parser combinators/parser_test.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import parser\n\n\ndef test_unit_atom_ok():\n res = lambda x: parser.PrologParsers.atom.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('a') == 'Atom (ID (a))'\n assert print_res('a a') == 'Atom (ID (a)) (Atom (ID (a)))'\n assert print_res('a a a') == 'Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a a a a') == 'Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a (a a a)') == 'Atom (ID (a)) (Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a))))'\n assert print_res('a (a a a) a') == 'Atom (ID (a)) (Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a)))) (Atom (ID (a)))'\n assert print_res('a (a) (a)') == 'Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a (((a))) (((a)))') == 'Atom (ID (a)) (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a ((a (((a)))))') == 'Atom (ID (a)) (Atom (ID (a)) (Atom (ID (a))))'\n assert print_res('a A A') == 'Atom (ID (a)) (Var (A)) (Var (A))'\n\n\ndef test_unit_atom_error():\n res = lambda x: parser.PrologParsers.atom.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('(a)'), parser.Failure)\n assert isinstance(res('(a) a'), parser.Failure)\n assert isinstance(res('a ((a) (a))'), parser.Failure)\n assert isinstance(res('A'), parser.Failure)\n assert isinstance(res('A a'), parser.Failure)\n assert isinstance(res('a (A a)'), parser.Failure)\n assert isinstance(res('A a a a'), parser.Failure)\n assert isinstance(res('type'), parser.Failure)\n assert isinstance(res('module'), parser.Failure)\n\n\ndef test_unit_typeexpr_ok():\n res = lambda x: parser.PrologParsers.typeexpr.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('a') == 'Type (Atom (ID (a)))'\n assert print_res('a -> a') == 'Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))'\n assert print_res('(((a))) -> a') == 'Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))'\n assert print_res('a -> (((a)))') == 'Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))'\n assert print_res('((((((a))) -> (((a))))))') == 'Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))'\n assert print_res(\n 'a a -> a a') == 'Arrow (Type (Atom (ID (a)) (Atom (ID (a))))) (Type (Atom (ID (a)) (Atom (ID (a)))))'\n assert print_res('a a -> A') == 'Arrow (Type (Atom (ID (a)) (Atom (ID (a))))) (Type (Var (A)))'\n assert print_res(\n '(a -> a) -> a') == 'Arrow (Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))) (Type (Atom (ID (a))))'\n assert print_res('(a -> a -> a) -> a') == 'Arrow (Arrow (Type (Atom (ID (a)))) (Arrow (Type (Atom (ID (a)))) (' \\\n 'Type (Atom (ID (a)))))) (Type (Atom (ID (a))))'\n assert print_res('(a -> a) -> (a -> a)') == 'Arrow (Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))) (Arrow (' \\\n 'Type (Atom (ID (a)))) (Type (Atom (ID (a)))))'\n\n\ndef test_unit_typeexpr_error():\n res = lambda x: parser.PrologParsers.typeexpr.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('a (->) a'), parser.Failure)\n assert isinstance(res('a ->'), parser.Failure)\n assert isinstance(res('-> a'), parser.Failure)\n assert isinstance(res('a -> -> a'), parser.Failure)\n assert isinstance(res('->'), parser.Failure)\n assert isinstance(res('a -> A a'), parser.Failure)\n assert isinstance(res('a -> a a)'), parser.Failure)\n\n\ndef test_unit_typedef_ok():\n res = lambda x: parser.PrologParsers.typedef.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('type a a.') == 'Typedef (ID (a)) (Type (Atom (ID 
(a))))'\n assert print_res('type a a->a.') == 'Typedef (ID (a)) (Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a)))))'\n assert print_res('type aA a.') == 'Typedef (ID (aA)) (Type (Atom (ID (a))))'\n\n\ndef test_unit_typedef_error():\n res = lambda x: parser.PrologParsers.typedef.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('type '), parser.Failure)\n assert isinstance(res('type .'), parser.Failure)\n assert isinstance(res('type a'), parser.Failure)\n assert isinstance(res('type . a'), parser.Failure)\n assert isinstance(res('type type'), parser.Failure)\n assert isinstance(res('type A a'), parser.Failure)\n\n\ndef test_unit_module_ok():\n res = lambda x: parser.PrologParsers.module.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('module a.') == 'Module (ID (a))'\n assert print_res('module aAA.') == 'Module (ID (aAA))'\n\n\ndef test_unit_module_error():\n res = lambda x: parser.PrologParsers.module.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('module a a.'), parser.Failure)\n assert isinstance(res('modale a a.'), parser.Failure)\n assert isinstance(res('module A.'), parser.Failure)\n assert isinstance(res('module type.'), parser.Failure)\n assert isinstance(res('module a'), parser.Failure)\n assert isinstance(res('module .'), parser.Failure)\n assert isinstance(res('module '), parser.Failure)\n assert isinstance(res('a'), parser.Failure)\n\n\ndef test_unit_relation_ok():\n res = lambda x: parser.PrologParsers.relation.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('a :- a.') == 'Relation (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a a:- a.') == 'Relation (Atom (ID (a)) (Atom (ID (a)))) (Atom (ID (a)))'\n assert print_res('a :- (((a))).') == 'Relation (Atom (ID (a))) (Atom (ID (a)))'\n assert print_res('a :- a; a.') == 'Relation (Atom (ID (a))) (Disj (Atom (ID (a))) (Atom (ID (a))))'\n assert print_res('a :- a, a.') == 'Relation (Atom (ID (a))) (Conj (Atom (ID (a))) (Atom (ID (a))))'\n assert print_res('a :- (a; a), (a; a).') == 'Relation (Atom (ID (a))) (Conj (Disj (Atom (ID (a))) (Atom (ID (' \\\n 'a)))) (Disj (Atom (ID (a))) (Atom (ID (a)))))'\n assert print_res('a :- a a.') == 'Relation (Atom (ID (a))) (Atom (ID (a)) (Atom (ID (a))))'\n\n\ndef test_unit_relation_error():\n res = lambda x: parser.PrologParsers.relation.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('a : -'), parser.Failure)\n assert isinstance(res('a :- a'), parser.Failure)\n assert isinstance(res('A :- a.'), parser.Failure)\n assert isinstance(res('a :- A.'), parser.Failure)\n assert isinstance(res('a :- a, .'), parser.Failure)\n assert isinstance(res('a :- a; .'), parser.Failure)\n assert isinstance(res('a :- a; A A.'), parser.Failure)\n assert isinstance(res('a :- a; ;a.'), parser.Failure)\n assert isinstance(res('a :- a;a ;.'), parser.Failure)\n\n\ndef test_unit_program_ok():\n res = lambda x: parser.PrologParsers.program.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert print_res('module a. \\n type a a. \\n a :- a.') == 'Program (\\nModule (ID (a))\\nTypedef (ID (a)) (Type (' \\\n 'Atom (ID (a))))\\nRelation (Atom (ID (a))) (Atom (ID (' \\\n 'a)))\\n)'\n assert print_res('type a a. \\n a :- a.') == 'Program (\\nTypedef (ID (a)) (Type (' \\\n 'Atom (ID (a))))\\nRelation (Atom (ID (a))) (Atom (ID (' \\\n 'a)))\\n)'\n assert print_res('type a a. \\n a :- a. 
\\n a.') == 'Program (\\nTypedef (ID (a)) (Type (' \\\n 'Atom (ID (a))))\\nRelation (Atom (ID (a))) (Atom (ID (' \\\n 'a)))\\nRelation (Atom (ID (a)))\\n)'\n\n\ndef test_unit_program_error():\n res = lambda x: parser.PrologParsers.program.parse(x)\n print_res = lambda x: parser.printAST(res(x).value)\n assert isinstance(res('type a. module a.'), parser.Failure)\n assert isinstance(res('type a module a.'), parser.Failure)\n assert isinstance(res('type a'), parser.Failure)\n assert isinstance(res('A :- A.'), parser.Failure)\n assert isinstance(res('type module.'), parser.Failure)\n\n\ndef test_integrate_prog(tmp_path, monkeypatch):\n filename = 'a'\n text = 'a a.'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--prog', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Program (\\nRelation (Atom (ID (a)) (Atom (ID (a))))\\n)'\n\n\ndef test_integrate_prog_2(tmp_path, monkeypatch):\n filename = 'a'\n text = 'a a.'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main([f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Program (\\nRelation (Atom (ID (a)) (Atom (ID (a))))\\n)'\n\n\ndef test_integrate_atom(tmp_path, monkeypatch):\n filename = 'a'\n text = 'a a'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--atom', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Atom (ID (a)) (Atom (ID (a)))'\n\n\ndef test_integrate_typeexpr(tmp_path, monkeypatch):\n filename = 'a'\n text = 'a->a'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--typeexpr', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a))))'\n\n\ndef test_integrate_type(tmp_path, monkeypatch):\n filename = 'a'\n text = 'type a a->a.'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--type', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Typedef (ID (a)) (Arrow (Type (Atom (ID (a)))) (Type (Atom (ID (a)))))'\n\n\ndef test_integrate_module(tmp_path, monkeypatch):\n filename = 'a'\n text = 'module a.'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--module', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res == 'Module (ID (a))'\n\n\ndef test_integrate_relation_error(tmp_path, monkeypatch):\n filename = 'a'\n text = 'a :- a a'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['--relation', f'{filename}'])\n res = open(f'{filename}.out', 'r').read()\n assert res.split(' ', 1)[0] == 'ERROR'\n\n\ndef test_integrate_arg_error_1(tmp_path, monkeypatch, capsys):\n filename = 'a'\n text = 'kek'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['a', 'b', f'{filename}'])\n out, err = capsys.readouterr()\n assert err == ''\n assert out == 'Invalid args\\n'\n\n\ndef test_integrate_arg_error_2(tmp_path, monkeypatch, capsys):\n filename = 'a'\n text = 'kek'\n (tmp_path / filename).write_text(text)\n monkeypatch.chdir(tmp_path)\n parser.main(['kek', f'{filename}'])\n out, err = capsys.readouterr()\n assert err == ''\n assert out == 'Invalid args\\n'"
},
{
"alpha_fraction": 0.4863945543766022,
"alphanum_fraction": 0.4909296929836273,
"avg_line_length": 15.054545402526855,
"blob_id": "ce08d48f8fc8e48287fc9fbd08fefaabad81431a",
"content_id": "9eefe0c553a81af1f779537bcc8c7c9e1132cb08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 55,
"path": "/5 yacc/lexer.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import ply.lex as lex\n\nreserved = {}\n\ntokens = [\n 'IDENTIFIER',\n 'DEFINITION',\n 'END',\n 'AND',\n 'OR',\n 'LBR',\n 'RBR'\n ] + list(reserved.values())\n\n\ndef t_IDENTIFIER(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value, 'IDENTIFIER')\n return t\n\n\nt_DEFINITION = r':-'\n\nt_END = r'\\.'\n\nt_AND = r','\n\nt_OR = r';'\n\nt_LBR = r'\\('\n\nt_RBR = r'\\)'\n\nt_ignore = ' \\t'\n\nline_start_pos = 0\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n global line_start_pos\n line_start_pos = t.lexpos + len(t.value)\n\n\ndef t_error(t):\n error_msg = \"SYNTAX ERROR (illegal character)\"\n if t:\n error_msg += f\" in line {t.lineno} in pos {t.lexpos - line_start_pos + 1} !\"\n else:\n error_msg += \" at EOF !\"\n raise SyntaxError(error_msg)\n\n\nlexer = lex.lex()"
},
{
"alpha_fraction": 0.403253436088562,
"alphanum_fraction": 0.4357876777648926,
"avg_line_length": 20.43119239807129,
"blob_id": "69aceb7b491287c9611684703de4c178002296ed",
"content_id": "728bef01c620ba63b27f05d1f5a5550f0a9eb31d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2336,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 109,
"path": "/5 yacc/parser.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import ply.yacc as yacc\nimport sys\nimport lexer\nfrom lexer import tokens\n\n\ndef p_program(p):\n \"\"\"program : definition\n | definition program\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n elif len(p) == 3:\n p[0] = f'{p[1]} \\n{p[2]}'\n\n\ndef p_definition(p):\n \"\"\"definition : atom END\n | atom DEFINITION disjunction END\"\"\"\n if len(p) == 3:\n p[0] = f'Def ({p[1]})'\n elif len(p) == 5:\n p[0] = f'Def ({p[1]}) by ({p[3]})'\n\n\ndef p_disjunction(p):\n \"\"\"disjunction : conjunction\n | conjunction OR disjunction\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n elif len(p) == 4:\n p[0] = f'Disj ({p[1]}) ({p[3]})'\n\n\ndef p_conjunction(p):\n \"\"\"conjunction : lowexpr\n | lowexpr AND conjunction\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n elif len(p) == 4:\n p[0] = f'Conj ({p[1]}) ({p[3]})'\n\n\ndef p_lowexpr(p):\n \"\"\"lowexpr : atom\n | LBR disjunction RBR\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n elif len(p) == 4:\n p[0] = f'({p[2]})'\n\n\ndef p_atom(p):\n \"\"\"atom : id\n | id atom2\"\"\"\n if len(p) == 2:\n p[0] = f'Atom ({p[1]})'\n elif len(p) == 3:\n p[0] = f'Atom ({p[1]} {p[2]})'\n\n\ndef p_atom2_id(p):\n \"\"\"atom2 : id\n | id atom2\"\"\"\n if len(p) == 2:\n p[0] = f'Atom ({p[1]})'\n elif len(p) == 3:\n p[0] = f'Atom ({p[1]}) {p[2]}'\n\n\ndef p_atom2_atom3(p):\n \"\"\"atom2 : atom3\n | atom3 atom2\"\"\"\n if len(p) == 2:\n p[0] = f'{p[1]}'\n elif len(p) == 3:\n p[0] = f'{p[1]} {p[2]}'\n\n\ndef p_atom3(p):\n \"\"\"atom3 : LBR atom RBR\n | LBR atom3 RBR\"\"\"\n if len(p) == 4:\n p[0] = f'{p[2]}'\n\n\ndef p_id(p):\n \"\"\"id : IDENTIFIER\"\"\"\n p[0] = f'ID {p[1]}'\n\n\ndef p_error(p):\n error_msg = \"SYNTAX ERROR\"\n if p:\n error_msg += f\" in line {p.lineno} in pos {p.lexpos - lexer.line_start_pos + 1} !\"\n else:\n error_msg += \" at EOF !\"\n raise SyntaxError(error_msg)\n\n\nparser = yacc.yacc()\n\nif __name__ == '__main__':\n with open(sys.argv[1], 'r') as file_in, \\\n open(sys.argv[1] + '.out', 'w') as file_out:\n try:\n result = parser.parse(file_in.read())\n file_out.write(f'OK !\\n{result}')\n except SyntaxError as e:\n file_out.write(str(e))\n"
},
{
"alpha_fraction": 0.7586206793785095,
"alphanum_fraction": 0.7931034564971924,
"avg_line_length": 29,
"blob_id": "88601835836cb17017b09b197e44a30f3745adc2",
"content_id": "3710e619c7c5a005ab88e2e2350cc648f3e60637",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 1,
"path": "/5 yacc/README.md",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "`python3 parser.py file_name`"
},
{
"alpha_fraction": 0.7692307829856873,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "a618883100506ed7dc09fe7a480b498e511dc4d7",
"content_id": "eee038905ce56713ac5737e4153f528169748c2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 1,
"path": "/4 recursive descent parser/README.md",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "python3 main.py file_name\n"
},
{
"alpha_fraction": 0.5669291615486145,
"alphanum_fraction": 0.5679386258125305,
"avg_line_length": 22.473934173583984,
"blob_id": "e70dda31f425e29d22fefc5927d915ebe1e3429a",
"content_id": "5c2b7801eec7ede069523953d9342a1e616469dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4953,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 211,
"path": "/3 derivatives/main.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import sys\n\nsys.setrecursionlimit(10 ** 6)\n\n\nclass Empty:\n pass\n\n\nclass Epsilon:\n pass\n\n\nclass Char:\n def __init__(self, char):\n self.arg = char\n\n\nclass Alt:\n def __init__(self, left, right):\n self.arg_left = left\n self.arg_right = right\n\n\nclass Seq:\n def __init__(self, left, right):\n self.arg_left = left\n self.arg_right = right\n\n\nclass Star:\n def __init__(self, exp):\n self.arg = exp\n\n\ndef alt(left, right):\n if isinstance(right, Empty):\n left, right = right, left\n if isinstance(left, Empty):\n return right\n\n if isinstance(right, Epsilon):\n left, right = right, left\n if isinstance(left, Epsilon):\n if nullable(right):\n return right\n else:\n return Alt(left, right)\n\n if isinstance(right, Star):\n left, right = right, left\n if isinstance(left, Star):\n if equal(left.arg, right):\n return left\n\n if isinstance(right, Alt):\n left, right = right, left\n if isinstance(right, Alt):\n return alt(alt(left, right.arg_left), right.arg_right)\n\n def find_in_alts(exp, needle):\n if equal(exp, needle):\n return True\n if isinstance(exp, Alt):\n return find_in_alts(exp.arg_left, needle) or \\\n find_in_alts(exp.arg_right, needle)\n return False\n\n if find_in_alts(left, right):\n return left\n\n return Alt(left, right)\n\n\ndef seq(left, right):\n if isinstance(right, Empty):\n left, right = right, left\n if isinstance(left, Empty):\n return Empty()\n\n if isinstance(right, Epsilon):\n left, right = right, left\n if isinstance(left, Epsilon):\n return right\n\n if isinstance(right, Seq):\n left, right = right, left\n if isinstance(right, Seq):\n return seq(seq(left, right.arg_left), right.arg_right)\n\n def find_in_seqs(exp, needle):\n if equal(exp, needle):\n return True\n if isinstance(exp, Seq):\n return find_in_seqs(exp.arg_right, needle)\n return False\n\n if isinstance(right, Star) and find_in_seqs(left, right):\n return left\n\n return Seq(left, right)\n\n\ndef star(arg):\n if isinstance(arg, Empty):\n return Empty()\n if isinstance(arg, Epsilon):\n return Epsilon()\n\n if isinstance(arg, Star):\n return arg\n\n return Star(arg)\n\n\ndef plus(arg):\n return seq(arg, star(arg))\n\n\ndef equal(a, b):\n if type(a) != type(b):\n return False\n if isinstance(a, Empty):\n return True\n if isinstance(a, Epsilon):\n return True\n if isinstance(a, Char):\n return a.arg == b.arg\n if isinstance(a, Alt) or isinstance(a, Seq):\n return equal(a.arg_left, b.arg_left) and equal(a.arg_right, b.arg_right)\n if isinstance(a, Star):\n return equal(a.arg, b.arg)\n\n\ndef nullable(exp):\n if isinstance(exp, Empty):\n return False\n if isinstance(exp, Epsilon):\n return True\n if isinstance(exp, Char):\n return False\n if isinstance(exp, Alt):\n return nullable(exp.arg_left) or nullable(exp.arg_right)\n if isinstance(exp, Seq):\n return nullable(exp.arg_left) and nullable(exp.arg_right)\n if isinstance(exp, Star):\n return True\n\n\ndef dfs(exp):\n if isinstance(exp, Empty):\n print('Emp', end='')\n if isinstance(exp, Epsilon):\n print('Eps', end='')\n if isinstance(exp, Char):\n print(exp.arg, end='')\n if isinstance(exp, Alt):\n print('(', end='')\n dfs(exp.arg_left)\n print('|', end='')\n dfs(exp.arg_right)\n print(')', end='')\n if isinstance(exp, Seq):\n print('(', end='')\n dfs(exp.arg_left)\n dfs(exp.arg_right)\n print(')', end='')\n if isinstance(exp, Star):\n dfs(exp.arg)\n print('*', end='')\n\n\ndef derivative(char, exp):\n if isinstance(exp, Empty):\n return Empty()\n if isinstance(exp, Epsilon):\n return Empty()\n\n if isinstance(exp, Char):\n if 
char == exp.arg:\n return Epsilon()\n else:\n return Empty()\n\n if isinstance(exp, Alt):\n return alt(derivative(char, exp.arg_left), derivative(char, exp.arg_right))\n\n if isinstance(exp, Seq):\n if nullable(exp.arg_left):\n return alt(seq(derivative(char, exp.arg_left), exp.arg_right),\n derivative(char, exp.arg_right))\n else:\n return seq(derivative(char, exp.arg_left), exp.arg_right)\n\n if isinstance(exp, Star):\n return seq(derivative(char, exp.arg), exp)\n\n\ndef check(string, exp):\n cur_exp = exp\n for char in string:\n cur_exp = derivative(char, cur_exp)\n return nullable(cur_exp)\n\n\nmy_exp = Seq(Alt(Char('a'), Char('b')), Star(Char('c')))\nmy_exp2 = Seq(Alt(Char('a'), Char('b')), Star(Char('c')))\n\nif __name__ == '__main__':\n print(check(input(), my_exp))\n print(my_exp == my_exp2)\n"
},
{
"alpha_fraction": 0.5184049010276794,
"alphanum_fraction": 0.5265848636627197,
"avg_line_length": 36.13924026489258,
"blob_id": "bdf17dfcd349a8a6fd84285ac3a293350aa6c343",
"content_id": "1b259019ae5ff92ebebce6ccf366eae613af25f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2934,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 79,
"path": "/6 parser combinators/parser.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "from parsita import *\nimport sys\n\n\ndef printAST(node):\n if not isinstance(node, list):\n return str(node)\n if node[0] == 'Program':\n ans = f'{node[0]} (\\n' + '\\n'.join(map(printAST, node[1:])) + '\\n)'\n else:\n ans = f'{node[0]} (' + ') ('.join(map(printAST, node[1:])) + ')'\n return ans\n\n\nclass PrologParsers(TextParsers, whitespace=r'[ \\t\\n\\r]*'):\n program = opt(module) & rep(typedef) & rep(relation) > (lambda x: ['Program'] + x[0] + x[1] + x[2])\n\n module = 'module' >> identifier << '.' > (lambda x: ['Module'] + [x])\n\n typedef = 'type' >> identifier & opt(typeexpr) << '.' > (lambda x: ['Typedef'] + [x[0]] + x[1])\n typeexpr = ((type_arg << '->') & typeexpr > (lambda x: ['Arrow'] + x)) | type_arg\n type_arg = '(' >> typeexpr << ')' | type_simple\n type_simple = atom | variable > (lambda x: ['Type'] + [x])\n\n relation = atom & opt(':-' >> body) << '.' > (lambda x: ['Relation'] + [x[0]] + x[1])\n\n body = disjunction\n disjunction = (conjunction << ';' & disjunction > (lambda x: ['Disj'] + x)) | conjunction\n conjunction = (lowexpr << ',' & conjunction > (lambda x: ['Conj'] + x)) | lowexpr\n lowexpr = atom | '(' >> disjunction << ')'\n\n atom = identifier & rep(simple_atom | inner_atom | variable) > (lambda x: ['Atom'] + [x[0]] + x[1])\n inner_atom = '(' >> inner_atom << ')' | '(' >> atom << ')'\n simple_atom = identifier > (lambda x: ['Atom'] + [x])\n\n keywords = {'module', 'type'}\n not_keyword_comb = lambda parser, kw=keywords: pred(parser, lambda x: x not in kw, 'not a keyword')\n identifier = not_keyword_comb(reg(r'[a-z_][a-zA-Z_0-9]*')) > (lambda x: ['ID'] + [x])\n variable = not_keyword_comb(reg(r'[A-Z][a-zA-Z_0-9]*')) > (lambda x: ['Var'] + [x])\n\n\ndef main(args):\n if len(args) == 1:\n option = None\n filename = args[0]\n elif len(args) == 2:\n option = args[0]\n filename = args[1]\n else:\n print('Invalid args')\n return\n\n with open(filename, 'r') as file_in, \\\n open(filename + '.out', 'w') as file_out:\n\n if option is None or option == '--prog':\n res = PrologParsers.program.parse(file_in.read())\n elif option == '--atom':\n res = PrologParsers.atom.parse(file_in.read())\n elif option == '--typeexpr':\n res = PrologParsers.typeexpr.parse(file_in.read())\n elif option == '--type':\n res = PrologParsers.typedef.parse(file_in.read())\n elif option == '--module':\n res = PrologParsers.module.parse(file_in.read())\n elif option == '--relation':\n res = PrologParsers.relation.parse(file_in.read())\n else:\n print('Invalid args')\n return\n\n if isinstance(res, Success):\n file_out.write(printAST(res.value))\n else:\n file_out.write(f'ERROR !\\n{res.message}')\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n"
},
{
"alpha_fraction": 0.5457875728607178,
"alphanum_fraction": 0.5494505763053894,
"avg_line_length": 18.5,
"blob_id": "d15bf4b1009b24c9c72e1abd7904eefd7c774527",
"content_id": "f53dae4f9747f2789aa80f7dfd7e165b4533b622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 14,
"path": "/4 recursive descent parser/main.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import sys\nimport parser\n\nif __name__ == '__main__':\n with open(sys.argv[1], 'r') as file:\n par = parser.Parser(file.read())\n\n while par.definition():\n pass\n\n if par.last_error is not None:\n print(par.last_error)\n else:\n print('OK')\n"
},
{
"alpha_fraction": 0.492337167263031,
"alphanum_fraction": 0.5,
"avg_line_length": 15.3125,
"blob_id": "7d180f32a8c3918a813cd8a8bd9e299ceb354564",
"content_id": "cdb826bb2b75b9e4b7c61bd213f27e5406b8e4b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1044,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 64,
"path": "/4 recursive descent parser/lexer.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import ply.lex as lex\n\nreserved = {}\n\ntokens = [\n 'IDENTIFIER',\n 'DEFINITION',\n 'END',\n 'AND',\n 'OR',\n 'LPAREN',\n 'RPAREN'\n ] + list(reserved.values())\n\n\ndef t_IDENTIFIER(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value, 'IDENTIFIER')\n return t\n\n\nt_DEFINITION = r':-'\n\nt_END = r'\\.'\n\nt_AND = r','\n\nt_OR = r';'\n\nt_LPAREN = r'\\('\n\nt_RPAREN = r'\\)'\n\nt_ignore = ' \\t'\n\nline_start_pos = 0\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n global line_start_pos\n line_start_pos = t.lexpos + len(t.value)\n\n\ndef find_column(inp, token):\n line_start = inp.rfind('\\n', 0, token.lexpos) + 1\n return (token.lexpos - line_start) + 1\n\n\ndef t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n\ndef get_lexer(string):\n lexer = lex.lex()\n lexer.input(string)\n while True:\n tok = lexer.token()\n if not tok:\n yield None\n else:\n yield tok\n"
},
{
"alpha_fraction": 0.5457400679588318,
"alphanum_fraction": 0.5618374347686768,
"avg_line_length": 24.727272033691406,
"blob_id": "0536a4a4173e846f33d4772dac8c163b77ccebfe",
"content_id": "d22535adf1a2cfb2c17896006f8665213708b522",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2547,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 99,
"path": "/3 derivatives/test.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import time\n\nfrom main import *\n\n\ndef test_empty():\n assert not nullable(Empty())\n\n\ndef test_epsilon():\n assert nullable(Epsilon())\n\n\ndef test_char():\n assert not nullable(Char('a'))\n\n\ndef test_alt():\n assert nullable(alt(Epsilon(), Empty()))\n assert nullable(alt(Epsilon(), Epsilon()))\n assert nullable(alt(Epsilon(), Char('a')))\n assert not nullable(alt(Empty(), Char('a')))\n assert not nullable(alt(Empty(), Empty()))\n assert not nullable(alt(Char('a'), Char('b')))\n\n\ndef test_seq():\n assert nullable(alt(Epsilon(), Empty()))\n assert not nullable(seq(Epsilon(), Empty()))\n assert not nullable(seq(Epsilon(), Char('a')))\n assert not nullable(seq(Empty(), Char('a')))\n assert not nullable(seq(Empty(), Empty()))\n assert not nullable(seq(Char('a'), Char('b')))\n\n\ndef test_star():\n assert nullable(star(Char('b')))\n assert nullable(star(Epsilon()))\n assert not nullable(star(Empty()))\n\n\ndef test_plus():\n assert nullable(star(Char('b')))\n assert nullable(star(Epsilon()))\n assert not nullable(star(Empty()))\n\n\ndef test_integration_1():\n exp = seq(seq(star(Char('a')), star(Char('b'))), star(Char('c'))) # (a)*(b)*(c)*\n string = 'a' * 2 * 10 ** 5 + 'bc'\n assert check(string, exp)\n\n\ndef test_integration_2():\n exp = seq(alt(Char('a'), star(Char('a'))), alt(Char('a'), star(Char('a')))) # (a | a*)(a | a*)\n string = 'a' * 10 ** 6\n assert check(string, exp)\n\n\ndef test_integration_3():\n exp = star(Char('a')) # a*\n string = 'a' * 10 ** 6\n assert check(string, exp)\n\n\ndef test_integration_4():\n exp = seq(seq(alt(Char('a'), star(Char('a'))), Char('a')), alt(Char('a'), star(Char('a')))) # (a | a*) a (a | a*)\n string = 'a' * 2 * 10 ** 5\n assert check(string, exp)\n\n\ndef test_integration_5():\n exp = plus(Char('a')) # a+\n string = 'a' * 2 * 10 ** 5\n assert check(string, exp)\n\n\ndef test_integration_5_1():\n exp = plus(Char('a')) # a+\n string = 'a' * 10 ** 5 + 'b'\n assert not check(string, exp)\n\n\ndef test_integration_6():\n exp = seq(seq(star(Char('a')), star(Char('a'))), star(Char('a'))) # a*a*a*\n string = 'a' * 10 ** 5\n assert check(string, exp)\n\n\ndef test_integration_7():\n exp = plus(seq(alt(Char('a'), Char('b')), alt(Char('c'), Char('d')))) # ((a|b)(c|d))+\n string = 'ad' * 10 ** 2\n assert check(string, exp)\n\n\ndef test_integration_7_1():\n exp = plus(seq(alt(Char('a'), Char('b')), alt(Char('c'), Char('d')))) # ((a|b)(c|d))+\n string = 'ab' * 10 ** 2 + 'ab'\n assert not check(string, exp)\n"
},
{
"alpha_fraction": 0.5132192969322205,
"alphanum_fraction": 0.5152928829193115,
"avg_line_length": 27.791044235229492,
"blob_id": "91de68b828af4834979cb03df7c477c58dfae7bb",
"content_id": "ad2eef431a8383d4088bfc87da60aa5edcdfd840",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1929,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 67,
"path": "/4 recursive descent parser/parser.py",
"repo_name": "turovkv/syntax-analysis",
"src_encoding": "UTF-8",
"text": "import sys\nimport lexer\n\nsys.setrecursionlimit(10 ** 6)\n\n\nclass Parser:\n def __init__(self, string):\n self.lex = lexer.get_lexer(string)\n self.current = next(self.lex)\n self.last_error = None\n\n def expect(self, token_type):\n if self.current is None:\n self.last_error = f'Error! Expected {token_type}, but EndOfFile found'\n return False\n if self.current.type == token_type:\n self.current = next(self.lex)\n return True\n self.last_error = f'Error! Expected {token_type}' + \\\n f' in line {self.current.lineno}' + \\\n f' in pos {self.current.lexpos - lexer.line_start_pos + 1}'\n return False\n\n def definition(self):\n if self.current is None:\n self.last_error = None\n return False\n if not self.expect('IDENTIFIER'):\n return False\n if self.expect('END'):\n self.last_error = None\n return True\n if self.expect('DEFINITION') and \\\n self.disjunction() and \\\n self.expect('END'):\n self.last_error = None\n return True\n return False\n\n def disjunction(self):\n if not self.conjunction():\n return False\n if self.expect('OR'):\n if not self.disjunction():\n return False\n return True\n\n def conjunction(self):\n if not self.lowest_expr():\n return False\n if self.expect('AND'):\n if not self.conjunction():\n return False\n return True\n\n def lowest_expr(self):\n if self.expect('LPAREN'):\n if self.disjunction() and \\\n self.expect('RPAREN'):\n return True\n else:\n return False\n\n if self.expect('IDENTIFIER'):\n return True\n return False\n"
}
] | 12 |
Syvokobylenko/ProjectAutoHome | https://github.com/Syvokobylenko/ProjectAutoHome | 3605c3aad981d43691d20bd4688cbd91496e6701 | e4ccef7cf14b6d73fbf8089cda86b594e1b3f854 | 8ec4585d733678977683f146123ef8d4b946b311 | refs/heads/master | 2022-01-01T15:15:38.418095 | 2021-12-22T21:09:09 | 2021-12-22T21:09:09 | 243,077,376 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.48155736923217773,
"alphanum_fraction": 0.48770493268966675,
"avg_line_length": 27.705883026123047,
"blob_id": "cd3786a1fc81e7bcdc3860b7a49016e76463e38c",
"content_id": "76585cc5a8c5653f2f2d3f59b1d718082db7c99e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 17,
"path": "/gpio.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "class switchObject():\n def __init__(self, channel):\n import machine\n self.pin = machine.Pin(channel, machine.Pin.OUT)\n self.state = \"1\"\n self.switch()\n def switch(self):\n if bool(int(self.state)):\n print(\"Turning OFF\")\n self.pin.on()\n self.state = \"0\"\n return self.state\n else:\n print(\"Turning ON\")\n self.pin.off()\n self.state = \"1\"\n return self.state\n"
},
{
"alpha_fraction": 0.5970149040222168,
"alphanum_fraction": 0.5970149040222168,
"avg_line_length": 29.225807189941406,
"blob_id": "66bbf17e35b84643c21af70f1fccfb22741b4161",
"content_id": "8f4367fe575232592e62db13e568d31c27a0c76d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 31,
"path": "/TCP_socket_object.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "class createConnection():\n def __init__(self):\n import socket\n self.socket = socket.socket()\n\n def startServer(self,port,max_con):\n self.port = port\n self.socket.bind(('',self.port))\n self.socket.listen(max_con)\n\n def client(self,IP,port):\n self.IP = IP\n self.port = port\n self.connection = self.socket\n self.connection.connect((self.IP, self.port))\n \n def send(self,message,connection=None):\n if connection is None:\n connection = self.connection\n connection.send(message.encode())\n\n def recieve(self,timeoutms,maxlenght,connection=None):\n if connection is None:\n connection = self.connection\n connection.settimeout(timeoutms)\n try:\n msg = connection.recv(maxlenght).decode()\n except TimeoutError:\n msg = False\n connection.settimeout(None)\n return msg\n\n"
},
{
"alpha_fraction": 0.6691729426383972,
"alphanum_fraction": 0.677318274974823,
"avg_line_length": 21.478872299194336,
"blob_id": "29c1848794bed4e97f0252c55eec2382583459ea",
"content_id": "040c2be7b8f2cc02e6168d613b439f6da6c04166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1596,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 71,
"path": "/esp8266.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "import socket, machine\n\ndef do_connect(ESSID,password):\n import network\n network.WLAN(network.AP_IF).active(False)\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n print(\"connecting to network...\")\n sta_if.active(True)\n sta_if.connect(ESSID, password)\n while not sta_if.isconnected():\n pass\t\n print(\"network config:\", sta_if.ifconfig())\n\ndef credentialsRead(filename):\n file = open(filename,\"r\")\n f = file.read()\n f = f.split(\"\\n\")\n credentials = []\n for line in f:\n credentials.append(line)\n file.close()\n return credentials\n\ndo_connect(*credentialsRead(\"wifi.ini\"))\n\nclass switchObject():\n def __init__(self, channel):\n self.pin = machine.Pin(channel, machine.Pin.OUT)\n self.state = \"1\"\n self.switch()\n def switch(self):\n if bool(int(self.state)):\n print(\"Turning OFF\")\n self.pin.on()\n self.state = \"0\"\n return self.state\n else:\n print(\"Turning ON\")\n self.pin.off()\n self.state = \"1\"\n return self.state\n\nclass socketConnection():\n def __init__(self, port):\n self.server = socket.socket()\n self.server.bind((\"\", port))\n self.server.listen(1)\n def acceptCon(self):\n return self.server.accept()\n\nserver_instance = socketConnection(2198)\nGPIO0Handler = switchObject(0)\n\nwhile True:\n data, addr = server_instance.acceptCon()\n print (\"Got connection from\" + str(addr))\n data.settimeout(5)\n while True:\n try:\n if not bool(int(data.recv(1).decode())):\n data.send(GPIO0Handler.switch())\n data.close()\n except(ValueError):\n print(\"Invalid Input\")\n data.close()\n break\n except(OSError):\n print(\"Timed Out\")\n data.close()\n break\n"
},
{
"alpha_fraction": 0.5453020334243774,
"alphanum_fraction": 0.5587248206138611,
"avg_line_length": 27.428571701049805,
"blob_id": "8fedf494c821e39146f82e84581dc3bb4c928440",
"content_id": "8c2ba293fa4aa52acf94176721522d89d2a42022",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 21,
"path": "/socket_server.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "from TCP_socket_object import createConnection\n\ndef node(con):\n while True:\n pc, pc_IP = con.socket.accept()\n print(\"New connection:\", pc_IP)\n while True:\n try:\n print(con.recieve(None,10,pc))\n except ConnectionResetError:\n print(\"Connection lost:\", pc_IP)\n break\n\nif __name__ == \"__main__\":\n con = createConnection()\n con.startServer(2198,5)\n import threading\n for x in range(5):\n thread = threading.Thread(target=node, args=(con,))\n thread.daemon = False\n thread.start()"
},
{
"alpha_fraction": 0.6716867685317993,
"alphanum_fraction": 0.6957831382751465,
"avg_line_length": 26.66666603088379,
"blob_id": "ed36187007848c3639025385f486e6509cd80416",
"content_id": "63579ae4c36cc716b5a2555789a1c2851cbd0d50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 12,
"path": "/boot.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "import read_file, TCP_socket_object, wifi_connect, machine, time\n\nipconfig = wifi_connect.do_connect(*read_file.credentialsRead(\"wifi.ini\"))\ncon = TCP_socket_object.createConnection()\ncon.client(ipconfig[3],2198)\n\npin = machine.Pin(0, machine.Pin.IN)\n\nwhile True:\n if not pin.value():\n con.send('1')\n time.sleep(2)\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.5925925970077515,
"avg_line_length": 23,
"blob_id": "3a0f25cf646fe0c97401463aa5f0838747e20457",
"content_id": "cf9a562b06e68b785a763d61ab739e4b2638102c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/read_file.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "def credentialsRead(filename):\n file = open(filename,\"r\")\n f = file.read()\n f = f.split(\"\\n\")\n credentials = []\n for line in f:\n credentials.append(line)\n file.close()\n return credentials\n"
},
{
"alpha_fraction": 0.6747663617134094,
"alphanum_fraction": 0.6915887594223022,
"avg_line_length": 23.31818199157715,
"blob_id": "10b133326178d34fd096a3eab83224c900f04880",
"content_id": "da86bfdfb1d8bec6506eb0f72e08e532c126b31b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 22,
"path": "/esp8266 (1).py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "import gpio, read_file, TCP_socket_object, wifi_connect\nwifi_connect.do_connect(*read_file.credentialsRead(\"wifi.ini\"))\n\nGPIO0Handler = gpio.switchObject(0)\n\nwhile True:\n data, addr = TCP_socket_object.server(2198)\n print (\"Got connection from\" + str(addr))\n data.settimeout(5)\n while True:\n try:\n if not bool(int(data.recv(1).decode())):\n data.send(GPIO0Handler.switch())\n data.close()\n except(ValueError):\n print(\"Invalid Input\")\n data.close()\n break\n except(OSError):\n print(\"Timed Out\")\n data.close()\n break\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6767857074737549,
"avg_line_length": 18.310344696044922,
"blob_id": "fb390ffdb4683d47bd668e7641c08d7d1f88df1f",
"content_id": "9e13e9511fdf33eda1c4757af207556390916c4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 29,
"path": "/inputsocket.py",
"repo_name": "Syvokobylenko/ProjectAutoHome",
"src_encoding": "UTF-8",
"text": "import socket, time\n\nclass connection:\n def __init__(self, IP, port):\n self.IP = IP\n self.port = port\n self.startConnection()\n\n def startConnection(self):\n self.s = socket.socket()\n try:\n self.s.connect((self.IP, self.port))\n except(KeyboardInterrupt):\n exit\n\n def sendData(self, state):\n try:\n self.s.send(str(state))\n except(KeyboardInterrupt):\n exit\n\n\n\nwhile True:\n state = input(\"Type 0 to use switch: \")\n client_soc = connection(\"192.168.0.39\", 2198)\n client_soc.sendData(state)\n print(client_soc.s.recv(1).decode())\n client_soc.s.close()\n"
}
] | 8 |
rizwann/handwritten-digit-recognition | https://github.com/rizwann/handwritten-digit-recognition | 6144103aabe91308fffb79180458ba8db2899210 | c601c58f2418847f7404832eb1968563a61bb935 | 58318df9c5b8bf9a7adec5ca9a1ae4a954881266 | refs/heads/main | 2023-05-30T18:01:34.192961 | 2021-06-17T18:13:45 | 2021-06-17T18:13:45 | 375,514,243 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6812251806259155,
"alphanum_fraction": 0.7107203602790833,
"avg_line_length": 31.07272720336914,
"blob_id": "a7022ce621fa47dd31c8acae86e5218c43b52d2c",
"content_id": "c025f23919075c7f59a0f9474799d745f98372c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1819,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 55,
"path": "/GUI-digit-recognizer.py",
"repo_name": "rizwann/handwritten-digit-recognition",
"src_encoding": "UTF-8",
"text": "from keras.models import load_model\nfrom tkinter import *\nimport tkinter as tk\nimport win32gui\nfrom PIL import ImageGrab, Image\nimport numpy as np\n\n\n\nmodel = load_model(‘mnist.h5’)\n\n\ndef predict_digit(img):\n#resize image to 28x28 pixels\nimg = img.resize((28,28))\n#convert rgb to grayscale\nimg = img.convert(‘L’)\nimg = np.array(img)\n#reshaping to support our model input and normalizing\nimg = img.reshape(1,28,28,1)\nimg = img/255.0\n#predicting the class\nres = model.predict([img])[0]\nreturn np.argmax(res), max(res)\nclass App(tk.Tk):\ndef __init__(self):\ntk.Tk.__init__(self)\nself.x = self.y = 0\n# Creating elements\nself.canvas = tk.Canvas(self, width=300, height=300, bg = “white”, cursor=”cross”)\nself.label = tk.Label(self, text=”Thinking..”, font=(“Helvetica”, 48))\nself.classify_btn = tk.Button(self, text = “Recognise”, command = self.classify_handwriting)\nself.button_clear = tk.Button(self, text = “Clear”, command = self.clear_all)\n# Grid structure\nself.canvas.grid(row=0, column=0, pady=2, sticky=W, )\nself.label.grid(row=0, column=1,pady=2, padx=2)\nself.classify_btn.grid(row=1, column=1, pady=2, padx=2)\nself.button_clear.grid(row=1, column=0, pady=2)\n#self.canvas.bind(“<Motion>”, self.start_pos)\nself.canvas.bind(“<B1-Motion>”, self.draw_lines)\ndef clear_all(self):\nself.canvas.delete(“all”)\ndef classify_handwriting(self):\nHWND = self.canvas.winfo_id() # get the handle of the canvas\nrect = win32gui.GetWindowRect(HWND) # get the coordinate of the canvas\nim = ImageGrab.grab(rect)\ndigit, acc = predict_digit(im)\nself.label.configure(text= str(digit)+’, ‘+ str(int(acc*100))+’%’)\ndef draw_lines(self, event):\nself.x = event.x\nself.y = event.y\nr=8\nself.canvas.create_oval(self.x-r, self.y-r, self.x + r, self.y + r, fill=’black’)\napp = App()\nmainloop()"
},
{
"alpha_fraction": 0.7819548845291138,
"alphanum_fraction": 0.7819548845291138,
"avg_line_length": 43,
"blob_id": "669452ec8c5db8002fd2f8ce45816c148a2414b6",
"content_id": "b43e61ae0c1d13a0e08f4ae0e03571a1b25c2a2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 3,
"path": "/README.md",
"repo_name": "rizwann/handwritten-digit-recognition",
"src_encoding": "UTF-8",
"text": "\n# Deep Learning Project — Handwritten Digit Recognition using Python\n\n(N.B This is a running project for one of my masters courses)\n"
}
] | 2 |
yokolet/DeepLearning | https://github.com/yokolet/DeepLearning | 4c56436115e1e80c301dc0fce2db03e507966448 | d8a914a1e1a345641d940429d53e5f761115961e | 716d73470ccf7e5b9dabf506161b59a308e843b9 | refs/heads/master | 2022-11-29T15:13:03.724890 | 2019-09-24T17:48:22 | 2019-09-24T17:48:26 | 152,685,247 | 0 | 0 | null | 2018-10-12T02:50:31 | 2019-09-24T17:49:45 | 2022-11-22T01:37:01 | Jupyter Notebook | [
{
"alpha_fraction": 0.7579505443572998,
"alphanum_fraction": 0.7591283917427063,
"avg_line_length": 43.68421173095703,
"blob_id": "e2660c09e7a56dd365cdd8d5853b2be7f3354cef",
"content_id": "558a8f0a94d708971e75d64aa01941150bc523aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1698,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 38,
"path": "/README.md",
"repo_name": "yokolet/DeepLearning",
"src_encoding": "UTF-8",
"text": "# Deep Learning\n### Deep Learning Projects\n\n These are collections of my Deep Learning study. Most of them are Udaicy projects\n I worked on. Those projects were submitted and passed all required criteria.\n\n- [dcgan](https://github.com/yokolet/DeepLearning/blob/master/dcgan)\n\n Deep Convolutional Adversarial Networks (DCGAN) by PyTorch is here. The dataset is\n the Street View House Numbers Dataset (SVHN). Python notebook and its PDF are in the\n directory.\n\n- [Jane](https://github.com/yokolet/DeepLearning/blob/master/Jane)\n\n NLP, \"Becoming Jane by Deep Learning\" is here. This is my old attempt to generate\n Jane Austen-like sentences by training Recurrent Neural Networks (RNN).\n\n- [cnn](https://github.com/yokolet/DeepLearning/blob/master/cnn)\n\n Image clasification by Convolutional Neural Networks (CNN) project. This is one of\n Udacity Deep Learning project. Using Resnet50 for a transfer learning, Dog breeds were\n detected by CNN.\n\n- [rnn](https://github.com/yokolet/DeepLearning/blob/master/rnn)\n\n NLP by Recurrent Neural Networks (RNN) project. This is also one of Udacity Deep\n Learning project. Dataset is Simpsons' TV script. RNN model was trained using TV\n script, then, TV script was generated.\n\n- [dcgans-face](https://github.com/yokolet/DeepLearning/tree/master/dcgans-face)\n\n Deep Convolutional Adversarial Networks (DCGANs) project. This is another Udacity\n Deep Learning project. Using celeb's faces, faces were generated by GANs.\n\n- [deep-rl](https://github.com/yokolet/DeepLearning/blob/master/deep-rl)\n\n Deep Reinforcement Learning project to fly the quadcopter. This is the Udacity\n Deep Learning project like three of above.\n"
},
{
"alpha_fraction": 0.7801932096481323,
"alphanum_fraction": 0.7801932096481323,
"avg_line_length": 33.5,
"blob_id": "cfdc9fae6a6f241e504e1b005d92a9799d7ff7b1",
"content_id": "4bb34c1a3d114dcfd85406d229fb0f5c3217efb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 12,
"path": "/nlp/README.md",
"repo_name": "yokolet/DeepLearning",
"src_encoding": "UTF-8",
"text": "# NLP\n\n#### Sentiment Analysis\n\nThe notebook, Sentiment Analysis, is a kernel for Kaggle competition,\n[Quora Insincere Questions Classification](https://www.kaggle.com/c/quora-insincere-questions-classification).\n\nThe kernel uses RNN as a model.\n\n#### Notebook at Kaggle site\n\n[https://www.kaggle.com/yokolet/quora-sentiment-analysis-by-pytorch](https://www.kaggle.com/yokolet/quora-sentiment-analysis-by-pytorch)\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7843137383460999,
"avg_line_length": 42.57143020629883,
"blob_id": "eadf435d59c0a6843f4274086b5a8dab37e99a2d",
"content_id": "30ddc2910140b632f614b934c428249f42d07fd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 306,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 7,
"path": "/mnist/README.md",
"repo_name": "yokolet/DeepLearning",
"src_encoding": "UTF-8",
"text": "# MNIST\n\nThis is a work on the Kaggle MNIST comptition, [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer).\nThe submission made 0.99871 of accuracy.\n\nThis is a mixture of PyTorch and FastAI. The data loading and model definition are done by PyTorch.\nTraining is performed using FastAI library.\n\n"
},
{
"alpha_fraction": 0.5498303174972534,
"alphanum_fraction": 0.5674174427986145,
"avg_line_length": 38.06024169921875,
"blob_id": "bdab6d062ac3036ca1f38129bbccc5d01003ffea",
"content_id": "a9afa0be61108e69bf1bfa78fb857adfba974d26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3241,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 83,
"path": "/deep-rl/task.py",
"repo_name": "yokolet/DeepLearning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\nfrom physics_sim import PhysicsSim\n\nclass Task():\n \"\"\"Task (environment) that defines the goal and provides feedback to the agent.\"\"\"\n def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n \"\"\"Initialize a Task object.\n Params\n ======\n init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles\n init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions\n init_angle_velocities: initial radians/second for each of the three Euler angles\n runtime: time limit for each episode\n target_pos: target/goal (x,y,z) position for the agent\n \"\"\"\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.]) \n\n # For distance normalization\n try:\n self.norm=[1.0 if (target_pos[i] - init_pose[i]) == 0 else \\\n np.linalg.norm([init_pose[i], target_pos[i]]) for i in range(3)]\n self.norm_target = self.target_pos / self.norm\n except TypeError:\n self.norm = [1.0, 1.0, 1.0]\n\n def get_reward(self):\n \"\"\"Uses current pose of sim to return reward.\"\"\"\n reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n return reward\n\n def get_reward_2(self):\n \"\"\"Uses current pose and velocity of sim to return reward.\"\"\"\n def dist_reward(xyz):\n normalized = xyz / self.norm\n # normalized diff of x, y, and z\n diff = abs(normalized - self.norm_target)\n # focuses on the height only\n if (diff < 0.03).all():\n # close enough height to the target\n return 1.0\n av_diff = sum(diff) / 3.0\n return max(1 - av_diff**0.4, -1.0)\n def velocity_reward_exp(zv):\n return 0.01 * np.exp(zv)\n\n # crash\n if self.sim.pose[2] < 0:\n return -1.0 \n reward = dist_reward(self.sim.pose[:3])\n if self.sim.v[2] > 0:\n reward += velocity_reward_exp(self.sim.v[2])\n if self.sim.v[2] < 0:\n reward -= 0.1\n return min(reward, 1.0)\n\n def step(self, rotor_speeds, opt=False):\n \"\"\"Uses action to obtain next state, reward, done.\"\"\"\n reward = 0\n pose_all = []\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward() if not opt else self.get_reward_2()\n pose_all.append(self.sim.pose)\n next_state = np.concatenate(pose_all)\n return next_state, reward, done\n\n def reset(self):\n \"\"\"Reset the sim to start a new episode.\"\"\"\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state"
},
{
"alpha_fraction": 0.7346938848495483,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 28.399999618530273,
"blob_id": "6e93d3c6edc8d22fcfd5946227b9a1a30d2eaf7a",
"content_id": "6f12645d391a94807d22e03908dec4bece5aee67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1860,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 60,
"path": "/Jane/README.md",
"repo_name": "yokolet/DeepLearning",
"src_encoding": "UTF-8",
"text": "# Becoming Jane by Deep Learning\n\nThis repositry holds files for the Machine Learning Capstone Project.\nThe project experiments RNN using Tensorflow and generates a sequence of\nwords after training. The training is done by Jane Austen's novels.\nFor training and validation, Pride and Prejudice, Sense and\nSensibility, and Emma are used. For test, the first part of Persuation\nwill be used. All data are from <http://www.fullbooks.com/>.\n\nThe project report is [Report.pdf](Report.pdf).\n\n### TensorFlow\n\nTensorFlow must be installed prior to run the Python notebook. For this\nporject, Tensorflow was installed using Anaconda's pip. As of October\n2016, Anaconda installs Tensorflow version 0.9. The code in notebook\nworks on this version.\n\nTo use TensorFlow, its environment should be enabled before hitting\njupyter command.\n\n```bash\nsource activate tensorflow\n```\n\nSome libraries must be installed after TensorFlow enviroment starts.\nEven though Anaconda has those already, there may be a case the\nlibraries need to be installed again.\n\n\n### Files\n\nThis repository has the files below:\n\n```\n.\n├── Jane-RNN.html\n├── Jane-RNN.ipynb\n├── Jane-Sampling.html\n├── Jane-Sampling.ipynb\n├── README.md\n├── Report.pdf\n├── data\n│ ├── test.txt\n│ ├── train.txt\n│ └── valid.txt\n└── saved <--- not pushed to the repo\n ├── checkpoint\n ├── model.ckpt\n ├── model.ckpt.meta\n```\n\n- README.me : this file\n- Report.pdf : project report\n- Jane-RNN.html : HTML export of Jane-RNN notebook\n- Jane-RNN.ipynb : Python notebook for training\n- Jane-Sampling.html : HTML export of Jane-Sampling notebook\n- Jane-Sampling.ipynb : Python notebook for sampling\n- data : input data directory\n- saved : not pushed to the repository, automatically created once the training runs\n"
}
] | 5 |
Zer0-/brick_press | https://github.com/Zer0-/brick_press | 650bb5887084fce68b2a70656d761f630a265ede | 6ecb15e5c2855f22df9989babeca73eaf07d0158 | f780f117f6f1765487dd386ce5310f2dd863d6d1 | refs/heads/master | 2021-01-23T15:51:35.294475 | 2014-12-01T07:27:42 | 2014-12-01T07:33:06 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6132723093032837,
"alphanum_fraction": 0.6201372742652893,
"avg_line_length": 17.20833396911621,
"blob_id": "55a7ec5684a4625e12efc03ded9019ac71968ab2",
"content_id": "b82804a81c4d4acd4deb5ad1663e883835ba13ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/setup.py",
"repo_name": "Zer0-/brick_press",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nrequires = [\n 'bricks',\n 'scss',\n 'csscompressor',\n 'CoffeeScript',\n]\n\nlinks = [\n 'git+https://github.com/Zer0-/bricks.git#egg=bricks',\n]\n\nsetup(\n name='brick_press',\n version='0.0',\n description='Static Asset Builder',\n author='Philipp Volguine',\n author_email='[email protected]',\n packages=['src'],\n include_package_data=True,\n install_requires=requires,\n dependency_links=links,\n)\n"
}
] | 1 |
daniel-lennart/data-portfolio | https://github.com/daniel-lennart/data-portfolio | 2b5e7bf301af17d7cfbdc5bbd2e10a272a30557f | 1099d3783257ae397e026a4b9af24eb2303c9e08 | 85d3482e8ca48a890c993e2770a3f234b7156f82 | refs/heads/master | 2021-09-28T07:23:15.997710 | 2018-11-15T10:53:49 | 2018-11-15T10:53:49 | 104,191,375 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6727457046508789,
"avg_line_length": 26.41666603088379,
"blob_id": "eaecb2f294e02d1154a2a414e4c5a4f732d1520b",
"content_id": "108edad06c34bb1bb3ccf5729d4866f78c6d5d36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 987,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 36,
"path": "/docker/flask-mysql-example/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Flask App container with MySQL Database\n\n## Quick Start\n\n 1. Run `docker-compose up`\n 2. Visit `http://localhost:5000` in your browser.\n\n## Description\n\n[Flask](http://flask.pocoo.org/) is a web development microframework for Python.\n\nDocker compose will create two containers linked together, one for Flask and one for MySQL\n\n## Persisting Data\n\nThis setup will persist data on a separate `/var/lib/mysql` volume.\n\nThis is setup in `docker-compose.yml` file:\n\n services:\n ...\n db:\n ...\n volumes:\n - /var/lib/mysql\n\nYou can also mount a directory on the host as a volume using the syntax `[host-path]:[container-path]`, so if you want to mount a `data` dir in the project folder as `/var/lib/mysql`, it can be specified like follows:\n\n services:\n ...\n db:\n ...\n volumes:\n - ./database:/var/lib/mysql\n\ncontainers are immutable so after changing any settings, containers need to be stopped, removed and recreated\n"
},
{
"alpha_fraction": 0.6178234815597534,
"alphanum_fraction": 0.6289631724357605,
"avg_line_length": 21.882352828979492,
"blob_id": "2ee7a71d0e1d78738d43d548f2f00e921a0e6238",
"content_id": "16dba531cf17f2d8e9835a8bb58e1c382d9d916d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1167,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 51,
"path": "/terraform/scripts/setup_terraform.sh",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nset -e\n\n\n# Add color\nRED=$(tput setaf 1)\nGREEN=$(tput setaf 2)\nYELLOW=$(tput setaf 3)\nNC=$(tput sgr0)\n\n# Pre-install checks\n\nOS_TYPE=$(uname)\nTERRAFORM_VERSION=\"0.9.11\"\n\n# Clean directory if needed\necho \"${YELLOW}Running pre-install checks${NC}\"\n\nif [ ! -d 'bin' ] ; then\n mkdir -p bin\nfi\n\ncase $OS_TYPE in\n Linux )\n TERRAFORM_FILE=terraform_${TERRAFORM_VERSION}_linux_amd64.zip\n ;;\n Darwin )\n TERRAFORM_FILE=terraform_${TERRAFORM_VERSION}_darwin_amd64.zip\n ;;\n * )\n echo \"${RED}ERROR - OS not supported ${OS_TYPE} ${NC}\"\n exit 1\n ;;\nesac\n\necho \"${GREEN}Pre-install checks passed${NC}\"\n\n# Download and unpack if doesn't exist\n\nif [ ! -f bin/${TERRAFORM_FILE} ] || [ ! -f bin/terraform ] ; then\n cd bin\n rm -f ${TERRAFORM_FILE} packer\n echo \"${GREEN}Cleaning old installation ${NC}\"\n\n wget https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/${TERRAFORM_FILE}\n echo \"${GREEN}Terraform file downloaded${NC}\"\n unzip ${TERRAFORM_FILE}\n echo \"${GREEN}Terraform file uncompressed in $(pwd) ${NC}\"\nelse\n echo \"${GREEN}Terraform already installed in $(pwd) ${NC}\"\nfi\n"
},
{
"alpha_fraction": 0.6265884637832642,
"alphanum_fraction": 0.6378298997879028,
"avg_line_length": 33.6779670715332,
"blob_id": "3c0cedfa869b50445578f7498bbd1e16557f9b2e",
"content_id": "f5995f42025d7e6063dc2a6f489af0d00284eca0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6138,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 177,
"path": "/r/movie-ratings/code.R",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "######################################################\n# Programming languages for data engineering\n# R programming course assignment\n# Author: Daniel Lennart\n# Dataset: Movie ratings, genres and budgets 2007-2011\n######################################################\nlibrary(ggplot2)\n\n# Load dataset into data frame\nmovies<- read.csv(\"Movie-Ratings.csv\")\n# rename columns to get rid of special characters\ncolnames(movies)<-c(\"Film\", \"Genre\", \"CriticRating\", \"AudienceRating\",\n \"BudgetMillions\", \"Year\")\nfactor(movies$Genre)\n\n# check stucture of the data (6 variables:562 objects loaded)\nstr(movies)\n\n# Basic statistical measures (means, medians, std deviation)\n# str() will return min, max, mean, median for numeric data\n# and aggregated categorical data.\nsummary(movies)\n\n# standard deviations for numerical data\nsd_cr <- sd(movies$CriticRating)\nsd_ar <- sd(movies$AudienceRating)\nsd_bud <- sd(movies$BudgetMillions)\n# display all standard deviations\nsd_cr\nsd_ar\nsd_bud\n\n# Plot relationships in the data (one or more)\n\n# add common theme for all graphs\nplot_theme <- theme(axis.title.x = element_text(colour = \"DarkGreen\", size = 20),\n axis.title.y = element_text(colour = \"Red\", size = 20),\n axis.text.x = element_text(size=10),\n axis.text.y = element_text(size=10),\n\n plot.title = element_text(colour = \"DarkBlue\", size=25,\n family = \"Courier\"))\n\n # Critics rating v Audience rating\n crit_v_aud <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating,\n colour=Genre))\n crit_v_aud + geom_point(aes(size=BudgetMillions)) +\n facet_grid(Genre~Year) +\n geom_smooth() +\n plot_theme +\n xlab(\"Critics rating\") +\n ylab(\"Audience rating\") +\n ggtitle(\"Audience v Critics rating per year and genre\") +\n coord_cartesian(ylim = c(0,100))\n\n\n ## Audience v Budget\n aud_v_bud <- ggplot(data=movies, aes(x=BudgetMillions, y=AudienceRating,\n colour=Genre, size=BudgetMillions))\n aud_v_bud + geom_point(aes(x=BudgetMillions)) +\n xlab(\"Budget in millions\") +\n ylab(\"Audience rating\") +\n ggtitle(\"Audience rating v Budget\") +\n plot_theme\n\n\n ## Critics v Budget\n aud_v_bud <- ggplot(data=movies, aes(x=BudgetMillions, y=CriticRating,\n colour=Genre, size=BudgetMillions))\n aud_v_bud + geom_point(aes(x=BudgetMillions)) +\n xlab(\"Budget in millions\") +\n ylab(\"Critic rating\") +\n ggtitle(\"Critics rating v Budget\") +\n plot_theme\n\n ## Boxplots\n # Audience\n aud_plot <- ggplot(data=movies, aes(x=Genre, y=AudienceRating,\n colour=Genre))\n aud_plot + geom_jitter() + geom_boxplot(size=1.2, alpha=0.5) +\n plot_theme +\n xlab(\"Genres\") +\n ylab(\"Audience rating\") +\n ggtitle(\"Audience rating per genre\")\n\n # Critics\n crit_plot <- ggplot(data=movies, aes(x=Genre, y=CriticRating,\n colour=Genre))\n crit_plot + geom_jitter() + geom_boxplot(size=1.2, alpha=0.5) +\n plot_theme +\n xlab(\"Genres\") +\n ylab(\"Critics rating\") +\n ggtitle(\"Critics rating per genre\")\n\n ## Histogram\n o <- ggplot(data=movies, aes(x=BudgetMillions))\n h <- o + geom_histogram(binwidth = 10, aes(fill=Genre), colour=\"Black\")\n h + xlab(\"Millions\") +\n ylab(\"Number of movies\") +\n ggtitle(\"Movie Budget distribution\") +\n plot_theme\n\n\n# Investigate if data has normal distribution\n ## Audience rating distribution\n rating <- ggplot(data=movies, aes(x=AudienceRating))\n rating + geom_histogram(binwidth = 10,\n fill=\"White\", colour=\"Blue\") + # Norm distribution\n plot_theme +\n xlab(\"Audience rating\") +\n ylab(\"Count\") +\n 
ggtitle(\"Audience rating distribution\")\n\n # Density plots\n rating + geom_density(aes(fill=AudienceRating), position=\"stack\")\n qqnorm(movies$AudienceRating);qqline(movies$AudienceRating, col=2)\n\n ## critics rating distribution\n rating + geom_histogram(binwidth = 10,\n aes(x=CriticRating), # ovveride aesthetics\n fill=\"White\", colour=\"Blue\") + # uniform critics rely on rules\n plot_theme +\n xlab(\"Critics rating\") +\n ylab(\"Count\") +\n ggtitle(\"Critics rating distribution\")\n\n# Compare with generated normal distribution\n norm_dist <- rnorm(n=562, sd=16.8, mean=58.83)\n uni_dist <- runif(n=562, min=0, max=97)\n # Plot norm dist for comparison\n # name columns while assigning\n generated_df <- data.frame(gen_audience=norm_dist, gen_critics=uni_dist)\n norm_plot <- ggplot(data=generated_df, aes(x=gen_audience))\n norm_plot + geom_histogram(binwidth = 10,\n fill=\"White\", colour=\"Blue\") + # Norm distribution\n plot_theme +\n geom_density(aes(fill=gen_audience), position=\"stack\") +\n xlab(\"Generated Audience rating\") +\n ylab(\"Count\") +\n ggtitle(\"Generated Audience rating distribution\")\n\n # Density plots\n norm_plot + geom_density(aes(fill=gen_audience), position=\"stack\")\n qqnorm(norm_dist);qqline(norm_dist, col=2)\n\n\n uni_plot <- ggplot(data=generated_df, aes(x=gen_critics))\n uni_plot + geom_histogram(binwidth = 10,\n fill=\"White\", colour=\"Blue\") + # Norm distribution\n plot_theme +\n xlab(\"Generated Critics rating\") +\n ylab(\"Count\") +\n ggtitle(\"Generated Critics rating distribution\")\n\n\n# Explore with linear regression Present result of linear regression graphically\n pairs(movies)\n model <- lm(AudienceRating~CriticRating, data=movies)\n ac <- ggplot(data=movies, aes(x=AudienceRating, y=CriticRating))\n ac + geom_point()+\n geom_smooth(method='lm')\n summary(model)\n confint(model)\n anova(model)\n\n model2 <- lm(AudienceRating~BudgetMillions, data=movies)\n ab <- ggplot(data=movies, aes(x=AudienceRating, y=BudgetMillions))\n ab + geom_point()+\n geom_smooth(method='lm')\n summary(model2)\n anova(model2)\n model3 <- lm(CriticRating~BudgetMillions, data=movies)\n cb <- ggplot(data=movies, aes(x=CriticRating, y=BudgetMillions))\n cb + geom_point()+\n geom_smooth(method='lm')\n summary(model3)\n anova(model3)\n"
},
{
"alpha_fraction": 0.7692555785179138,
"alphanum_fraction": 0.7692555785179138,
"avg_line_length": 47.484375,
"blob_id": "94f6cee1e99e7beb4241522b613c533715b363cd",
"content_id": "0c07bbfb45dd81fc1e9a70d31be6015d914eed5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3103,
"license_type": "no_license",
"max_line_length": 299,
"num_lines": 64,
"path": "/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Daniel Lennart\n## DevOps and Data Science portfolio\nThis repository contains a collection of Data Science, Data Engineering and DevOps projects completed by me for self learning, academic, hobby and work purposes. It is presented in a form of Python notebooks and various cheatsheets.\n\nFor more information about what I do see\n\n| Media | Link |\n| ------ | ----- |\n| Website | <https://dataping.io> |\n| LinkedIn | <https://www.linkedin.com/in/daniel-lennart> |\n\n# Contents\n - [Ansible](#ansible)\n - [Docker](#docker)\n - [Vagrant](#vagrant)\n - [Terraform](#terraform)\n - [R](#r)\n - [Data](#data)\n\n## <a name=\"ansible\"></a>Ansible\n\n\nAnsible is software that automates software provisioning, configuration management, and application deployment.\n\n* [Cheatsheet](../master/ansible/cheatsheet.md)\n* [User provisioning](../master/vagrant/provisioning/base_config.yml)\n* [Install Docker](../master/vagrant/provisioning/docker.yml)\n* [Create and start docker containers](../master/vagrant/provisioning/docker-containers.yml)\n\n## Docker\n\n\nDocker is an open platform for developers and sysadmins to build, ship, and run distributed applications, whether on laptops, data center VMs, or the cloud.\n\n* [Flask + MySQL example](../master/docker/flask-mysql-example)\n\n## Vagrant\n\n\n\nVagrant is an open-source software product for building and maintaining portable virtual software development environments, e.g. for VirtualBox, Hyper-V, Docker, VMware, and AWS which try to simplify software configuration management of virtualizations in order to increase development productivity.\n* [Vagrant + Ansible + Docker example](../master/vagrant)\n\n## Terraform\n\n\nHashiCorp Terraform enables you to safely and predictably create, change, and improve infrastructure. It is an open source tool that codifies APIs into declarative configuration files that can be shared amongst team members, treated as code, edited, reviewed, and versioned.\n\n* [Deep learning environment provisioning](../master/terraform)\n\n## Data\n\n\nExamples of data projects\n* [Complete machine learning workflow to predict property prices](../master/property-project)\n* [Data challenge to predict movie profit ](../master/box-office-predictions)\n\n\n## R\n\n\nR is a programming language and free software environment for statistical computing and graphics that is supported by the R Foundation for Statistical Computing. The R language is widely used among statisticians and data miners for developing statistical software and data analysis.\n\n* [Exploring movie ratings data](../master/r/movie-ratings)\n"
},
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.7661290168762207,
"avg_line_length": 18.076923370361328,
"blob_id": "23c1e88c75e6c0f9a57433f43e2c16d40196defb",
"content_id": "d96c1e2338afdb21959334f3d0928681d944d3db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 13,
"path": "/docker/flask-mysql-example/www/Dockerfile",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Simple Flask container.\nFROM python:2.7\nLABEL maintainer=\"Daniel Lennart\"\n\n# Copy app files to the container\nCOPY app /opt/www\nWORKDIR /opt/www\n\n# Install dependencies for the project.\nRUN pip install -r requirements.txt\n\nEXPOSE 5000\nCMD python index.py\n"
},
{
"alpha_fraction": 0.7463942170143127,
"alphanum_fraction": 0.7475961446762085,
"avg_line_length": 19.292682647705078,
"blob_id": "78c37b8539fd5c768cbb70f08b178c0923fd1990",
"content_id": "6efadd947dddbb9c36141a87f9c63166d3bec315",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 41,
"path": "/vagrant/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Vagrant VM provisioning example\n## Description\nThis configuration example will do the following:\n\n* Create a centos 7 Vagrant virtual machine\n* Create admin user\n* Add SSH key for the user\n* Install Ansible, EPEL repo, development tools\n* Install default toolset defined in configuration file\n* Run Ansible playbooks to\n * Install Docker\n * Create and start Jenkins docker container\n* Backup docker data volumes on halting the VM\n\n## Usage\n```\nvagrant up\n```\nin vm directory to start the VM\n\n---\n```\nvagrant provision\n```\nin vm directory to reprovision existing VM after changing settings\n\n---\n```\nvagrant halt\n```\nin vm directory to backup data volumes and stop the VM\n\n---\n```\nvagrant destroy\n```\nin vm directory to destroy VM and data\n\n---\n\nUser, SSH key and toolset configuration is defined in [CFG.yml](provisioning/CFG.yml)\n"
},
{
"alpha_fraction": 0.7300275564193726,
"alphanum_fraction": 0.7300275564193726,
"avg_line_length": 13.917808532714844,
"blob_id": "53cc44dd5167973762de535b967f4aade297c40f",
"content_id": "f3f49fa15c3e9034a3cff42bdf2f9923d1b94a51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 73,
"path": "/terraform/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Terraform\n\nTerraform code to provision CPU or GPU AWS instances for [fast.ai deep learning course](fast.ai)\n\n## Getting Started\n\nThe structure of this Terraform repository loosely follows the [published Hashicorp best practices](https://github.com/hashicorp/best-practices).\n\nAlso note that for API access to AWS accounts, use API access credentials in a [following manner](https://www.terraform.io/docs/providers/aws/#environment-variables).\n\n# Usage\n\nTo install locally terraform from Hashicorp\n\nRun:\n```\nmake\n```\n\n# Maintenance\n\n- One directory per environment to manage\n\n# Using to build CPU instance\n\nPlan the run:\n```\nmake cpu_plan\n```\n\nApply the configuration:\n```\nmake cpu_apply\n```\n\nDestroy the entire environment:\n```\nmake cpu_destroy\n```\n\n# Building CPU instance\n\nPlan the run:\n```\nmake cpu_plan\n```\n\nApply the configuration:\n```\nmake cpu_apply\n```\n\nDestroy the entire environment:\n```\nmake cpu_destroy\n```\n\n# Building GPU instance\n\nPlan the run:\n```\nmake gpu_plan\n```\n\nApply the configuration:\n```\nmake gpu_apply\n```\n\nDestroy the entire environment:\n```\nmake gpu_destroy\n```\n"
},
{
"alpha_fraction": 0.6669341921806335,
"alphanum_fraction": 0.6789727210998535,
"avg_line_length": 30.149999618530273,
"blob_id": "c579d5f2447d311aebf4cad5076f2b89d9eccb79",
"content_id": "fe170ca16ec5bba7f002b3457bda7076d2ae40f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1246,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 40,
"path": "/docker/flask-mysql-example/www/app/index.py",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nfrom flask import Flask, abort, request, jsonify, g, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_httpauth import HTTPBasicAuth\nfrom passlib.apps import custom_app_context as pwd_context\nfrom itsdangerous import (TimedJSONWebSignatureSerializer\n as Serializer, BadSignature, SignatureExpired)\n\napp = Flask(__name__)\n\n# Configure MySQL connection.\ndb = SQLAlchemy()\ndb_uri = 'mysql://root:insertsecurepass@db/users'\napp.config['SECRET_KEY'] = 'some very secret app key'\napp.config['SQLALCHEMY_DATABASE_URI'] = db_uri\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\n\n# extensions\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(512))\n\[email protected](\"/\")\n result = False\n if db.session.query(\"1\").from_statement(\"SELECT 1\").all():\n\tresult = True\n if result:\n res = Markup('<span style=\"color: green;\">PASSED</span>')\n else:\n res = Markup('<span style=\"color: red;\">FAILED</span>')\n\nif __name__ == '__main__':\n db.create_all()\n app.run(host=\"0.0.0.0\", port=5000)\n"
},
{
"alpha_fraction": 0.7766990065574646,
"alphanum_fraction": 0.7794729471206665,
"avg_line_length": 48.72413635253906,
"blob_id": "d5f3ea4b1f70267e4ee9fea86534566afea476e7",
"content_id": "f0743212a952e21e3877035f5d3f0c17988cd7bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1442,
"license_type": "no_license",
"max_line_length": 321,
"num_lines": 29,
"path": "/property-project/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Machine learning workflow to predict property prices\nThis is my final project for MSc Data Engineering degree. Project fills the gap I feel exists in the academic course where few algorithms were covered in great depth down to mathematical formulas, but choosing best algorithms for the job was barely touched and machine learning workflow as a whole was not covered at all.\n\nThis project description is still work in progress as I need to convert a huge MSc dissertation into web readable format.\n\n## Description\nProject presents a full machine learning workflow, divided into following parts:\n* Exploratory analysis\n* Data cleaning\n* Feature engineering\n* Model training and performance checks\n* Final model deployment\n\n## Data\nData for the project was scrapped from [Zoopla](https://www.zoopla.co.uk/) via API they provide. Dataset only covers properties from biggest Scotland cities listed for sale on Zoopla in August 2016.\n\n## Implementation\n\nProject was written using Python and following libraries:\n* [Pandas](https://pandas.pydata.org/)\n* [Numpy](http://www.numpy.org/)\n* [scikit-learn](http://scikit-learn.org/stable/index.html)\n* [Seaborn](https://seaborn.pydata.org/)\n\nAlso I used [Jupyter notebooks](https://jupyter.org/) for the ML code and [Flask framework](http://flask.pocoo.org/) for final model deployment.\n\n## Project structure\n* [Data](../property-project/notebooks/data/)\n* [Notebooks](../property-project/notebooks/)\n"
},
{
"alpha_fraction": 0.6854881048202515,
"alphanum_fraction": 0.6997361183166504,
"avg_line_length": 38.47916793823242,
"blob_id": "e1e4c0d10f563fac366e49687f536e18101d07c6",
"content_id": "afd60f7f102c738c86a28bd4008ed6e05eca74c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Ruby",
"length_bytes": 1895,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 48,
"path": "/vagrant/Vagrantfile",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "required_plugins = %w( vagrant-triggers )\nrequired_plugins.each do |plugin|\n exec \"vagrant plugin install #{plugin};vagrant #{ARGV.join(\" \")}\" unless Vagrant.has_plugin? plugin || ARGV[0] == 'plugin'\nend\n\nVagrant.configure(\"2\") do |config|\n\n # General config\n config.vbguest.auto_update = true\n config.ssh.insert_key = false\n config.vm.box = \"centos/7\"\n config.vm.hostname = \"cloud-host-01\"\n\n # Hardware configuration\n config.vm.provider 'virtualbox' do |vb|\n vb.customize [\"modifyvm\", :id, \"--memory\", 3072]\n vb.customize [\"modifyvm\", :id, \"--cpus\", 2]\n end\n\n # Network\n config.vm.network :private_network, ip: '192.168.78.100'\n config.vm.network :forwarded_port, guest: 22, host: 2229, id: 'ssh'\n\n # Shared folders\n #config.vm.synced_folder '../../datasets', '/src/datasets', disabled: true, type: \"virtualbox\"\n config.vm.synced_folder \".\", \"/vagrant\", type: \"virtualbox\"\n\n # Provisioning\n config.vm.provision :shell, inline: \"yum install -y epel-release\"\n config.vm.provision :shell, inline: \"yum install -y dkms\"\n config.vm.provision :shell, inline: \"yum install -y make gcc\"\n config.vm.provision :shell, inline: \"yum install -y kernel-devel\"\n config.vm.provision :shell, inline: \"yum install -y ansible\"\n config.vm.provision :shell, inline: \"yum upgrade -y\"\n\n config.vm.provision :shell, inline: \"ansible-galaxy install geerlingguy.docker\"\n config.vm.provision :ansible_local, playbook: \"provisioning/docker.yml\"\n config.vm.provision :ansible_local, playbook: \"provisioning/base_config.yml\"\n config.vm.provision :ansible_local, playbook: \"provisioning/docker-containers.yml\", run: 'always'\n\n #-- Triggers on changing state ---------------#\n # Backup data volumes before vm is switched off\n config.trigger.before :halt do\n info \"Dumping the data volume before destroying the VM...\"\n run_remote \"bash /etc/init.d/backup_volumes.sh\"\n end\n\nend\n"
},
{
"alpha_fraction": 0.5241264700889587,
"alphanum_fraction": 0.5440931916236877,
"avg_line_length": 27.619047164916992,
"blob_id": "fadd5011fecde5df659e0e52e208639de0a82f70",
"content_id": "8c7bdbb11be005f666ef51ff1826ea7c23b7e824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 42,
"path": "/r/movie-ratings/movie-app/ui.R",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "#\n# This is the user-interface definition of a Shiny web application. You can\n# run the application by clicking 'Run App' above.\n#\n# Find out more about building applications with Shiny here:\n# \n# http://shiny.rstudio.com/\n#\n\nlibrary(shiny)\n\n# Define UI for application that draws a histogram\nshinyUI(fluidPage(\n \n # Application title\n titlePanel(\"Movie Ratings\"),\n # Sidebar with a slider input for number of bins \n sidebarLayout(\n sidebarPanel(\n checkboxGroupInput(\"Year\",\n \"Release years:\",\n c(2007,2008,2009,2010,2011),\n selected = 2007\n ),\n checkboxGroupInput(\"Genre\",\n \"Genres:\",\n c(\"Action\", \"Adventure\", \"Comedy\", \"Drama\", \"Horror\", \"Romance\", \"Thriller\"),\n selected = \"Action\"\n ),\n radioButtons(\"Ratings\",\n \"Ratings: \",\n choices = c(\"AudienceRating\", \"CriticRating\"),\n selected = \"CriticRating\")\n ),\n \n # Show a plot of the generated distribution\n mainPanel(\n plotOutput(\"mainPlot\"),\n tableOutput(\"results\")\n )\n )\n))\n"
},
{
"alpha_fraction": 0.5466212630271912,
"alphanum_fraction": 0.6045310497283936,
"avg_line_length": 20.69439697265625,
"blob_id": "a39ecdf77f9e867ade7eb40cec4908e285bef3ca",
"content_id": "11d43c60c35a27742f96f952b1320a3738f39736",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 25558,
"license_type": "no_license",
"max_line_length": 278,
"num_lines": 1178,
"path": "/box-office-predictions/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "\n# Box office predictions\n* Complete machine learning workflow. Exploratory analysis -> Data cleaning -> Feature engineering -> Model training\n* Interpretation of results\n\nProject is using predictive modelling on data gathered from IMDB up to 2014 to predict box office profits for movies released in 2015 and 2016.\n\n## Objectives\n* In general, is the profit of a movie correlated with its user score on IMDb?\n* How about its number of votes?\n* What do these correlations tell you?\n* Using the data from 2014 and earlier, can you predict the profit of movies released in 2015 and 2016?\n* Let's say that you were able to show movies pre-release to a representative focus group, which accurately anticipates the score of a movie (but not its overall popularity), can you improve your model?\n\n### Data\n* One table *box_office_predictions.csv* containing data on 6000 movies from IMDb, released in 2016 and earlier.\n\n#### Data dictionary\n* **budget** - Total cost of the film\n* **country** - country of release\n* **director** - Film director's name\n* **genre** - Primary genre category\n* **gross** - Total gross revenue\n* **name** - Name and year of the film\n* **rating** - MPAA rating of the film\n* **runtime** - Length of the film in mins\n* **score** - User score on IMDb\n* **star** - Lead actor of the film\n* **studio** - Studio that produced the film\n* **votes** - Number of user ratings on IMDb\n\n### Loading libraries\n\n\n```python\n# Python 3 compatibility\nfrom __future__ import print_function\n\n# NumPy for numerical computing\nimport numpy as np\n\n# Pandas for DataFrames\nimport pandas as pd\npd.set_option('display.max_columns', 20)\n\n# Matplotlib for visualization\nfrom matplotlib import pyplot as plt\n# display plots in the notebook instead of popup\n\n# Seaborn for nicer plots\nimport seaborn as sns\n\n# Cleaner output\nimport warnings\nwarnings.filterwarnings('ignore')\n```\n\n### Loading data\n\n\n```python\ndf = pd.read_csv('../data/box_office_predictions.csv')\n```\n\n## Exploratory data analysis\n### Basic checks on the dataset\n* Shape\n* Data types\n* Categorical variables\n* Numeric distributions\n* Dataset statistics\n* Statistics including categorical features\n* First rows of the dataset\n\n\n\n```python\ndf.head()\n```\n\n\n\n\n<div>\n\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>budget</th>\n <th>country</th>\n <th>director</th>\n <th>genre</th>\n <th>gross</th>\n <th>name</th>\n <th>rating</th>\n <th>runtime</th>\n <th>score</th>\n <th>star</th>\n <th>studio</th>\n <th>votes</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>237000000.0</td>\n <td>UK</td>\n <td>James Cameron</td>\n <td>Action</td>\n <td>760507625.0</td>\n <td>Avatar (2009)</td>\n <td>PG-13</td>\n <td>162</td>\n <td>7.8</td>\n <td>Sam Worthington</td>\n <td>Twentieth Century Fox Film Corporation</td>\n <td>958400</td>\n </tr>\n <tr>\n <th>1</th>\n <td>200000000.0</td>\n <td>USA</td>\n <td>James Cameron</td>\n <td>Drama</td>\n <td>658672302.0</td>\n <td>Titanic (1997)</td>\n <td>PG-13</td>\n <td>194</td>\n <td>7.8</td>\n <td>Leonardo DiCaprio</td>\n <td>Twentieth Century Fox Film Corporation</td>\n <td>865551</td>\n </tr>\n <tr>\n <th>2</th>\n <td>150000000.0</td>\n <td>USA</td>\n <td>Colin Trevorrow</td>\n <td>Action</td>\n <td>652270625.0</td>\n <td>Jurassic World (2015)</td>\n <td>PG-13</td>\n <td>124</td>\n <td>7.0</td>\n <td>Chris Pratt</td>\n <td>Universal Pictures</td>\n <td>470625</td>\n </tr>\n <tr>\n 
<th>3</th>\n <td>220000000.0</td>\n <td>USA</td>\n <td>Joss Whedon</td>\n <td>Action</td>\n <td>623357910.0</td>\n <td>The Avengers (2012)</td>\n <td>PG-13</td>\n <td>143</td>\n <td>8.1</td>\n <td>Robert Downey Jr.</td>\n <td>Marvel Studios</td>\n <td>1069292</td>\n </tr>\n <tr>\n <th>4</th>\n <td>185000000.0</td>\n <td>USA</td>\n <td>Christopher Nolan</td>\n <td>Action</td>\n <td>534858444.0</td>\n <td>The Dark Knight (2008)</td>\n <td>PG-13</td>\n <td>152</td>\n <td>9.0</td>\n <td>Christian Bale</td>\n <td>Warner Bros.</td>\n <td>1845853</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n\n\n* The dataset has a mix of numeric and categorical features.\n* There are variables for budget and gross revenue, but no variable for profit or roi. These will need creating later.\n* The \"name\" feature also includes the year the film was released. We can extract this information to create an age of film feature.\n\n\n```python\n# 12 features for 6000 observations\ndf.shape\n```\n\n\n\n\n (6000, 12)\n\n\n\n\n```python\n# All features\ndf.dtypes\n```\n\n\n\n\n budget float64\n country object\n director object\n genre object\n gross float64\n name object\n rating object\n runtime int64\n score float64\n star object\n studio object\n votes int64\n dtype: object\n\n\n\n\n```python\n# Only categorical\ndf.dtypes[df.dtypes == 'object']\n```\n\n\n\n\n country object\n director object\n genre object\n name object\n rating object\n star object\n studio object\n dtype: object\n\n\n\n\n```python\n# Plot histogram grid\ndf.hist(figsize=[14,14], xrot=315)\n# Clear the text \"residue\"\nplt.show()\n```\n\n\n\n\n\n#### Assumptions\n* Looking at the budget - possibly in millions\n* Score looks normally distributes which is usual with things involving human population\n* Looks like most movies have very few votes\n* Most movies are about 110 minutes in length which is consistent with research on this here https://www.slashfilm.com/by-the-numbers-the-length-of-feature-films/2/ and here https://www.reddit.com/r/dataisbeautiful/comments/6vnwa9/average_movie_length_by_country_source_imdb_of/\n* Gross is not clear, seems to be too small to be in millions. 
Will need further looking into\n\n\n```python\ndf.describe()\n```\n\n\n\n\n<div>\n\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>budget</th>\n <th>gross</th>\n <th>runtime</th>\n <th>score</th>\n <th>votes</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>count</th>\n <td>6.000000e+03</td>\n <td>6.000000e+03</td>\n <td>6000.000000</td>\n <td>6000.000000</td>\n <td>6.000000e+03</td>\n </tr>\n <tr>\n <th>mean</th>\n <td>2.469918e+07</td>\n <td>3.341635e+07</td>\n <td>106.587000</td>\n <td>6.386383</td>\n <td>7.188537e+04</td>\n </tr>\n <tr>\n <th>std</th>\n <td>3.721710e+07</td>\n <td>5.735205e+07</td>\n <td>18.026885</td>\n <td>0.994921</td>\n <td>1.308033e+05</td>\n </tr>\n <tr>\n <th>min</th>\n <td>0.000000e+00</td>\n <td>4.410000e+02</td>\n <td>50.000000</td>\n <td>1.500000</td>\n <td>2.700000e+01</td>\n </tr>\n <tr>\n <th>25%</th>\n <td>0.000000e+00</td>\n <td>1.527796e+06</td>\n <td>95.000000</td>\n <td>5.800000</td>\n <td>7.791750e+03</td>\n </tr>\n <tr>\n <th>50%</th>\n <td>1.100000e+07</td>\n <td>1.229897e+07</td>\n <td>102.000000</td>\n <td>6.500000</td>\n <td>2.660150e+04</td>\n </tr>\n <tr>\n <th>75%</th>\n <td>3.262500e+07</td>\n <td>4.007256e+07</td>\n <td>115.000000</td>\n <td>7.100000</td>\n <td>7.677475e+04</td>\n </tr>\n <tr>\n <th>max</th>\n <td>3.000000e+08</td>\n <td>7.605076e+08</td>\n <td>366.000000</td>\n <td>9.300000</td>\n <td>1.868308e+06</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n\n\nBased on the summary statistics, we see that some films have a budget of 0 in the dataset. Here are a few examples:\n\n\n```python\n# Examples of films with 0 budget\ndf[df.budget == 0].head()\n```\n\n\n\n\n<div>\n\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>budget</th>\n <th>country</th>\n <th>director</th>\n <th>genre</th>\n <th>gross</th>\n <th>name</th>\n <th>rating</th>\n <th>runtime</th>\n <th>score</th>\n <th>star</th>\n <th>studio</th>\n <th>votes</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>56</th>\n <td>0.0</td>\n <td>UK</td>\n <td>David Yates</td>\n <td>Adventure</td>\n <td>295983305.0</td>\n <td>Harry Potter and the Deathly Hallows: Part 1 (...</td>\n <td>PG-13</td>\n <td>146</td>\n <td>7.7</td>\n <td>Daniel Radcliffe</td>\n <td>Warner Bros.</td>\n <td>370003</td>\n </tr>\n <tr>\n <th>207</th>\n <td>0.0</td>\n <td>USA</td>\n <td>Walt Becker</td>\n <td>Action</td>\n <td>168273550.0</td>\n <td>Wild Hogs (2007)</td>\n <td>PG-13</td>\n <td>100</td>\n <td>5.9</td>\n <td>Tim Allen</td>\n <td>Touchstone Pictures</td>\n <td>104657</td>\n </tr>\n <tr>\n <th>431</th>\n <td>0.0</td>\n <td>USA</td>\n <td>John G. 
Avildsen</td>\n      <td>Action</td>\n      <td>115103979.0</td>\n      <td>The Karate Kid Part II (1986)</td>\n      <td>PG</td>\n      <td>113</td>\n      <td>5.9</td>\n      <td>Pat Morita</td>\n      <td>Columbia Pictures Corporation</td>\n      <td>58596</td>\n    </tr>\n    <tr>\n      <th>553</th>\n      <td>0.0</td>\n      <td>USA</td>\n      <td>Nora Ephron</td>\n      <td>Comedy</td>\n      <td>95318203.0</td>\n      <td>Michael (1996)</td>\n      <td>PG</td>\n      <td>105</td>\n      <td>5.7</td>\n      <td>John Travolta</td>\n      <td>Turner Pictures (I)</td>\n      <td>36553</td>\n    </tr>\n    <tr>\n      <th>592</th>\n      <td>0.0</td>\n      <td>USA</td>\n      <td>Tyler Perry</td>\n      <td>Comedy</td>\n      <td>90485233.0</td>\n      <td>Madea Goes to Jail (2009)</td>\n      <td>PG-13</td>\n      <td>103</td>\n      <td>4.3</td>\n      <td>Tyler Perry</td>\n      <td>Tyler Perry Company, The</td>\n      <td>10095</td>\n    </tr>\n  </tbody>\n</table>\n</div>\n\n\n\nThese are most likely missing values or data collection errors. But since our goal is to investigate the profitability of films, we cannot study films with missing budget values.\n\nTo improve the analysis in the future, we'd want to troubleshoot our data source to find out if we can get the budgets of those films. For now, we'll remove those films.\n\n\n```python\n# Remove films with \"0\" budget\ndf = df.loc[df.budget > 0,:]\n```\n\n\n```python\ndf.describe(include=['object'])\n```\n\n\n\n\n<div>\n\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>country</th>\n      <th>director</th>\n      <th>genre</th>\n      <th>name</th>\n      <th>rating</th>\n      <th>star</th>\n      <th>studio</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>count</th>\n      <td>4089</td>\n      <td>4089</td>\n      <td>4089</td>\n      <td>4089</td>\n      <td>4089</td>\n      <td>4089</td>\n      <td>4089</td>\n    </tr>\n    <tr>\n      <th>unique</th>\n      <td>42</td>\n      <td>1757</td>\n      <td>16</td>\n      <td>4089</td>\n      <td>8</td>\n      <td>1501</td>\n      <td>1232</td>\n    </tr>\n    <tr>\n      <th>top</th>\n      <td>USA</td>\n      <td>Woody Allen</td>\n      <td>Comedy</td>\n      <td>Nadine (1987)</td>\n      <td>R</td>\n      <td>Nicolas Cage</td>\n      <td>Universal Pictures</td>\n    </tr>\n    <tr>\n      <th>freq</th>\n      <td>3275</td>\n      <td>26</td>\n      <td>1136</td>\n      <td>1</td>\n      <td>2001</td>\n      <td>36</td>\n      <td>235</td>\n    </tr>\n  </tbody>\n</table>\n</div>\n\n\n\n#### Insights\n* There seem to be no missing values among the categorical features\n* Most movies are US comedies; the filtered dataset still covers 42 countries and 16 genres\n* Some categorical features have a large number of unique classes relative to the total number of observations.\n\nThere are 1232 unique studios in the dataset of 4089 observations (after filtering for non-zero budget). This will most likely lead to sparse classes, so we'll want to address this during feature engineering.\n\n\n```python\n# Reverse sort studios by total number of films\ndf.studio.value_counts().tail()\n```\n\n\n\n\n    Applied Action           1\n    Santa Monica Holdings    1\n    Seraphim Films           1\n    George Films             1\n    SBS Productions          1\n    Name: studio, dtype: int64\n\n\n\nCompare that to the top studios in the dataset, and we can see that it will be useful to combine low-frequency studios into tiered classes.\n\n\n```python\n# Top studios by total number of films\ndf.studio.value_counts().head()\n```\n\n\n\n\n    Universal Pictures                        235\n
    Warner Bros.                              231\n    Paramount Pictures                        197\n    Twentieth Century Fox Film Corporation    148\n    New Line Cinema                           123\n    Name: studio, dtype: int64\n\n\n\n### Data cleaning considerations\n* Check for duplicates\n* Check for missing values\n\n#### Check for duplicates\n\n\n```python\n# check for duplicates by movie name\ndf['name'].value_counts().head()\n```\n\n\n\n\n    Nadine (1987)                      1\n    La casa de los espíritus (1993)    1\n    La Bamba (1987)                    1\n    Intersection (1994)                1\n    Car 54, Where Are You? (1994)      1\n    Name: name, dtype: int64\n\n\n\nIt seems there are no duplicates in the dataset.\n\n#### Find missing values\n\n\n```python\n# Find missing data in categorical features\ndf.select_dtypes(include=['object']).isnull().sum()\n```\n\n\n\n\n    country     0\n    director    0\n    genre       0\n    name        0\n    rating      0\n    star        0\n    studio      0\n    dtype: int64\n\n\n\nNo data seems to be missing here.\n\n\n```python\n# Display number of missing values by feature (numeric)\ndf.select_dtypes(exclude=['object']).isnull().sum()\n```\n\n\n\n\n    budget     0\n    gross      0\n    runtime    0\n    score      0\n    votes      0\n    dtype: int64\n\n\n\nNo missing values here either.\n\n## Feature engineering\n\nCreating the target variable:\n\n\n```python\ndf['profit'] = df.gross - df.budget\n```\n\n### Combine sparse classes into tiers\n\nThere are other valid ways of combining classes. For example, you could try combining studios based on their average production budget (as a proxy for studio size); a sketch of that alternative is shown after the tier code below. We will combine them based on their total number of films in the dataset.\n\n```python\n# Number of films from each studio\nstudio_counts = df.studio.value_counts()\n\n# Tiers for sparser studios\none = studio_counts[studio_counts <= 1].index\nthree_five = studio_counts[(studio_counts > 1) & (studio_counts <= 3)].index\nfive_ten = studio_counts[(studio_counts > 3) & (studio_counts <= 5)].index\nten_plus = studio_counts[(studio_counts > 5) & (studio_counts <= 10)].index\n\n# Combine sparse studios\ndf['studio'].replace(one, '1', inplace=True)\ndf['studio'].replace(three_five, '3-5', inplace=True)\ndf['studio'].replace(five_ten, '5-10', inplace=True)\ndf['studio'].replace(ten_plus, '10+', inplace=True)\n```\n\n\n```python\n# Same with stars\n# Number of films from each star\nstar_counts = df.star.value_counts()\n\n# Tiers for sparser stars\none = star_counts[star_counts <= 1].index\nthree_five = star_counts[(star_counts > 1) & (star_counts <= 3)].index\nfive_ten = star_counts[(star_counts > 3) & (star_counts <= 5)].index\nten_plus = star_counts[(star_counts > 5) & (star_counts <= 10)].index\n\n# Combine sparse stars\ndf['star'].replace(one, '1', inplace=True)\ndf['star'].replace(three_five, '3-5', inplace=True)\ndf['star'].replace(five_ten, '5-10', inplace=True)\ndf['star'].replace(ten_plus, '10+', inplace=True)\n```\n\n\n```python\n# Number of films from each director\ndirector_counts = df.director.value_counts()\n\n# Tiers for sparser directors\none = director_counts[director_counts <= 1].index\nthree_five = director_counts[(director_counts > 1) & (director_counts <= 3)].index\nfive_ten = director_counts[(director_counts > 3) & (director_counts <= 5)].index\nten_plus = director_counts[(director_counts > 5) & (director_counts <= 10)].index\n\n# Combine sparse directors\ndf['director'].replace(one, '1', inplace=True)\ndf['director'].replace(three_five, '3-5', inplace=True)\ndf['director'].replace(five_ten, '5-10', inplace=True)\ndf['director'].replace(ten_plus, '10+', inplace=True)\n```\n\n
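A minimal sketch of the budget-based alternative mentioned above, assuming it would be run *instead of* the count-based tiering (the tier labels here are made up, and nothing below depends on this):\n\n```python\n# Hedged sketch: quartile-based studio tiers by average production budget.\n# Would replace the count-based tiering above; assumes distinct quartile edges.\navg_budget = df.groupby('studio')['budget'].mean()\nbudget_tiers = pd.qcut(avg_budget, q=4, labels=['small', 'mid', 'large', 'major'])\n# budget_tiers is indexed by studio name, so it maps back onto the frame:\n# df['studio'] = df['studio'].map(budget_tiers)\n```\n\n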
### Combining countries\n\n\n```python\n# It looks like movies mainly come from the top 5 countries in this dataset\n# Number of films from each country\ncountry_counts = df.country.value_counts()\n\n# New class frequencies\ndf.country.value_counts()\ntop_5_list = df.country.value_counts().index[:5]\ndf.loc[~df.country.isin(top_5_list), 'country'] = 'Other'\nsns.countplot(y='country', data=df)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a217d20f0>\n\n\n\n\n\n\n\n\n```python\nsns.countplot(y='genre', data=df)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a21280ba8>\n\n\n\n\n\n\n\nIt looks like we have 8 top genres; let's combine the sparse genres into \"Other\".\n\n\n```python\n# top 8 genres\ntop_8_list = df.genre.value_counts().index[:8]\ndf.loc[~df.genre.isin(top_8_list), 'genre'] = 'Other'\nsns.countplot(y='genre', data=df)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a20e41c88>\n\n\n\n\n\n\n\nThe *rating* feature has an issue where unrated films carry several different labels:\n\n\n```python\nsns.countplot(y='rating', data=df)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a212fe240>\n\n\n\n\n\n\n\n\n```python\n# Fix \"unrated\" labels\ndf['rating'].replace(['NOT RATED', 'UNRATED', 'Not specified'], 'NR', inplace=True)\n```\n\n\n```python\nsns.countplot(y='rating', data=df)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a211a2d30>\n\n\n\n\n\n\n\nFinally, we'll create an age feature for the film.\n\nNote: We will set \"today\" to 2014 to imitate an analysis performed in 2014 to predict films in 2015 and 2016.\n\n\n```python\ndef extract_age(s, today=2014):\n    return today - int( s[-5:-1] )\n```\n\n\n```python\n# Create \"age\" feature\ndf['age'] = df.name.apply(extract_age)\n```\n\n\n```python\ndf.head()\n```\n\n\n\n\n<div>\n\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>budget</th>\n      <th>country</th>\n      <th>director</th>\n      <th>genre</th>\n      <th>gross</th>\n      <th>name</th>\n      <th>rating</th>\n      <th>runtime</th>\n      <th>score</th>\n      <th>star</th>\n      <th>studio</th>\n      <th>votes</th>\n      <th>profit</th>\n      <th>age</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>237000000.0</td>\n      <td>UK</td>\n      <td>5-10</td>\n      <td>Action</td>\n      <td>760507625.0</td>\n      <td>Avatar (2009)</td>\n      <td>PG-13</td>\n      <td>162</td>\n      <td>7.8</td>\n      <td>3-5</td>\n      <td>Twentieth Century Fox Film Corporation</td>\n      <td>958400</td>\n      <td>523507625.0</td>\n      <td>5</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>200000000.0</td>\n      <td>USA</td>\n      <td>5-10</td>\n      <td>Drama</td>\n      <td>658672302.0</td>\n      <td>Titanic (1997)</td>\n      <td>PG-13</td>\n      <td>194</td>\n      <td>7.8</td>\n      <td>Leonardo DiCaprio</td>\n      <td>Twentieth Century Fox Film Corporation</td>\n      <td>865551</td>\n      <td>458672302.0</td>\n      <td>17</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>150000000.0</td>\n      <td>USA</td>\n      <td>3-5</td>\n      <td>Action</td>\n      <td>652270625.0</td>\n      <td>Jurassic World (2015)</td>\n      <td>PG-13</td>\n      <td>124</td>\n      <td>7.0</td>\n      <td>3-5</td>\n      <td>Universal Pictures</td>\n      <td>470625</td>\n      <td>502270625.0</td>\n      <td>-1</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>220000000.0</td>\n      <td>USA</td>\n      <td>3-5</td>\n      <td>Action</td>\n      <td>623357910.0</td>\n      <td>The Avengers (2012)</td>\n      <td>PG-13</td>\n      <td>143</td>\n      <td>8.1</td>\n      <td>Robert Downey Jr.</td>\n      <td>10+</td>\n      <td>1069292</td>\n      <td>403357910.0</td>\n      <td>2</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>185000000.0</td>\n      <td>USA</td>\n      <td>10+</td>\n      <td>Action</td>\n      <td>534858444.0</td>\n      <td>The Dark Knight (2008)</td>\n      <td>PG-13</td>\n      <td>152</td>\n      <td>9.0</td>\n      <td>Christian Bale</td>\n      <td>Warner Bros.</td>\n
      <td>1845853</td>\n      <td>349858444.0</td>\n      <td>6</td>\n    </tr>\n  </tbody>\n</table>\n</div>\n\n\n\n## Correlations\n\n* Is the profit of a movie correlated with its user score?\n    * There is a very low positive correlation, so we can assume that these two features are not correlated\n* How about its number of votes?\n    * Votes have a strong positive correlation with gross profit\n* What do these correlations tell us?\n    * Gross profit is directly dependent on how many people watched the movie. It looks like a movie receives more ratings as more people watch it, but the score people give does not depend on the profit, and vice versa.\n\n\n```python\n# Calculate correlations:\ncorrelations = df.corr()\n\n### SNS plot theme\n# Change color scheme\nsns.set_style('white')\n# Generate a mask for the upper triangle\nmask = np.zeros_like(correlations, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\n# Make the figsize\nplt.figure(figsize=(15,15))\n\n# Plot heatmap of correlations\nsns.heatmap(correlations*100, annot=True, mask=mask, cbar=False)\n```\n\n\n\n\n    <matplotlib.axes._subplots.AxesSubplot at 0x1a21a57d30>\n\n\n\n\n\n\n\n## Machine learning\n\nNext, we will prepare the data for machine learning by creating an analytical base table. We will drop the name column because it is basically an index, and the gross, votes, and score features because we would not know them at prediction time.\n\n\n```python\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_absolute_error\n```\n\n\n```python\n# Create analytical base table (ABT)\nabt = pd.get_dummies ( df.drop(['name', 'gross', 'votes', 'score'], axis=1) )\n```\n\nWe'll split the data by date: training data (2014 and earlier) and test data (2015, 2016).\n\n\n```python\n# Train / Test split based on date of training data (2014 and earlier) and test data (2015, 2016)\ntrain = abt[abt.age >= 0]\ntest = abt[abt.age < 0]\n\ny_train = train.profit\nX_train = train.drop(['profit'], axis=1)\n\ny_test = test.profit\nX_test = test.drop(['profit'], axis=1)\n```\n\nWe will try a basic random forest.\n\n\n```python\n# Train a basic random forest model\nrf = RandomForestRegressor(random_state=1234)\nrf.fit(X_train, y_train)\n\n# Make prediction on test set\npred = rf.predict(X_test)\n```\n\n\n```python\nsns.jointplot(y_test, pred, kind='reg')\nplt.xlabel('Actual Profit')\nplt.ylabel('Predicted Profit')\nplt.show()\n```\n\n\n\n\n\n```python\n# scores\nprint('R^2 score: ', r2_score(y_test, pred))\nprint('MAE: ', mean_absolute_error(y_test, pred))\n```\n\n    R^2 score:  0.22029037231625237\n    MAE:  29684720.822394677\n\n\nAs you can see in the plot, even this basic out-of-the-box model does a pretty good job predicting the profitability of films from the limited data we have.\n\nWhether this model performance is \"good enough\" will depend on the use-case. For example, in a betting market, this model would already give a formidable edge.\n\nFinally, we'll plot the feature importances.\n\n\n```python\n# Helper function for plotting feature importances\ndef plot_feature_importances(columns, feature_importances, show_top_n=10):\n    feats = dict( zip(columns, feature_importances) )\n    imp = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Gini-importance'})\n    imp.sort_values(by='Gini-importance').tail(show_top_n).plot(kind='barh', figsize=(8,8))\n    plt.show()\n```\n\n\n```python\nplot_feature_importances(X_train.columns, rf.feature_importances_)\n```\n\n\n\n\n\nAs a whole, the budget feature was the most important in our model. But earlier, we found that budget and profit were not correlated.\n\nThis seems contradictory, but the answer has to do with the difference between first-order correlations and a full model. Earlier, we were looking at the correlation between budget and profit at an aggregate level.\n\nBut now that we've built a model, we can look at the effect of budget while controlling for all the other input features as well.\n\n
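A hedged illustration of that last point, not part of the original notebook: approximate the partial dependence of predicted profit on budget by sweeping budget over a grid while leaving every other feature as observed.\n\n```python\n# Sketch: manual partial dependence of predicted profit on budget.\nbudget_grid = np.linspace(X_train.budget.min(), X_train.budget.max(), 20)\navg_pred = []\nfor b in budget_grid:\n    X_mod = X_train.copy()\n    X_mod['budget'] = b  # force every film to the same budget\n    avg_pred.append(rf.predict(X_mod).mean())\n\nplt.plot(budget_grid, avg_pred)\nplt.xlabel('Budget')\nplt.ylabel('Average predicted profit')\nplt.show()\n```\n\n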
## Machine Learning with Pre-Screen\nNext, we'll create a new analytical base table for the scenario where we're able to collect an accurate score input based on film pre-screenings.\n\n\n```python\n# Create new analytical base table (ABT)\nabt_ps = pd.get_dummies ( df.drop(['name', 'gross', 'votes'], axis=1) )\n```\n\n\n```python\ntrain = abt_ps[abt_ps.age >= 0]\ntest = abt_ps[abt_ps.age < 0]\n\ny_train = train.profit\nX_train = train.drop(['profit'], axis=1)\n\ny_test = test.profit\nX_test = test.drop(['profit'], axis=1)\n```\n\n\n```python\n# Train a basic random forest model\nrf = RandomForestRegressor(random_state=1234)\nrf.fit(X_train, y_train)\n\n# Make prediction on test set\npred = rf.predict(X_test)\n```\n\n\n```python\nsns.jointplot(y_test, pred, kind='reg')\nplt.xlabel('Actual Profit')\nplt.ylabel('Predicted Profit')\nplt.show()\n```\n\n\n\n\n\n\n```python\n# scores\nprint('R^2 score: ', r2_score(y_test, pred))\nprint('MAE: ', mean_absolute_error(y_test, pred))\n```\n\n    R^2 score:  0.3371924664808582\n    MAE:  25972965.048558757\n\n\nIncluding the score feature improves our model's performance substantially. We should make an effort to collect this data for any film we'd like to predict.\n\n\n```python\nplot_feature_importances(X_train.columns, rf.feature_importances_)\n```\n\n\n\n\n\nThe score turns out to be the second most important feature after the budget.\n"
},
{
"alpha_fraction": 0.6166263222694397,
"alphanum_fraction": 0.6206617951393127,
"avg_line_length": 29.975000381469727,
"blob_id": "b4b57f05d7a865e15686754890d20ae5e1a6bd7b",
"content_id": "28a68c3899013d6b39a4902a8e53a33a6f47f7e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1239,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 40,
"path": "/r/movie-ratings/movie-app/server.R",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "#\n# This is the server logic of a Shiny web application. You can run the \n# application by clicking 'Run App' above.\n#\n# Find out more about building applications with Shiny here:\n# \n# http://shiny.rstudio.com/\n#\n\nlibrary(shiny)\nlibrary(ggplot2)\nlibrary(dplyr)\n\nmovies<- read.csv(\"Movie-Ratings.csv\")\n# rename columns to get rid of special characters\ncolnames(movies)<-c(\"Film\", \"Genre\", \"CriticRating\", \"AudienceRating\", \n \"BudgetMillions\", \"Year\")\n# check stucture of the data (6 variables:562 objects loaded)\nprint(str(movies))\n# Define server logic required to draw a histogram\nshinyServer(function(input, output) {\n \n output$mainPlot <- renderPlot({\n filtered <- \n movies %>%\n filter(Genre ==input$Genre,\n Year == input$Year\n )\n if(input$Ratings == \"AudienceRating\"){\n ggplot(filtered, aes(x=BudgetMillions, y=AudienceRating,\n colour=Genre, size=BudgetMillions)) +\n geom_point(aes(x=BudgetMillions))\n }\n else{\n ggplot(filtered, aes(x=BudgetMillions, y=CriticRating,\n colour=Genre, size=BudgetMillions)) +\n geom_point(aes(x=BudgetMillions)) \n }\n })\n})\n"
},
{
"alpha_fraction": 0.6702127456665039,
"alphanum_fraction": 0.7340425252914429,
"avg_line_length": 14.666666984558105,
"blob_id": "4cffb4246fbf5c9f13abc2e20d3f7a3d0aa4af59",
"content_id": "2e06b76fc738930d5f0568b4237c312efd989893",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 6,
"path": "/docker/flask-mysql-example/db/Dockerfile",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Simple mysql container\nFROM mysql:5.7\nLABEL maintainer=\"Daniel Lennart\"\n\nEXPOSE 3306\nCMD [\"mysqld\"]\n"
},
{
"alpha_fraction": 0.778052806854248,
"alphanum_fraction": 0.7897689938545227,
"avg_line_length": 72.90243530273438,
"blob_id": "fb84fef1ca9ef868a6df90a97b0710101b3c9cf6",
"content_id": "8e75729c5e28f9c4408ededc68df03ba50c91c84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6060,
"license_type": "no_license",
"max_line_length": 900,
"num_lines": 82,
"path": "/r/movie-ratings/README.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# Exploring movie ratings data with R\nThis is an assignment done for R part of Programming Languages for Data Engineering module at University of Dundee\n## Dataset\nDataset chosen for the assignment is a movie rating dataset containing movie budgets, genres, audience ratings from RottenTomatoes and critics ratings for years 2007-2011. Dataset contains 562 observations of 6 variables.\n### Statistical measures\nSome statistical measures were obtained using summary() function:\n\n\n\nStandard deviation for all numerical measures was calculated using std() function and is listed in a table below.\n\n| Measure | Standard deviation |\n| ------ | ----- |\n| CriticRating | 26.39239 |\n| AudienceRating | 16.8277 |\n| BudgetMillions | 48.63848 |\n\n## Data relationships\nSeveral plots were chosen to represent data relationships. Boxplots in Fig. 1 and Fig. 2 represent the relationship between rating and movie genre, showing data scatter, distribution and median. By comparing Audience and Critics ratings it can be seen that critic ratings are more spread out than the Audience ones. Also some of the genres had more data points than others. The effect of this is seen in the graph on Fig. 3 which shows relationship between Audience and Critics ratings per year and genre with overlayed trend. It can be clearly seen that some years and genres do not have enough data to produce meaningful results. Also there are some interesting observations that can be made, for example, looking at romance movies released in 2011 it can be seen that quite low critics rating of approximately 25 corresponds to a high audience rating of 75 which is quite unusual for other genres.\n\n#### Fig. 1 Audience rating boxplot\n\n\n#### Fig. 2 Critics rating boxplot\n\n\n#### Fig. 3 Audience vs Critics\n\n\nFig. 4 and Fig. 5 show the relationship between budgets, ratings and genres.\n\n#### Fig. 4 Audience rating vs Budget\n\n\n#### Fig. 5 Critics rating vs Budget\n\n\nAgain, graphs show that critics ratings are more distributed, but we cannot conclude any obvious relationships between budget and the rating. It can be noted from this graphs that action movies tend to have bigger budgets along with adventure movies. \n\nLastly, Fig. 6 presents budget distributions per genre.\n#### Fig. 6 Budget distribution\n\n\n## Normal distributions\nFrom looking at the data histograms, interesting observation was made. Audience rating resembles normal distribution while critics ratings are more uniformly distributed. Fig. 6 and Fig. 7 illustrate this observation. This can be explained with a theory that audience is representing population and population opinion tends to be generally distributed, while critics rank movies according to the set of rules. This can also explain more widely distributed critics ratings.\n\n \n\nIn order to check if audience ratings are indeed normally distributed, random normal distribution was generated with same mean and standard deviation calculated earlier. Results were plotted with qqnorm() and are presented in the following plots\n\n#### Randomly generated normal distribution\n\n#### Audience ratings\n\n\nFrom looking at the plots, it can be concluded that Audience ratings are indeed normally distributed.\n\n## Linear regression\nIn order to investigate data relationships, linear regression was performed on some variables. 
## Linear regression\nIn order to investigate data relationships, linear regression was performed on some variables. Results are presented in the following plots.\n\n#### Audience and Critics ratings\n\n\n#### Audience rating and Budget\n\n\n#### Critics rating and Budget\n\n\nIn both Audience vs Critics and Audience vs Budget there is a significant relationship between the variables, as the p-value is less than 0.05. However, in the case of critics rating and budget, the p-value is 0.781, hence it can be concluded that there is no relationship between budget and critics rating. Linear regression was performed with the lm() function. A code snippet is presented below:\n```\nmodel2 <- lm(AudienceRating~BudgetMillions, data=movies)\nab <- ggplot(data=movies, aes(x=AudienceRating, y=BudgetMillions))\nab + geom_point()+\n  geom_smooth(method='lm')\n```\n## Shiny application\nFinally, an interface to explore the dataset has been developed in Shiny. It allows subsetting the data by year and genre and switching between audience and critics ratings for those years and genres. Screenshots are presented below.\n#### Shiny Audience ratings\n\n#### Shiny Critics ratings\n\n"
},
{
"alpha_fraction": 0.642988383769989,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 33.74468231201172,
"blob_id": "f823d1f69ea43737371be15c6eda82739ffca025",
"content_id": "ac8ba380565514538d4695a07f986f980d203a9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1633,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 47,
"path": "/terraform/Makefile",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------\n# # Local variables\n# ----------------------------------------------------------------------\n# # Current working folder\nLCD=$(shell pwd)\nDIR_MAIN=$(LCD)\nDIR_SCRIPTS=$(LCD)/scripts\nDIR_TMP=$(LCD)/tmp\n\n.DEFAULT_GOAL := setup\n\n.PHONY: help\nhelp: ## Display this help\n\t@echo \"Targets:\"\n\t@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' Makefile | \\\n\t\tsort | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-20s\\033[0m %s\\n\", $$1, $$2}'\n\n.PHONY: setup\nsetup: terraform ## (default) Setup terraform\n\n.PHONY: terraform\nterraform: ## Setup terraform\n\t@$(DIR_SCRIPTS)/setup_terraform.sh\n\n.PHONY: cpu_plan\ncpu_plan: ## Terraform plan CPU instance\n\t@bin/terraform plan -state=envs/cpu/fastai-dl.tfstate -var-file=envs/cpu/fastai-dl.tfvars envs/cpu/\n\n.PHONY: cpu_apply\ncpu_apply: ## Terraform apply CPU instance\n\t@bin/terraform apply -state=envs/cpu/fastai-dl.tfstate -var-file=envs/cpu/fastai-dl.tfvars envs/cpu/\n\n.PHONY: cpu_destroy\ncpu_destroy: ## Terraform destroy CPU instance\n\t@bin/terraform destroy -state=envs/cpu/fastai-dl.tfstate -var-file=envs/cpu/fastai-dl.tfvars envs/cpu/\n\n.PHONY: gpu_plan\ngpu_plan: ## Terraform plan GPU instance\n\t@bin/terraform plan -state=envs/gpu/fastai-dl.tfstate -var-file=envs/gpu/fastai-dl.tfvars envs/gpu/\n\n.PHONY: gpu_apply\ngpu_apply: ## Terraform apply GPU instance\n\t@bin/terraform apply -state=envs/gpu/fastai-dl.tfstate -var-file=envs/gpu/fastai-dl.tfvars envs/gpu/\n\n.PHONY: gpu_destroy\ngpu_destroy: ## Terraform destroy GPU instance\n\t@bin/terraform destroy -state=envs/gpu/fastai-dl.tfstate -var-file=envs/gpu/fastai-dl.tfvars envs/gpu/\n"
},
{
"alpha_fraction": 0.6947715282440186,
"alphanum_fraction": 0.6961846351623535,
"avg_line_length": 23.95294189453125,
"blob_id": "5a1b41cc3a7acac6290c8030531247d95d4c14ce",
"content_id": "815cbddcff6c25d0f441e422da749eadff7e994f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2123,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 85,
"path": "/ansible/cheatsheet.md",
"repo_name": "daniel-lennart/data-portfolio",
"src_encoding": "UTF-8",
"text": "\n\n# Various [Ansible](https://www.ansible.com/) configuration management commands\n\n#### Show groups\n\n```\nansible -m debug -i inventories/inventory.sh -a \"var=hostvars[inventory_hostname]['group_names']\" [inventory hostname]\n```\n\n#### Show hosts in group\n\n```\nansible -i inventories/inventory.sh -m ping [ group name ] --list-hosts\n```\n\n#### Run playbook\n\n```\nansible-playbook -i inventories/inventory.sh -l [ group name ] [ playbook name ]\n```\n\n#### Run command against all servers\n\n```\nansible -i inventories/inventory.sh '*' -m shell -a 'grep \"repo\" /etc/yum.repos.d/remi*'\n```\n\n#### Run release against 1 server\n\n```\nansible-playbook -i inventories/inventory.sh -l [ server name ] [release playbook]\n```\n\n#### Run for specific host from specific task\n\n```\nansible-playbook [release playbook] -i inventories/inventory.sh -l '[server-hostname]' -D --start-at-task 'Install cloudfuse'\n```\n\n#### Run mysql query\n\n```\nansible -i inventories/inventory.sh -m shell -a 'sudo mysql --execute=\"SELECT user,host FROM mysql.user\"' [db-servers-group-name]\n```\n\n#### Find what servers use SSL certificate\n\n```\nansible -i inventories/inventory.sh '*' -m shell -a 'find /etc/httpd/conf/certificates -name \"ssl.crt\"'\n```\n\n#### Run tag against server\n\n```\nansible-playbook [release playbook] -i inventories/inventory.sh -l '[server.hostname]' -D --check --tags \"[tag_name]\"\n```\n\n#### Run site play against staging environment\n\n```\nansible-playbook -i inventories/inventory.sh -u centos -l group_staging [release playbook] --list-hosts\n```\n\n#### Run user management playbooks from main release playbook\n\n```\nansible-playbook -i inventories/inventory.sh -u centos -l [ group ] --tags \"user_management\" [release playbook]\n```\n\n#### Limit playbook to several groups and list hosts\n\n```\nansible-playbook -i inventories/inventory.sh -u centos -l group1:group2 [release playbook] --list-hosts\n```\n\n#### Gather and cache facts to use with other group\n\n```\nansible -i inventories/inventory.sh 'group name' -m setup\n```\n\n#### Run specific task\n\n```\nansible-playbook [task playbook] -i inventories/inventory.sh -e \"task_playbook=some_task.yml\"\n```\n"
}
] | 17 |
ahmetsensan/Projects | https://github.com/ahmetsensan/Projects | 6f227625e3e158ce027b1a79e11a6f118a8d8b94 | fb8b1053759d08d7fffeb9dc2a73e3aa8feda26b | 834f9adaea97cb6801cc68bb9d353d3f95521f8b | refs/heads/master | 2020-09-08T19:48:59.649144 | 2020-09-07T08:13:35 | 2020-09-07T08:13:35 | 221,229,298 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.388579398393631,
"alphanum_fraction": 0.4526462256908417,
"avg_line_length": 16.947368621826172,
"blob_id": "185d76422ad98a84ab3d25f52e54ee7e499fe249",
"content_id": "0880906ed59425a6a2047fac74555a22524e8029",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 38,
"path": "/project_1.py",
"repo_name": "ahmetsensan/Projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 12 16:38:06 2019\r\n\r\n@author: Ahmet Şensan\r\n\"\"\"\r\n\r\n#%% Example \r\n \r\n#1640. yil == 17. yuzyil\r\n#109. yil == 2. yuzyil\r\n# 200. yil == 20. yuzyil\r\n\r\n#metod yazın \r\n #input integer yillar\r\n #output yuzyil\r\n \r\n\r\ndef year2century(year):\r\n \"\"\"\r\n year to century\r\n \"\"\"\r\n str_year = str(year)\r\n \r\n if(len(str_year)<3):\r\n return 1\r\n \r\n elif(len(str_year) == 3):\r\n \r\n if(str_year[1:3] == \"00\"):\r\n return int(str_year[0])\r\n else:\r\n return int(str_year[0]) +1\r\n else:\r\n if (str_year[2:4 == \"00\"]):\r\n return int (str_year[:2])\r\n else:\r\n return int (str_year[:2])+1"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 12.5,
"blob_id": "913f2b8fba5a9378669a454333ebf450878bf316",
"content_id": "536eab5b81053b2d85a921535423dce627a5536e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ahmetsensan/Projects",
"src_encoding": "UTF-8",
"text": "# Projects\nYear to Century\n"
}
] | 2 |
hsamvel/Python_homeworks | https://github.com/hsamvel/Python_homeworks | 809b506432ad05cf40c547c022129080b76182ad | 80da71ee3c139b793e84a5fdf9125bae5d078384 | d36d0ddf332ae7fdbe215292f04f3ee5d4e2eace | refs/heads/master | 2023-02-03T17:26:35.178484 | 2020-12-22T14:11:22 | 2020-12-22T14:11:22 | 303,796,366 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4523160755634308,
"alphanum_fraction": 0.531335175037384,
"avg_line_length": 20.9375,
"blob_id": "697fa4bcac8dcedc7e3869c1a312c300037d70f5",
"content_id": "58029111e984f666f43bd6bbe9f0875ee3a55acf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 16,
"path": "/homework5_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [1,5,20,30]\r\nd = list_1[-1]-list_1[-2]\r\nlist_1.append(d)\r\nlist_1.sort()\r\nnew_list = []\r\nfor i in range(len(list_1)-1,0,-1):\r\n d=list_1[1]\r\n if list_1[i]-d == list_1[i-1] + d:\r\n new_list.append(list_1[i]-d)\r\nfor el in new_list:\r\n list_1.append(el)\r\nlist_1.sort()\r\nif sum(list_1[1:]) % d == 0:\r\n print(True,list_1)\r\nelse:\r\n print(False)\r\n"
},
{
"alpha_fraction": 0.5125347971916199,
"alphanum_fraction": 0.621169924736023,
"avg_line_length": 21.4375,
"blob_id": "1c86bfd87bff1739ea37ae9f6b2461a99741d61f",
"content_id": "e17dc5be01d2ed14bb27a79cb5338fe22669f796",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 359,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 16,
"path": "/homework8_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [9,5,8,5,20,1,2,-3,-2,-1,0]\nlist_1.sort()\nlist_1.reverse()\nlist_2 = list_1[0:3]\nlist_3 = list_1[len(list_1)-2:len(list_1)]\nlist_3_count=1\nfor el in list_3:\n list_3_count *= el\nlist_3_count *=list_1[0]\nlist_2_count = 1\nfor elem in list_2:\n list_2_count*=elem\nif list_3_count > list_2_count:\n print(list_3_count)\nelse:\n print(list_2_count)\n"
},
{
"alpha_fraction": 0.45077720284461975,
"alphanum_fraction": 0.4896373152732849,
"avg_line_length": 19.36842155456543,
"blob_id": "c925671a2f39ea6a961aff175657300da556ff91",
"content_id": "f79288e8cb20b3390075cc5277769fb121d30d91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 19,
"path": "/homework7_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [1,4,6,5,7,10]\ndef swap_list_elements(list_1):\n kent = [] \n zuyg=[]\n for el in list_1:\n if el % 2 == 0:\n zuyg.append(el)\n else:\n kent.append(el)\n new_list = []\n i = 0\n while i < len(kent):\n new_list.append(zuyg[i])\n new_list.append(kent[i])\n i+=1\n return new_list\n\n\nprint(swap_list_elements(list_1))"
},
{
"alpha_fraction": 0.446601927280426,
"alphanum_fraction": 0.5024271607398987,
"avg_line_length": 28.428571701049805,
"blob_id": "ef203ca1aa711a532a907bc44a1adf578007640d",
"content_id": "1a4fc25a56a98e393b6fb6ba0d72670b86fd9f48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 14,
"path": "/homework11_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 =[4, 2, 1, 5, 3]\n\n\ndef sum_of_pairs(list_1,number):\n for i in range(len(list_1)):\n if number - list_1[i] in list_1:\n if number - list_1[i] == list_1[i] and list_1[i] in list_1[i+1:]:\n return number - list_1[i] , list_1[i]\n if number - list_1[i] != list_1[i]:\n return number - list_1[i],list_1[i]\n return '' \n\n\nprint(sum_of_pairs(list_1,8))\n"
},
{
"alpha_fraction": 0.4094488322734833,
"alphanum_fraction": 0.4519684910774231,
"avg_line_length": 45.5,
"blob_id": "5b501f8a55e7892f8b313496011ffe7f333fafe3",
"content_id": "1910ce1da84a89831a1ad26d09818291810b9502",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 12,
"path": "/homework6_1_(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def possible_turns(cell):\n cell=list(cell)\n letter = ['','A','B','C','D','E','F','G','H']\n number = ['', 1,2,3,4,5,6,7,8]\n for i in range(1,len(letter),1):\n if letter.index(cell[0])- i == 2 or i - letter.index(cell[0]) ==2:\n print(letter[i] + str(number[int(cell[1])-1]),letter[i] + str(number[int(cell[1])+1]))\n if letter.index(cell[0])-i == 1 or i - letter.index(cell[0]) ==1:\n print(letter[i] + str(number[int(cell[1])-2]),letter[i] + str(number[int(cell[1])+2]))\n \n \npossible_turns('B1')\n "
},
{
"alpha_fraction": 0.4850574731826782,
"alphanum_fraction": 0.4965517222881317,
"avg_line_length": 27.133333206176758,
"blob_id": "be5ce81907d7232f7802cf76a3ccc953d8de13ac",
"content_id": "a245413fec029f5444984503214622759049b054",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 15,
"path": "/homework_5_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "n = input().split(', ')\nm = [input().split(', ') for i in range(len(n))]\nour_vars = []\nnames = input().split(', ')\nnew_list = []\nfor i in range(len(m)):\n for elem in names:\n if elem not in m[i]:\n our_vars.append(elem)\nfor j in range(len(m)):\n if our_vars[0] in m[j]:\n new_list+=m[j]\nfor c in range(len(n)):\n if len(new_list)== int(n[c][-1]):\n print(our_vars[0],n[c][0:-2],sep=':')\n \n"
},
{
"alpha_fraction": 0.4476885497570038,
"alphanum_fraction": 0.5158150792121887,
"avg_line_length": 30.230770111083984,
"blob_id": "674f27c59377f1fc119889a83d4877a902057a94",
"content_id": "567c7964a52341e20b859a28800fd44473f535de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/homework7_2(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "dict_1 = {'A' : (0,0),'B' : (0,4),'C':(2,0),'D':(2,4),'E':(0,-4),'F':(2,-4)}\nnew_list = []\nlist_2 = []\nfor elem in dict_1:\n new_list.append(sum(dict_1[elem]))\n list_2.append(elem)\nprint(new_list)\ndict_2= {}\nfor i in range(0,len(new_list),1):\n for j in range(1,len(new_list),1):\n if list_2[i] != list_2[j]:\n dict_2[list_2[i]+list_2[j]]= abs(new_list[j]-new_list[i])\nprint(dict_2)\n\n "
},
{
"alpha_fraction": 0.4091953933238983,
"alphanum_fraction": 0.4850574731826782,
"avg_line_length": 23.22222137451172,
"blob_id": "33712009c4a86061dd26b4b196aa789c52c156a8",
"content_id": "0b06874fd91f068be981207bf0ed2f5b790a514f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 18,
"path": "/homework9_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [2,2,2,2,2,2,1,2]\ncount = 0\ndef weighing_machine(list_1,a=0):\n global count\n if count == 3:\n if list_1[-1] % 2 != 0:\n return len(list_1)-1\n else:\n return len(list_1)-2\n if sum(list_1[a:a+2]) % 2 != 0:\n if list_1[a] % 2 ==1:\n return a \n return a + 1\n else:\n count += 1\n return weighing_machine(list_1,a=a+2)\n\nprint(weighing_machine(list_1))"
},
{
"alpha_fraction": 0.48571428656578064,
"alphanum_fraction": 0.5214285850524902,
"avg_line_length": 19,
"blob_id": "c7e90da816af82e5fc344088b16686190bd38955",
"content_id": "bc3ca75975f1438d76972ac39a583e0e5c6eaeb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 7,
"path": "/homework10_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "number = int(input())\ncount = 0\nfor i in range(1,number+1):\n for el in str(i):\n if '2' in el:\n count += 1\nprint(count)\n"
},
{
"alpha_fraction": 0.5367913246154785,
"alphanum_fraction": 0.5422195196151733,
"avg_line_length": 23.969696044921875,
"blob_id": "9bded0ec7ec18a508104405cb48cb311b6d73e90",
"content_id": "43794223c224e0bfe55de423570c587fb45f5a1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1658,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 66,
"path": "/homework19_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "\n\nclass Weapon:\n\n def __init__(self,name,damage,range):\n self.name = name\n self.damage = damage\n self.range = range\n\n def hit(self,actor,target):\n if BaseCharacter.is_alive(target):\n pass\n else:\n print(\"the enemy is already defeated\")\n\n\n def __str__(self):\n return self.name\n\nclass BaseCharacter:\n def __init__(self,pos_x,pos_y,hp):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.hp = hp\n\n def move(self,delta_x,delta_y):\n self.pos_x = delta_x\n self.pos_y = delta_y\n\n def is_alive(self):\n return self.hp > 0\n\n\n def get_damage(self,amount):\n if self.hp - amount > 0:\n return self.hp - amount\n else:\n return 0\n def get_cords(self):\n return self.pos_x,self.pos_y\n\nclass BaseEnemy(BaseCharacter):\n def __init__(self, pos_x, pos_y, hp,weapon):\n super().__init__(pos_x, pos_y, hp)\n self.weapon = weapon\n\n def hit(self,target):\n if isinstance(target,MainHero):\n pass\n else:\n return \"I can hit only main hero\"\n\n def __str__(self):\n return f\"enemy is in the position {self.pos_x,self.pos_y} with weapon {self.weapon}\"\n\nclass MainHero(BaseCharacter):\n def __init__(self, pos_x, pos_y, hp,weapon):\n super().__init__(pos_x, pos_y, hp)\n self.weapon = weapon\n\n def heal(self,amount):\n if self.hp + amount <= 200:\n return self.hp + amount\n else:\n return 200\n def add_weapon(self,weapon):\n if isinstance(weapon,Weapon):\n return f\"picked up {weapon}\"\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.4871794879436493,
"alphanum_fraction": 0.5201465487480164,
"avg_line_length": 21.83333396911621,
"blob_id": "7413b837189d16b2eb0da8776127d9500053cf30",
"content_id": "63233d5b7f6ec0ad332473633412340d55ee0328",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 12,
"path": "/homework13_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def str_vs_str(str1,str2):\n count = 0\n for el in str1:\n if el in str2:\n if list(str1).count(el) == list(str2).count(el):\n count += 1\n if count == len(str2):\n return True\n return False\n\n\nprint(str_vs_str( 'abcde','ceafb'))"
},
{
"alpha_fraction": 0.37899544835090637,
"alphanum_fraction": 0.44748857617378235,
"avg_line_length": 21,
"blob_id": "9cd61843cb10680d4b953589c229114c40b4746e",
"content_id": "6412a28ef08e7d870697dc1fe87b4f0518f140e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 10,
"path": "/homework3_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [1,1,1]\ndef b(list_1):\n count = 0\n for i in range(len(list_1)-1):\n j = i +1\n while list_1[j] <= list_1[i]:\n list_1[j] += 1\n count += 1\n return count\nprint(b(list_1))"
},
{
"alpha_fraction": 0.46027398109436035,
"alphanum_fraction": 0.5123287439346313,
"avg_line_length": 21.75,
"blob_id": "53b10421ebfd9da6b3968d35d5c858b2121a5d24",
"content_id": "93ed277287d0fb624b22c2aa28729b48818a512e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 16,
"path": "/homework7_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def sorted_list(list_1):\n new_list = []\n count = 0\n for i in range(len(list_1)):\n if list_1[i] == -1:\n new_list.append(i)\n count += 1\n list_1.sort()\n list_2 = list_1[count:]\n for i in range(len(new_list)):\n list_2.insert(new_list[i],-1)\n return list_2\n\n\ns_list =[2,-1,1,5,4,-1,3]\nprint(sorted_list(s_list))\n\n"
},
{
"alpha_fraction": 0.6804123520851135,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 23.5,
"blob_id": "e33d2c74b138baa2dced4c8c33186959c6664c3f",
"content_id": "2a34a69747190559dd406a3ebc25fb21886b806f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 4,
"path": "/homework3_2(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "Super_string = set(input())\nsub_string = set(input())\nc = Super_string & sub_string\nprint(str(c))"
},
{
"alpha_fraction": 0.3598615825176239,
"alphanum_fraction": 0.41522490978240967,
"avg_line_length": 21.076923370361328,
"blob_id": "2893f2f3cc486283d25d35e1ade45393b699f424",
"content_id": "c0cfaeac5a5a1d4351370300c2d669e4da31f095",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 13,
"path": "/homework_from_slide.py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "#homework_2\n#for i in range(1,5):\n #for j in range(1,i+1):\n #if j < i:\n #print(i,end=' ')\n #else:\n #print(i,end='\\n')\n\n#homework_1\n#for i in range(2,101,1):\n #for j in range(i+1,101,1):\n #if i ** j == j ** i:\n #print(i,j,i**j) "
},
{
"alpha_fraction": 0.4197761118412018,
"alphanum_fraction": 0.4776119291782379,
"avg_line_length": 27.263158798217773,
"blob_id": "33db4f9390c81418a4596f974ebcfb4918278866",
"content_id": "fa877a4427197130a329b063d6ae7e28088b8c18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 19,
"path": "/homework8_2(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def non_decreasing_sequence(*nums):\n list_1 = [*nums]\n new_list = []\n for i in range(len(list_1)-1):\n if list_1[i]<abs(list_1[i+1]) and list_1[i]>list_1[i+1]:\n list_1[i+1]= abs(list_1[i+1])\n if list_1[i]>=abs(list_1[i+1]):\n list_1[i]= -(list_1[i])\n count = 0 \n for i in range(len(list_1)-1):\n if list_1[i] <= list_1[i+1]:\n count += 1\n if len(list_1)-count == 1:\n print('Yes',list_1)\n else:\n print('no')\n \n\nnon_decreasing_sequence(1,1,0)"
},
{
"alpha_fraction": 0.40753424167633057,
"alphanum_fraction": 0.4623287618160248,
"avg_line_length": 21.230770111083984,
"blob_id": "9c8eae10a33952067b145c74c059b359d4f56fd7",
"content_id": "2dc5548725f93913b132497978528e855d622657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 292,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 13,
"path": "/homework3_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [5,4,6,9,3]\ndef a(list_1):\n new_list = []\n for i in range(1,len(list_1),1):\n if list_1[i] > list_1[i-1]:\n continue\n else:\n new_list.append(list_1[i])\n if len(new_list) == 1:\n print(True)\n else:\n print(False)\na(list_1) "
},
{
"alpha_fraction": 0.34391534328460693,
"alphanum_fraction": 0.4285714328289032,
"avg_line_length": 17,
"blob_id": "20e5826d68fc6be636cf329fbd0a12ae7b9b6202",
"content_id": "bfe6fb0106062ef087c4ec593a06e0d47e8b0b63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 21,
"path": "/homework12_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "a = [[10,9,6,3,7],\n [6,10,2,9,7],\n [7,6,3,8,2],\n [8,9,7,9,9],\n [6,8,6,8,2]]\ni = -1 \nj = 0 \nnew_a = []\ndef rotateImage(a):\n global i,j,new_a\n if j >= len(a):\n return new_a\n row = []\n while abs(i) <= len(a):\n row.append(a[i][j])\n i -= 1 \n new_a.append(row)\n j += 1\n i = -1 \n return rotateImage(a)\nprint(rotateImage(a))\n"
},
{
"alpha_fraction": 0.4894736707210541,
"alphanum_fraction": 0.5684210658073425,
"avg_line_length": 26.14285659790039,
"blob_id": "542ad255762dd9ecbb7e0ba0a4c642f492fc8b07",
"content_id": "51b0d0f30a2a55ad8b1ce53ed12fdee32ebd9341",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 14,
"path": "/homework9_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "# 9_1\ndef buildPalindrome(string_1):\n if string_1 == string_1[-1::-1]:\n return string_1\n if string_1[-1] != string_1[0] and string_1[-1] != string_1[1]:\n string_1 += string_1[1]\n elif string_1[-1] == string_1[0]:\n string_1 += string_1[1]\n else:\n string_1 += string_1[0]\n return buildPalindrome(string_1)\n\n\nprint(buildPalindrome('abaa'))\n"
},
{
"alpha_fraction": 0.4464285671710968,
"alphanum_fraction": 0.5595238208770752,
"avg_line_length": 27,
"blob_id": "da5568e321c88719862daff8f0fe7fa3fb6f2f0a",
"content_id": "166bcca569428151048d3fa3ab3c5ca3b93db91d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 6,
"path": "/homework10_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "list_1 = [5,9,2,12,5,8]\ncount = 0\nfor i in range(0,len(list_1)-1,1):\n if abs(list_1[i]-list_1[i+1]) > count:\n count = abs(list_1[i]-list_1[i+1])\nprint(count)\n"
},
{
"alpha_fraction": 0.32122093439102173,
"alphanum_fraction": 0.3531976640224457,
"avg_line_length": 22.285715103149414,
"blob_id": "4223fa486fd5ec3f887a1099d68afe5a58b2c562",
"content_id": "beae78d44862eec66968e29fbd9925c42957ef87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 28,
"path": "/homework8_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def answer_queries(k,*query_counts):\n list_1 = [*query_counts]\n a = 0\n count = 0\n if len(list_1) == 1:\n count = (list_1[0]+k) // k\n return count\n else:\n for el in list_1:\n count += 1\n if el >= k:\n a+= el-k\n elif el < k and a!=0:\n a = a-(k-el)\n elif el + a < k:\n return count\n if a > k:\n while a>=0:\n a-=k\n count+=1\n return count\n elif a==0:\n return count + 1\n else:\n return count\n \n \nprint(answer_queries(5,10,5,5,3,2,1))\n \n \n \n \n"
},
{
"alpha_fraction": 0.5655737519264221,
"alphanum_fraction": 0.6065573692321777,
"avg_line_length": 16.5,
"blob_id": "25df2b7e4625280e3befb8d9715992ac4603d13a",
"content_id": "07e00537335f62a82d48edec66d8b5fcf44b2bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 14,
"path": "/homework11_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "count = 0\n\n\ndef count_elements_of_number(number):\n global count\n if number < 10:\n count += 1\n else:\n count += 1\n return count_elements_of_number(number // 10)\n return count\n\n\nprint(count_elements_of_number(999))"
},
{
"alpha_fraction": 0.31683167815208435,
"alphanum_fraction": 0.3861386179924011,
"avg_line_length": 13.428571701049805,
"blob_id": "3b13e5db7d3d146a337143bda34c7cb1f452f34f",
"content_id": "ba8b039e65df365a670a295de0fb3e445320dd28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 7,
"path": "/homework19_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def func_1(n):\n q = n\n while q >=1:\n print(n-(q-1),end='')\n q = q -1\n\nfunc_1(13)\n"
},
{
"alpha_fraction": 0.5431472063064575,
"alphanum_fraction": 0.5939086079597473,
"avg_line_length": 23.75,
"blob_id": "d4d28c131486d9e1e033a4721e32dc1e24c815fc",
"content_id": "890dd2476562c093dfc1178130e64ad94c89e804",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 8,
"path": "/homework6_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "a=list('A1')\nb=list('B5')\nlist1 = [int(el) for el in a if el.isdigit()]\nlist2 = [int(elem) for elem in b if elem.isdigit()]\nif list1[0] % 2 == list2[0] % 2:\n print(False)\nelse:\n print(True)"
},
{
"alpha_fraction": 0.5229681730270386,
"alphanum_fraction": 0.565371036529541,
"avg_line_length": 21.91666603088379,
"blob_id": "e04d2c17104f2e08c4fa013dca4ebd8689d63d0b",
"content_id": "0935f2193c4adb69d2041094e7af332c7ab5c761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 12,
"path": "/homework3_1(Ruben).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "n = int(input())\nnames = []\ngrades = []\nfor i in range(n):\n names.append(input())\n grades.append(float(input()))\nlist_1 = [names,grades]\nminn = 500\nfor el in list_1[1]:\n if el < minn and el > min(list_1[1]):\n minn = el\nprint(list_1[0][list_1[1].index(minn)])\n\n\n\n \n"
},
{
"alpha_fraction": 0.44680851697921753,
"alphanum_fraction": 0.4984802305698395,
"avg_line_length": 31.100000381469727,
"blob_id": "3fa38a20a3710de479b5978c953e4633d336b972",
"content_id": "3bb8c91bfff561c454d09fd7d23e12eff213d4a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 10,
"path": "/homework8_1(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def about_books(author_name,**var):\n list_1 = [var]\n print(list_1)\n for el in list_1:\n for elem in el:\n if el[elem][0] == author_name:\n print(elem,el[elem][1])\n \n \nabout_books('dyuma',askanio = ('dyuma',1965),sherlock = ('conan doyle',1887),thrones=('martin george',1999))\n "
},
{
"alpha_fraction": 0.5405405163764954,
"alphanum_fraction": 0.6040540337562561,
"avg_line_length": 48.33333206176758,
"blob_id": "ca83b61fc23a4fb15585305f48d05a5a11b69eb3",
"content_id": "0576efa6b1b6255b2bd8eabe0d1ad25e31bb0bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 15,
"path": "/homework9_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "string_1 = '(bar)'\n\n\ndef reverse_string(string_1):\n if string_1.count(')') == 1:\n string_1 = string_1.replace(string_1[string_1.rfind('('):string_1.rfind(')')+1],string_1[string_1.rfind(')')-1:string_1.rfind('('):-1])\n if string_1[string_1.find(')')]==string_1[string_1.find(')')+1]:\n string_1 = string_1.replace(string_1[string_1.rfind('('):string_1.find(')')+1],string_1[string_1.find(')')-1:string_1.rfind('('):-1])\n return reverse_string(string_1)\n string_1 = string_1.replace(string_1[string_1.rfind('('):string_1.rfind(')')+1],string_1[string_1.rfind(')')-1:string_1.rfind('('):-1])\n if ')' not in string_1:\n return string_1\n return reverse_string(string_1)\n\nprint(reverse_string(string_1))\n"
},
{
"alpha_fraction": 0.35555556416511536,
"alphanum_fraction": 0.46666666865348816,
"avg_line_length": 25.25,
"blob_id": "248cce00b6eff8eb53589a8e88a4b9cb0844cec2",
"content_id": "11b075c8f94abe5425b137fc74c4bc3d7543822f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 12,
"path": "/homework13_2(Narek).py",
"repo_name": "hsamvel/Python_homeworks",
"src_encoding": "UTF-8",
"text": "def search_(matrix,value):\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] == value:\n return i,j\n return -1\n\nmatrix = [[10, 20, 30, 40],\n [15, 25, 35, 45],\n [27, 28, 37, 48],\n [32, 33, 29, 50]]\nprint(search_(matrix,29))\n"
}
],
"num_files": 28