Dataset schema (one row per mirrored repository):

| column | dtype | observed values |
| --- | --- | --- |
| repo_name | string | lengths 5–114 |
| repo_url | string | lengths 24–133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string | 209 distinct values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k–683M |
| star_events_count | int64 | 0–22.6k |
| fork_events_count | int64 | 0–4.15k |
| gha_license_id | string | 17 distinct values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string | 115 distinct values |
| files | list | lengths 1–13.2k |
| num_files | int64 | 1–13.2k |
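The schema above is the column layout of a Hugging Face-style dataset, so the natural way to work with it is through the `datasets` library. Below is a minimal loading sketch; the dataset id `"user/repo-code-dump"` is a placeholder (the real hub name is not stated in this dump), and streaming is assumed so the large `files` payloads are not all pulled into memory at once.

```python
# Minimal loading sketch. NOTE: "user/repo-code-dump" is a placeholder id,
# not the real dataset name, which this dump does not state.
from datasets import load_dataset

ds = load_dataset("user/repo-code-dump", split="train", streaming=True)

row = next(iter(ds))
print(row["repo_name"], row["repo_url"])
print("num_files:", row["num_files"], "stars:", row["star_events_count"])
for f in row["files"]:
    # Each entry pairs per-file metadata with the raw source in f["text"].
    print(f["path"], f["language"], f["length_bytes"])
```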
repo_name: theyashsharma/Object-detection
repo_url: https://github.com/theyashsharma/Object-detection
snapshot_id: 449914da6929e973679f81f0a17dcb430961fe29
revision_id: b96ac4bc9e384e68bad8f740c876ec28a73a0a9a
directory_id: a4e6ea11993802435e3f6c88b0b922a77f51dc68
branch_name: refs/heads/master
visit_date: 2023-04-28T06:37:44.336652
revision_date: 2021-05-20T08:59:48
committer_date: 2021-05-20T08:59:48
github_id: 369133061
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6699561476707458, "alphanum_fraction": 0.7116228342056274, "avg_line_length": 26.66666603088379, "blob_id": "18eef4792e3402fd517e090443eb188b6db56042", "content_id": "85d8b3c451cb486ba94fbeb50ce03202ab224f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "no_license", "max_line_length": 142, "num_lines": 33, "path": "/Face Detection/faceDetection.py", "repo_name": "theyashsharma/Object-detection", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 6 16:17:57 2020\n\n@author: yash\n\"\"\"\n\nimport cv2\n\n# Creating a cascaded classifier object\nface_cascade = cv2.CascadeClassifier(\"/home/yash/Basic/Computer Science/Data Science/Projects/Face Detection/haarcascade_frontalface_alt.xml\")\n\n# Reading the image\nimg = cv2.imread(\"/home/yash/Basic/Computer Science/Data Science/Projects/Face Detection/face_photo.jpg\", 1)\n\n# Reading the image as gray scale image\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Searching the coordinates of image\nfaces = face_cascade.detectMultiScale(gray_img, scaleFactor = 1.05, minNeighbors = 5)\n\nprint(type(faces))\nprint(faces)\n\nfor x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n \nresized = cv2.resize(img, (int(img.shape[1]), int(img.shape[0])))\n\ncv2.imshow(\"Gray\", resized)\ncv2.waitKey(0)\ncv2.destroyAllWindows()" }, { "alpha_fraction": 0.7152230739593506, "alphanum_fraction": 0.7283464670181274, "avg_line_length": 45.625, "blob_id": "469dd71438e3e50af7b84f06befeeba39ba7f23c", "content_id": "7c722d69ef9674589aea50b49158122a26e52554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 236, "num_lines": 16, "path": "/Yoga/Yoga.py", "repo_name": "theyashsharma/Object-detection", "src_encoding": "UTF-8", "text": "from imageai.Detection import ObjectDetection\r\nimport os\r\nexecution_path = os.getcwd()\r\ndetector=ObjectDetection()\r\ndetector.setModelTypeAsRetinaNet()\r\ndetector.setModelPath( os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\r\ndetector.loadModel()\r\ncustom_objects = detector.CustomObjects(person=True, car=False)\r\ndetections = detector.detectCustomObjectsFromImage(input_image=os.path.join(execution_path , \"YOGA.jpg\"), output_image_path=os.path.join(execution_path , \"YOGA_new.jpg\"), custom_objects=custom_objects, minimum_percentage_probability=10)\r\n\r\ncount=0\r\nfor eachObject in detections:\r\n print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"] )\r\n print(\"--------------------------------\")\r\n count=count+1\r\nprint(count)\r\n" }, { "alpha_fraction": 0.7197943329811096, "alphanum_fraction": 0.7326478362083435, "avg_line_length": 49.86666488647461, "blob_id": "98e4df3c7e875d12dfed1c611030e65fe3e342bd", "content_id": "97434acdf1b1e66a106757cfaef67a5846e2bb23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 240, "num_lines": 15, "path": "/Kanwar/kanwar.py", "repo_name": "theyashsharma/Object-detection", "src_encoding": "UTF-8", "text": "from imageai.Detection import ObjectDetection\r\nimport os\r\nexecution_path = os.getcwd()\r\ndetector=ObjectDetection()\r\ndetector.setModelTypeAsRetinaNet()\r\ndetector.setModelPath( os.path.join(execution_path , 
\"resnet50_coco_best_v2.0.1.h5\"))\r\ndetector.loadModel()\r\ncustom_objects = detector.CustomObjects(person=True, car=False, banana=False)\r\ndetections = detector.detectCustomObjectsFromImage(input_image=os.path.join(execution_path , \"kanwar.jpg\"), output_image_path=os.path.join(execution_path , \"kanwar_new.jpg\"), custom_objects=custom_objects, minimum_percentage_probability=15)\r\ncount=0\r\nfor eachObject in detections:\r\n print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"] )\r\n print(\"--------------------------------\")\r\n count=count+1\r\nprint(count)\r\n" }, { "alpha_fraction": 0.6977025270462036, "alphanum_fraction": 0.7327690720558167, "avg_line_length": 24.84375, "blob_id": "cfc2a081e99a5e5b1d16273058e1ddd3907d83c0", "content_id": "432c4aa6bc124ca2ba8fe5175ea4346191850923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/Pedestrian Detection/PedDetection.py", "repo_name": "theyashsharma/Object-detection", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n# Create our body classifier\nbody_classifier = cv2.CascadeClassifier('haarcascade_fullbody.xml')\n\n# Initiate video capture for video file\ncap = cv2.VideoCapture('walking.avi')\n\n# Loop once video is successfully loaded\nwhile cap.isOpened():\n \n# Read first frame\nret, frame = cap.read()\nframe = cv2.resize(frame, None,fx=2, fy=1.5, interpolation =\ncv2.INTER_LINEAR)\ngray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n# Pass frame to our body classifier\nbodies = body_classifier.detectMultiScale(gray, 1.2, 3)\n\n# Extract bounding boxes for any bodies identified\nfor (x,y,w,h) in bodies:\ncv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\nrectangular box will be shown\ncv2.imshow('Pedestrians', frame)\n\n\nif cv2.waitKey(1) == 13: #13 is the Enter Key\nbreak\ncap.release()\ncv2.destroyAllWindows()\n" } ]
num_files: 4
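The `files` column above is the payload: a list of per-file records whose `path`, `src_encoding`, and escaped `text` fields are enough to rebuild the repository's working tree. A minimal sketch follows, assuming `row` is one decoded record like the one above; the `out` directory name is arbitrary.

```python
# Sketch: materialise one row's "files" list back into a directory tree.
# Assumes `row` is a dict shaped like the record above; "out" is arbitrary.
import pathlib

def dump_row(row: dict, out: str = "out") -> None:
    for f in row["files"]:
        # Paths in the dump are rooted ("/Face Detection/faceDetection.py"),
        # so strip the leading slash before joining.
        dest = pathlib.Path(out) / f["path"].lstrip("/")
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_text(f["text"], encoding="utf-8")
```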
repo_name: pentium3/dl_coursera
repo_url: https://github.com/pentium3/dl_coursera
snapshot_id: 61c8f5dd0f3823bd0c457dc5b58f8a0f03b9ae5a
revision_id: 912a9580d809f0ce9b50e958398e5ead3d80643f
directory_id: c4ea0c79b259d7a7a03dc29515e5147a0a8cbe2b
branch_name: refs/heads/master
visit_date: 2020-03-19T07:29:23.826603
revision_date: 2018-10-22T01:22:17
committer_date: 2018-10-22T01:22:17
github_id: 136118493
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5755724906921387, "alphanum_fraction": 0.5960305333137512, "avg_line_length": 31.425743103027344, "blob_id": "67fd92afc758cc88e03f521521ed25f5d9c56332", "content_id": "d0c1b843ed13ca4c417bba7ed16010cdac0065c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3275, "license_type": "no_license", "max_line_length": 124, "num_lines": 101, "path": "/1_Neural_Networks_and_Deep_Learning/1_Logistic_Regression_with_a_Neural_Network_mindset/week1_logistic.py", "repo_name": "pentium3/dl_coursera", "src_encoding": "UTF-8", "text": "'''\nclassify cat image by logistic regression\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\ndef sigmod(x):\n return (1 / (1 + np.exp(-x)))\n\ndef initialize_with_zeros(dim):\n w = np.zeros((dim,1))\n b = 0\n return (w,b)\n\ndef propagate(w, b, X, Y):\n m = X.shape[1]\n #forward propgation\n A = sigmod(np.dot(w.T, X) + b)\n J = (-1/m) * np.sum( Y * np.log(A) + (1-Y) * np.log(1-A) )\n #backward propgation\n dw = (1/m) * np.dot(X, (A - Y).T)\n db = (1/m) * np.sum(A - Y)\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n J = np.squeeze(J)\n assert(J.shape == ())\n grads = {\"dw\": dw , \"db\": db}\n return grads, J\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\n costs = []\n for i in range(num_iterations):\n grads, cost = propagate(w, b, X, Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n w = w - learning_rate * dw\n b = b - learning_rate * db\n #record the cost of every 100 iterations\n if(i%100 == 0):\n costs.append(cost)\n if(print_cost):\n print(\"cost of iteration %i == %f\"%(i, cost))\n params = {\"w\": w , \"b\": b}\n grads = {\"dw\": dw, \"db\": db}\n return params, grads, costs\n\ndef predict(w, b, X):\n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n A = sigmod(np.dot(w.T, X)+b)\n for i in range(A.shape[1]):\n Y_prediction[0, i] = 1 if(A[0, i]>0.5) else 0\n assert(Y_prediction.shape == (1,m))\n return Y_prediction\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):\n w, b = initialize_with_zeros(X_train.shape[0])\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n train_acc = 100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100\n test_acc = 100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100\n print(\"training accuracy == \",train_acc)\n print(\"testing accuracy == \",test_acc)\n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n return d\n\ntrain_set_x_org, train_set_y, test_set_x_org, test_set_y, classes = load_dataset()\n\nm_train = np.shape(train_set_x_org)[0]\nm_test = np.shape(test_set_x_org)[0]\nnum_px = np.shape(train_set_x_org)[1]\nprint(m_train, m_test, num_px)\n\ntrain_set_x_flatten = train_set_x_org.reshape(m_train, -1).T\ntest_set_x_flatten = test_set_x_org.reshape(m_test, -1).T\ntrain_set_x = train_set_x_flatten/255\ntest_set_x = 
test_set_x_flatten/25\nprint(np.shape(train_set_x_org))\nprint(np.shape(train_set_y))\nprint(np.shape(test_set_x_org))\nprint(np.shape(test_set_y))\nprint(np.shape(train_set_x))\nprint(np.shape(test_set_x))\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n" } ]
num_files: 1
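Each file record also carries derived statistics (alpha_fraction, alphanum_fraction, avg_line_length, max_line_length, num_lines, length_bytes). Their exact definitions are not documented in the dump; the sketch below recomputes them under the obvious interpretations of the field names, which can be sanity-checked against stored values such as num_lines = 101 for week1_logistic.py above.

```python
# Sketch: recompute the per-file statistics stored in each record.
# The formulas here are assumptions inferred from the field names; compare
# the output against stored values (e.g. num_lines = 101, max_line_length
# = 124 for week1_logistic.py) to confirm them.
def file_stats(text: str) -> dict:
    lines = text.splitlines()
    n_chars = max(len(text), 1)
    return {
        "num_lines": len(lines),
        "max_line_length": max(map(len, lines), default=0),
        "avg_line_length": sum(map(len, lines)) / max(len(lines), 1),
        "alpha_fraction": sum(c.isalpha() for c in text) / n_chars,
        "alphanum_fraction": sum(c.isalnum() for c in text) / n_chars,
        "length_bytes": len(text.encode("utf-8")),
    }
```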
repo_name: acio-olympiad/2020Contest0
repo_url: https://github.com/acio-olympiad/2020Contest0
snapshot_id: 5d332ece8a1d89c6d0823fd188cf364008c00036
revision_id: 0b8559bc937ccd996664ea16724eb18a92f3560c
directory_id: ae38f2baebec49bd8ee4a106c8db51da6878cc0b
branch_name: refs/heads/master
visit_date: 2022-12-08T03:08:15.446006
revision_date: 2020-08-06T11:06:03
committer_date: 2020-08-06T11:06:03
github_id: 280780012
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.5314285755157471, "avg_line_length": 16.5, "blob_id": "0f6dd13ad264dce91a6b4cff3b7585e941c2b4a4", "content_id": "238709d7b41b058a0d85dfc330a76a12911b41e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 175, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/pairs/validator/sub2.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n#include <algorithm>\n\nint main() {\n\tChecker c{}; c.validate();\n\tfor(int i = 0; i < c.N; ++i){\n\t\tensuref(c.skills[i] == i + 1, \"s_i != i\");\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5354911684989929, "alphanum_fraction": 0.5479841232299805, "avg_line_length": 20.475608825683594, "blob_id": "8e9775221132200e3281505c82038a9fa9722bbb", "content_id": "d6cbb080ccf625803ba1c2e681974adeea846607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1761, "license_type": "no_license", "max_line_length": 83, "num_lines": 82, "path": "/radio/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\nusing namespace std;\n\ntypedef pair<int,int> pii;\n\nbool can_do(int L, int K, int D, vector<int> B, vector<vector<int>> edges){\n\tvector<bool> seen(edges.size());\n\tvector<int> bfsc, bfsn;\n\tint cnt = 0;\n\tseen[1]=1;\n\tif(B[1] >= L)bfsc.push_back(1),cnt++;\n\tfor(int i = 0; i < D; ++i){\n\t\tfor(int u: bfsc){\n\t\t\tfor(int v: edges[u]){\n\t\t\t\tif(!seen[v] && B[v] >= L){\n\t\t\t\t\tseen[v]=1;\n\t\t\t\t\tbfsn.push_back(v);\n\t\t\t\t\tcnt++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tswap(bfsc,bfsn);\n\t\tbfsn.clear();\n\t}\n\treturn cnt >= K;\n}\n\nint solve(int N, int M, int K, int D, vector<int> B, vector<int> U, vector<int> V){\n\tvector<vector<int>> edges(N+1);\n\tfor(int i = 0; i < M; ++i){\n\t\tedges[U[i]].push_back(V[i]);\n\t\tedges[V[i]].push_back(U[i]);\n\t}\n\tint lo=1,hi=1e6+1,mid;\n\twhile(lo!=hi){\n\t\tmid=lo+hi>>1;\n\t\tif(can_do(mid, K, D, B, edges))lo=mid + 1;\n\t\telse hi = mid;\n\t}\n\treturn lo-1;\n}\n\nstruct Checker {\n\tint N, M, K, D;\n\tvector<int> B{0}, U, V;\n\tset<pii> edges;\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tN = inf.readInt(MIN_N, MAX_N, \"N\");\n\t\tinf.readSpace();\n\t\tM = inf.readInt(MIN_M, MAX_M, \"M\");\n\t\tinf.readSpace();\n\t\tK = inf.readInt(MIN_K, N, \"K\");\n\t\tinf.readSpace();\n\t\tD = inf.readInt(MIN_D, MAX_D, \"D\");\n\t\tinf.readEoln();\n\t\tfor (int i = 0; i < N; i++) {\n\t\t\tint b_i = inf.readInt(MIN_b_i, MAX_b_i, \"b_i\");\n\t\t\tB.push_back(b_i);\n\t\t\tif(i!=N-1)inf.readSpace();\n\t\t}\n\t\tinf.readEoln();\n for(int i = 0; i < M; ++i){\n\t\t\tint u = inf.readInt(1, N, \"u\");\n\t\t\tinf.readSpace();\n\t\t\tint v = inf.readInt(1, N, \"v\");\n\t\t\tensuref(u != v, \"u == v, self loop\");\n\t\t\tensuref(!edges.count({u,v}), \"Duplicate edge\");\n\t\t\tU.push_back(u);\n\t\t\tV.push_back(v);\n\t\t\tedges.insert({u,v});\n\t\t\tedges.insert({v,u});\n\t\t\tinf.readEoln();\n\t\t}\n\t\tinf.readEof();\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.5633803009986877, "alphanum_fraction": 0.591549277305603, "avg_line_length": 16.75, "blob_id": "8518d4772950f953ef3531e0b0e56e0f2541271e", "content_id": "b1f1cdab4fd3328239f84517de73903090e5c8bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 142, "license_type": "no_license", "max_line_length": 68, "num_lines": 8, "path": "/holes/validator/sub4.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.L <= 5 && c.W <= 5, \"L or W does not match sub4 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5917041301727295, "alphanum_fraction": 0.6161919236183167, "avg_line_length": 22, "blob_id": "46c6076089db01dd64a1003f16856cc83d81393e", "content_id": "140e2be276eb915640133d74513f0b43adcf26ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 81, "num_lines": 87, "path": "/pairs/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import sys\nimport random\n\nusage = \"\"\"\nUsage:\nTo generate a random case for a subtask use:\npython gen.py <subtask>\nTo specify additional parameters, use:\npython gen.py <subtask> <N>\ncase will be written to stdout\nsubtask - the subtask of case (1-5)\nN - number of beads (1 - subtask limit)\n\"\"\"\n\n\nSUBTASKS = 4\nMAX_A = 2 * int(1e5)\nMAX_SKILL = int(1e5)\nMIN_SKILL = 0\n\ndef random_skills(N):\n mn_skill = random.randint(MIN_SKILL,MAX_SKILL)\n mx_skill = random.randint(MIN_SKILL,MAX_SKILL)\n mn_skill,mx_skill = min(mn_skill,mx_skill),max(mn_skill,mx_skill)\n return [random.randint(mn_skill,mx_skill) for i in range(N)]\n\ndef binary_skills(N):\n return [random.randint(0,1) for i in range(N)]\n\ndef triangle_skills(N):\n return list(range(1,N+1))\n\ndef random_range(skills):\n mn = MAX_SKILL\n mx = MIN_SKILL\n for s in skills:\n mn = min(s,mn)\n mx = max(s,mx)\n a = random.randint(mn*2,mx*2)\n b = random.randint(mn*2,mx*2)\n a,b = min(a,b),max(a,b)\n return a,b\n\ndef random_a_inf_b(skills):\n mn = MAX_SKILL\n mx = MIN_SKILL\n for s in skills:\n mn = min(s,mn)\n mx = max(s,mx)\n a = random.randint(mn*2,mx*2)\n b = MAX_A\n return a,b\n\ndefaults = (100000, random_range, random_skills)\nsubtask_info = (\n (),\n (1000, defaults[1], defaults[2]),\n (defaults[0], defaults[1], triangle_skills),\n (defaults[0], random_a_inf_b, defaults[2]),\n defaults\n) # MAXN, range gen, skill gen \n\nargs = sys.argv[1:]\n\nif len(args) < 1:\n print(usage)\n exit(0)\n\nsubtask = int(args[0])\nif subtask < 1 or subtask > SUBTASKS:\n print(\"Invalid subtask: '%d'. Must be between (1 - %d)\" % (subtask,SUBTASKS))\n exit(0)\n\nMAXN, range_gen, skill_gen = subtask_info[subtask]\n\nif len(args) > 1:\n N = int(args[1])\n if N < 2 or N > MAXN:\n print(\"Invalid N for subtask: '%d'. Must be between (2 - %d)\" % (N,MAXN))\n exit(0)\nelse:\n N = random.randint(2, MAXN)\n\nskills = skill_gen(N)\nA,B = range_gen(skills)\nprint(N,A,B)\nprint(*skills)\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 22.46666717529297, "blob_id": "7f748c21942f371764cf387ee8a4277ba4cf5308", "content_id": "7862958a9614e6f7f60fdad2c40f8f60ca1a3d99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 352, "license_type": "no_license", "max_line_length": 91, "num_lines": 15, "path": "/accumulate/validator/bounds.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! 
WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_N = 2;\nconst int MAX_N = 100000;\nconst int MIN_K = 1;\nconst int MIN_A = -100000;\nconst int MAX_A = 100000;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.5888888835906982, "alphanum_fraction": 0.6277777552604675, "avg_line_length": 17, "blob_id": "a3faaf26e4c4d517d38a86f27d6fe8935834db91", "content_id": "ec7cf6ecd267395ac20f5168a7b26cf17c1cdfb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 180, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/battleship/validator/sub4.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\n\nint main() {\n\tc.validate();\n\tensuref(c.N <= 80, \"N does not match sub4 bounds\");\n\tensuref(c.M <= 80, \"M does not match sub4 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5537757277488708, "alphanum_fraction": 0.5720824003219604, "avg_line_length": 18, "blob_id": "7d1bf127c66eaf5d182db10c2b3b472bd512972e", "content_id": "dbcdcc38608669e865a46b7940e83229145ba946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 437, "license_type": "no_license", "max_line_length": 44, "num_lines": 23, "path": "/accumulate/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\n\nstruct Checker {\n\tint N, K, a[100005];\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tN = inf.readInt(MIN_N, MAX_N, \"N\");\n\t\tinf.readSpace();\n\t\tK = inf.readInt(MIN_K, N-1, \"K\");\n\t\tinf.readEoln();\n\t\tfor (int i = 1; i <= N; i++) {\n\t\t\ta[i] = inf.readInt(MIN_A, MAX_A, \"a[i]\");\n\t\t\tif (i < N) inf.readSpace();\n\t\t\telse inf.readEoln();\n\t\t}\n\t\tinf.readEof();\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.4859813153743744, "alphanum_fraction": 0.5046728849411011, "avg_line_length": 21.77777862548828, "blob_id": "05e085482619c73eb9258d761c02ddc5399b20ff", "content_id": "945ae66d70ec382bd0927cd308766fec4f9d82f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/accumulate/solutions/junhua-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#k = open(\"gamein.txt\",\"r\")\r\n#l = open(\"gameout.txt\",\"w\")\r\ndef read(): return(input()) #return(k.readline())\r\ndef write(x): print(x) #l.write(str(x))\r\n[N,K]=[int(c) for c in read().split()]\r\nA = [int(c) for c in read().split()]\r\nB = [0]\r\nfor i in range(N):\r\n B.append(B[-1] + A[i])\r\nB = (B[::-1])[:-2]\r\nB.sort()\r\nAns = 0\r\nwhile len(B) and B[-1] > 0 and K:\r\n Ans += B.pop()\r\n K -= 1\r\nwrite(Ans)\r\n#k.close()\r\n#l.close()\r\n" }, { "alpha_fraction": 0.6376146674156189, "alphanum_fraction": 0.6674311757087708, "avg_line_length": 17.95652198791504, "blob_id": "d6c6891c5145df46831c083173006f7c0c1c88f4", "content_id": "e0fa95cb9d2374764e5a65da1dc66cda30f4fc0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 436, "license_type": "no_license", "max_line_length": 91, "num_lines": 23, "path": "/radio/validator/bounds.h", 
"repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_N = 1;\nconst int MAX_N = 1e5;\n\nconst int MIN_M = 0;\nconst int MAX_M = 1e5;\n\nconst int MIN_D = 1;\nconst int MAX_D = 1e5;\n\nconst int MIN_K = 1;\n\nconst int MIN_b_i = 1;\nconst int MAX_b_i = 1e6;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.4566294848918915, "alphanum_fraction": 0.5037174820899963, "avg_line_length": 27.298246383666992, "blob_id": "535a5d1ff3f241b14c8ebead6551949d6c1bbea4", "content_id": "93e44c2bcad301f7ef9a68badc4a1e088ff7c77a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 73, "num_lines": 57, "path": "/battleship/solutions/junhua-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#k = open(\"shipin.txt\",\"r\")\n#l = open(\"shipout.txt\",\"w\")\ndef read(): return(input()) #return(k.readline())\ndef write(x): print(x) #l.write(str(x))\n\n[N,M,K]=[int(c) for c in read().split()] #read input and 1 index gri\nA=[[0]*(M+2)]\nfor i in range(N):\n Row = [int(c) for c in read().split()]\n Row.insert(0,0)\n Row.append(0)\n A.append(Row)\nA.append([0]*(M+2))\n \n\nHsum=[]\nVsum=[[0]*(M+2)]\ndp1=[[0]*(M+2) for i in range(N+2)]\ndp2=[[0]*(M+2) for i in range(N+2)]\n\nfor r in range(N+2): #precalc prefix sums\n Hsum.append([0])\n for c in range(1,M+2):\n Hsum[r].append(Hsum[r][c-1]+A[r][c])\n \nfor r in range(1,N+2):\n Vsum.append([0])\n for c in range(1,M+2):\n Vsum[r].append(Vsum[r-1][c]+A[r][c]) \n\nfor r in range(1,N+1): #precalculate optimums for subgrids\n for c in range(1,M+1): \n dp1[r][c]=max(dp1[r-1][c],dp1[r][c-1])\n if r >= K:\n dp1[r][c]=max(dp1[r][c], Vsum[r][c] - Vsum[r-K][c])\n if c >= K:\n dp1[r][c]=max(dp1[r][c], Hsum[r][c] - Hsum[r][c-K])\n\nfor r in range(N,0,-1):\n for c in range(M,0,-1):\n dp2[r][c]=max(dp2[r+1][c],dp2[r][c+1])\n if r+K-1 <= N:\n dp2[r][c]=max(dp2[r][c], Vsum[r+K-1][c] - Vsum[r-1][c])\n if c+K-1 <= M:\n dp2[r][c]=max(dp2[r][c], Hsum[r][c+K-1] - Hsum[r][c-1])\n\nAns = 0 #calculate Answer\nfor r in range(1,N): #split at row: 1 ... i is one, i+1 ... N is other\n Ans = max(Ans, dp1[r][M] + dp2[r+1][1])\nfor c in range(1,M): #split at column: 1 ... i is one, i+1 ... 
M is other\n Ans = max(Ans, dp1[N][c] + dp2[1][c+1])\n\nwrite(Ans)\n\n\n#k.close()\n#l.close()\n\n" }, { "alpha_fraction": 0.5352112650871277, "alphanum_fraction": 0.5492957830429077, "avg_line_length": 14.777777671813965, "blob_id": "1a9fdddf21190be00a12a10cc5503098e37cf129", "content_id": "54d3f6341657f2a03e46eda04bc2b45021a302f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 142, "license_type": "no_license", "max_line_length": 67, "num_lines": 9, "path": "/battleship/validator/sub2.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\n\nint main() {\n\tc.validate();\n\tensuref(c.N == c.M && c.M == c.K, \"N, M, K are not equal (sub2)\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6061320900917053, "alphanum_fraction": 0.6061320900917053, "avg_line_length": 16.66666603088379, "blob_id": "c4619bc9dff64001d8a91937f9bd91d6bc08b87a", "content_id": "8bb1ecab697b97d52ed494cb423563b6a645de4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 424, "license_type": "no_license", "max_line_length": 37, "num_lines": 24, "path": "/holes/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\nusing namespace std;\n\nstruct Checker {\n\tint L, W, A, B;\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tL = inf.readInt(MIN_L, MAX_L, \"L\");\n\t\tinf.readSpace();\n\t\tW = inf.readInt(MIN_W, MAX_W, \"W\");\n\t\tinf.readEoln();\n\t\tA = inf.readInt(MIN_A, L, \"A\");\n\t\tinf.readSpace();\n\t\tB = inf.readInt(MIN_B, W, \"B\");\n\t\tinf.readEoln();\n\t\tinf.readEof();\n\t}\n};\n\n\n#endif\n" }, { "alpha_fraction": 0.5381920337677002, "alphanum_fraction": 0.6131744980812073, "avg_line_length": 28.12244987487793, "blob_id": "f4e3da18dab1eff68afdea4a015a0b9c36cb9a43", "content_id": "c8b451337fa58d4a36a2528abdde8c0aba65d11c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1427, "license_type": "no_license", "max_line_length": 138, "num_lines": 49, "path": "/radio/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#aux gen\nCases = []\nimport random\n\nnvals = [1/10,1/4,1/2,9/10,1]\ndvals = [10,15,20,100,200]\ndef do_subtask(subtask,max_n,max_m,max_bandwidth, use_dvals):\n m = max_m\n for i in range(len(nvals)):\n nc = nvals[i]\n n = nc * max_n\n n = min(max_n,int(n))\n\n if (use_dvals):\n d = dvals[i]\n else:\n d = n\n\n for j in nvals:\n k = j * n\n k = min(max_n,int(k))\n disconnect_starting_node = 1 if (random.randint(1,10) == 1) else 0\n high_starting_node_bandwidth = 1 if (random.randint(1,10) == 1) else 0\n for bump_k_target in range(2):\n s = f\"{subtask} {max_bandwidth} {disconnect_starting_node} {high_starting_node_bandwidth} {bump_k_target} {n} {m} {k} {d}\"\n Cases.append(s)\n\ndo_subtask(1,100000,100000,2,False)\ndo_subtask(2,100000,100000,20,False)\ndo_subtask(3,1000,1000,1000000,False)\ndo_subtask(4,100000,100000,1000000,True)\n\nfor case in Cases:\n print(case)\n\n\"\"\"\nfirst line: \nsubtask\nmax_bandwidth\ndisconnect_starting_node - boolean, if true, starting node is disconnected (10% of cases)\nhigh_starting_node_bandwidth - boolean, if true, starting node's bandwidth is max_bandwidth (10% of cases)\nbump_k_target - boolean, if true, then once a stronger K is determined, it will increment K 
if possible (50% of cases)\nn\nm\nk \nd\n\n\"\"\"\nprint() # Last print required to tell program to stop\n" }, { "alpha_fraction": 0.5984848737716675, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 17.85714340209961, "blob_id": "728e51f92bf271015f23ded7d1836216c55a98f8", "content_id": "7575dc1d13fe32b443bebcd537dcc724f20bfd0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 132, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/pairs/validator/sub3.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nint main() {\n\tChecker c{}; c.validate();\n\tensuref(c.B == MAX_A, \"B does not match sub3 bounds\");\t\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5634920597076416, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 14.75, "blob_id": "708a1c014c21b6e1ab44fce1c5dceacb71bc9531", "content_id": "f896982d835a4b18a02b06b2f14e9dc2545cc1e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 126, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/accumulate/validator/sub1.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.K == 1, \"K = 1 not maintained (sub1)\\n\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5412843823432922, "alphanum_fraction": 0.5535168051719666, "avg_line_length": 17.11111068725586, "blob_id": "757b809e082ecc7287af08774929cc2f0392db60", "content_id": "6a6a3af4941e4832305763fa71bd0f5694e85784", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/pairs/solutions/james-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import sys\n\nX,Y = list(sys.stdin)\n\nN,A,B = map(int, X.split())\nskills = list(map(int, Y.split()))\n\nskills.sort()\nans=0\nl=r=N-1\nfor s in range(N):\n l = max(l,s)\n r = max(r,s)\n while l > s and skills[s] + skills[l] >= A:l-=1\n while r > s and skills[s] + skills[r] > B:r-=1\n ans += r-l\n\nsys.stdout.write(str(ans))\n\n" }, { "alpha_fraction": 0.4704301059246063, "alphanum_fraction": 0.5551075339317322, "avg_line_length": 30.885713577270508, "blob_id": "31951890fb7007138711506b00d3f1531834326f", "content_id": "ed75d7897ae9aa989af35827d694dab491410657", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2232, "license_type": "no_license", "max_line_length": 181, "num_lines": 70, "path": "/jewels/data/gen/bootstrap.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import os\nimport random\n\ntypes = [\"all-blue\", \"all-red\", \"alternating\", \"end-biased\", \"middle-biased\", \"ascending\", \"2-non-adjacent-reds\", \"no-adjacent-reds\", \"random-v1\", \"random-v2\", \"random-v3\", \"mixed\"]\nblock_types = [3,4,5,9,11]\n\ndef print_system(command):\n print(\"Generating\", command.split()[-1])\n os.system(command)\n\ndef gen_case(of, sb, *args):\n print_system(\"python3 gen/gen.py %d %s > %s-sub%d.in\" % (sb, ' '.join(map(str,args)), of, sb))\n\ndef sub1():\n for i in range(16):\n print_system(\"python3 gen/gen-sub1.py %d > %d-sub1.in\" % (i,i))\n\ndef sub2():\n gen_case(\"min\", 2, 4)\n for i in range(10):\n gen_case(\"small-%d\" % i, 2, 
random.randint(6,10))\n for i in range(3):\n gen_case(\"random-%d\" % i, 2)\n for i in range(3):\n gen_case(\"max-%d\" % i, 2, 100000)\n\ndef sub3():\n gen_case(\"min\", 3, 4)\n for i in range(5):\n gen_case(\"small-%d\" % i, 3, random.randint(6,10))\n for i in range(len(types)):\n gen_case(\"size-999-%s\" % types[i], 3, 999, i)\n for i in range(len(types)):\n gen_case(\"max-%s\" % types[i], 3, 1000, i)\n for i in range(5):\n gen_case(\"max-%d\" % i, 3, 1000)\n\n for block_count in [2,3,4,9,10,99,100,397,400]: \n for block_type in block_types:\n gen_case(\"block-%d-%s\" % (block_count, types[block_type]), 3, 1000, block_type, block_count)\n\ndef sub4():\n gen_case(\"min\", 4, 4)\n for i in range(10):\n gen_case(\"small-%d\" % i, 4, random.randint(6,10))\n for i in range(3):\n gen_case(\"random-%d\" % i, 4)\n for i in range(3):\n gen_case(\"max-%d\" % i, 4, 100000)\n\ndef sub5():\n gen_case(\"min\", 5, 4)\n for i in range(10):\n gen_case(\"small-%d\" % i, 5, random.randint(6,10))\n for i in range(len(types)):\n gen_case(\"size-99999-%s\" % types[i], 5, 99999, i)\n for i in range(len(types)):\n gen_case(\"max-%s\" % types[i], 5, 100000, i)\n for i in range(10):\n gen_case(\"max-%d\" % i, 5, 100000)\n\n for block_count in [2,3,4,9,10,99,100,999,1000,9999,10000,39997,40000]: \n for block_type in block_types:\n gen_case(\"block-%d-%s\" % (block_count, types[block_type]), 5, 100000, block_type, block_count)\n\nsub1()\nsub2()\nsub3()\nsub4()\nsub5()\n" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.4333333373069763, "avg_line_length": 14, "blob_id": "70c347ad85ace78a1b1e53e0c6697b18b6bdf735", "content_id": "0b532a65a1cabdb618e958246c1be10cdbd4d07b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 120, "license_type": "no_license", "max_line_length": 38, "num_lines": 8, "path": "/holes/solutions/tunan-full.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include <cstdio>\n\nint L, W, A, B;\n\nint main() {\n\tscanf(\"%d %d %d %d\", &L, &W, &A, &B);\n\tprintf(\"%d\\n\", (L/A)*(W/B));\n}\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 14.75, "blob_id": "186552841df8f4186ce9ef593c3cce126edb9c2d", "content_id": "56ed2755d9b301d0e7c5da2cb033bd423a6e8204", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 63, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/radio/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp radio.pdf statement.pdf\n" }, { "alpha_fraction": 0.5633803009986877, "alphanum_fraction": 0.591549277305603, "avg_line_length": 16.75, "blob_id": "592eb8da2965f7e85a36aca94855efd6531c38f1", "content_id": "f58eb74fa3e20ce552445a8436f3f3dbe3f26880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 142, "license_type": "no_license", "max_line_length": 68, "num_lines": 8, "path": "/holes/validator/sub2.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.A == 1 && c.B == 1, \"A or B does not match sub2 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5938628315925598, "alphanum_fraction": 0.5974729061126709, "avg_line_length": 18.785715103149414, "blob_id": 
"5a86165d942acaa4c4c0b1ce83383c10df3e7b4d", "content_id": "27acf21b67d06dd3bd3cd987e002082611e956fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 554, "license_type": "no_license", "max_line_length": 54, "num_lines": 28, "path": "/pairs/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\nusing namespace std;\n\nstruct Checker {\n\tint N, A, B;\n\tvector<int> skills;\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tN = inf.readInt(MIN_N, MAX_N, \"N\");\n\t\tinf.readSpace();\n\t\tA = inf.readInt(MIN_A, MAX_A, \"A\");\n\t\tinf.readSpace();\n\t\tB = inf.readInt(A, MAX_A, \"B\");\n\t\tinf.readEoln();\n\t\tfor (int i = 0; i < N; i++) {\n\t\t\tint s_i = inf.readInt(MIN_SKILL, MAX_SKILL, \"s_i\");\n\t\t\tskills.push_back(s_i);\n\t\t\tif(i!=N-1)inf.readSpace();\n\t\t}\n\t\tinf.readEoln();\n\t\tinf.readEof();\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.37463125586509705, "alphanum_fraction": 0.4115044176578522, "avg_line_length": 19.238805770874023, "blob_id": "81d3e6ad70af037e90e5686ca802d52da9850e8e", "content_id": "77b5f710b28b1cd27f7af6fb70fefe9978cc6967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1356, "license_type": "no_license", "max_line_length": 60, "num_lines": 67, "path": "/radio/solutions/quang-full.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include <cstdio>\n#include <vector>\n#include <queue>\nusing namespace std;\n\nint n,m,k,d;\nvector<int> g[100005];\nint bandwidth[100005];\nint dist[100005];\nbool pushed[100005];\n\n\nint decision(int x){\n if (bandwidth[1] < x) return 0;\n for (int i = 1; i <= n; i++){\n dist[i] = 1e9;\n pushed[i] = false;\n }\n queue<int> q;\n q.push(1);\n pushed[1] = true;\n dist[1] = 0;\n while (!q.empty()){\n int cur = q.front();\n q.pop();\n for (int tgt : g[cur]){\n if (pushed[tgt] || bandwidth[tgt] < x) continue;\n pushed[tgt] = true;\n dist[tgt] = dist[cur]+1;\n q.push(tgt);\n }\n }\n int cnt = 0;\n for (int i = 1; i <= n; i++){\n if (dist[i] <= d){\n cnt++;\n }\n }\n return cnt;\n}\n\nint main(){\n scanf(\"%d %d %d %d\", &n,&m,&k,&d);\n for (int i =1 ; i <= n; i++){\n scanf(\"%d\", &bandwidth[i]);\n }\n for (int i = 1; i <= m; i++){\n int a,b;\n scanf(\"%d %d\", &a, &b);\n g[a].push_back(b);\n g[b].push_back(a);\n }\n int best = 0;\n int lo = 0;\n int hi = 1000000;\n while (lo <= hi){ \n int mid = (lo + hi)/2;\n if (decision(mid) >= k){\n best = mid;\n lo = mid + 1;\n }\n else{\n hi = mid - 1;\n }\n }\n printf(\"%d\\n\", best);\n}\n" }, { "alpha_fraction": 0.49082568287849426, "alphanum_fraction": 0.5097139477729797, "avg_line_length": 24.72916603088379, "blob_id": "86d18d7287af2f4cba84e02ddba4203e20af7798", "content_id": "d1426b8eaf66b764f3d0fcc2adb14d41483b1484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3706, "license_type": "no_license", "max_line_length": 128, "num_lines": 144, "path": "/radio/data/gen/casegen.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "\n#include <cstdio>\n#include <cstdlib>\n#include <time.h>\n#include <set>\n#include <utility>\n#include <queue>\n#include <vector>\nusing namespace std;\nint n,m,k,d;\nvector<int> g[100005];\nint bandwidth[100005];\nint dist[100005];\nbool pushed[100005];\n\n\n// taken from full to determine the actual value 
of k- make the test a little stronger\nint decision(int x){\n if (bandwidth[1] < x) return 0;\n // printf(\"%d\\n\", bandwidth[1]);\n for (int i = 1; i <= n; i++){\n dist[i] = 1e9;\n pushed[i] = false;\n }\n queue<int> q;\n q.push(1);\n pushed[1] = true;\n dist[1] = 0;\n while (!q.empty()){\n int cur = q.front();\n q.pop();\n for (int tgt : g[cur]){\n if (pushed[tgt] || bandwidth[tgt] < x) continue;\n pushed[tgt] = true;\n dist[tgt] = dist[cur]+1;\n q.push(tgt);\n }\n }\n int cnt = 0;\n for (int i = 1; i <= n; i++){\n if (dist[i] <= d){\n cnt++;\n }\n }\n return cnt;\n}\n\n\ntypedef pair<int,int> pii;\nvector<pii> final_edges;\nset<pii> all_edges;\n\n// one line: \n/*\nsubtask\nmax_bandwidth\ndisconnect_starting_node - boolean, if true, starting node is disconnected (10% of cases)\nhigh_starting_node_bandwidth - boolean, if true, starting node's bandwidth is max_bandwidth (10% of cases)\nbump_k_target - boolean, if true, then once a stronger K is determined, it will increment K if possible (50% of cases)\nn\nm\nk - will bump up this value to a stronger value\nd\n*/\n\nint main() {\n // freopen(\"test.in\", \"r\", stdin);\n srand(time(NULL)); \n int subtask;\n int max_bandwidth;\n int disconnect_starting_node;\n int high_starting_node_bandwidth;\n int bump_k_target;\n scanf(\"%d %d %d %d %d\", &subtask, &max_bandwidth, &disconnect_starting_node, &high_starting_node_bandwidth, &bump_k_target);\n scanf(\" %d %d %d %d\", &n, &m, &k, &d);\n // printf(\"%d %d\\n\",subtask, max_bandwidth);\n // printf(\"????\");\n // generate the edges\n if (!disconnect_starting_node){\n for (int i = 2; i <= min(n,m+1); i++){\n int a = rand() % (i-1);\n a++;\n final_edges.push_back({a,i});\n all_edges.insert(make_pair(a,i));\n all_edges.insert(make_pair(i,a));\n g[a].push_back(i);\n g[i].push_back(a);\n }\n }\n \n while (final_edges.size() < m){\n int a = rand() % n + 1;\n int b = rand() % n + 1;\n if (b == a || all_edges.find(make_pair(a,b)) != all_edges.end()) continue;\n if (disconnect_starting_node && (a == 1 || b == 1)) continue;\n all_edges.insert(make_pair(a,b));\n all_edges.insert(make_pair(b,a));\n final_edges.push_back({a,b});\n g[a].push_back(b);\n g[b].push_back(a);\n }\n for (int i = 1; i <= n; i++){\n bandwidth[i] = rand() % max_bandwidth + 1;\n }\n if (high_starting_node_bandwidth){\n bandwidth[1] = 1;\n }\n else{\n bandwidth[1] = max_bandwidth;\n }\n\n int lo = 0;\n int hi = 1000000;\n int best = 0;\n while (lo <= hi){ \n int mid = (lo + hi)/2;\n if (decision(mid) >= k){\n best = mid;\n lo = mid + 1;\n }\n else{\n hi = mid - 1;\n }\n }\n if (disconnect_starting_node){\n // eh do stuff, keep original k\n }\n else{\n k = decision(best);\n if (bump_k_target){\n k = min(k+1,n);\n }\n }\n\n // printf(\"%d\\n\", best);\n printf(\"%d %d %d %d\\n\", n, m, k, d);\n printf(\"%d\", bandwidth[1]);\n for (int i = 2; i <= n; i++){\n printf(\" %d\", bandwidth[i]);\n }\n printf(\"\\n\");\n for (pii cur : final_edges){\n printf(\"%d %d\\n\", cur.first, cur.second);\n }\n}\n" }, { "alpha_fraction": 0.5429553389549255, "alphanum_fraction": 0.561855673789978, "avg_line_length": 19.785715103149414, "blob_id": "1d185c2a9c2e64e072da3378ccbc899bb49fbb17", "content_id": "381873d6fa1cc3fef4692c3a3afddcad3ee3c2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 582, "license_type": "no_license", "max_line_length": 54, "num_lines": 28, "path": "/battleship/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef 
COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\nusing namespace std;\n\nstruct Checker {\n\tint N, M, K;\n\tint grid[1005][1005];\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tN = inf.readInt(MIN_N, MAX_N, \"N\");\n\t\tinf.readSpace();\n\t\tM = inf.readInt(MIN_M, MAX_M, \"M\");\n\t\tinf.readSpace();\n\t\tK = inf.readInt(MIN_K, MAX_K, \"K\");\n\t\tinf.readEoln();\n\t\tfor (int i = 0; i < N; i++) {\n\t\t\tfor (int j = 0; j < M; j++) {\n\t\t\t\tgrid[i][j] = inf.readInt(MIN_A, MAX_A, \"a[i][j]\");\n\t\t\t\tif (j < M-1) inf.readSpace();\n\t\t\t\telse inf.readEoln();\n\t\t\t}\n\t\t}\n\t\tinf.readEof();\n\t}\n};\n#endif\n" }, { "alpha_fraction": 0.6465753316879272, "alphanum_fraction": 0.6849315166473389, "avg_line_length": 21.8125, "blob_id": "09147f6260cd987b3ccd2ee32f2e3facfc532d91", "content_id": "4ca757bc69a458bb6a0925c51453969e2e404423", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 365, "license_type": "no_license", "max_line_length": 91, "num_lines": 16, "path": "/holes/validator/bounds.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_L = 1;\nconst int MIN_W = 1;\nconst int MIN_A = 1;\nconst int MIN_B = 1;\nconst int MAX_L = 10000;\nconst int MAX_W = 10000;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.5888888835906982, "alphanum_fraction": 0.6277777552604675, "avg_line_length": 17, "blob_id": "fb1d05d193d88a768eb25c967a64cba0e04d9419", "content_id": "af10d8b730a2d99fd2fe049ba6a4605a4cc2d7b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 180, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/battleship/validator/sub3.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\n\nint main() {\n\tc.validate();\n\tensuref(c.N <= 30, \"N does not match sub3 bounds\");\n\tensuref(c.M <= 30, \"M does not match sub3 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6830601096153259, "alphanum_fraction": 0.693989098072052, "avg_line_length": 15.636363983154297, "blob_id": "8a7b167d40ac3b8007208ac47bf107dec3190f50", "content_id": "19117db27410c2d145b43f20fae45ee96e258e0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 366, "license_type": "no_license", "max_line_length": 46, "num_lines": 22, "path": "/accumulate/data/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": ".PHONY: all clean input output\n\nall: input output check zip\n\ninput:\n\t# Put any data generation commands here\n\tg++ -std=c++14 -o tkgen gen/tkgen.cpp\n\tpython3 gen/gen.py | python3 gen/bootstrap.py\n\tcp handmade/* .\n\noutput: input\n\tbash ./make-output-py.sh\n\ncheck: input\n\nzip: input output check\n\tzip data.zip *.in *.out\n\nclean:\n\t-rm tkgen\n\t-rm *.in *.out\n\t-rm data.zip\n" }, { "alpha_fraction": 0.4631650745868683, "alphanum_fraction": 0.5381991863250732, "avg_line_length": 25.672727584838867, "blob_id": "2b4c23090cf7455dea7cd095bd8eb25e288e33c3", "content_id": "5a05270dad589655b2c2f7278f2e688f4beacf78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1466, 
"license_type": "no_license", "max_line_length": 98, "num_lines": 55, "path": "/pairs/data/gen/bootstrap.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import os\nimport random\n\ndef print_system(command):\n print(\"Generating\", command.split()[-1])\n os.system(command)\n\n\ndef gen_case(of, sb, *args):\n print_system(\"python3 gen/gen.py %d %s > %s-sub%d.in\" % (sb, ' '.join(map(str,args)), of, sb))\n\ndef sub1():\n print(\"Subtask 1...\")\n gen_case(\"min\", 1, 2)\n for i in range(10):\n gen_case(\"small-%d\" % i, 1, random.randint(6,10))\n for i in range(5):\n gen_case(\"random-%d\" % i, 1, random.randint(2,1000))\n for i in range(5):\n gen_case(\"max-%d\" % i, 1, 1000)\n\ndef sub2():\n print(\"Subtask 2...\")\n gen_case(\"min\", 2, 2)\n for i in range(10):\n gen_case(\"small-%d\" % i, 2, random.randint(6,10))\n for i in range(5):\n gen_case(\"random-%d\" % i, 2, random.randint(2,100000))\n for i in range(5):\n gen_case(\"max-%d\" % i, 2, 100000)\n\ndef sub3():\n print(\"Subtask 3...\")\n gen_case(\"min\", 3, 2)\n for i in range(10):\n gen_case(\"small-%d\" % i, 3, random.randint(6,10))\n for i in range(5):\n gen_case(\"random-%d\" % i, 3, random.randint(2,100000))\n for i in range(5):\n gen_case(\"max-%d\" % i, 3, 100000)\n\ndef sub4():\n print(\"Subtask 4...\")\n gen_case(\"min\", 4, 2)\n for i in range(10):\n gen_case(\"small-%d\" % i, 4, random.randint(6,10))\n for i in range(5):\n gen_case(\"random-%d\" % i, 4, random.randint(2,100000))\n for i in range(5):\n gen_case(\"max-%d\" % i, 4, 100000)\n\nsub1()\nsub2()\nsub3()\nsub4()" }, { "alpha_fraction": 0.5308310985565186, "alphanum_fraction": 0.5442359447479248, "avg_line_length": 22.3125, "blob_id": "967e312b220d4e22cf345e5732426e8e3c52d242", "content_id": "9e22ec0d2ef3d983bd6c5a3174d70091e4842fd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 373, "license_type": "no_license", "max_line_length": 79, "num_lines": 16, "path": "/jewels/validator/sub2.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n#include <algorithm>\n\nint main() {\n\tChecker c{}; c.validate();\n\tbool prev_red = c.S.back() == 'r' && c.N > 1;\n\tfor(int i = 0; i < c.N; ++i){\n\t\tif(c.S[i] == 'r'){\n\t\t\tensuref(!prev_red, \"Adjacent red beads\");\n\t\t\tprev_red = true;\n\t\t}else \n\t\t\tprev_red = false;\n\t}\n\tensuref(count(c.S.begin(), c.S.end(), 'r') == 2, \"There are not 2 red beads\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4409937858581543, "alphanum_fraction": 0.4658385217189789, "avg_line_length": 11.384614944458008, "blob_id": "8f88898c9889f2c2515f7e10de683e7a896611ad", "content_id": "b8c6a5d98d0bcf6cbb183672d767e18189cebea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 21, "num_lines": 13, "path": "/jewels/data/gen/gen-sub1.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import sys\n\nt = int(sys.argv[1])\n\ns = []\nfor i in range(4):\n if t & (1<<i):\n s.append('r')\n else:\n s.append('b')\n\nprint(4)\nprint(''.join(s))\n" }, { "alpha_fraction": 0.37061119079589844, "alphanum_fraction": 0.3953185975551605, "avg_line_length": 19.36111068725586, "blob_id": "b9c70197743b8b686d86a029f7ad2487d5093f83", "content_id": "9efc3fccd36d4508c1aacde48baaf038c24416ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 769, "license_type": "no_license", "max_line_length": 41, "num_lines": 36, "path": "/radio/solutions/james-python-2-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "N,M,K,D = map(int,input().split())\r\nB = [0] + list(map(int, input().split()))\r\nG = [[] for _ in range(N+1)]\r\nfor i in range(M):\r\n u,v = map(int,input().split())\r\n G[u].append(v)\r\n G[v].append(u)\r\n\r\ndef can_do(L):\r\n todo = [1]*(N+1)\r\n bfsc=bfsn=[]\r\n cnt = 0\r\n if B[1] >= L:\r\n todo[1]=0\r\n bfsn.append(1)\r\n cnt += 1\r\n for i in range(D):\r\n bfsc=bfsn\r\n bfsn=[]\r\n for u in bfsc:\r\n for v in G[u]:\r\n if todo[v] and B[v] >= L:\r\n todo[v]=0\r\n bfsn.append(v)\r\n cnt += 1\r\n return cnt >= K\r\n\r\nlo=mid=1\r\nhi=1<<20\r\nwhile lo!=hi:\r\n mid=hi+lo>>1\r\n if can_do(mid):\r\n lo = mid + 1\r\n else:\r\n hi = mid\r\nprint(lo-1)\r\n" }, { "alpha_fraction": 0.41558441519737244, "alphanum_fraction": 0.4523809552192688, "avg_line_length": 19.686567306518555, "blob_id": "74c38a1d84858de9203c424e6d5415fd0e490160", "content_id": "453e4a1ae86d79d1b603e8fad0af29fe849bf96f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 61, "num_lines": 67, "path": "/radio/solutions/quang-heuristicdfs.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "// should only get subtask 3, technically O(n^2)\n\n#include <cstdio>\n#include <vector>\n#include <queue>\nusing namespace std;\n\nint n,m,k,d;\nvector<int> g[100005];\nint bandwidth[100005];\nint dist[100005];\nbool seen[100005];\n\nvoid dfs( int pos, int cur_depth,int x){\n seen[pos] = true;\n dist[pos] = cur_depth;\n if (cur_depth == d) return;\n for (int tgt : g[pos]){\n if (bandwidth[tgt] >= x && cur_depth+ 1 < dist[tgt]){\n dfs(tgt,cur_depth+1,x);\n }\n }\n}\n\nint decision(int x){\n if (bandwidth[1] < x) return 0;\n for (int i = 1; i <= n; i++){\n dist[i] = 1e9;\n seen[i] = false;\n }\n dfs(1,0,x);\n int cnt = 0;\n for (int i = 1; i <= n; i++){\n cnt += seen[i];\n }\n return cnt;\n}\n\nint main(){\n freopen(\"radin.txt\", \"r\", stdin);\n freopen(\"radout.txt\", \"w\", stdout);\n\n scanf(\"%d %d %d %d\", &n,&m,&k,&d);\n for (int i =1 ; i <= n; i++){\n scanf(\"%d\", &bandwidth[i]);\n }\n for (int i = 1; i <= m; i++){\n int a,b;\n scanf(\"%d %d\", &a, &b);\n g[a].push_back(b);\n g[b].push_back(a);\n }\n int best = 0;\n int lo = 0;\n int hi = 1000000;\n while (lo <= hi){ \n int mid = (lo + hi)/2;\n if (decision(mid) >= k){\n best = mid;\n lo = mid + 1;\n }\n else{\n hi = mid - 1;\n }\n }\n printf(\"%d\\n\", best);\n}\n" }, { "alpha_fraction": 0.5183413028717041, "alphanum_fraction": 0.6108452677726746, "avg_line_length": 28.85714340209961, "blob_id": "f4e36b94fab1d103ca2856fecc13fdf26deca06a", "content_id": "2252b6ffda3c217fe8db14d99c704c630cd65f4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 71, "num_lines": 21, "path": "/holes/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import random\nimport sys\n\nop = int(sys.argv[1])\nif op == 1:\n print(sys.argv[2], sys.argv[3])\n print(sys.argv[4], sys.argv[5])\nelif op == 2:\n # Pure random, subtask 2\n print(random.randint(1, 10000), random.randint(1, 10000))\n print(1, 1)\nelif op == 3:\n # Pure random, subtask 5\n l, w = random.randint(1, 10000), random.randint(1, 10000)\n 
print(l, w)\n print(random.randint(1, l), random.randint(1, w))\nelif op == 4:\n # Small A, B random, subtask 5\n l, w = random.randint(1, 10000), random.randint(1, 10000)\n print(l, w)\n print(random.randint(1, min(10, l)), random.randint(1, min(10, w)))\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 14.75, "blob_id": "e2ff572aeca135de1faa8217b04f91ed2dee04b6", "content_id": "d81f9d2983d134b91471a6de616f127341ccf5a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 63, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/holes/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp holes.pdf statement.pdf\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.517241358757019, "avg_line_length": 28, "blob_id": "5674af1a29954326e7c8708bce478397278c47f7", "content_id": "48a011f101b7a5a3c066742b279c000ced620e3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/holes/solutions/tunan-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "L, W = map(int, input().split())\nA, B = map(int, input().split())\nprint((L//A)*(W//B))\n" }, { "alpha_fraction": 0.5883668661117554, "alphanum_fraction": 0.5906040072441101, "avg_line_length": 17.625, "blob_id": "c5e2cac7444477ff8717190850a2497aecb7a271", "content_id": "559507c14ef80a19171654c240791fee2460a402", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 447, "license_type": "no_license", "max_line_length": 82, "num_lines": 24, "path": "/jewels/validator/common.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#ifndef COMMON_H\n#define COMMON_H\n#include \"testlib.h\"\n#include \"bounds.h\"\nusing namespace std;\n\nstruct Checker {\n\tint N;\n\tstring S;\n\tvoid validate() {\n\t\tregisterValidation();\n\t\tN = inf.readInt(MIN_N, MAX_N, \"N\");\n\t\tinf.readEoln();\n\t\tfor (int i = 0; i < N; i++) {\n\t\t\tchar c = inf.readChar();\n\t\t\tensuref(c == 'r' || c == 'b', \"Unknown bead colour, must be either 'r' or 'b\");\n\t\t\tS.push_back(c);\n\t\t}\n\t\tinf.readEoln();\n\t\tinf.readEof();\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.6015037298202515, "alphanum_fraction": 0.6240601539611816, "avg_line_length": 15.625, "blob_id": "8d0775a6c0547019e5928db7cd1978f35c35de55", "content_id": "9475cea8abf01874a41753d794fa4a69a452005f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 133, "license_type": "no_license", "max_line_length": 59, "num_lines": 8, "path": "/accumulate/validator/sub4.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.K == c.N-1, \"K does not adhere to sub4 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 14.75, "blob_id": "f21d9eb0d6f87128352c27fd9fb548716ec6040b", "content_id": "c3dd20a7b43716a1dc7c627e9672049a77000522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 63, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, 
"path": "/pairs/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp pairs.pdf statement.pdf\n" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.78125, "avg_line_length": 15, "blob_id": "8f815e276df1cf7bb48c465621c808e0f512002d", "content_id": "8be9b5f3a3e572143ae72ece3a01c25b77173c9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 64, "license_type": "no_license", "max_line_length": 28, "num_lines": 4, "path": "/jewels/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp jewels.pdf statement.pdf\n" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 15.5, "blob_id": "0c58539c5d57c143c5891791ee6e86c5aade1ee7", "content_id": "144df499c5446116cbe736e9ab28e390bef2b73d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 132, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/accumulate/validator/sub3.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.N <= 1000, \"N does not adhere to sub3 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5096660852432251, "alphanum_fraction": 0.5281195044517517, "avg_line_length": 19.690908432006836, "blob_id": "cef01ea1cdc6f1250fba53d07aa5a6ebdc25f900", "content_id": "9afa2ece6502f7b394c244823763e9c632d099cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 83, "num_lines": 55, "path": "/radio/solutions/james-full.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include <vector>\n#include <iostream>\n\nusing namespace std;\n\nbool can_do(int L, int K, int D, vector<int> B, vector<vector<int>> edges){\n\tvector<bool> seen(edges.size());\n\tvector<int> bfsc, bfsn;\n\tint cnt = 0;\n\tseen[1]=1;\n\tif(B[1] >= L)bfsc.push_back(1),cnt++;\n\tfor(int i = 0; i < D; ++i){\n\t\tfor(int u: bfsc){\n\t\t\tfor(int v: edges[u]){\n\t\t\t\tif(!seen[v] && B[v] >= L){\n\t\t\t\t\tseen[v]=1;\n\t\t\t\t\tbfsn.push_back(v);\n\t\t\t\t\tcnt++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tswap(bfsc,bfsn);\n\t\tbfsn.clear();\n\t}\n\treturn cnt >= K;\n}\n\nint solve(int N, int M, int K, int D, vector<int> B, vector<int> U, vector<int> V){\n\tvector<vector<int>> edges(N+1);\n\tfor(int i = 0; i < M; ++i){\n\t\tedges[U[i]].push_back(V[i]);\n\t\tedges[V[i]].push_back(U[i]);\n\t}\n\tint lo=1,hi=1e6+1,mid;\n\twhile(lo!=hi){\n\t\tmid=lo+hi>>1;\n\t\tif(can_do(mid, K, D, B, edges))lo=mid + 1;\n\t\telse hi = mid;\n\t}\n\treturn lo-1;\n}\n\nint N,M,K,D;\nvector<int> B,U,V;\nint main(){\n ios_base::sync_with_stdio(0);\n cin.tie(0);\n cin>>N>>M>>K>>D;\n B.resize(N+1);\n U.resize(M);\n V.resize(M);\n for(int i = 1; i <= N; ++i)cin>>B[i];\n for(int i = 0,u,v; i < M; ++i)cin>>U[i]>>V[i];\n cout << solve(N,M,K,D,B,U,V);\n}\n" }, { "alpha_fraction": 0.34343433380126953, "alphanum_fraction": 0.46843433380126953, "avg_line_length": 24.399999618530273, "blob_id": "c48949f983dab35634ffa827f752708e3d749b21", "content_id": "de6b85235c03594de3c8c4fb2328320e52193769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 792, "license_type": 
"no_license", "max_line_length": 112, "num_lines": 30, "path": "/battleship/data/gen/tkgen.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include <iostream>\r\nusing namespace std;\r\nint LD[20]={1,10,100,1000,1000,10000,10000,10000,10000,100000,100000,100000,100000,100000,100000,100000,100000};\r\n\r\nint logrand() {\r\n int low = LD[rand()%15];\r\n return(rand()%(low*9) + low);\r\n}\r\n\r\nint rng(int l, int r) {\r\n return(rand()%(r-l+1) + l);\r\n}\r\n\r\nint main() {\r\n srand(time(NULL)); //reseed by input?\r\n int N,M, K, type; //which type of rand\r\n cin >> N >> M >> K >> type;\r\n printf(\"%d %d %d\\n\",N,M,K);\r\n for (int i=1; i<=N; i++) {\r\n for (int j=1; j<=M; j++) {\r\n if (type) { //type = 1 implies logrand\r\n printf(\"%d\",logrand());\r\n } else {\r\n printf(\"%d\",rng(0,1000000));\r\n }\r\n\t\t\tif (j < M) printf(\" \");\r\n }\r\n printf(\"\\n\");\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5051546096801758, "alphanum_fraction": 0.5206185579299927, "avg_line_length": 20.55555534362793, "blob_id": "e1f330a735162d719167aa05c468bd3e558b1c6d", "content_id": "756bf11ed64bf835cc784542739daea4ef567432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 194, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/radio/validator/sub1.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nint main() {\n\tChecker c{}; c.validate();\n\tensuref(c.D == c.N, \"D != N\");\n\tint ans = solve(c.N, c.M, c.K, c.D, c.B, c.U, c.V);\n\tensuref(ans <= 2, \"answer > 2\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 16, "blob_id": "9accf459e2f54d452bf9115a296e88e9d95a21eb", "content_id": "6f60d274a41b575a4a7ba306f62bcd5ecea15f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 68, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/accumulate/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp accumulate.pdf statement.pdf\n" }, { "alpha_fraction": 0.6621983647346497, "alphanum_fraction": 0.6863270998001099, "avg_line_length": 19.72222137451172, "blob_id": "c4ad960e95a6389a8873b0d7072332d6bd4c775f", "content_id": "77feae71e6cc888d1136ae278c80661a918653c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 373, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/pairs/validator/bounds.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! 
WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_N = 2;\nconst int MAX_N = 1e5;\n\nconst int MIN_A = 0;\nconst int MAX_A = 2e5;\n\nconst int MIN_SKILL = 0;\nconst int MAX_SKILL = 1e5;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.4250764548778534, "alphanum_fraction": 0.46330276131629944, "avg_line_length": 16.675676345825195, "blob_id": "4c9cba4b2ae73bf9fccfd33044fe917e493938da", "content_id": "5f814c0491db707f06828b4c3ab0aa8b14bec568", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 42, "num_lines": 37, "path": "/jewels/solutions/james-full.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import sys\n\nN,jewels = sys.stdin\njewels = jewels.strip()\n\nblocks = []\nl = 0\nc = jewels[0] \nfor j in jewels:\n if j == c:\n l += 1\n else:\n blocks.append((l,c))\n c = j\n l = 1\nblocks.append((l,c))\n\n#print(blocks)\n\nans = 0\nif len(blocks) > 1:\n if blocks[-1][1] == blocks[0][1]:\n l,c = blocks[0]\n l += blocks[-1][0]\n blocks[-1] = (l,c)\n blocks[0] = (l,c)\n else:\n ans = blocks[0][0] + blocks[-1][0]\n s = blocks[0][0]\n for i in range(1,len(blocks)):\n s += blocks[i][0]\n ans = max(ans,s)\n s -= blocks[i-1][0]\nelse:\n ans = N\n\nsys.stdout.write(str(ans) + \"\\n\")\n" }, { "alpha_fraction": 0.552925705909729, "alphanum_fraction": 0.5770326256752014, "avg_line_length": 24.209945678710938, "blob_id": "4becd5a72c7b12ca6b9a002c14a3217e7c28176b", "content_id": "33ff404d5ed3075d4563aaddf1ab525e6699a815", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4563, "license_type": "no_license", "max_line_length": 128, "num_lines": 181, "path": "/jewels/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import sys\nimport random\n\nusage = \"\"\"\nUsage:\nTo generate a random case for a subtask use:\npython gen.py <subtask>\nTo specify additional parameters, use:\npython gen.py <subtask> <N> <casetype> <arg1> <arg2> ...\ncase will be written to stdout\nsubtask - the subtask of case (1-5)\nN - number of beads (1 - subtask limit)\ncasetype - the type of case to be generated (0-11)\narg - argument to pass to casetype\n\"\"\"\n\ndef random_partition(cnt, n=0): # O(n)\n if n == 0:\n n = random.randint(1,max(1,cnt))\n if cnt == 0:\n return [0] * n\n density = [random.uniform(0.0, 1.0)for i in range(n)]\n mul = cnt/sum(density)\n\n split = [int(mul * d) for d in density]\n delta = sum(split)-cnt\n\n while delta < 0:\n idx = random.randrange(0,n)\n split[idx] += 1\n delta += 1\n\n return split\n\ndef blocks_to_string(blocks):\n s = []\n for b in range(len(blocks)):\n s.append((\"rb\"[b%2])*blocks[b]) #maybe make blue first 50% of the time?\n return ''.join(s)\n\ndef blue(N):\n return 'b'*N\n\ndef red(N):\n return 'r'*N\n\ndef alternating(N):\n s = []\n for i in range(N):\n s.append(\"rb\"[i%2])\n return ''.join(s)\n\ndef end_biased(N, B=0):\n blocks = random_partition(N,B)\n blocks.sort()\n biased = []\n for i in range(len(blocks)-1, -1, -2):\n biased.append(blocks[i])\n for i in range(len(blocks)%2, len(blocks), 2):\n biased.append(blocks[i]) \n return blocks_to_string(biased)\n\ndef middle_biased(N, B=0):\n blocks = random_partition(N,B)\n blocks.sort()\n biased = []\n for i in range(0, 
len(blocks), 2):\n biased.append(blocks[i])\n for i in range(len(blocks)-1-len(blocks)%2, -1, -2):\n biased.append(blocks[i]) \n return blocks_to_string(biased)\n\ndef ascending(N, B=0):\n blocks = random_partition(N,B)\n blocks.sort()\n return blocks_to_string(blocks)\n\n\"\"\"\ndef sub2(N): # at most 3 red\n reds = random.randint(0, min(N,3))\n s = blue(N-reds)\n for i in range(reds):\n split = random.randint(0,len(s))\n s = s[:split] + 'r' + s[split:]\n return s\n\"\"\"\n\ndef random_rotate(s):\n split = random.randrange(0,len(s))\n return s[split:] + s[:split]\n\ndef sub2(N): # exactly 2, non adjacent reds\n assert(N >= 4)\n split = random.randint(1,N-3)\n if random.randrange(0,2):\n return 'r' + blue(split) + 'r' + blue(N-split-2)\n else:\n return blue(split) + 'r' + blue(N-split-2) + 'r'\n \ndef sub4(N): # no adjacent red\n reds = random.randint(0,N//2)\n if reds == 0:\n return blue(N)\n else:\n flip = random.randrange(0,1)\n blocks = random_partition(N-reds*2,reds) # need to make sure each block has at least 1 blue\n s = []\n for i in range(reds):\n if flip:\n s.append('r')\n s.append(blue(blocks[i] + 1))\n else:\n s.append(blue(blocks[i] + 1))\n s.append('r')\n return ''.join(s)\n\ndef rand(N):\n return ''.join([random.choice(\"rb\")for i in range(N)])\n\ndef random_blocks(N, B=0):\n blocks = random_partition(N,B)\n return blocks_to_string(blocks)\n\ndef random_type(N):\n f = random.choice(CASE_TYPES)\n if f == sub2 and N < 4: # cannot gen a sub2 case with size < 4\n f = red\n return f(N)\n\ndef mixed(N, B=2):\n blocks = random_partition(N,B)\n s = [random_type(blocks[i]) for i in range(B)]\n return ''.join(s)\n\nCASE_TYPES = [blue, red, alternating, end_biased, middle_biased, ascending, sub2, sub4, rand, random_blocks, random_type, mixed]\n\nSUBTASKS = 5\nargs = sys.argv[1:]\n\nif len(args) < 1:\n print(usage)\n exit(0)\n\ndefaults = (100000, list(range(len(CASE_TYPES))))\nsubtask_info = (\n (),\n (4, defaults[1]),\n (defaults[0], [6]),\n (1000, defaults[1]),\n (defaults[0], [7]),\n defaults\n) # MAXN, ALLOWED CASES \n\nsubtask = int(args[0])\nif subtask < 1 or subtask > SUBTASKS:\n print(\"Invalid subtask: '%d'. Must be between (1 - %d)\" % (subtask,SUBTASKS))\n exit(0)\n\nMAXN, allowed_cases = subtask_info[subtask]\n\nif len(args) > 1:\n N = int(args[1])\n if N < 4 or N > MAXN:\n print(\"Invalid N for subtask: '%d'. Must be between (4 - %d)\" % (N,MAXN))\n exit(0)\nelse:\n N = random.randint(4, MAXN)\n\nif len(args) > 2:\n case_type = int(args[2])\n if case_type not in allowed_cases:\n print(\"Invalid case_type for subtask: '%d'.\" % case_type)\n exit(0)\nelse:\n case_type = random.choice(allowed_cases)\n\nprint(N)\ns = CASE_TYPES[case_type](N, *map(int,args[3:]))\nif random.randrange(0,2):\n s = random_rotate(s)\nprint(s)\n" }, { "alpha_fraction": 0.6872727274894714, "alphanum_fraction": 0.6981818079948425, "avg_line_length": 21.91666603088379, "blob_id": "03ea2e8506742cd99ac4f642f276628691e2588b", "content_id": "a050859ae8df1879f2b1c32265e5791d6ce3e974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 275, "license_type": "no_license", "max_line_length": 91, "num_lines": 12, "path": "/jewels/validator/bounds.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! 
WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_N = 4;\nconst int MAX_N = 1e5;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.6280193328857422, "alphanum_fraction": 0.6835748553276062, "avg_line_length": 22, "blob_id": "48a8dfbfb9350df908f4cd41db60275fa37939b4", "content_id": "2f92f9b20daef9f4ccdd93b487d48c7eb93f41ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 414, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/battleship/validator/bounds.h", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "/*\n * ! WARNING !\n * If modifying this file, be sure to also modify the problem statement's Subtasks section\n * and also modify the bounds.h in the validators directory\n */\n#ifndef BOUNDS_H\n#define BOUNDS_H\n\nconst int MIN_N = 1;\nconst int MIN_M = 1;\nconst int MIN_K = 1;\nconst int MAX_N = 1000;\nconst int MAX_M = 1000;\nconst int MAX_K = 1000;\nconst int MIN_A = 0;\nconst int MAX_A = 1000000;\n\n#endif // BOUNDS_H\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6240000128746033, "avg_line_length": 14.625, "blob_id": "5492838a2a9bdc4622334c712a57fa074554832c", "content_id": "7c096799d4607c8d1ce04d064c7ddf474f7c3211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 125, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/holes/validator/sub3.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.W == 1, \"W does not match sub3 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.43852856755256653, "alphanum_fraction": 0.5304937362670898, "avg_line_length": 20.978723526000977, "blob_id": "a5b16600aa2a1f81bf6316871d4b92b838a2a4f6", "content_id": "d9c5e264bf7951b48c53a0a630af35fcf996e864", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 135, "num_lines": 47, "path": "/holes/data/gen/gen_bootstrap.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import random\nimport itertools\nimport os\n\ncnt = 1\n\ndef case(name, a):\n global cnt\n cmd = \"python3 gen/gen.py %s > %s_%d.in\" % (\" \".join(map(str, a)), name, cnt)\n print(cmd)\n os.system(cmd)\n cnt += 1\n\n# Sub1\ncase(\"AxxxxA_one\", [1, 12, 8, 5, 2])\n\n# Sub2\ncase(\"Axx11A_max\", [1, 10000, 10000, 1, 1])\nfor i in range(8):\n case(\"Axx11A\", [2])\n\nfor i in range(3):\n w = random.randint(1, 10000)\n case(\"A1x11A\", [1, 1, w, 1, 1])\n\nfor i in range(5):\n l = random.randint(1, 10000)\n case(\"Ax111A\", [1, l, 1, 1, 1])\n\n# Sub3\nfor i in range(5):\n l = random.randint(1, 10000)\n ml = l\n if i >= 2:\n ml = min(ml, 100)\n a = random.randint(1, ml)\n case(\"Ax1x1A\", [1, l, 1, a, 1])\n\n# Sub4, includes min case\nfor l, w, a, b in list(filter(lambda t: t[2] <= t[0] and t[3] <= t[1] and t[1] <= t[0], itertools.product([1, 2, 3, 4, 5], repeat=4))):\n case(\"A%d%d%d%dA\" % (l, w, a, b), [1, l, w, a, b])\n\n# Sub5\nfor i in range(3):\n case(\"AxxxxA_rand\", [3])\nfor i in range(5):\n case(\"AxxxxA_biglw\", [4])\n" }, { "alpha_fraction": 0.4103967249393463, "alphanum_fraction": 0.5212038159370422, 
"avg_line_length": 16.743589401245117, "blob_id": "0a16e8d3680fe0e982fb553b2d5ebd621258d140", "content_id": "ce0b16d3580dbca563bdc2de8920eb1b4e34854a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 52, "num_lines": 39, "path": "/accumulate/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import random\r\nCases = []\r\ndef addcase(n,k,subtask):\r\n Cases.append([n,k,subtask])\r\n \r\ndef SB1():\r\n for i in range(15):\r\n addcase(10**5 - random.randint(0,10),1,1)\r\n addcase(2,1,1)\r\n addcase(3,1,1)\r\n\r\ndef SB2():\r\n for i in range(15):\r\n addcase(10**5,random.randint(1, 10**5-1), 2)\r\n addcase(2,1,2)\r\n addcase(3,1,2)\r\n addcase(3,2,2)\r\n\r\ndef SB3():\r\n for i in range(15):\r\n addcase(1000,random.randint(1, 999), 3)\r\n\r\ndef SB4():\r\n for i in range(15):\r\n addcase(10**5,10**5-1, 4)\r\n\r\ndef SB5():\r\n for i in range(15):\r\n addcase(10**5,random.randint(1, 10**5-1), 5)\r\n\r\n\r\nSB1()\r\nSB2()\r\nSB3()\r\nSB4()\r\nSB5()\r\nprint(len(Cases))\r\nfor i in Cases:\r\n print(i[0],i[1],i[2])\r\n" }, { "alpha_fraction": 0.5824176073074341, "alphanum_fraction": 0.6318681240081787, "avg_line_length": 17.200000762939453, "blob_id": "745a1b0222c4315504798340693f0e5d57d47e57", "content_id": "29c7aed5e57e9548676c5d62081388851af3bd6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 182, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/battleship/validator/sub5.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\n\nint main() {\n\tc.validate();\n\tensuref(c.N <= 200, \"N does not match sub5 bounds\");\n\tensuref(c.M <= 200, \"M does not match sub5 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5321637392044067, "avg_line_length": 16.100000381469727, "blob_id": "d2ca5b7ac41007d4145b009329749e04ab290178", "content_id": "b11be8e38d0cb011efd22a2875e0275023e8f4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 171, "license_type": "no_license", "max_line_length": 60, "num_lines": 10, "path": "/accumulate/validator/sub2.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tfor (int i = 2; i <= c.N; i++) {\n\t\tensuref(c.a[i] >= c.a[i-1], \"A is not increasing (sub2)\");\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4968944191932678, "alphanum_fraction": 0.5403726696968079, "avg_line_length": 19.125, "blob_id": "1a56a00c9217d90c2e8ecc6f6e713295a4cf7e22", "content_id": "b3a51f8772971dec143d0e971c98b40331cd2eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 161, "license_type": "no_license", "max_line_length": 87, "num_lines": 8, "path": "/holes/validator/sub1.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nChecker c;\nint main() {\n\tc.validate();\n\tensuref(c.L == 12 && c.W == 8 && c.A == 5 && c.B == 2, \"This input is not subtask 1\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4139072895050049, "alphanum_fraction": 0.44701987504959106, "avg_line_length": 21.230770111083984, "blob_id": "e1613bb3b48e1913d30a09f7ca4fc23288ce5ac9", "content_id": 
"b4b55da7f56ea75a03d9adee4da57a3d6d22df32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 604, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/accumulate/data/gen/tkgen.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include <iostream>\r\n#include <cassert>\r\n#include <vector>\r\n#include <algorithm>\r\nusing namespace std;\r\n\r\nint main() {\r\n int N,K,SB;\r\n cin >> N >> K >> SB;\r\n if (SB == 1) {assert(K == 1);}\r\n if (SB == 3) {assert(N <= 1000);}\r\n if (SB == 4) {assert(K == N-1);}\r\n vector <int> Vals;\r\n for (int i=0; i<N; i++) {\r\n Vals.push_back((rand()-RAND_MAX/2)%100000);\r\n }\r\n if (SB == 2) {\r\n sort(Vals.begin(), Vals.end());\r\n }\r\n printf(\"%d %d\\n\",N,K);\r\n for (int i=0; i<N; i++) {\r\n printf(\"%d\",Vals[i]);\r\n\t\tif (i < N-1) printf(\" \");\r\n }\r\n\tprintf(\"\\n\");\r\n}\r\n" }, { "alpha_fraction": 0.6883468627929688, "alphanum_fraction": 0.6991869807243347, "avg_line_length": 15.772727012634277, "blob_id": "f93940d4dcd7f054a4d5c6ad39033577bf475ccb", "content_id": "28d96637af9eb5a6fe2c4c21ae39b9865b8ef530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 369, "license_type": "no_license", "max_line_length": 46, "num_lines": 22, "path": "/radio/data/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": ".PHONY: all clean input output\n\nall: input output check zip\n\ninput:\n\t# Put any data generation commands here\n\tg++ -std=c++14 -o casegen gen/casegen.cpp\n\tpython3 gen/gen.py | python3 gen/bootstrap.py\n\tcp handmade/* .\n\noutput: input\n\tbash ./make-output.sh\n\ncheck: input\n\nzip: input output check\n\tzip data.zip *.in *.out\n\nclean:\n\t-rm casegen\n\t-rm *.in *.out\n\t-rm data.zip\n" }, { "alpha_fraction": 0.4848484992980957, "alphanum_fraction": 0.49494948983192444, "avg_line_length": 15.416666984558105, "blob_id": "e7baba09c59147da61742090f29203396c38401d", "content_id": "3443c359c148056be3ce4619901b0061b7aa544c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/accumulate/data/gen/bootstrap.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import os\ncases = int(input())\ncnt = 1\n\ndef r(cmd):\n print(\"[B]\", cmd)\n os.system(cmd)\n\nfor _ in range(cases):\n ln = input()\n r(\"echo %s | ./tkgen > t_%d.in\" % (ln, cnt)) \n cnt += 1\n\n" }, { "alpha_fraction": 0.5483871102333069, "alphanum_fraction": 0.599078357219696, "avg_line_length": 23.11111068725586, "blob_id": "7ea4f3401a380403709f725d65d174dfdb566010", "content_id": "2fcf1afed9dd4f2bc91179b73e1532281806090b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 217, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/radio/validator/sub3.cpp", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\nint main() {\n\tChecker c{}; c.validate();\n\tensuref(c.D == c.N, \"D != N\");\n\tensuref(c.N <= 1000, \"N does not match sub3 bounds\");\n\tensuref(c.M <= 1000, \"M does not match sub3 bounds\");\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.45562130212783813, "alphanum_fraction": 0.4674556255340576, "avg_line_length": 14.363636016845703, "blob_id": 
"8d0b96360eeec4fdcd97106141442601a0ca2403", "content_id": "bab5dce855da7eb7eb6b9b39531fd303c3b41dfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/radio/data/gen/bootstrap.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "import os\ndef r(c):\n print(\"[B]\", c)\n os.system(c)\n\nx = input()\ncnt = 1\nwhile x:\n r(\"echo %s | ./casegen > t_%d.in\" % (x, cnt))\n x = input()\n cnt = cnt+1\n" }, { "alpha_fraction": 0.36090224981307983, "alphanum_fraction": 0.5134264230728149, "avg_line_length": 19.418603897094727, "blob_id": "b20b9e644a8b420652d6eb2a2566370dd7341979", "content_id": "1971c5a48f9e79f9d9aee47f89eb980540269c32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 70, "num_lines": 43, "path": "/battleship/data/gen/gen.py", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "#aux gen\r\nCases = []\r\nimport random\r\ndef addcase(n,m,k,t):\r\n Cases.append([n,m,k,t])\r\n \r\ndef SB1():\r\n for i in [20,50,100,300,500]:\r\n addcase(1,1000,i,0)\r\n addcase(1,1000,i,1)\r\n addcase(1,2,1,0) #edge case\r\n\r\ndef SB2():\r\n for i in [200,200,200,200,200,1000,1000]:\r\n addcase(i,i,i,0)\r\n addcase(i,i,i,1)\r\n addcase(2,2,2,0) #edge case\r\n\r\ndef SB3to5(subtask_number):\r\n n = [0,0,0,30,80,200][subtask_number]\r\n for i in [10, 20,30,50,75,80,100,150]:\r\n if i > n:\r\n break\r\n for j in range(3):\r\n addcase(n,n - random.randint(0,5), i, random.randint(0,1))\r\n\r\ndef SB6():\r\n for k in [32,64,128,256,512,900]:\r\n addcase(1000,1000,k,0)\r\n addcase(1000,1000,k,1)\r\n\r\ndef disp():\r\n print(len(Cases))\r\n for i in Cases:\r\n print(\" \".join([str(c) for c in i]))\r\n\r\nSB1()\r\nSB2()\r\nSB3to5(3)\r\nSB3to5(4)\r\nSB3to5(5)\r\nSB6()\r\ndisp()\r\n \r\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 16, "blob_id": "d4475f436ee44ef5f25f6176d55ad639438c2b4f", "content_id": "50ee765c75ac768ca030ad84954043aea6361ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 68, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/battleship/statement/Makefile", "repo_name": "acio-olympiad/2020Contest0", "src_encoding": "UTF-8", "text": "all: statement.pdf\n\nstatement.pdf:\n\tcp battleship.pdf statement.pdf\n" } ]
62
goddess5321/BigDataTraining_Task2
https://github.com/goddess5321/BigDataTraining_Task2
3709839f33d26a8bdd347f18f2bbf891532724d7
9016d9d2416de607931077a7848d29b4c8f0b0cc
59e00bc8f98c3271dc644fe945b08f2f6842dd32
refs/heads/master
2022-10-20T11:23:05.301580
2020-06-09T14:55:42
2020-06-09T14:55:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6106408834457397, "alphanum_fraction": 0.61426842212677, "avg_line_length": 27.55172348022461, "blob_id": "d6aabfa705d36bd4754e6f212bf186220f62c25a", "content_id": "697c3a60cafe80cb1c00ba2b54d4e533e144c28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 112, "num_lines": 29, "path": "/Task2/pipelines.py", "repo_name": "goddess5321/BigDataTraining_Task2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport csv\n\n\nclass Task2Pipeline(object):\n def __init__(self):\n self.file = open('bilibili.csv', 'a+', encoding='utf-8', newline='')\n self.writer = csv.writer(self.file)\n\n # 对 spider 传递过来的 item 对象进行处理\n def process_item(self, item, spider):\n # 数据处理:比如缺失数据整理、删除;重复数据清理;不合理数据的整理\n\n\n # 数据存储\n self.writer.writerow(\n [item['rank'], item['author'], item['arcurl'], item['description'], item['favorites'], item['play'],\n item['rank_score'], item['tag'], item['title'], item['video_review']])\n\n return item\n\n def close_spider(self, spider):\n self.file.close()" }, { "alpha_fraction": 0.6181318759918213, "alphanum_fraction": 0.6208791136741638, "avg_line_length": 19.19444465637207, "blob_id": "6168cb309a8eb9aa554be92814d5427b4c7616f7", "content_id": "2edcf1c45092dd68317d1a76664aef727d5d3eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": "no_license", "max_line_length": 52, "num_lines": 36, "path": "/Task2/items.py", "repo_name": "goddess5321/BigDataTraining_Task2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass Task2Item(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n author = scrapy.Field() # 视频作者\n\n arcurl = scrapy.Field() # 视频 url\n\n description = scrapy.Field() # 视频作品描述\n\n favorites = scrapy.Field() # 三连量\n\n play = scrapy.Field() # 播放量\n\n # rank_index = scrapy.Field() # 排名页数\n\n # rank_offset = scrapy.Field() # 页内排序\n\n rank = scrapy.Field() # 排名\n\n rank_score = scrapy.Field() # 排名分数\n\n tag = scrapy.Field() # 标签\n\n title = scrapy.Field() # 标题\n\n video_review = scrapy.Field() # 弹幕数\n\n" }, { "alpha_fraction": 0.5436997413635254, "alphanum_fraction": 0.5978552103042603, "avg_line_length": 40.42222213745117, "blob_id": "f2c87f37f55404b072896310f17d53a1dd7b2aef", "content_id": "79eb2000d20d005e79f84ad382f93837fe5bade6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 117, "num_lines": 45, "path": "/Task2/spiders/bilibili.py", "repo_name": "goddess5321/BigDataTraining_Task2", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nfrom Task2.items import Task2Item\n\n# 爬取 b站 2020-05-01至 2020-05-31一个月以来发布的所有鬼畜视频\nclass BilibiliSpider(scrapy.Spider):\n name = 'Task2'\n allowed_domains = ['s.search.bilibili.com']\n prefix = 'https://s.search.bilibili.com'\n\n # 起始请求,主要为了获取页数 numPages\n start_urls = [\n 'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_32548301302379645'\n '&main_ver=v3&search_type=video&view_type=hot_rank&order=click'\n 
'&copy_right=-1&cate_id=22&pagesize=20&time_from=20200501&time_to=20200531'\n ]\n\n # 第一步:获取页数 numPages\n def parse(self, response):\n r = json.loads(response.text)\n numPages = r['numPages']\n\n for i in range(numPages):\n url = 'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_32548301302379645' \\\n '&main_ver=v3&search_type=video&view_type=hot_rank&order=click' \\\n '&copy_right=-1&cate_id=22&page={}&pagesize=20&time_from=20200501&time_to=20200531' . format(i + 1)\n yield scrapy.Request(url=url, callback=self.parse_detail)\n\n\n def parse_detail(self, response):\n result = json.loads(response.text)['result']\n\n for i in range(len(result)):\n item = Task2Item()\n item['rank'] = (result[i]['rank_index']) * 20 + result[i]['rank_offset']\n item['author'] = result[i]['author']\n item['arcurl'] = result[i]['arcurl']\n item['description'] = result[i]['description']\n item['favorites'] = result[i]['favorites']\n item['play'] = result[i]['play']\n item['rank_score'] = result[i]['rank_score']\n item['tag'] = result[i]['tag'].replace(',', '&&')\n item['title'] = result[i]['title']\n item['video_review'] = result[i]['video_review']\n yield item\n\n" }, { "alpha_fraction": 0.8775510191917419, "alphanum_fraction": 0.884353756904602, "avg_line_length": 35.75, "blob_id": "eb451ed468ee7a2fcf74d003af204b03cdbcca37", "content_id": "7d58b7c6843afe190aeef1c0733eea2ac1be566b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 319, "license_type": "no_license", "max_line_length": 108, "num_lines": 4, "path": "/README.md", "repo_name": "goddess5321/BigDataTraining_Task2", "src_encoding": "UTF-8", "text": "# BigDataTraining_Task2\n选择一个感兴趣的方向或者课题相关的方向的网站数据进行采集,数据不限定与API接口、网页、JS注入内容,并将数据保存csv或MySQL或Redis;有兴趣的同学可以试试 Scrapy 和 Redis 连用的分布式爬虫。\n\n选择爬取b站鬼畜区排名榜\n" } ]
4
antoinemassih/flasklambda
https://github.com/antoinemassih/flasklambda
5604f3c4d55117b7a9d5655fb41acaa619cef147
48d750d305f13470dbcf7e171342ef3216941f9c
6e1f8283b51d6cbedfd2690d05e810fd305f3de8
refs/heads/master
2023-02-01T08:28:53.114003
2020-12-20T17:45:32
2020-12-20T17:45:32
323,130,383
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6958613991737366, "alphanum_fraction": 0.699711263179779, "avg_line_length": 21.586956024169922, "blob_id": "f9efd82b7db6e8803b7f3bb7f1ae735e372ad1ea", "content_id": "022b823ebe020230a414d9ec94b8dd7cc83584b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 78, "num_lines": 46, "path": "/app.py", "repo_name": "antoinemassih/flasklambda", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_potion import Api, ModelResource\n\napp = Flask(__name__)\napp.config[\n 'SQLALCHEMY_DATABASE_URI'] = \"postgresql://postgres:[email protected]:5432/postgres\"\ndb = SQLAlchemy(app)\n\n\nclass Dashboard(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(), nullable=False)\n\nclass Tickers(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(), nullable=False)\n\nclass Groups(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(), nullable=False)\n\n\ndb.create_all()\n\n\nclass DashboardResource(ModelResource):\n class Meta:\n model = Dashboard\n\nclass TickerResource(ModelResource):\n class Meta:\n model = Tickers\n\nclass GroupResource(ModelResource):\n class Meta:\n model = Groups\n\n\napi = Api(app)\napi.add_resource(DashboardResource)\napi.add_resource(TickerResource)\napi.add_resource(GroupResource)\n\nif __name__ == '__main__':\n app.run()\n" } ]
1
Barnez299/fastAPItest
https://github.com/Barnez299/fastAPItest
3f39c1f83e09676f987cfd71f3c596984ca2ce67
27ba368cc2e143cbd50353269761c69c30d1aa71
47b270fa1e7481746fde5b7c16e8ebfc22c8b7b0
refs/heads/main
2023-01-27T13:24:17.819206
2020-12-11T12:48:52
2020-12-11T12:48:52
320,546,559
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7799999713897705, "avg_line_length": 23.83333396911621, "blob_id": "1acf41368c4fcef371a8048b9124ae890d3dc184", "content_id": "851186cf42a0835b9b8057abb8a90a5b9b2aa6dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 150, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/README.md", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "# fastAPItest\nFast API Test Build from scratch\n\n## Original Github project source used\n\nhttps://github.com/Mr-Manna/FastAPI-CRUD/blob/master/main.py\n\n" }, { "alpha_fraction": 0.7015873193740845, "alphanum_fraction": 0.7015873193740845, "avg_line_length": 21.571428298950195, "blob_id": "66331133a9a11c3093dcd8049f448c2255e96667", "content_id": "7627e03d2d8d63dbcdb997098ac655aefe6cca78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 65, "num_lines": 14, "path": "/post/schema.py", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom pydantic import BaseModel\n\n\n''' Model Schema Using Pydantic '''\n\n\nclass Post(BaseModel):\n id: int\n title: str\n body: str\n is_published: bool = False # Providing a default value False\n created: datetime = datetime.utcnow()\n modified: datetime = datetime.utcnow()" }, { "alpha_fraction": 0.695049524307251, "alphanum_fraction": 0.7128713130950928, "avg_line_length": 17.740739822387695, "blob_id": "6291b1a0c272fc2daee908249103b65775a15281", "content_id": "f6b08582144abbb2fba950b5263a2e998dce6cb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 64, "num_lines": 27, "path": "/config.py", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\nimport databases\nimport sqlalchemy\n\n\napp = FastAPI()\n\n\n''' DATABASE CONNECTION '''\nDATABASE_URL = \"postgresql://postgres:123456789@localhost/posts\"\ndatabase = databases.Database(DATABASE_URL)\nmetadata = sqlalchemy.MetaData()\n\nengine = sqlalchemy.create_engine(\n DATABASE_URL\n)\n\n\n# ''' APP EVENT SETTING'''\n# @app.on_event(\"startup\")\n# async def startup():\n# await database.connect()\n\n\n# @app.on_event(\"shutdown\")\n# async def shutdown():\n# await database.disconnect()" }, { "alpha_fraction": 0.7033492922782898, "alphanum_fraction": 0.7081339955329895, "avg_line_length": 38.25, "blob_id": "e8c0bdcdd2f3f5977f6139802bef1801f11165d9", "content_id": "2827d75077c34693560bbfc604c035c5ab9eadf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 113, "num_lines": 16, "path": "/post/model.py", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "import sqlalchemy\nfrom config import metadata\nfrom datetime import datetime\n\n\n''' SQLAlchemy Model'''\nposts = sqlalchemy.Table(\n \"posts\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column(\"title\", sqlalchemy.String(100), unique=True),\n sqlalchemy.Column(\"body\", sqlalchemy.Text),\n sqlalchemy.Column(\"is_published\", sqlalchemy.Boolean),\n sqlalchemy.Column(\"created\", sqlalchemy.DateTime, default=datetime.utcnow().strftime(\"%Y-%m-%d\" 
\"%H:%M:%S\")),\n sqlalchemy.Column(\"modified\", sqlalchemy.DateTime, default=datetime.utcnow().strftime(\"%Y-%m-%d\" \"%H:%M:%S\"))\n)" }, { "alpha_fraction": 0.6449375748634338, "alphanum_fraction": 0.6588072180747986, "avg_line_length": 18, "blob_id": "82088a922f5afc58eb2ec4bd96bfa686a8bd558f", "content_id": "fc937925d6770395d37ac29bafb24632583e9d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "no_license", "max_line_length": 65, "num_lines": 38, "path": "/main.py", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI, Depends\nfrom config import engine\nfrom config import metadata, database\n\nimport uvicorn\n\nfrom post.route import post_route\n\nmetadata.create_all(engine)\n\n\napp = FastAPI(\n title=\"FastAPI CRUD Example\",\n docs_url=\"/docs\", redoc_url=\"/redocs\"\n)\n\n\n''' APP EVENT SETTING'''\[email protected]_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\[email protected]_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\napp.include_router(post_route, prefix=\"/api/post\", tags=[\"post\"])\n\n\[email protected](\"/\")\ndef home():\n return {\"message\": \"Welcome to FastAPI CRUD Example.\"}\n\n\nif __name__ == '__main__':\n\n uvicorn.run(\"main:app\", host=\"127.0.0.1\", port=8000)" }, { "alpha_fraction": 0.6727045774459839, "alphanum_fraction": 0.6783260703086853, "avg_line_length": 31.693878173828125, "blob_id": "0c429e5f2054008bf47263b18931ad661b9fc819", "content_id": "20b8d32a9dda32d1c9fb34da0711993b9ef3664c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1601, "license_type": "no_license", "max_line_length": 99, "num_lines": 49, "path": "/post/route.py", "repo_name": "Barnez299/fastAPItest", "src_encoding": "UTF-8", "text": "from fastapi import APIRouter\nfrom typing import List\nfrom config import database\nimport secrets\n\nfrom .model import posts\nfrom .schema import Post\n\npost_route = APIRouter()\n\n\n@post_route.get(\"/posts\", response_model=List[Post], status_code=200)\nasync def all_posts():\n query = posts.select()\n all_posts = await database.fetch_all(query)\n if posts is None:\n return {\"message\": \" No post found!\"}\n else:\n return all_posts\n\n\n@post_route.get(\"/post/{id}\", response_model=Post, status_code=200)\nasync def get_post(id:int):\n query = posts.select().where(posts.c.id == id)\n return await database.fetch_one(query=query)\n\n\n@post_route.post(\"/create/\", response_model=Post, status_code=201)\nasync def create(post: Post):\n query = posts.insert().values(title=post.title, body=post.body, is_published=post.is_published,\n created=post.created, modified=post.modified)\n last_record_id = await database.execute(query=query)\n return {**post.dict(), \"id\": last_record_id}\n\n\n@post_route.patch(\"/update/{id}\", response_model=Post)\nasync def update(id:int, post: Post):\n query = posts.update().where(posts.c.id == id).values(\n title=post.title, body=post.body,\n is_published=post.is_published, created=post.created,\n modified=post.modified)\n last_record_id = await database.execute(query=query)\n return {**post.dict(), \"id\": last_record_id}\n\n\n@post_route.delete(\"/delete/{id}\", response_model=Post)\nasync def delete(id:int):\n query = posts.delete().where(posts.c.id == id)\n return await database.execute(query)" } ]
6
eva-6-3/cifar10_cnn_model
https://github.com/eva-6-3/cifar10_cnn_model
c124a4293ac1835998c29c3802972721aaa55a76
16a15eea582bb236d4625d0a39928a2e8e2a2a80
deeb6f4b355563762222fd4ebc9351ce322c9e4b
refs/heads/main
2023-06-03T01:06:06.517226
2021-06-22T06:34:38
2021-06-22T06:34:38
376,363,895
0
1
null
2021-06-12T18:55:47
2021-06-18T22:17:13
2021-06-22T06:34:38
Jupyter Notebook
[ { "alpha_fraction": 0.4538866877555847, "alphanum_fraction": 0.4907773435115814, "avg_line_length": 26.10714340209961, "blob_id": "e9ac1d691969be0c0978a614da27fef05942c917", "content_id": "a81a6ef0ef9c8c8af194cced0e33c137205f0710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3036, "license_type": "no_license", "max_line_length": 85, "num_lines": 112, "path": "/utils/model.py", "repo_name": "eva-6-3/cifar10_cnn_model", "src_encoding": "UTF-8", "text": "import torch.nn.functional as F\nimport torch.nn as nn\n\ndropout_value = 0.1\n\nclass SeparableConv2d(nn.Module):\n def __init__(\n self,\n in_channels, out_channels, \n kernel_size=1, stride=1, \n padding=0, dilation=1, \n bias=False\n ):\n super().__init__()\n self.sep_conv = nn.Sequential(\n nn.Conv2d(\n in_channels, in_channels, \n kernel_size, stride, padding, dilation, groups=in_channels, bias=bias\n ),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Conv2d(\n in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias\n ),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n )\n \n def forward(self, x):\n x = self.sep_conv(x)\n return x\n\n \nclass ConvBNAct(nn.Module):\n def __init__(\n self,\n in_channels, out_channels,\n k=3, s=1, p=1, \n dilation=1, groups=1, bias=False,\n dropout_value=dropout_value,\n regularizers=True,\n ):\n super().__init__()\n layers = []\n layers.extend([\n nn.Conv2d(\n in_channels, out_channels, \n k, s, p, \n dilation, groups, bias,\n )\n ])\n if regularizers:\n layers.extend([\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(dropout_value),\n ])\n self.custom_conv = nn.Sequential(*layers)\n \n def forward(self, x):\n x = self.custom_conv(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n \n # C1 BLOCK\n self.convblock_0 = ConvBNAct(3, 16)\n self.convblock_1 = ConvBNAct(16, 32)\n self.convblock_2 = ConvBNAct(32, 32)\n self.dilated_conv_1 = ConvBNAct(32, 32, k=3, s=2, dilation=2)\n \n # C2 BLOCK\n self.convblock_3 = ConvBNAct(32, 32)\n self.convblock_4 = ConvBNAct(32, 52)\n self.dilated_conv_2 = ConvBNAct(52, 64, k=3, s=2, dilation=2)\n \n # C3 BLOCK\n self.sep_conv_1 = ConvBNAct(64, 64)\n self.convblock_7 = ConvBNAct(64, 64)\n self.strided_conv_1 = ConvBNAct(64, 64, k=1, s=2)\n \n # C4 BLOCK\n self.convblock_5 = ConvBNAct(64, 64)\n self.convblock_6 = ConvBNAct(64, 10, regularizers=False)\n \n # OUTPUT BLOCK\n self.gap = nn.AvgPool2d(kernel_size=5)\n \n def forward(self, x):\n x = self.convblock_0(x)\n x = self.convblock_1(x)\n x = self.convblock_2(x)\n x = self.dilated_conv_1(x)\n\n x = self.convblock_3(x)\n x = self.convblock_4(x)\n x = self.dilated_conv_2(x)\n \n x = self.sep_conv_1(x)\n x = self.convblock_7(x)\n x = self.strided_conv_1(x)\n \n x = self.convblock_5(x)\n x = self.convblock_6(x)\n\n x = self.gap(x)\n \n x = x.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n" }, { "alpha_fraction": 0.6306928396224976, "alphanum_fraction": 0.6669544577598572, "avg_line_length": 45.33000183105469, "blob_id": "986fb9baddb63bae719e54f75e3b703b18fb9915", "content_id": "db88ce0c58bdcc9a4034f3aca6bd166c007f39c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4633, "license_type": "no_license", "max_line_length": 215, "num_lines": 100, "path": "/README.md", "repo_name": "eva-6-3/cifar10_cnn_model", "src_encoding": "UTF-8", "text": "# cifar10 cnn Advanced Concepts\n\n# Group: EVA6 - Group 3\n1. Muhsin Abdul Mohammed - [email protected] \n2. 
Nilanjana Dev Nath - [email protected]\n3. Pramod Ramachandra Bhagwat - [email protected]\n4. Udaya Kumar NAndhanuru - [email protected]\n------\n\n# Data Exploration\nCIFAR-10 contains 1000 images per class for test, and 5000 images per class for train.<br>\nThe classes in CIFAR-10 are Airplane, Automobile, Bird, Cat, Deer, Dog, Frog, Horse, Ship, Truck.<br>\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/train_class_distribution.png\" alt=\"train_class_distribution\" width=\"500\"/>\n\n### mean and std for dataset\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/mean_std_dataset.png\" alt=\"mean_std_dataset\" width=\"500\"/>\n\n### Some sample images from train set -- \n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/train_rand_images_1.png\" alt=\"train_rand_images_1\" width=\"400\"/>\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/train_rand_images_2.png\" alt=\"train_rand_images_2\" width=\"400\"/>\n\n### Some sample images from test set -- \n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/test_rand_images_1.png\" alt=\"test_rand_images_1\" width=\"400\"/>\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/test_rand_images_2.png\" alt=\"test_rand_images_2\" width=\"400\"/>\n\n# Model Architecture\nOur architecture is C1C2C3C40 without using any pooling operations.<br>\nInstead of pooling we used a combination of dilated convolutions and strided convolutions. <br>\nIn C3 we have also used depthwise separable convolutions instead of normal convolution layers.<br>\nThe total number of parameters for our model is 96,596. <br>\nWe have not used any Dense Layer; instead we targeted GAP to get the output classes dim.\n\n```python\n        x = self.convblock_1(x)\n        x = self.convblock_2(x)\n        x = self.dilated_conv_1(x)\n\n        x = self.convblock_3(x)\n        x = self.convblock_4(x)\n        x = self.dilated_conv_2(x)\n        \n        x = self.sep_conv_1(x)\n        x = self.sep_conv_2(x)\n        x = self.strided_conv_1(x)\n        \n        x = self.convblock_5(x)\n        x = self.convblock_6(x)\n\n        x = self.gap(x)\n        x = x.view(-1, 10)\n```\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/model_params.png\" alt=\"model_params\" width=\"400\"/>\n\n\n# Augmentation\n```python\n    self.train_transforms = A.Compose([\n        A.HorizontalFlip(p=0.5),\n        A.ShiftScaleRotate(\n            shift_limit=0.0625, scale_limit=0.1, \n            rotate_limit=45, interpolation=1, \n            border_mode=4, p=0.5\n        ),\n        A.CoarseDropout(\n            max_holes=2, max_height=8, \n            max_width=8, p=0.3\n        ),\n        A.RandomBrightnessContrast(p=0.2),\n        A.ToGray(p=0.1),\n        A.Normalize(\n            mean=self.mean, \n            std=self.std,\n            always_apply=True\n        ),\n        ToTensorV2()\n    ])\n```\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/img_augmetation_with_normalization.png\" alt=\"img_augmetation_with_normalization\" width=\"400\"/>\n\n\n# Training\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/LOSS_GRAPH.png\" alt=\"LOSS_GRAPH\" width=\"400\"/>\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/ACC_GRAPH.png\" alt=\"ACC_GRAPH\" width=\"400\"/>\n<img src=\"https://github.com/askmuhsin/cifar10_cnn_model/blob/main/resources/misclassified_images.png\" alt=\"misclassified_images\" width=\"400\"/>\n\n\n# Goals \n- [X] Model is trained on GPU\n- [X] change the architecture to C1C2C3C40 (No MaxPooling, but 3 3x3 layers with stride of 2 instead) (If you can figure out how to 
use Dilated kernels here instead of MP or strided convolution, then 200pts extra!)\n- [X] total RF must be more than 44. _(Bonus points if RF > 52)_\n- [X] one of the layers must use Depthwise Separable Convolution. _(Bonus points for two layers)_\n- [X] one of the layers must use Dilated Convolution\n- [X] use GAP (compulsory):- add FC after GAP to target #of classes (optional) _(if optional achieved Bonus points)_\n- [X] use albumentation library and apply:\n  - [X] horizontal flip\n  - [X] shiftScaleRotate\n  - [x] coarseDropout (max_holes = 1, max_height=16px, max_width=1, min_holes = 1, min_height=16px, min_width=16px, fill_value=(mean of your dataset), mask_fill_value = None)\n  - [X] grayscale _(For Bonus points)_\n- [ ] achieve 85% accuracy, as many epochs as you want. Total Params to be less than 200k. _(Bonus for 87% acc, and <100k params)_\n- [X] upload to Github\n" }, { "alpha_fraction": 0.5805330276489258, "alphanum_fraction": 0.5898030400276184, "avg_line_length": 27.5289249420166, "blob_id": "430ed7c3e19cf61d66d0e7ccd640abd08845fb8f", "content_id": "89fb662abe765b321c0f62d4fcb3409feb0ac024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3452, "license_type": "no_license", "max_line_length": 98, "num_lines": 121, "path": "/utils/misc.py", "repo_name": "eva-6-3/cifar10_cnn_model", "src_encoding": "UTF-8", "text": "import torch\nimport random\nimport torchvision\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\n\n\ndef is_cuda(debug=True):\n    cuda = torch.cuda.is_available()\n    if debug:\n        print(\"[INFO] Cuda Available : \", cuda)\n    return cuda\n\n\ndef get_device():\n    use_cuda = is_cuda(debug=False)\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n    print(\"[INFO] device : \", device)\n    return device\n\n\ndef set_seed(seed=1):\n    cuda = is_cuda(debug=False)\n    torch.manual_seed(seed)\n    if cuda:\n        torch.cuda.manual_seed(seed)\n    print(f\"[INFO] seed set {seed}\")\n\n\ndef show_random_images_for_each_class(\n    train_data,\n    num_images_per_class=16\n):\n    for c, cls in enumerate(train_data.classes):\n        rand_targets = random.sample([\n            n\n            for n, x in enumerate(train_data.targets)\n            if x==c\n        ], k=num_images_per_class)\n        show_img_grid(\n            np.transpose(train_data.data[rand_targets], axes=(0, 3, 1, 2))\n        )\n        plt.title(cls)\n    \n\ndef show_img_grid(data):\n    try:\n        grid_img = torchvision.utils.make_grid(data.cpu().detach())\n    except:\n        data = torch.from_numpy(data)\n        grid_img = torchvision.utils.make_grid(data)\n    \n    plt.figure(figsize=(10, 10))\n    plt.imshow(grid_img.permute(1, 2, 0))\n    \n\ndef show_random_images(data_loader):\n    data, target = next(iter(data_loader))\n    show_img_grid(data)\n\n\ndef show_model_summary(model, input_size=(1, 28, 28)):\n    summary(model, input_size=input_size)\n\n\ndef get_wrong_predictions(model, test_loader, device):\n    model.eval()\n    test_loss = 0\n    correct = 0\n\n    wrong_correct = []\n    wrong_predicted = []\n    wrong_image_data = []\n\n    with torch.no_grad():\n        for data, target in test_loader:\n            data, target = data.to(device), target.to(device)\n            output = model(data)\n            # test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss\n            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability\n            status = pred.eq(target.view_as(pred))\n            # correct += status.sum().item()\n\n            mistakes, _ = torch.where(status==False)\n            if len(mistakes):\n                m_data = data[mistakes]\n                m_target = target[mistakes]\n                m_output = output[mistakes]\n                
m_pred = pred[mistakes]\n correct = [x.item() for x in m_target.cpu().detach()]\n predicted = [x.item() for x in m_pred.cpu().detach()]\n image_data = [x for x in m_data.cpu().detach()]\n\n wrong_correct.extend(correct)\n wrong_predicted.extend(predicted)\n wrong_image_data.extend(image_data)\n \n return wrong_correct, wrong_predicted, wrong_image_data\n\n\ndef show_grid(img):\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n\n\ndef show_wrong_images(targets, predicts, images, size=20, grid=(5, 4)):\n img_data_temp = []\n wps = []\n for n, (wc, wp, wi) in enumerate(zip(targets, predicts, images)):\n wps.append(wp)\n img_data_temp.append(wi)\n if n>18:\n break\n \n wrong_images_temp = torch.stack(img_data_temp)\n print()\n print(f\"Mistakenly predicted as {wps}\")\n\n grid_img = torchvision.utils.make_grid(wrong_images_temp, nrow=grid[0])\n show_grid(grid_img)\n" } ]
3
jogly/datasci
https://github.com/jogly/datasci
1cd0217dc05d5b033dda07ea3ca498cd8342ad88
bfd3e772af0e8f71d55a897969c94c78d6884f68
750b818c6522ca24b0b7c02c32f9c3cd3fcad50e
refs/heads/master
2022-02-26T23:25:57.454546
2014-07-30T01:16:11
2014-07-30T01:16:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.65625, "alphanum_fraction": 0.71875, "avg_line_length": 95, "blob_id": "a389205192cf79dadd0dc58bba0b6b99e05fbd25", "content_id": "56ff335cbaa7defb7526228eb6059e41914a511d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 96, "license_type": "no_license", "max_line_length": 95, "num_lines": 1, "path": "/assignment2/b_select_project.sql", "repo_name": "jogly/datasci", "src_encoding": "UTF-8", "text": "SELECT COUNT(*) FROM ( SELECT TERM FROM FREQUENCY WHERE DOCID = \"10398_txt_earn\" AND COUNT=1 );\n" }, { "alpha_fraction": 0.48651349544525146, "alphanum_fraction": 0.5084915161132812, "avg_line_length": 29.33333396911621, "blob_id": "57f2c4a34755f624d178fd6e2ff2bf0fb8fc1d95", "content_id": "df84f0dc8cc8eb26fd2d4f4972f2909f31742038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1001, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/assignment3/multiply.py", "repo_name": "jogly/datasci", "src_encoding": "UTF-8", "text": "import MapReduce\nimport sys\nimport operator\n\nmr = MapReduce.MapReduce()\n\ndef mapper(record):\n if record[0] == 'a':\n for k in range(5):\n # emit key = (a_i, a_k), value = (a_j, a[i,j])\n mr.emit_intermediate((record[1], k), (record[2], record[3]))\n else:\n for i in range(5):\n # emit key = (b_i, b_k), value = (b_j, b[j,k])\n mr.emit_intermediate((i, record[2]), (record[1], record[3]))\n\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence \n dik = {0: [], 1:[], 2:[], 3:[], 4:[]}\n map (lambda tupl: dik[tupl[0]].append(tupl[1]), list_of_values)\n \n mr.emit((key[0], key[1], \\\n reduce(lambda x,y: x + y, \\\n map(lambda lis: lis[0] * lis[1], \\\n filter(lambda lis: len(lis) == 2, dik.itervalues())))))\n \n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n" }, { "alpha_fraction": 0.6598891019821167, "alphanum_fraction": 0.6728280782699585, "avg_line_length": 30.823530197143555, "blob_id": "bd9703e3849407a80b8caccc37ae1122458a6ea5", "content_id": "9a695702b54ee30bb05b48ef608fa2fe04615173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 541, "license_type": "no_license", "max_line_length": 114, "num_lines": 17, "path": "/assignment2/i_keyword_search.sql", "repo_name": "jogly/datasci", "src_encoding": "UTF-8", "text": "SELECT SUM(A.COUNT * B.COUNT) FROM (\n\tSELECT 'Q' AS DOCID, 'washington' AS TERM, 1 AS COUNT\n\tUNION\n\tSELECT 'Q' AS DOCID, 'taxes' AS TERM, 1 AS COUNT\n\tUNION\n\tSELECT 'Q' AS DOCID, 'treasury' AS TERM, 1 AS COUNT\n) A \nJOIN \n(\n\tSELECT * FROM FREQUENCY\n\tUNION\n\tSELECT 'Q' AS DOCID, 'washington' AS TERM, 1 AS COUNT\n\tUNION\n\tSELECT 'Q' AS DOCID, 'taxes' AS TERM, 1 AS COUNT\n\tUNION\n\tSELECT 'Q' AS DOCID, 'treasury' AS TERM, 1 AS COUNT\n) B ON A.TERM = B.TERM WHERE A.DOCID = 'Q' GROUP BY A.DOCID, B.DOCID ORDER BY SUM(A.COUNT * B.COUNT) DESC LIMIT 1;\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 40, "blob_id": "7399e4403e4b1fb44d8f0d51b401ce9294b03fdb", "content_id": "fea61f4addcc5ac2640fd057512027f05d21c4bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 164, "license_type": "no_license", "max_line_length": 66, "num_lines": 4, "path": "/assignment2/f_two_words.sql", "repo_name": "jogly/datasci", 
"src_encoding": "UTF-8", "text": "SELECT COUNT(*) FROM ( \n\tSELECT DISTINCT(DOCID) FROM FREQUENCY WHERE TERM = 'transactions'\n\tINTERSECT\n\tSELECT DISTINCT(DOCID) FROM FREQUENCY WHERE TERM = 'world');\n" } ]
4
lidavidm/archon
https://github.com/lidavidm/archon
92ea423478bff2d90408a7c31395a02830d53e98
a9eb4b326db17651427cab094d85fe59d9a959d3
3eb8becb7079fefd62068ab98abf754cbf574e73
refs/heads/master
2021-01-20T10:37:32.429776
2011-08-21T16:52:07
2011-08-21T16:52:07
1,751,740
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5535682439804077, "alphanum_fraction": 0.5566684603691101, "avg_line_length": 33.37090301513672, "blob_id": "c515c4f04928318c048bcc0e136ef0774dc17ec1", "content_id": "c6819fcda392927077d09c29ba15f08c1c53c2ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16773, "license_type": "no_license", "max_line_length": 76, "num_lines": 488, "path": "/archon/objects.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import math\nimport copy\nimport types\nimport random\nimport datetime\nimport collections\n\nimport archon.common\nimport archon.templating\n\nfrom archon.entity import (Entity, EntityHook, EntityHookNotFoundError,\n MutableEntityHook)\n\n\nclass RoomEntityHook(MutableEntityHook):\n KIND = \"room\"\n\n def __init__(self, entity, attributes):\n super(RoomEntityHook, self).__init__(entity, attributes)\n self.attributes.update(\n time=datetime.datetime(1000, 1, 1)\n )\n\n @property\n def timeString(self):\n return self.attributes['time'].strftime('%a, %b %d %H:%M')\n\n @property\n def friendlyName(self):\n name = self.attributes['friendlyName']\n if self.entity.area:\n return ': '.join([self.entity.area.attributes['name'], name])\n else:\n return name\n\n def save(self):\n data = self.attributes.copy()\n del data['time']\n return data\n\n\nclass InventoryProxy:\n \"\"\"\n Stores inventory information in a dictionary.\n\n The keys are the locations of the entities. If the entity kind is\n mutable, then the value is a list of entities; else, it is a count\n denoting the quantity held.\n \"\"\"\n def __init__(self, items, cache):\n self.inventory = {}\n self.cache = cache\n\n for path, values in items.items():\n path = cache.fullPathFor(path)\n self.inventory[path] = values\n\n def add(self, item, quantity=1):\n if isinstance(item, Entity):\n if item.mutable:\n if item.location not in self.inventory:\n self.inventory[item.location] = []\n self.inventory[item.location].append(item)\n else:\n if item.location not in self.inventory:\n self.inventory[item.location] = 0\n self.inventory[item.location] += quantity\n else:\n item = self.cache.fullPathFor(item)\n if item not in self.inventory:\n self.inventory[item] = 0\n self.inventory[item] += quantity\n\n def remove(self, item, quantity='all'):\n if isinstance(item, Entity):\n loc = item.location\n if loc in self.inventory:\n if item.mutable:\n self.inventory[loc].remove(item)\n else:\n self.inventory[loc] -= 1\n if not self.inventory[loc]:\n del self.inventory[loc]\n else:\n item = self.cache.fullPathFor(item)\n if item in self.inventory:\n self.inventory[item] -= 1\n if not self.inventory[item]:\n del self.inventory[item]\n\n def locations(self):\n \"\"\"Iterate through the entity locations of held items.\"\"\"\n return self.inventory.keys()\n\n def counts(self):\n \"\"\"Iterate through the locations and counts of held items.\"\"\"\n for loc, items in self.inventory.items():\n if isinstance(items, list):\n yield (loc, len(items))\n else:\n yield (loc, items)\n\n def entities(self):\n \"\"\"\n Iterate through the 3-tuples (location, count, instances).\n\n If the entity is immutable, ``instances`` will be a one-item\n list. 
Else, it will be the list of entity instances.\n \"\"\"\n for loc, items in self.inventory.items():\n if isinstance(items, list):\n yield (loc, len(items), items)\n else:\n yield (loc, items, [self.cache.lookup(loc)])\n\n def find(self, *args):\n \"\"\"Find an item by location or friendly name.\"\"\"\n if not args:\n return []\n if len(args) == 1:\n # Possibly a location\n if args[0] in self.locations():\n return self.get(args[0])\n criterion = ' '.join(args).lower()\n for loc, count, items in self.entities():\n for item in items:\n if item.friendlyName.lower() == criterion:\n return item\n raise KeyError\n\n def save(self):\n res = {}\n for loc, items in self.inventory.items():\n if isinstance(items, list):\n res[loc] = map(lambda x: x.location, items)\n else:\n res[loc] = items\n return res\n\n def __len__(self):\n return sum(d[1] for d in self.entities())\n\n\nclass PlayerEntityHook(MutableEntityHook):\n KIND = \"player\"\n\n \"\"\"The equations used to calculate stats based on acumen.\"\"\"\n equations = {\n \"increasing\": {\n \"equation\": lambda x: 1 / (1 + math.exp(-x)),\n \"variance\": (-0.3, 0.05),\n \"scale\": 0.007\n },\n \"decreasing\": {\n \"equation\": lambda x: 1 / math.exp(x),\n \"variance\": (-0.05, 0.3),\n \"scale\": 0.007\n }\n }\n\n def __init__(self, entity, attributes):\n if self.templates:\n attributes = self.templates['default'].attributes.viaTemplate(\n attributes)\n super().__init__(entity, attributes)\n # Load inventory, equip, etc. from data\n cache = entity.entityCache\n for slot, location in attributes['equip'].items():\n # can be None - no item equipped, or an entity, because I might\n # be a copy of an entity that already loaded the equipped items\n if isinstance(location, str):\n attributes['equip'][slot] = cache.lookup(location)\n self._inventory = InventoryProxy(attributes['inventory'], cache)\n\n @classmethod\n def defaultInstance(cls):\n \"\"\"Return the default instance of the player.\"\"\"\n return cls.templates['default'].copy(instanced=False)\n\n def damage(self, magnitude, category, kind, target):\n \"\"\"Damage a vital or stat of this player.\n\n :param magnitude: Amount of damage (use negative to heal).\n :param category: Either `'vital'` or `'stat'`.\n :param kind: Either an acumen type, or None, used to determine\n absorption amount.\n :param target: Either a vital or stat name.\"\"\"\n # XXX category ignored - how to deal with damaged stats?\n if kind is None:\n absorb = 0\n else:\n absorb = random.uniform(*self.stats[kind]['absorb'])\n realDamage = magnitude - (absorb * magnitude)\n self.vitals[target] -= realDamage\n if self.vitals[target] < 0:\n self.vitals[target] = 0\n elif self.vitals[target] > self.maxVitals[target]:\n self.vitals[target] = self.maxVitals[target]\n return realDamage\n\n def save(self):\n data = super().save()\n for slot, entity in self.attributes['equip'].items():\n if entity:\n data['equip'][slot] = entity.location\n data['inventory'] = self.inventory.save()\n return data\n\n @property\n def friendlyName(self):\n return 'You' # self.character['name']\n\n @property\n def character(self):\n \"\"\"Return the character information dictionary.\"\"\"\n return self.attributes['character']\n\n @property\n def inventory(self):\n \"\"\"Return the inventory.\"\"\"\n return self._inventory\n\n @property\n def equip(self):\n \"\"\"The equip dictionary.\"\"\"\n return self.attributes['equip']\n\n @property\n def acumen(self):\n \"\"\"The acumen dictionary.\"\"\"\n return self.attributes['acumen']\n\n @property\n def vitals(self):\n 
\"\"\"The vitals dictionary.\"\"\"\n return self.attributes['vitals']\n\n @property\n def level(self):\n \"\"\"The level of the character.\"\"\"\n return math.floor(sum(abs(x) for x in self.acumen.values()) / 100)\n\n @property\n def maxVitals(self):\n \"\"\"The maximum vital amount.\"\"\"\n res = {}\n for vital, multipliers in self.attributes['maxVitals'].items():\n res[vital] = round(sum(\n multiplier * abs(acumen) for multiplier, acumen in\n zip(multipliers, sorted(self.acumen.values()))))\n return res\n\n @property\n def stats(self):\n \"\"\"The stats dictionary.\"\"\"\n allStats = collections.defaultdict(dict)\n template = self.templates['default'].attributes['stats']['template']\n for acumenName, acumenSkill in self.acumen.items():\n for statName, statType in template.items():\n lbC = ubC = 0 # lower bound, upper bound constant terms\n if isinstance(statType, list):\n statType, lbC, ubC = statType\n eqData = self.__class__.equations[statType]\n baseStat = eqData['equation'](acumenSkill * eqData['scale'])\n allStats[acumenName][statName] = [\n c + (baseStat * (1 + v)) for v, c\n in zip(eqData['variance'], [lbC, ubC])]\n return allStats\n\n @property\n def description(self):\n desc = 'You are {name}, a {gender} of level {level}: {description}'\n return (desc.format(level=self.level, **self.character))\n\n\nclass EntityKey(collections.namedtuple('EntityKey', 'key prefix')):\n \"\"\"Contains the entity's name and its \"prefix\" (a, an, another, etc.)\"\"\"\n def __str__(self):\n return ' '.join([self.prefix, self.key])\n\n def save(self):\n return ', '.join([self.key, self.prefix])\n\n\nclass Room(Entity):\n \"\"\"\n The basic room type, a special-cased :class:`Entity`.\n\n Rooms contain other entities. All player movement and interaction occurs\n within rooms; however, rooms do not contain the actual entity objects,\n simply metadata to describe them. When an interaction occurs, a copy is\n created of the object and is stored in a room-specific cache.\n \"\"\"\n ROOM_ENTITY_KIND = 'room'\n onEnter = archon.common.signal('room.enter')\n\n def __init__(self, name, description, cache):\n super().__init__(name, Room.ROOM_ENTITY_KIND, cache, {})\n self._entityCopies = {}\n self._description = description\n self._contents = {}\n self._outputs = {}\n self.area = None\n\n def naturalFind(self, text):\n \"\"\"\n Attempt to find an entity key based on a variety of criteria.\n\n If there is no unique entity matched, return a set of all possible\n matches. Else, return the only match. Returns None if there is no\n match.\n\n This method is case-insensitive.\n \"\"\"\n criteria = [word.strip().lower() for word in text.split()]\n matches = set()\n for eKey in self.contents:\n prefix = [word.strip().lower() for word in eKey.prefix.split()]\n key = [word.strip().lower() for word in eKey.key.split()]\n prefixLength, keyLength = len(prefix), len(key)\n # 2 cases: only identity, or prefix-identity\n if len(criteria) < prefixLength + keyLength:\n # in this case, only identity\n if criteria == key:\n matches.add(eKey)\n else:\n if (criteria[:prefixLength] == prefix and\n criteria[prefixLength:] == key):\n matches.add(eKey)\n if not matches:\n return None\n elif len(matches) == 1:\n return matches.pop()\n else:\n return matches\n\n def add(self, entityLocation, key, location='', description='',\n prefix='', messages='third_person_neutral',\n options=None, instance=None):\n \"\"\"\n Add an entity or output to the room.\n\n :param entityLocation: The entity object's location\n (e.g. 
data.items.entity_name).\n :param key: The key for the entity.\n :param location: The location description for the entity.\n :param description: A description of the entity.\n :param messages: The messages for the entity's description.\n :param options: Options for the entity.\n :param instance: An instance of the entity.\n \"\"\"\n options = [] if options is None else options\n if isinstance(messages, str):\n messages = self.entityCache.lookup(messages)\n assert isinstance(messages, Entity)\n self._contents[key] = EntityData(\n entityLocation, key, location,\n description, prefix, messages, options)\n if instance:\n self._entityCopies[key] = instance\n\n def addRoom(self, direction, target):\n \"\"\"Add an exit to this room.\"\"\"\n self._outputs[direction] = target\n\n def remove(self, key):\n \"\"\"Remove an entity from this room.\"\"\"\n del self.contents[key]\n del self._entityCopies[key]\n\n def clearContents(self):\n \"\"\"Clear the contents of this room.\"\"\"\n self.contents.clear()\n self._entityCopies.clear()\n\n def entityFor(self, key):\n \"\"\"Retrieve or create a copy of an entity in this room.\"\"\"\n if key not in self._entityCopies:\n loc = self.contents[key].objectLocation\n self._entityCopies[key] = self._entityCache.lookup(loc).copy()\n return self._entityCopies[key]\n\n def describe(self, key=None, verbose=False):\n \"\"\"\n Describe the specified object, or if not given, the room.\n \"\"\"\n if key:\n entity = self.allContents[key]\n if key in self.contents:\n if verbose:\n entity = self.entityFor(key)\n return entity.description\n else:\n messages = entity.messages.attributes\n text = [messages.message('summary', entityData=entity)]\n if entity.description:\n text.append(messages.message('description',\n entityData=entity))\n return ' '.join(text)\n # text = 'There is {identity}{location}.'.format(\n # identity=entity[1],\n # location=' ' + entity[2] if entity[2] else ''\n # ).strip()\n # if entity[3]:\n # text = ' '.join([text, 'It is', entity[3] + '.'])\n # return text\n elif key in self.outputs:\n return 'You can go {}.'.format(key)\n else:\n outputs = 'Adjoining areas: ' + ', '.join(self.outputs)\n return '\\n'.join(\n ['You are in ' + self._description] +\n [self.describe(key) for key in sorted(self.contents)] +\n [outputs])\n\n def enter(self, elapsedTime):\n \"\"\"Enter the room at the given time.\"\"\"\n self.attributes['time'] = elapsedTime\n self.onEnter.send(self)\n\n def exit(self):\n \"\"\"Exit the room.\n\n :returns: datetime -- The current time\n \"\"\"\n return self.attributes['time']\n\n def copy(self):\n \"\"\"Copy the room - this will return the room itself.\"\"\"\n return self # Rooms are mutable singletons\n\n def save(self):\n \"\"\"Create a saveable representation of the room.\"\"\"\n res = {\"contents\": {}, \"outputs\": {},\n \"describe\": self._description,\n \"attributes\": self.attributes.save()}\n for key, data in self.contents.items():\n res[\"contents\"][key.save()] = data.save()\n for direction, target in self.outputs.items():\n res[\"outputs\"][direction] = target.location\n return {\"type\": \"room\", \"data\": res}\n\n @property\n def contents(self):\n return self._contents\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def allContents(self):\n return UnionDict(self.contents, self.outputs)\n\n @property\n def inputs(self):\n pass\n\n\nclass EntityData(collections.namedtuple(\n 'EntityData',\n 'objectLocation key location description prefix messages options'\n )):\n \"\"\"\n Contains the metadata used by a 
room to describe an entity.\n \"\"\"\n def save(self):\n \"\"\"Saves the metadata.\"\"\"\n data = {key: val for key, val in self._asdict().items() if val}\n data['entity'] = data['objectLocation']\n data['messages'] = data['messages'].location\n del data['objectLocation'], data['key']\n if 'prefix' in data:\n del data['prefix']\n if 'options' in data:\n data['options'] = ','.join(data['options'])\n return data\n\n\nclass UnionDict(dict):\n def __init__(self, *dicts):\n self._dicts = dicts\n\n def __getitem__(self, key):\n for dictionary in self._dicts:\n if key in dictionary:\n return dictionary[key]\n raise KeyError(key)\n" }, { "alpha_fraction": 0.5618634819984436, "alphanum_fraction": 0.5642470121383667, "avg_line_length": 30.39455795288086, "blob_id": "5e321774244bb7aabe3497b73f6bf1fb72f316fc", "content_id": "b480fcf452778676f25ef515be6d540aa2309744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4615, "license_type": "no_license", "max_line_length": 71, "num_lines": 147, "path": "/archon/templating.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import re\nimport ast\n\nimport archon.common\nimport archon.entity\n\n\nclass transform(archon.common.denoter):\n functions = {}\n\n\nclass predicate(archon.common.denoter):\n functions = {}\n\n\nclass TemplatingDict(dict):\n def __init__(self, dictionary, **kwargs):\n dictionary.update(kwargs)\n super().__init__(dictionary)\n self.kwargs = kwargs\n\n def __getattr__(self, key):\n if key.endswith('_capitalized'):\n return super().__getitem__(key[:-12]).capitalize()\n elif key in self:\n item = super().__getitem__(key)\n if isinstance(item, dict):\n return TemplatingDict(item, **self.kwargs)\n elif isinstance(item, str) and '{' in item and '}' in item:\n return item.format(**self)\n return item\n raise AttributeError(key)\n\n __getitem__ = __getattr__\n\n\nclass MessageTemplateEntityHook(archon.entity.EntityHook):\n KIND = \"message_template\"\n templates = {}\n splitRe = re.compile(r\"({[^{}]*?@.*?})\")\n fieldRe = re.compile(r\"{([^{}]*?)@(.*?)}\")\n funcRe = re.compile(r\"(.?[a-zA-Z0-9]*)(?:\\((.*)\\))?\")\n\n def __init__(self, entity, attributes):\n super().__init__(entity, attributes)\n for key, template in attributes.items():\n MessageTemplateEntityHook.templates[key] = template\n\n @classmethod\n def format(cls, mode, text, *args, **kwargs):\n \"\"\"\n Formatting syntax: {format_string[@new_syntax]}\n new syntax:\n name - calls a function\n name(\" \", 2, 3) - calls a function with parameters\n name + name2 - composes functions (applied right-to-left)\n .upper - calls a string method\n\n The formatting directive is first applied before functions are\n called, then all are substituted into the original string.\n For non-method functions, signatures are as such:\n def function(text, *args, nextFunction=None):\n \"\"\"\n replNumber = -1\n replFormats = {} # value is 2-tuple (format, extension)\n pieces = cls.splitRe.split(text)\n formatKeys = cls.template(mode, **kwargs)\n for index, piece in enumerate(pieces):\n res = cls.fieldRe.match(piece)\n if res:\n original, extension = res.groups()\n original = ''.join(['{', original, '}'])\n subtext = original.format(*args, **formatKeys)\n pieces[index] = cls.formatExtension(subtext, extension)\n return ''.join(pieces).format(*args, **formatKeys)\n\n @classmethod\n def formatExtension(cls, text, fmt):\n \"\"\"Processes the extended part of a format string.\n\n :param text: The expanded original part of the format 
string.\n :param fmt: The extended part of the format string.\"\"\"\n for func in reversed(fmt.split('+')):\n func = func.strip()\n match = cls.funcRe.match(func)\n if not match:\n raise ValueError(\"Invalid expression \" + func)\n func, args = match.groups()\n res = cls.evaluate(text, func, args)\n if res is True:\n continue\n elif res is False:\n return text\n else:\n text = res\n return text\n\n @classmethod\n def evaluate(cls, text, func, args):\n if func.startswith('!'):\n return not cls.evaluate(text, func[1:], args)\n elif func.startswith('.'):\n return getattr(text, func[1:])()\n else:\n if args:\n args = map(ast.literal_eval, args.split(','))\n else:\n args = []\n if predicate.contains(func):\n return predicate.get(func)(text, *args)\n elif transform.contains(func):\n return transform.get(func)(text, *args)\n else:\n raise ValueError(\n \"Format function {} not found\".format(func))\n\n @classmethod\n def template(cls, key, **kwargs):\n item = cls.templates[key]\n if isinstance(item, dict):\n return TemplatingDict(item, **kwargs)\n return item\n\n\nclass MessagesEntityHook(archon.entity.EntityHook):\n KIND = \"messages\"\n\n def message(self, name, *args, **kwargs):\n return MessageTemplateEntityHook.format(\n self.attributes['mode'],\n self.attributes['messages'][name],\n *args, **kwargs)\n\n\n@predicate('empty')\ndef empty(text):\n return not text\n\n\n@transform('prepend')\ndef prepend(text, char):\n return char + text\n\n\n@transform('drop')\ndef drop(text):\n return ''\n" }, { "alpha_fraction": 0.5529003739356995, "alphanum_fraction": 0.5529003739356995, "avg_line_length": 29.455554962158203, "blob_id": "90a9dd03ced5a46765344e525828d127af6c1313", "content_id": "45312934851ff6500c22ce34429570d2b5d64bd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5482, "license_type": "no_license", "max_line_length": 76, "num_lines": 180, "path": "/archon/common.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import copy\nimport blinker\n\n\ndef signal(name):\n \"\"\"Creates an event signal; based on blinker's signal.\"\"\"\n return blinker.signal(name)\n\n\nclass DenotedNotFoundError(Exception): pass\n\n\nclass DenotedInvalidError(Exception):\n \"\"\"Some function failed :meth:`denoter.verify`.\"\"\"\n\n\nclass denoter:\n \"\"\"\n Decorator to denote a function as having some purpose.\n\n This class is used as a decorator to associate a list of names with a\n function, which, for example, can be used to denote certain functions as\n commands and provide their command names. Subclass this to provide\n semantic usage information; when doing so, make sure to create a\n `functions` class attribute (or else the attribute of the superclass\n will be used).\n \"\"\"\n functions = {}\n\n def __init__(self, *names):\n for name in names:\n if name in self.functions:\n raise ValueError(\n \"Name {} already in use!\".format(name)\n )\n self.names = names\n\n def __call__(self, func):\n valid = self.verify(func)\n if not (valid is True):\n raise DenotedInvalidError(valid)\n for name in self.names:\n self.functions[name] = func\n return func\n\n @classmethod\n def contains(cls, func):\n \"\"\"Checks if this denoter class has a particular function.\"\"\"\n return func in cls.functions\n\n def verify(self, func):\n \"\"\"\n Override to check whether a function meets certain criteria.\n\n This method must return True. 
Anything else is treated as the error\n message to raise.\n \"\"\"\n return True\n\n @classmethod\n def get(cls, name):\n \"\"\"Get a particular function.\"\"\"\n try:\n return cls.functions[name]\n except KeyError:\n raise DenotedNotFoundError\n\n\nclass MergeItemSemigroup:\n def msum(self, orig, new):\n return new\n\n def mdifference(self, orig, new):\n return new\n\n\nclass NumericItemSemigroup(MergeItemSemigroup):\n def msum(self, orig, new):\n if type(orig) in (int, float) and type(new) in (int, float):\n return new + orig\n return new\n\n def mdifference(self, orig, new):\n if type(orig) in (int, float) and type(new) in (int, float):\n return new - orig\n return new\n\n\nclass Merge:\n \"\"\"\n Merge two dictionaries together using a patch dictionary.\n\n This does not support dictionaries nested within lists, or patching\n lists (they are treated as atomic values as strings and numbers are).\n \"\"\"\n def __init__(self, source, dest=None, patch=None, unsafe=False,\n sgroup=MergeItemSemigroup()):\n self.source = source\n self.dest = {}\n self.patch = patch\n self.sgroup = sgroup\n if dest:\n if unsafe:\n self.dest = dest\n else:\n self.dest = copy.deepcopy(dest)\n if patch:\n self.patch = patch\n\n def patched(self, redo=False):\n \"\"\"Apply the patch to a deepcopy of the source object.\"\"\"\n if not redo and self.dest:\n return self.dest\n self.dest = copy.deepcopy(self.source)\n stack = [self]\n while stack:\n merge = stack.pop()\n for key, value in merge.created.items():\n merge.dest[key] = self.sgroup.msum(\n merge.dest.get(key), value)\n for key in merge.deleted:\n del merge.dest[key]\n for key, patch in merge.updated.items():\n stack.append(\n Merge(merge.source[key], merge.dest[key], patch=patch,\n unsafe=True, sgroup=self.sgroup))\n return self.dest\n\n def compared(self, redo=False):\n \"\"\"Create a patch from a source and destination.\"\"\"\n if not redo and self.patch:\n return self.patch\n self.patch = {}\n stack = [self]\n while stack:\n merge = stack.pop()\n for key, data in merge.dest.items():\n if key not in merge.source:\n merge.create(key, data)\n elif data != merge.source[key]:\n if type(data) == dict:\n merge.update(key, {})\n stack.append(\n Merge(merge.source[key], data,\n patch=merge.patch['update'][key],\n sgroup=self.sgroup))\n else:\n merge.create(key, data)\n for key in merge.source:\n if key not in merge.dest:\n merge.delete(key)\n return self.patch\n\n def create(self, key, data):\n if \"create\" not in self.patch:\n self.patch[\"create\"] = {}\n self.patch[\"create\"][key] = self.sgroup.mdifference(\n self.source.get(key), data)\n\n def delete(self, key):\n if \"delete\" not in self.patch:\n self.patch[\"delete\"] = []\n self.patch[\"delete\"].append(key)\n\n def update(self, key, data):\n if \"update\" not in self.patch:\n self.patch[\"update\"] = {}\n self.patch[\"update\"][key] = data\n\n @property\n def created(self):\n return self.patch.get(\"create\", {})\n\n @property\n def deleted(self):\n return self.patch.get(\"delete\", [])\n\n @property\n def updated(self):\n return self.patch.get(\"update\", {})\n" }, { "alpha_fraction": 0.6650963425636292, "alphanum_fraction": 0.6650963425636292, "avg_line_length": 33.82089614868164, "blob_id": "bfe8438fc9f7d8e0bc446743e0aea739892a1296", "content_id": "45bb6b8ffe20e9a6a24e721ea7e73f3e035227c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2335, "license_type": "no_license", "max_line_length": 76, "num_lines": 67, "path": 
"/design/designdoc.rst", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "==============================\nOverthrow\n==============================\n\n:Author: David Li\n\nAfter the fall of the Cordelian Empire three centuries ago, no strong ruler\nhas emerged to reunite the now-peaceful Heartland of the former\nnation. However, the ambitious, yet cruel young emperor of the Kingdom of\nJemima has begun a terrorizing campaign of conquest throughout the\nland. One of his victims is the small Kingdom of Marcellus and its\ninhabitants, of which at least one swears revenge for her injuries.\n\nGeneral Information\n==============================\n\n\nGame Description\n==============================\n\nObjective\n------------------------------\nAs Cordelia, reach Sil Rosaria, the Rose City, and defeat Emperor Kyle.\n\nGameplay\n------------------------------\n\nControls\n------------------------------\n\nThe Game World\n==============================\n\nBackground Story\n------------------------------\nThe ambitious young ruler of a small kingdom, Jemima, is now leading his\narmies on a conquest of his neighboring kingdoms. Of course, there is\nalready dissent, and various rebel factions have sprung up in newly taken\nterritories, including the Kingdom of Marcellus, on the fringe of the\nHeartland (a coastal plain surrounded by water and mountains).\n\nCharacters\n------------------------------\nCordelia\n An inhabitant of the Kingdom of Marcellus, Cordelia is a newcomer\n from an unnamed overseas land. She arrived here seeking sanctuary\n from disease and strife in her native country...yet has found\n herself in the middle of Kyle's conquests.\n\nEmperor Kyle\n The young and ambitious ruler of the fast-growing Kingdom of Jemima.\n\nWorld & Locations\n------------------------------\n\nThe Heartland\n A coastal plain, surrounded on the east and south by endless water and\n on the north and west by insurmountable mountains. In the glory days of\n the Cordelian Empire, a few passes were maintained across these\n mountains, and sailing expeditions were occasionally sent to other\n lands. Even today, people occasionally arrive from overseas.\n\nThe Kingdom of Jemima\n Ruled by the ambitious Emperor Kyle, the most powerful of the various\n kingdoms inhabiting the Heartland occupies the center, controlling\n Athena's Lake and the former capital of the Cordelian Empire, Sil\n Rosaria.\n\n\n" }, { "alpha_fraction": 0.4951953887939453, "alphanum_fraction": 0.4958359897136688, "avg_line_length": 23.015384674072266, "blob_id": "7838444be3888e4e77ef5edf72208b09488a96e5", "content_id": "59caaee01174b9bc40a3955582f6394f3d7beadb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1561, "license_type": "no_license", "max_line_length": 105, "num_lines": 65, "path": "/doc/source/modules/common.rst", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "======================================\n:mod:`common` -- Utility functionality\n======================================\n\n.. automodule:: archon.common\n\nThe `denoter` utility\n======================================\n\n.. autoclass:: denoter\n :members:\n\n.. autoclass:: DenotedNotFoundError\n\n.. autoclass:: DenotedInvalidError\n\nThe `Merge` utility\n======================================\n\nPatch Format\n--------------------------------------\n\nA patch is simply a dictionary containing up to three keys: create, update,\nand delete. 
They define operations to perform on the root of the dictionary\nto patch.\n\nExample Source::\n\n {\n \"key_to_remove\": \"value\",\n \"update_this\": {\n \"subkey_to_remove\": 2,\n \"subkey_to_change\": \"this was changed\"\n }\n }\n\nExample patch::\n\n {\n \"delete\": [\"key_to_remove\"],\n \"create\": {\"i\": \"was created\"},\n \"update\": {\n \"update_this\": {\n \"delete\": [\"subkey_to_remove\"],\n \"create\": {\n \"subkey_to_change\": \"I changed!\",\n \"subkey_created\": \"I'm new!\"\n }\n }\n }\n }\n\nResult::\n\n >>> import archon.common\n >>> merge = archon.common.Merge(source, patch=patch)\n >>> merge.patched()\n {'i': 'was created', 'update_this': {'subkey_to_change': 'I changed!', 'subkey_created': \"I'm new!\"}}\n\n:create: A dictionary of keys and values to create.\n:update: A dictionary of keys and patches to apply.\n:delete: A list of keys to delete.\n\n.. autoclass:: Merge\n :members:\n" }, { "alpha_fraction": 0.5811209678649902, "alphanum_fraction": 0.5844395160675049, "avg_line_length": 32.15403366088867, "blob_id": "0331a236416d83782abc0184d947b4c3572a9c30", "content_id": "46a3ec7827876830324ca2d9956f617c6dd235b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13560, "license_type": "no_license", "max_line_length": 76, "num_lines": 409, "path": "/archon/commands.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import re\nimport sys\nimport ast\nimport inspect\nimport difflib\nimport datetime\nimport traceback\nimport collections\n\nimport archon.common\nimport archon.objects\n\n\nclass command(archon.common.denoter):\n \"\"\"\n Denote a function as an command.\n \"\"\"\n functions = {}\n commandData = collections.defaultdict(None)\n preExecute = archon.common.signal('command.preExecute')\n postExecute = archon.common.signal('command.postExecute')\n\n def __call__(self, func):\n def closure(output, context, player, *args):\n argspec = inspect.getfullargspec(func)\n if argspec.varargs in argspec.annotations:\n args = argspec.annotations[argspec.varargs](\n output, context, player, *args\n )\n if not isinstance(args, collections.Iterable):\n args = [args]\n self.__class__.preExecute.send(self)\n res = func(output, context, player, *args)\n self.__class__.postExecute.send(self)\n if not res:\n res = context\n return res\n\n closure.__name__ = func.__name__\n return super().__call__(closure)\n\n @property\n def data(self):\n return self.__class__.commandData[self.names[0]]\n\n @data.setter\n def data(self, value):\n self.__class__.commandData[self.names[0]] = value\n\n @classmethod\n def nearest(cls, name):\n return difflib.get_close_matches(name, list(cls.functions.keys()))\n\n\n# XXX always return an iterable - else the closure will wrap it in a list\ndef find(output, context, player, *args):\n matches = context.naturalFind(' '.join(args))\n if matches is None:\n return []\n elif isinstance(matches, set):\n result = []\n for key in matches:\n entity = context.allContents[key]\n result.append((entity, context.entityFor(key)))\n return result\n else:\n entity = context.allContents[matches]\n return [(entity, context.entityFor(matches))]\n\n\ndef findMulti(output, context, player, *args):\n criteria = [x.strip().split() for x in ' '.join(args).split(',')]\n return sum((find(output, context, player, *crit) for crit in criteria),\n [])\n\n\ndef findInventory(output, context, player, *args):\n '''Lookup an item by friendly name or location.'''\n try:\n return 
player.attributes.inventory.find(*args)\n except KeyError:\n raise output.error(\"Item not found.\")\n\n\ndef findEquip(output, context, player, *args):\n criterion = ' '.join(args)\n for slot, item in player.attributes.equip.items():\n if item and criterion in (slot, item.friendlyName):\n return slot, item\n return []\n\n\n@command('me')\ndef me(output, context, player, *args):\n command.get('describe')(output, context, player, 'me')\n for cmdName in ('vitals', 'stats', 'inventory', 'equip'):\n command.get(cmdName)(output, context, player)\n\n\n@command('vitals')\ndef vitals(output, context, player, *args):\n values = player.attributes.vitals\n maxVals = player.attributes.maxVitals\n output.display(\"Health: {}/{}\".format(values['health'],\n maxVals['health']))\n output.display(\"AP : {}/{}\".format(values['ap'], maxVals['ap']))\n\n\n@command('stats')\ndef stats(output, context, player, *args):\n attrs = player.attributes\n for acumenName in sorted(attrs.acumen):\n output.display(\"{name} acumen: {value}\", name=acumenName,\n value=attrs.acumen[acumenName])\n stats = attrs.stats[acumenName]\n for stat in sorted(stats):\n output.display(\n \"\\t{stat}: {value[0]:.2f} to {value[1]:.2f} multiplier\",\n stat=stat, value=stats[stat])\n output.display(\"\\n\")\n\n\n@command('inventory')\ndef inventory(output, context, player, *args):\n output.display(\n 'Inventory ({length})'.format(\n length=len(player.attributes.inventory)\n ))\n for loc, count, items in sorted(player.attributes.inventory.entities(),\n key=lambda k: k[0]):\n output.display('{}: {}'.format(items[0].friendlyName, count))\n output.display('')\n return context\n\n\n@command('equip')\ndef equip(output, context, player, *args):\n if not args:\n for slot, item in sorted(player.attributes.equip.items()):\n if item is None:\n item = '<Empty>'\n else:\n item = item.friendlyName\n output.display('{slot}: {item}'.format(\n slot=slot, item=item\n ))\n else:\n slot, criterion = ' '.join(args).split(':')\n try:\n inventory = player.attributes.inventory\n equip = player.attributes.equip\n item = inventory.find(*criterion.split())\n if not item.attributes.get('equip'):\n raise output.error('Cannot equip item.')\n possibleSlots = item.attributes['equip']\n if slot not in possibleSlots:\n output.display('Unsupported equip location.')\n raise output.error('Supported locations: ' +\n ', '.join(possibleSlots))\n if equip.get(slot):\n inventory.add(equip[slot])\n equip[slot] = item\n inventory.remove(item)\n except KeyError:\n raise output.error('Could not find item.')\n\n\n@command('unequip')\ndef unequip(output, context, player, *args: findEquip):\n slot, item = args\n player.attributes.equip[slot] = None\n player.attributes.inventory.add(item)\n output.display('Unequipped item {} from {}'.format(\n item.friendlyName, slot))\n\n\n@command('take')\ndef take(output, context, player, *item: find):\n if not item:\n output.error(\"You can't take that.\")\n elif len(item) > 1: # ambiguous reference\n raise output.error(\"What did you want to take?\")\n else:\n data, item = item[0]\n if not item.attributes.get(\"take\", False):\n raise output.error(\"You can't take that.\")\n player.attributes.inventory.add(item)\n context.remove(data.key)\n\n\nfunctionRe = re.compile(\n r'(?P<function>[a-zA-Z0-9]+)\\((?P<arguments>[\\S ]+)\\)'\n )\n\n\ndef parseFunction(data):\n result = functionRe.match(data)\n if result:\n result = result.groupdict()\n function, arguments = result['function'], result['arguments']\n args = [ast.literal_eval(x.strip()) for x in 
arguments.split(',')]\n return function, args\n return ('', '')\n\n\n@command('use')\ndef use(output, context, player, *item: find):\n '''Use an object.'''\n if not item or len(item) > 1:\n raise output.error(\"What did you want to use?\")\n data, item = item[0]\n if not 'use' in item.attributes:\n raise output.error(\"You can't use that.\")\n function, arguments = parseFunction(item.attributes['use'])\n if function == 'script':\n script = context.entityCache.lookup(arguments[0])\n try:\n script.execute('main', output, context, player)\n except: # yes, everything\n output.error(\"It doesn't work.\")\n if output.permissions.get('debug', False):\n output.error(traceback.format_exc())\n else:\n output.error(\"You have no idea how to use that.\")\n\n\n@command('go')\ndef go(output, context, player, *args):\n '''Go in the specified direction.'''\n direction = ' '.join(args)\n target = context.outputs.get(direction)\n if target:\n if target.area != context.area and target.area:\n output.display(target.area.description)\n context.attributes['time'] += datetime.timedelta(minutes=20)\n target.enter(context.exit())\n return command.get('describe')(output, target, player)\n else:\n output.error(\"You can't go that way.\")\n return context\n\n\n@command('enter')\ndef enter(output, context, player, *target: find):\n '''\n If the specified entity is a teleport (e.g. a door), use it.\n\n Also, create an entry in the history chain of visited rooms.\n '''\n if not target or len(target) > 1:\n raise output.error(\"Where did you want to enter?\")\n entityData, entity = target[0]\n for option in entityData.options:\n if option.startswith('to:'):\n target = option[3:].strip()\n target = context.entityCache.lookup(target)\n if output.question(\n \"Go to {}? \".format(target.friendlyName)\n ):\n if target.area != context.area:\n output.display(target.area.description)\n context.attributes['time'] += datetime.timedelta(minutes=20)\n target.enter(context.exit())\n return command.get('describe')(output, target, player)\n return context # we failed teleporting\n\n\n@command('exit', 'back')\ndef exit(output, context, player, *args):\n '''\n Return to the previous room, unless the history chain was reset.\n '''\n\n\n@command('describe')\ndef describe(output, context, player, *args):\n '''Describe the current room or the specified object.'''\n if not args:\n output.display(context.describe())\n elif args[0].lower() in ('me', 'myself'):\n output.display(player.description)\n else:\n items = find(output, context, player, *args)\n if not items:\n output.error(\"What did you want to describe?\")\n elif len(items) > 1:\n output.error(\"That was ambiguous. 
Did you mean:\")\n            for data, entity in items:\n                output.display(\"\\t{prefix} {identity}\".format(\n                    prefix=data[4],\n                    identity=data[1]\n                ))\n        else:\n            output.display(items[0][1].description)\n\n\n@command('quit', 'test.exit')\ndef quit(output, context, player, *args):\n    output.quit()\n\n\n@command('save')\ndef save(output, context, player, *args):\n    data = player.save()\n    output.display(player.location)\n    player.entityCache.save('player', data, immediately=True)\n    instances = collections.defaultdict(dict)\n    for kind in player.entityCache['instances']:\n        ds = player.entityCache['instances'][kind]\n        for key in ds:\n            entity = ds[key]\n            proto = entity.prototype\n            patch = archon.common.Merge(proto.attributes.save(),\n                                        entity.attributes.save()).compared()\n            if patch:\n                output.display(\"Saving entity \" + entity.location)\n                instances[proto.location][entity.location] = patch\n    instances = {\n        \"type\": \"metadata\",\n        \"data\": {\n            \"savegame_instances\": instances\n            }\n        }\n\n    stack = [context.entityCache.root]\n    patches = {}\n    while stack:\n        ds = stack.pop()\n        for key, thunk in ds.thunks.items():\n            if isinstance(thunk, archon.objects.Room):\n                _, originalData = ds.raw(key, format='.json')\n                merge = archon.common.Merge(originalData, thunk.save())\n                if merge.compared():\n                    output.display(\"Saving room \" + thunk.location)\n                    patches[thunk.location] = merge.compared()\n            elif isinstance(thunk, ds.__class__):\n                stack.append(thunk)\n    patches = {\n        \"type\": \"metadata\",\n        \"data\": {\n            \"savegame\": patches\n            }\n        }\n    player.entityCache.save(\"patches\", patches, immediately=True)\n    gameVars = {\n        \"type\": \"data\",\n        \"data\": {\n            \"lastRoom\": context.location\n            }\n        }\n    player.entityCache.save(\"gameVars\", gameVars, immediately=True)\n    output.display(\n        \"Save game created: {} objects saved\".format(len(patches) + 1))\n\n\n@command('help')\ndef help(output, context, player, *args):\n    '''Provides help to the player.'''\n    if args:\n        try:\n            output.display(\n                trimDocstring(command.get(args[0]).__doc__)\n            )\n        except archon.common.DenotedNotFoundError:\n            output.error('Command {} not found!'.format(args[0]))\n            close = command.nearest(args[0])\n            if close:\n                output.display('Did you mean: {}'.format(close[0]))\n    else:\n        output.display(STRING_HELP)\n\n\ndef trimDocstring(docstring):\n    '''\n    Trim the whitespace in a docstring as per PEP257.\n\n    The contents of PEP257, from which this function was taken, are in the\n    public domain.\n    '''\n    if not docstring:\n        return ''\n    # Convert tabs to spaces (following the normal Python rules)\n    # and split into a list of lines:\n    lines = docstring.expandtabs().splitlines()\n    # Determine minimum indentation (first line doesn't count):\n    indent = sys.maxsize\n    for line in lines[1:]:\n        stripped = line.lstrip()\n        if stripped:\n            indent = min(indent, len(line) - len(stripped))\n    # Remove indentation (first line is special):\n    trimmed = [lines[0].strip()]\n    if indent < sys.maxsize:\n        for line in lines[1:]:\n            trimmed.append(line[indent:].rstrip())\n    # Strip off trailing and leading blank lines:\n    while trimmed and not trimmed[-1]:\n        trimmed.pop()\n    while trimmed and not trimmed[0]:\n        trimmed.pop(0)\n    # Return a single string:\n    return '\\n'.join(trimmed)\n\n\nSTRING_HELP = '''\nWelcome to the Archon demo. Type 'help [name]' for help on a specific\ncommand or topic. 
If you're just starting, try 'describe' to see what your\narea is like.\n'''\n" }, { "alpha_fraction": 0.5857484340667725, "alphanum_fraction": 0.5876904726028442, "avg_line_length": 30.134883880615234, "blob_id": "bedfb59b82985ebe9bf68559a503609d409bf12f", "content_id": "93f1ea928d8b54e4cca6eea91678bbf8648bf324", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6694, "license_type": "no_license", "max_line_length": 76, "num_lines": 215, "path": "/demo/entityhooks.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import random\nimport collections\n\nimport archon.objects\n\n\nclass EnemyEntityHook(archon.objects.PlayerEntityHook):\n KIND = 'enemy'\n\n @property\n def friendlyName(self):\n return self.attributes['character']['name']\n\n @property\n def description(self):\n return self.attributes['character']['description']\n\n\nclass WeaponEntityHook(archon.objects.EntityHook):\n KIND = 'weapon'\n\n @property\n def effect(self):\n return self.attributes['effect']\n\n\nclass EffectMissed(Exception): pass\n\n\nclass NotEnoughAP(Exception): pass\n\n\nclass Effect:\n def __init__(self, hit, target, magnitude, turns, drain, messages):\n self.hit = hit\n self.target = target\n self.magnitude = magnitude\n self.turns = turns\n self.drain = drain\n self.messages = messages\n\n def message(self, name):\n message = self.messages.messages[name]\n return message.format(effect=self, **self.messages.objects)\n\n def apply(self, user, target):\n if self.drain <= user.attributes.vitals['ap']:\n user.attributes.damage(self.drain, 'vital', None, 'ap')\n if self.hit:\n return target.attributes.damage(\n self.magnitude, **self.target._asdict())\n else:\n raise EffectMissed\n else:\n raise NotEnoughAP\n\n def modify(self, other):\n \"\"\"\n Creates a new effect using this effect to modify the other.\n\n Messages and the :attr:`hit` flag are not affected.\n \"\"\"\n return Effect(\n other.hit,\n self.target,\n self.magnitude + other.magnitude,\n self.turns + other.turns,\n self.drain + other.drain,\n other.messages)\n\n def __repr__(self):\n a = \"<Effect {hit} {target} {magnitude} {turns} {drain} {messages}>\"\n return a.format(**self.__dict__)\n\n\nclass EffectTarget(collections.namedtuple('EffectTarget',\n 'category kind target')):\n @classmethod\n def viaString(cls, target):\n target = target.split(':')\n if len(target) == 2: # category-target with no kind\n return EffectTarget(target[0], None, target[1])\n elif len(target) == 3: # category-kind-target\n return EffectTarget(*target)\n else:\n raise ValueError(\n \"Improperly formatted effect target {}\".format(target))\n\nEffectMessage = collections.namedtuple('EffectMessage',\n 'messages objects')\n\n\nclass EffectEntityHook(archon.objects.EntityHook):\n KIND = 'effect'\n\n @classmethod\n def healingT(cls, magnitude, turns, targetAttr=None, **kwargs):\n \"\"\"\n Create a default healing effect from the \"heal\" template.\n \"\"\"\n template = cls.templates['heal']\n targetAttr = (EffectTarget.viaString(targetAttr) or\n template.attributes.target)\n return Effect(\n True, targetAttr, -magnitude, turns, 0,\n EffectMessage(template.attributes['message'], kwargs))\n\n @classmethod\n def fatigueT(cls, magnitude, turns, **kwargs):\n template = cls.templates['fatigue']\n return Effect(\n True, template.attributes.target, magnitude, turns, 0,\n EffectMessage(template.attributes['message'], kwargs))\n\n def instance(self, acumen, stats, **kwargs):\n return Effect(\n 
self.hits(stats['success']),\n self.target,\n self.magnitude(acumen),\n self.turns,\n self.drain(stats['drain']),\n EffectMessage(self.attributes['message'], kwargs)\n )\n\n def hits(self, multiplier):\n multiplier = random.uniform(*multiplier)\n return random.random() <= (multiplier * self.stats['success'])\n\n def magnitude(self, acumen):\n return random.randint(*self.stats['magnitude']) * (acumen / 20)\n\n def drain(self, multiplier):\n multiplier = random.uniform(*multiplier)\n return multiplier * self.stats['drain']\n\n def fatigue(self, multiplier, **kwargs):\n multiplier = random.uniform(*multiplier)\n return EffectEntityHook.fatigueT(\n multiplier * self.stats['fatigue'][0],\n self.stats['fatigue'][1],\n **kwargs\n )\n\n @property\n def effectKind(self):\n for kind in ('damage', 'heal'):\n if kind in self.attributes:\n return kind\n\n @property\n def stats(self):\n return self.attributes[self.effectKind]\n\n @property\n def target(self):\n return EffectTarget.viaString(self.stats['target'])\n\n def turns(self):\n # default not in data - templating requires always specifying this\n return self.stats.get('turns', 1)\n\n\nChatTopic = collections.namedtuple('ChatTopic', 'contents actions')\n\n\nclass Conversation:\n def __init__(self, npc, cache, *dialouge):\n self.npc = npc\n self.cache = cache\n self.dialouges = [self.cache.lookup(d).attributes for d in dialouge]\n topics = {}\n self._hidden = {}\n for dialouge in self.dialouges:\n topics.update(dialouge['visible'])\n self._hidden.update(dialouge['invisible'])\n self.topicIndex = list(topics.items())\n self.topicIndex.append((\"bye\", None))\n self.actions = {'visible': self.visible, 'script': self.script}\n\n def isEnd(self, choice):\n return choice == len(self.topicIndex) - 1\n\n def removeTopic(self, topic):\n for index, (t, _) in enumerate(self.topicIndex):\n if t == topic:\n del self.topicIndex[index]\n return\n\n def visible(self, output, context, player, *topics):\n self.topicIndex.pop() # remove \"bye\"\n topics = (\n t for t in topics if t in self._hidden and\n t not in self.topicIndex)\n for topic in topics:\n self.topicIndex.append((topic, self._hidden[topic]))\n del self._hidden[topic]\n self.topicIndex.append((\"bye\", None))\n\n def script(self, output, context, player, script, *args):\n s = self.cache.lookup(script)\n s.execute('main', output, context, player, self.npc, self, *args)\n\n @property\n def topics(self):\n return (t[0] for t in self.topicIndex)\n\n\nclass NPCEntityHook(archon.objects.MutableEntityHook):\n KIND = 'npc'\n\n def __init__(self, entity, attributes):\n super().__init__(entity, attributes)\n self.conversation = Conversation(self.entity,\n self.entity.entityCache,\n *self.attributes['dialouge'])\n" }, { "alpha_fraction": 0.5450743436813354, "alphanum_fraction": 0.5557620525360107, "avg_line_length": 44.787235260009766, "blob_id": "f410c5bedaf85f6a068ce554198fe8ddb0f6342d", "content_id": "2bf9c558e70a50cb65c21d665cdb77a0034b1c03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2152, "license_type": "no_license", "max_line_length": 74, "num_lines": 47, "path": "/demo/resources/data/scripts/customizerScript.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nclasses = {\n \"Hunter\": [{\"physical\": 50, \"mental\": 30, \"spiritual\": 30},\n {\"body\": \"data.items.armor.leather_body\",\n \"legs\": \"data.items.armor.leather_legs\",\n \"feet\": \"data.items.armor.leather_boots\",\n \"left hand\": 
\"data.items.weapons.iron_dagger\"},\n {}],\n \"Mage\": [{\"physical\": 30, \"mental\": 50, \"spiritual\": 30},\n {\"body\": \"data.items.armor.mage_blouse\",\n \"legs\": \"data.items.armor.mage_skirt\",\n \"feet\": \"data.items.armor.leather_boots\",\n \"left hand\": \"data.items.weapons.wooden_staff\"},\n {}],\n \"Swordfighter\": [{\"physical\": 50, \"mental\": 30, \"spiritual\": 30},\n {\"body\": \"data.items.armor.chain_body\",\n \"legs\": \"data.items.armor.chain_legs\",\n \"feet\": \"data.items.armor.chain_boots\",\n \"left hand\": \"data.items.weapons.iron_sword_short\"},\n {}]\n}\n\n\ndef main(output, context, player, npc, conversation):\n conversation.removeTopic('greetings')\n output.display('I see you appear to be a...')\n choices = list(classes.keys())\n prof = output.menu('[{key}]: {description}', '> ', 'Invalid choice.',\n *choices)\n acumen, equipment, inventory = classes[choices[prof]]\n player.attributes.acumen.update(acumen)\n equip = {slot: player.entityCache.lookup(loc)\n for slot, loc in equipment.items()}\n player.attributes.equip.update(equip)\n for loc, count in inventory:\n player.attributes.inventory.add(loc, quantity=count)\n statFormat = ' {stat}: {value[0]:.2} to {value[1]:.2} multiplier'\n stats = player.attributes.stats\n for trait in sorted(player.attributes.acumen):\n output.display('{trait}: {value}'.format(\n trait=trait, value=acumen[trait]\n ))\n for stat in sorted(stats[trait]):\n output.display(statFormat.format(\n stat=stat, value=stats[trait][stat]\n ))\n player.attributes.vitals.update(player.attributes.maxVitals)\n" }, { "alpha_fraction": 0.43937233090400696, "alphanum_fraction": 0.4436519145965576, "avg_line_length": 32.380950927734375, "blob_id": "28b1dea44731869d48f5bcd8e6a44032626cab74", "content_id": "9fbc4974d12ee0e547c519282dd5798bc5756b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 66, "num_lines": 21, "path": "/demo/resources/data/scripts/shop.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\ndef main(output, context, player, npc, conversation):\n if 'shop' in npc.attributes:\n buyMult, sellMult, inventory = npc.attributes['shop']\n while True:\n choice = output.menu(' {key}. {description}', ' > ',\n 'Invalid choice.',\n 'Buy Item', 'Sell Item', 'Exit')\n if choice == 0:\n while True:\n pass\n elif choice == 1:\n while True:\n pass\n else:\n output.display(\"Goodbye.\")\n return\n else:\n output.error(\"I'm afraid that I have nothing to sell.\")\n" }, { "alpha_fraction": 0.6579416990280151, "alphanum_fraction": 0.6579416990280151, "avg_line_length": 27.491525650024414, "blob_id": "410ec6e48099c926b800a3a298685f84e7d3e9f7", "content_id": "5ac026ca3f67c5794037f6d65fc008ee2e40f86c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1681, "license_type": "no_license", "max_line_length": 78, "num_lines": 59, "path": "/doc/source/modules/objects.rst", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "==================================\n:mod:`objects` -- Core objects\n==================================\n\n.. automodule:: archon.objects\n\nEntity Types\n==================================\n\nEntity Instantiation\n----------------------------------\n\n#. When created, the datastore looks for JSON files in its directory and\n inspects them. 
If they have the appropriate structure, the data is stored\n in a thunk for later loading.\n#. When code requests something from the datastore, that object is\n loaded. (See :doc:`datastore` for more details.) If that object is an\n entity, it is assumed to be a *prototype* and should not be modified.\n#. Any code that needs to use an entity should call :meth:`Entity.copy`. If\n the entity is mutable, this will create a shallow copy; else, the\n prototype itself will be returned.\n#. Mutable entities will be located in a special location: the `instances`\n datastore in the same datastore as the player entity. They will be saved or\n loaded to this location, so that changes are preserved.\n\n.. autoclass:: Entity\n :members:\n\n .. attribute:: prototype\n\n.. autoclass:: Room\n :members:\n\n .. attribute:: contents\n\n .. attribute:: outputs\n\n .. attribute:: allContents\n\n.. autoclass:: EntityKey\n\n.. autoclass:: EntityData\n\nEntity Hooks\n=================================\n\nWhen an entity is created, it searches for an entity hook with the same\n\"kind\" as the entity. Applications use these hooks to define special\nbehavior for different kinds of entities, such as characters, weapons, and\nconsumables.\n\n.. autoclass:: EntityHook\n :members:\n\n.. autoclass:: MutableEntityHook\n :members:\n\n.. autoclass:: PlayerEntityHook\n :members:\n" }, { "alpha_fraction": 0.34558823704719543, "alphanum_fraction": 0.34558823704719543, "avg_line_length": 26.200000762939453, "blob_id": "123ed7edaba885c4a48129e6e6709c604c0fb7e4", "content_id": "e31f46213b95d5cbb72e7d37e00ea53121f19333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 136, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/doc/source/modules/commands.rst", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "==================================\n:mod:`commands` -- Game commands\n==================================\n\n.. 
automodule:: archon.commands\n" }, { "alpha_fraction": 0.5670642852783203, "alphanum_fraction": 0.567307710647583, "avg_line_length": 30.478927612304688, "blob_id": "781b8caf52fd951d83cc131c63d5d266846ed383", "content_id": "8b7630cfa491c1f02838f56928520039228a040f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8216, "license_type": "no_license", "max_line_length": 77, "num_lines": 261, "path": "/archon/entity.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import copy\nimport collections\n\n\nclass EntityHookNotFoundError(Exception): pass\n\n\nclass EntityHook(collections.Mapping):\n \"\"\"\n Defines special behavior for the attributes of certain entity kinds.\n \"\"\"\n\n \"\"\"The `kind` of the entity.\"\"\"\n KIND = ''\n\n \"\"\"The templates for this entity kind.\"\"\"\n templates = {}\n mutable = False\n\n def __init__(self, entity, attributes):\n self.entity = entity\n self._attributes = attributes\n\n def __len__(self):\n return len(self._attributes)\n\n def __iter__(self):\n return self._attributes.__iter__()\n\n def __contains__(self, key):\n return key in self._attributes\n\n def __getitem__(self, key):\n \"\"\"Override for custom behavior.\"\"\"\n return self._attributes.__getitem__(key)\n\n def __setitem__(self, key, value):\n \"\"\"Override for custom behavior.\"\"\"\n return self._attributes.__setitem__(key, value)\n\n def __delitem__(self, key):\n \"\"\"Override for custom behavior.\"\"\"\n return self._attributes.__delitem__(key)\n\n def __repr__(self):\n return '{clsname}: Hook for {kind}'.format(\n clsname=self.__class__.__name__,\n kind=self.__class__.KIND)\n\n def copy(self):\n \"\"\"\n Create a copy of the attributes in the entity hook.\n\n By default, hooks are immutable, and so this method simply returns\n self.\n \"\"\"\n return self\n\n def save(self):\n \"\"\"\n Create a saveable copy of the attributes.\n \"\"\"\n return self.attributes.copy()\n\n @classmethod\n def getHook(cls, kind):\n \"\"\"\n Find an entity hook based on class kind.\n \"\"\"\n if cls.KIND == kind:\n return cls\n else:\n for subcls in cls.__subclasses__():\n try:\n return subcls.getHook(kind)\n except EntityHookNotFoundError:\n continue\n raise EntityHookNotFoundError(kind)\n\n @property\n def attributes(self):\n return self._attributes\n\n @property\n def description(self):\n \"\"\" Return the description of this entity. \"\"\"\n return 'No description.'\n\n @property\n def friendlyName(self):\n \"\"\"Return a user-friendly name for this entity.\"\"\"\n if 'friendlyName' in self:\n return self['friendlyName']\n else:\n return self.entity.name\n\n def viaTemplate(self, attributes):\n \"\"\"Format attributes using this entity as a template.\"\"\"\n # TODO use archon.common.Merge\n result = copy.deepcopy(self.attributes)\n stack = [(result, attributes)]\n while stack:\n dst, src = stack.pop()\n for key, data in src.items():\n if isinstance(data, dict):\n if key not in dst:\n dst[key] = {}\n stack.append((dst[key], data))\n else:\n dst[key] = data\n return result\n\n\nclass MutableEntityHook(EntityHook, collections.MutableMapping):\n \"\"\"\n A mutable entity hook.\n \"\"\"\n mutable = True\n\n def copy(self):\n \"\"\"\n Returns a shallow-copy of the attributes dictionary.\n \"\"\"\n return self.attributes.copy()\n\n\nclass Entity(object):\n \"\"\"\n The basic game object.\n\n Entities are usually not directly modified unless they have been copied\n first. 
When an entity is used in a room, a copy is created for that\n    particular area. This copy is stored in a different location in the\n    datastore: it is stored in the same datastore as the player under a\n    sub-datastore named \"instances\".\n    \"\"\"\n\n    \"\"\"The instances datastore in the player's datastore.\"\"\"\n    instances = None\n\n    def __init__(self, name, kind, cache, attributes={}, prototype=None,\n                 location=None):\n        \"\"\"\n        :param name: The name of the entity (the key in the datastore)\n        :param kind: The entity's kind (enemy, door, object, etc.)\n        :param attributes: The attributes for the entity (data).\n        :param prototype: The prototype of this entity.\n        :param location: If given, an alternate location in the datastore\n                         for the entity (used for instances).\n\n        Change the type of an entity when it needs special\n        loading/processing, as with a room, but the kind otherwise.\n        \"\"\"\n        self.name = name\n        self.kind = kind\n        self.entityCache = cache\n        self.prototype = prototype\n        self._location = location if location else cache\n        if issubclass(attributes.__class__, EntityHook):\n            self._attributes = attributes\n        else:\n            try:\n                kindhook = EntityHook.getHook(kind)\n                self._attributes = kindhook(self, attributes)\n            except EntityHookNotFoundError:\n                self._attributes = EntityHook(self, attributes)\n\n    def copy(self, instanced=True, name=None, attributes=None):\n        \"\"\"Perform a shallow copy if mutable, else return self.\n\n        :param instanced: If `True`, place the copy in Entity.instances\n                          under the datastore named after the entity kind.\n        :param name: (Implementation parameter.) Specify the name of the\n                     entity copy.\n        :param attributes: (Implementation parameter.) Specify the\n                           attribute dictionary of the copy.\n        \"\"\"\n        if self.mutable:\n            attributes = attributes if attributes else self.attributes.copy()\n            if instanced:\n                if self.kind not in Entity.instances:\n                    Entity.instances.create(self.kind)\n                instances = Entity.instances[self.kind]\n                if name: # datahandlers - loading an instance\n                    newName = name\n                elif instances:\n                    newName = max(int(key) for key in instances.keys()) + 1\n                else:\n                    newName = 0\n                entity = Entity(\n                    str(newName), self.kind, self.entityCache,\n                    attributes, prototype=self,\n                    location=instances)\n                instances.add(entity.name, entity)\n                print(\"Created instance of\", self.name)\n                return instances[str(newName)]\n            else:\n                return Entity(self.name + '_copy', self.kind,\n                              self.entityCache, attributes, prototype=self)\n        else:\n            return self\n\n    def __deepcopy__(self, memo):\n        return self.copy()\n\n    def save(self):\n        \"\"\"Return a dictionary containing all data to serialize.\"\"\"\n        return {\n            \"type\": \"entity\",\n            \"data\": {\n                \"kind\": self.attributes.KIND,\n                \"attributes\": self.attributes.save()\n                }\n            }\n\n    @property\n    def description(self):\n        \"\"\"The description for the entity.\"\"\"\n        return self.attributes.description\n\n    @property\n    def friendlyName(self):\n        \"\"\"The entity name for display purposes; defaults to name.\"\"\"\n        return self.attributes.friendlyName\n\n    @property\n    def attributes(self):\n        \"\"\"The attributes dictionary or entity hook.\"\"\"\n        return self._attributes\n\n    @attributes.setter\n    def attributes(self, value):\n        self._attributes.update(value)\n\n    @property\n    def entityCache(self):\n        \"\"\"\n        The datastore/cache this entity is located in.\n\n        .. 
warning:: Do NOT use this directly; use :meth:`Room.entityFor`\n            instead in most cases.\n        \"\"\"\n        return self._entityCache\n\n    @entityCache.setter\n    def entityCache(self, cache):\n        self._entityCache = cache\n\n    @property\n    def location(self):\n        \"\"\"The location of this entity in the datastore.\"\"\"\n        return '.'.join([self._location.fullName, self.name])\n\n    @property\n    def mutable(self):\n        \"\"\"Returns whether this entity is mutable or not.\"\"\"\n        return self.attributes.mutable and not self.prototype\n\n    def __repr__(self):\n        return \"<Entity '{}' name={} kind={}>\".format(\n            self.friendlyName, self.name, self.kind)\n" }, { "alpha_fraction": 0.5498080253601074, "alphanum_fraction": 0.5507115721702576, "avg_line_length": 32.28571319580078, "blob_id": "b437a4d3f458addcb68d8c9ed6193864d3f418bf", "content_id": "01a2a68510ba58297931610a09bfe33b90bafbed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4427, "license_type": "no_license", "max_line_length": 76, "num_lines": 133, "path": "/archon/interface.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import sys\nimport collections\nimport archon.commands\nimport archon.common\n\n\nclass RestartError(Exception): pass\n\n\nclass CommandExecutionError(Exception):\n    def __init__(self, message):\n        self.message = message\n\n\nclass Interface(object):\n    \"\"\"\n    Defines a player's interface to the game.\n\n    Ideally there should be default implementations so that only display,\n    restart, and prompt need be implemented (and perhaps repl).\n    \"\"\"\n    def __init__(self,\n                 permissions={'debug': False},\n                 messageTemplates=None,\n                 questionYes=('y', 'yes'),\n                 questionNo=('n', 'no'),\n                 replPrompt='{data}> '):\n        self.questionYes = questionYes\n        self.questionNo = questionNo\n        self.permissions = permissions\n        self.messageTemplates = messageTemplates\n        self._replPrompt = replPrompt\n        self.promptData = collections.OrderedDict()\n\n    def prompt(self, prompt):\n        pass\n\n    def question(self, question, annotate=True):\n        if annotate:\n            separator = ' ' if question.endswith(' ') else ''\n            question = separator.join([\n                question,\n                '[' + (', '.join(self.questionYes)) + ']',\n                '[' + (', '.join(self.questionNo)) + ']',\n                ''\n            ])\n        res = self.prompt(question).strip().lower()\n        if self.questionYes and res in self.questionYes:\n            return True\n        elif self.questionNo and res in self.questionNo:\n            return False\n        elif self.questionYes and self.questionNo:\n            return self.question(question, annotate=False)\n        else:\n            # If there is no yes-answer list, and the result is not in the\n            # no-answer list, then this returns False (anything not negative\n            # is True); similar for no no-answer list\n            return bool(self.questionYes)\n\n    def display(self, text, **kwargs):\n        pass\n\n    def error(self, error):\n        self.display(error)\n        return CommandExecutionError(error)\n\n    def restart(self, message=''):\n        if message:\n            self.display(message)\n        raise RestartError\n\n    def quit(self, message=''):\n        if message:\n            self.display(message)\n        sys.exit()\n\n    def menu(self, format, prompt, error, *choices, **keyChoices):\n        while True:\n            for index, choice in enumerate(choices):\n                self.display(format.format(key=index, description=choice))\n            for index, choice in sorted(keyChoices.items()):\n                self.display(format.format(key=index, description=choice))\n            choice = self.prompt(prompt).strip()\n            if (choices and choice.isnumeric() and\n                    0 <= int(choice) < len(choices)):\n                return int(choice)\n            elif choice in choices:\n                return 
list(choices).index(choice)\n            elif choice in keyChoices:\n                return choice\n            else:\n                self.error(error)\n\n    def repl(self, context, player, commands):\n        pass\n\n    @property\n    def replPrompt(self):\n        data = ' '.join([''.join(['{', name, '}'])\n                         for name in self.promptData.keys()])\n        prompt = self._replPrompt.format(data=data)\n        return prompt.format(**self.promptData)\n\n\nclass ConsoleInterface(Interface):\n    def prompt(self, prompt):\n        return input(prompt)\n\n    def display(self, text, **kwargs):\n        if kwargs:\n            text = text.format(**kwargs)\n        print(text)\n\n    def repl(self, context, player, commands):\n        lastCommand = ''\n        while True:\n            try:\n                self.promptData['time'] = context.attributes.timeString\n                cmd = self.prompt(self.replPrompt).split()\n                if cmd:\n                    lastCommand = cmd[0]\n                    cmd, args = commands.get(cmd[0]), cmd[1:]\n                    context = cmd(self, context, player, *args)\n            except RestartError:\n                return\n            except CommandExecutionError as e:\n                pass\n            except archon.common.DenotedNotFoundError:\n                self.error('That is not a valid command.')\n                close = commands.nearest(lastCommand)\n                if close:\n                    self.display('Did you mean:')\n                    self.display('\\n'.join(close))\n" }, { "alpha_fraction": 0.6126279830932617, "alphanum_fraction": 0.6126279830932617, "avg_line_length": 22.538461685180664, "blob_id": "49bef72779d42f4a339bc615e36a4eeaeb7fb575", "content_id": "a5cd44e4140127829e4112596e6be1d72f596b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 50, "num_lines": 26, "path": "/archon/scripting.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import archon.common\nimport archon.commands\n\n\ndef handles(signal):\n    def _handles(func):\n        archon.common.signal(signal).connect(func)\n        return func\n    return _handles\n\n\nclass Script:\n    baseNamespace = {\n        'handles': handles,\n        'command': archon.commands.command\n    }\n\n    def __init__(self, script):\n        self._script = script\n        self._namespace = Script.baseNamespace\n        exec(script, self._namespace)\n\n    def execute(self, name, *args, **kwargs):\n        return self.get(name)(*args, **kwargs)\n\n    def get(self, name):\n        return self._namespace[name]\n" }, { "alpha_fraction": 0.48695650696754456, "alphanum_fraction": 0.48963209986686707, "avg_line_length": 30.808509826660156, "blob_id": "8823ef3d9cfb71799550b44e7a9f4dffc5d58849", "content_id": "97ae4627a85ee78be7b0d19716de7a640fe91c83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 75, "num_lines": 47, "path": "/demo/datahandlers.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import re\nimport textwrap\nimport archon.datahandlers\n\nimport entityhooks\n\n\ndef parse(item):\n    try:\n        return float(item)\n    except ValueError:\n        if item.isnumeric():\n            return int(item)\n        return item\n\n\[email protected]('.chat')\ndef chatType(contents):\n    wrapper = textwrap.TextWrapper()\n    actionRegex = re.compile('@([\\w]+) *(.*)')\n    topics = {\"invisible\": {}, \"visible\": {}}\n    topic, text, actions = '', [], []\n    for line in contents.split('\\n'):\n        if line.startswith(' '):\n            line = line.strip()\n            if line.startswith('@'):\n                actions.append(line)\n            else:\n                text.append(line)\n        else:\n            if topic:\n                category = 'visible'\n                if '@invisible' in topic:\n                    category = 'invisible'\n                    topic = topic[:-10].strip()\n                text = wrapper.fill(' '.join(text))\n                parsedActions = []\n                for action in actions:\n                    groups = actionRegex.match(action).groups()\n                    
parsedActions.append(\n (groups[0],\n [parse(x.strip()) for x in groups[1].split(',')]))\n topics[category][topic] = entityhooks.ChatTopic(\n text, parsedActions)\n topic, text, actions = line.strip(), [], []\n return {\"type\": \"entity\",\n \"data\": {\"kind\": \"chat\", \"attributes\": topics}}\n" }, { "alpha_fraction": 0.5124572515487671, "alphanum_fraction": 0.5144113302230835, "avg_line_length": 36.907405853271484, "blob_id": "dd079dda7a0f86270c1d7a35f69255072be833e8", "content_id": "8d69daab33ed1d36122d24a8f7b59da6aa9dafa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2047, "license_type": "no_license", "max_line_length": 74, "num_lines": 54, "path": "/templatizer/templatize.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport argparse\n\nimport archon\nimport archon.common\nimport archon.datastore\n\n\ndef main():\n datastore = archon.datastore.GameDatastore('data/')\n if 'output' not in datastore:\n datastore.create('output')\n output = datastore['output']\n parser = argparse.ArgumentParser(\"Generate entities via a template.\")\n parser.add_argument('family', nargs=1, help='The item family.')\n parser.add_argument('types', nargs='+', help='Item types.')\n args = parser.parse_args()\n family = datastore['families'].raw(args.family[0], '.json')[1]\n types = []\n for t in args.types:\n types.append(datastore['types'].raw(t, '.json'))\n patches = {}\n for keys, patch in family['data']['item_types'].items():\n keys = keys.split(',')\n for key in keys:\n patches[key.strip()] = patch\n for name, t in types:\n if name in patches:\n patch = patches[name]\n stack = [(patch, t['data'])]\n while stack:\n current, target = stack.pop()\n for key, item in current.items():\n ty = type(item)\n if ty == dict:\n stack.append((item, target[key]))\n elif ty in (int, float):\n target[key] += item\n elif ty == list and len(item) == len(target[key]):\n target[key] = [x + y for x, y in\n zip(item, target[key])]\n for key, item in target.items():\n if type(item) == str:\n target[key] = item.format(**family['data'])\n output.save(family['data']['outputName'].format(type=name),\n t, immediately=True)\n print(\"Saved entity of type {name} of family {family}\".format(\n name=name, family=family['data']['name_l']))\n else:\n print(\"Skipped entity type {} (unsupported)\".format(name))\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6070607304573059, "alphanum_fraction": 0.6101806163787842, "avg_line_length": 36.13414764404297, "blob_id": "57978fc25cde47fcd35771cbcd627ce698e2141b", "content_id": "679699e2097ee0f2662cee3321f0284cbd6ca752", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6090, "license_type": "no_license", "max_line_length": 77, "num_lines": 164, "path": "/demo/battlecommands.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport random\nimport collections\n\nimport archon\nimport archon.objects\nimport archon.commands\nimport archon.common\nimport archon.interface\n\nimport entityhooks\n\n\nclass BattleEnded(Exception): pass\n\n\nclass battlecommand(archon.commands.command):\n functions = {}\n preExecute = archon.common.signal('battlecommand.preExecute')\n postExecute = archon.common.signal('battlecommand.postExecute')\n\n\nclass Battle:\n def __init__(self, output, scene, player, enemies):\n self.output = output\n self.scene = scene\n self.player = player\n 
self.enemies = enemies\n self.effects = collections.defaultdict(list)\n self.effects[player] = [\n entityhooks.EffectEntityHook.healingT(\n player.attributes.vitals['ap'] / 30, -1,\n 'vital:ap', target=player\n )]\n\n def applyEffects(self, target, targetT):\n for effect in self.effects.get(target, []):\n if effect.hit:\n target.attributes.damage(effect.magnitude,\n **effect.target._asdict())\n self.output.display(effect.message('success'))\n effect.turns -= 1\n if effect.turns == 0: # negative value -> infinite turns\n self.effects[target].remove(effect)\n\n def playerTurn(self):\n self.scene.attributes['turn'] += 1\n for enemy in self.enemies:\n self.output.display('{}: {:.1f} HP'.format(\n enemy.friendlyName,\n enemy.attributes.vitals['health']\n ))\n vitals = self.player.attributes.vitals\n self.output.promptData.update(\n turn='Turn {}'.format(self.scene.attributes['turn']),\n hp='HP: {:.1f}'.format(vitals['health']),\n ap='AP: {:.1f}'.format(vitals['ap'])\n )\n self.applyEffects(self.player, 'second_person')\n\n def enemyTurn(self):\n for enemy in self.enemies:\n self.applyEffects(enemy, 'third_person')\n if enemy.attributes.vitals['health'] <= 0:\n self.enemies.remove(enemy)\n self.output.display(enemy.friendlyName + ' died.')\n args = archon.commands.parseFunction(\n enemy.attributes.character['ai'])[1]\n script = enemy.entityCache.lookup(args[0])\n script.execute(args[1],\n self.output,\n self.scene,\n performAttacks,\n enemy,\n self.player)\n if not self.enemies:\n self.output.display(\"You win!\")\n raise BattleEnded\n\n def run(self):\n olddata = self.output.promptData.copy()\n self.output.promptData.clear()\n # postExecute is not run in case of error\n battlecommand.postExecute.connect(\n lambda cmd, **args: (self.enemyTurn(), self.playerTurn()),\n weak=False)\n if random.random() > 0.5: self.enemyTurn() # surprised!\n try:\n self.output.repl(self.scene, self.player, battlecommand)\n except BattleEnded:\n self.output.promptData.clear()\n self.output.promptData.update(olddata)\n self.scene.clearContents()\n\n\nenemy = archon.commands.find\n# TODO: also lookup by index (for duplicate enemies)\n\n\[email protected]('fight')\ndef fight(output, context, player, *enemies: archon.commands.findMulti):\n if not enemies:\n raise output.error(\"You need to fight something.\")\n scene = context.entityCache.lookup(\n context.area.attributes['battleScene']\n )\n # TODO fallback for no area\n scene.attributes['turn'] = 0\n scene.entityCache = context.entityCache\n enemyList = []\n for data, enemy in enemies:\n if enemy.kind != 'enemy':\n raise output.error(\"You can't fight that.\")\n enemy.attributes.vitals.update(enemy.attributes.maxVitals)\n enemyList.append(enemy)\n scene.add(data.objectLocation, data.key, data.location,\n data.description, data.prefix, data.options, enemy)\n battle = Battle(output, scene, player, enemyList)\n battlecommand('battle').data = battle\n battle.run()\n for data, enemy in enemies:\n context.remove(data.key)\n output.display('Battle ended.')\n\n\n@battlecommand('wait', 'skip')\ndef wait(output, context, player):\n output.display(\"You do nothing.\")\n\n\n@battlecommand('attack')\ndef attack(output, context, player, *target: enemy):\n if len(target) > 1 or not target:\n raise output.error(\"You can attack exactly one enemy at a time.\")\n data, target = target[0]\n weapons = []\n for slot in ('left hand', 'right hand'):\n if slot in player.attributes.equip and player.attributes.equip[slot]:\n weapons.append(player.attributes.equip[slot])\n if not weapons:\n 
raise output.error(\"You need a weapon equipped to attack!\")\n stats = player.attributes.stats['physical']\n physicalAcumen = player.attributes.acumen['physical']\n performAttacks(output, context, player, target, 'physical', *weapons)\n\n\ndef performAttacks(output, context, user, target, acumenType, *weapons):\n stats = user.attributes.stats[acumenType]\n acumen = user.attributes.acumen[acumenType]\n for weapon in weapons:\n wEffect = weapon.attributes.effect.attributes\n effect = wEffect.instance(acumen, stats, user=user, target=target)\n for item in target.attributes.equip.values():\n if item and item.kind == 'armor':\n item.attributes.modifier.modify(effect)\n try:\n realDamage = effect.apply(user, target)\n battlecommand('battle').data.effects[user].append(\n wEffect.fatigue(stats['fatigue'], target=user))\n output.display(effect.message('success'))\n except entityhooks.EffectMissed:\n output.display(effect.message('failure'))\n except entityhooks.NotEnoughAP:\n output.display(effect.message('insufficient_ap'))\n" }, { "alpha_fraction": 0.5896115899085999, "alphanum_fraction": 0.5903915166854858, "avg_line_length": 32.742103576660156, "blob_id": "6b9478ea8b3afb0574ed0e89acb67aa2147cdb69", "content_id": "d18433c8cdc4399544f8938dfb58badfe6cc7388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6411, "license_type": "no_license", "max_line_length": 76, "num_lines": 190, "path": "/archon/datahandlers.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import json\nimport warnings\n\nimport archon.common\nimport archon.objects\nimport archon.scripting\n\n\n# Types dictate loading, kind denotes semantic data (\"room\" vs \"indoors\")\nclass dataloader(archon.common.denoter):\n \"\"\"Denotes a function that takes JSON and creates an object.\"\"\"\n functions = {}\n\n\nclass dataparser(archon.common.denoter):\n \"\"\"Denotes a function that converts a file to a JSON-like representation\n of the data.\"\"\"\n functions = {}\n\n\nclass JSONDiff:\n def __init__(self):\n pass\n\n\n@dataparser('.json')\ndef jsonType(contents):\n try:\n data = json.loads(contents)\n assert 'type' in data\n assert 'data' in data\n return data\n except ValueError:\n warnings.warn(\"Error loading JSON data!\",\n RuntimeWarning, stacklevel=2)\n return None\n except AssertionError:\n warnings.warn('JSON data is not well-formed!',\n RuntimeWarning, stacklevel=2)\n return None\n\n\n@dataparser('.py')\ndef pythonType(contents):\n return {\"type\": \"script\", \"data\": contents}\n\n\n@dataloader('metadata')\ndef metadata(key, data, cache):\n for kind, data in data.items():\n if kind == \"entity_templates\":\n for entityKind, templates in data.items():\n try:\n ehook = archon.objects.EntityHook.getHook(entityKind)\n for key, template in templates.items():\n templates[key] = cache.lookup(template)\n ehook.templates = templates\n except archon.objects.EntityHookNotFoundError:\n warnings.warn(entityKind +\n \" entity hook not found for templating!\")\n elif kind == \"metadata\":\n for path in data:\n cache.lookup(path) # side effect is what matters here\n elif kind == \"savegame\":\n for contentPath, patch in data.items():\n thunk = cache.root.thunkFor(contentPath)\n data = archon.common.Merge(thunk.data, patch=patch)\n thunk.data = data.patched()\n elif kind == \"savegame_instances\":\n for proto, instances in data.items():\n if instances:\n proto = cache.lookup(proto)\n for iname, patch in instances.items():\n attributes = 
proto.attributes.copy()\n                        merge = archon.common.Merge(attributes, patch=patch)\n                        attributes = merge.patched()\n                        instance = proto.copy(name=iname, attributes=attributes)\n        else:\n            warnings.warn(kind + \" metadata kind not recognized!\")\n    return data\n\n\n@dataloader('entity')\ndef entity(key, data, cache):\n    kind = data['kind']\n    attributes = data['attributes']\n    for attr, value in attributes.items():\n        if (isinstance(value, dict) and\n            'template' in value and 'data' in value):  # embedded template\n            try:\n                template = cache.lookup(value['template']).copy()\n                # deal with mutables, use templating mechanism\n                template.attributes.attributes.update(\n                    template.attributes.viaTemplate(value['data']))\n                # XXX this would be more resilient if it recursed into\n                # subvalues so that they could also be used as defaults\n                attributes[attr] = template\n            except KeyError:  # didn't find entity\n                warnings.warn(\n                    \"Error templating {}\".format(value['template']),\n                    RuntimeWarning, stacklevel=2\n                )\n    entity = archon.objects.Entity(key, kind, cache, data['attributes'])\n    return entity\n\n\n@dataloader('area')\ndef area(key, data, cache):\n    name = data['name']\n    attributes = {'name': name}\n    attributes.update(data['attributes'])\n    area = archon.objects.Entity(key, 'area', cache, attributes)\n    area.entityCache = cache\n    return area\n\n\n@dataloader('room')\ndef room(key, data, cache):\n    description = data['describe']\n    room = archon.objects.Room(key, description, cache)\n\n    for name, val in data['attributes'].items():\n        room.attributes[name] = val\n\n    contents = []\n    for eKey, eData in data['contents'].items():\n        entityInfo = {}\n        entityLocation = eData['entity']\n\n        if ',' in eKey:\n            eKey, prefix = eKey.split(',')\n            prefix = prefix.strip()\n            eKey = archon.objects.EntityKey(eKey, prefix)\n            entityInfo['prefix'] = prefix\n\n        if 'options' in eData:\n            eData['options'] = eData['options'].split(',')\n        entityInfo.update(eData)\n        del entityInfo['entity']  # this key doesn't need to be there\n        contents.append((entityLocation, eKey, entityInfo))\n    ids = [eKey for _, eKey, _ in contents]\n    for eLocation, eKey, eInfo in contents:\n        # identity (or key) is not unique, no prefix and the key collides\n        # with an identity\n        if 'prefix' not in eInfo and ids.count(eKey) > 1:\n            # we need to generate a prefix\n            eInfo['prefix'] = ('yet another ' *\n                               (ids.count(eKey) - 1)).strip()\n\n    for eLocation, eKey, eInfo in contents:\n        room.add(eLocation, eKey, **eInfo)\n\n    # Load the area if present.\n    if 'area' in cache:\n        room.area = cache['area']\n\n    # Unlike the others, this MUST be here to break circular references when\n    # loading rooms (although the thunk is present in the cache,\n    # dereferencing it will cause a loop where we continually reload the\n    # same room)\n    cache.add(key, room)\n\n    for direction, target in data['outputs'].items():\n        try:\n            troom = cache.lookup(target)\n            room.addRoom(direction, troom)\n        except KeyError:\n            raise ValueError(\"Room {} not found!\".format(target))\n    return room\n\n\n@dataloader('data')\ndef data(key, data, cache):\n    \"\"\"\n    Loads unstructured JSON data, essentially.\n    \"\"\"\n    # Possibly look for \"#reference(key)\" strings and replace them so that\n    # links to other data files can be made?\n    return data\n\n\n@dataloader('script')\ndef script(key, data, cache):\n    \"\"\"Loads a Python script.\"\"\"\n    return archon.scripting.Script(compile(data, '<string>', 'exec'))\n\n\n@dataloader('diff')\ndef diff(key, data, cache):\n    \"\"\"Creates a diff object that patches entities.\"\"\"\n" }, { "alpha_fraction": 
0.5105633735656738, "alphanum_fraction": 0.5105633735656738, "avg_line_length": 17.933332443237305, "blob_id": "0acb91f26e938e1ae8c2f655bb2cf75ab9a9c500", "content_id": "4ceb7a759121cbfa423d76e6b2c608999ab70cbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 284, "license_type": "no_license", "max_line_length": 34, "num_lines": 15, "path": "/doc/source/modules/datastore.rst", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "==================================\n:mod:`datastore` -- Game database\n==================================\n\n.. automodule:: archon.datastore\n\n.. autoclass:: Datastore\n\n.. autoclass:: GameDatastore\n    :members:\n    :undoc-members:\n\n.. autoclass:: DataThunk\n    :members:\n    :undoc-members:\n" }, { "alpha_fraction": 0.6254901885986328, "alphanum_fraction": 0.6284313797950745, "avg_line_length": 34.17241287231445, "blob_id": "334903abff5ecfe871133878a3e8adc1cfe25655", "content_id": "f01e4f84c4741088039f39fd77cb0618950e6022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/demo/gamecommands.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "import collections\nimport archon\nimport archon.objects\nimport archon.commands\n\nfrom archon.commands import find, command\n\n\n@command('chat', 'talk')\ndef chat(output, context, player, *npc: find):\n    if not npc:\n        raise output.error(\"Who did you want to talk to?\")\n    elif len(npc) > 1:\n        raise output.error(\"You can only talk to one NPC at a time.\")\n    data, npc = npc[0]\n    if not npc.attributes.get(\"dialogue\"):\n        raise output.error(\"That person has nothing to say.\")\n    conversation = npc.attributes.conversation\n    while True:\n        choice = output.menu('{key}. {description}', '> ', 'Invalid topic.',\n                             *conversation.topics)\n        if conversation.isEnd(choice):\n            break\n        choice = conversation.topicIndex[choice][1]\n        output.display(choice.contents)\n        output.display(\"\")\n        for action, params in choice.actions:\n            conversation.actions.get(action, lambda *args: None)(\n                output, context, player, *params)\n" }, { "alpha_fraction": 0.5610799193382263, "alphanum_fraction": 0.5632787942886353, "avg_line_length": 30.12547492980957, "blob_id": "efdae75a71b95a840025a551b289d988f164d7d6", "content_id": "271fd0b2081b7282acb2d1715ef8c598c39685ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8186, "license_type": "no_license", "max_line_length": 75, "num_lines": 263, "path": "/archon/datastore.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "\"\"\"\nDefines the game database, a mapping from JSON to game objects.\n\"\"\"\n\nimport os\nimport json\nimport types\nimport collections\n\nimport archon.objects\nimport archon.datahandlers\n\n\nclass DataThunk:\n    \"\"\"\n    Represents an unloaded object.\n    \"\"\"\n    def __init__(self, ds, key, data):\n        self.ds = ds\n        self.key = key\n        self.data = data\n\n    def __call__(self):\n        # for code that checks with callable()\n        item = self.ds.load(self.key, self.data)\n        self.ds._didLoad[self.key] = True\n        self.ds._cache[self.key] = item\n        return item\n\n    evaluate = __call__\n\n\n# TODO possibly use super() as described\n# http://rhettinger.wordpress.com/2011/05/26/super-considered-super/ to\n# implement mixin classes for datastore storage and type (e.g. 
a file-based\n# storage mechanism and a JSON type, or a DB and binary) Probably not\n# needed, though\nclass Datastore:\n \"\"\"\n The basic Datastore class.\n \"\"\"\n\n def __init__(self, parent=None):\n pass\n\n def add(self, key, item):\n pass\n\n def remove(self, key):\n pass\n\n def __getitem__(self, key):\n pass\n\n @property\n def root(self):\n pass\n\n\nclass GameDatastore(Datastore):\n \"\"\"\n A lazy, mostly-read datastore.\n\n This datastore, when created, will scan its directory for possible game\n files (any that have a file extension specified by\n archon.datahandlers.dataparser), but will not load them until an object\n requests them. After loaded, the datastore will continue to hold on to\n the object. No changes made will be saved unless the save() method is\n called.\n \"\"\"\n\n def __init__(self, path, parent=None):\n self._path = os.path.abspath(path)\n self._name = os.path.basename(os.path.normpath(path))\n # normpath deals with trailing slash, basename gets directory name\n self.parent = parent\n self._cache = {}\n self._didLoad = collections.defaultdict(lambda: False)\n self._shouldSave = set()\n # don't add myself - my parent takes care of it\n for fname in os.listdir(self._path):\n fullpath = os.path.join(self._path, fname)\n if os.path.isfile(fullpath):\n key, data = self.raw(fname)\n if data and archon.datahandlers.dataloader.contains(\n data['type']\n ):\n self.add(key, DataThunk(self, key, data))\n elif os.path.isdir(fullpath):\n child = self.__class__(fullpath, self)\n self.add(child.name, child)\n\n def load(self, key, data):\n \"\"\"\n Load the given data. This is an internal method!\n \"\"\"\n objtype = data['type']\n objdata = data['data']\n if archon.datahandlers.dataloader.contains(objtype):\n obj = archon.datahandlers.dataloader.get(objtype)(\n key,\n objdata,\n self\n )\n return obj\n\n def save(self, key, data=None, immediately=False):\n \"\"\"\n Save the data to disk, or if the key exists, mark it for saving.\n\n By default, GameDatastore will not save any changes to disk. If\n changes should be saved, then call this method to mark it for\n saving. The data will not be immediately saved unless otherwise\n specified; instead, it will be saved at the end of the datastore's\n lifetime. This method can also add a new object to the database at\n runtime.\n \"\"\"\n self._shouldSave.add(key)\n if data:\n self.add(key, data)\n if immediately:\n json.dump(data,\n open(os.path.join(self._path, key + '.json'), 'w'),\n indent=1,\n cls=EntityJSONEncoder)\n\n def add(self, key, item):\n \"\"\"Add an item into the datastore.\"\"\"\n self._cache[key] = item\n if not isinstance(item, DataThunk):\n self._didLoad[key] = True # Strict add\n\n def remove(self, key):\n \"\"\"Remove an item from the datastore.\"\"\"\n del self._cache[key]\n\n def keys(self):\n return self._cache.keys()\n\n def create(self, key):\n \"\"\"Create a sub-datastore with the given name.\"\"\"\n assert key not in self\n fullpath = os.path.join(self._path, key)\n os.mkdir(fullpath)\n child = self.__class__(fullpath, self)\n self.add(child.name, child)\n return child\n\n def raw(self, key, format=None):\n \"\"\"Returns the raw dict object loaded by the datastore.\n\n If `key` is a filename, pass in a format of `None`. 
Else, `format`\n should be a file extension (with period).\"\"\"\n if not format:\n key, format = os.path.splitext(key)\n fullpath = os.path.join(self._path, key + format)\n if os.path.isfile(fullpath):\n if archon.datahandlers.dataparser.contains(format):\n loader = archon.datahandlers.dataparser.get(format)\n f = open(fullpath)\n data = loader(f.read())\n f.close()\n if not data:\n raise ValueError('Error loading data from ' + fullpath)\n return key, data\n else:\n raise ValueError(\n 'Format {} unsupported (key {} in {})'.format(\n format, key, self.name))\n else:\n raise IOError('No such key {} (format {}) in {}'.format(\n key, format, self.name))\n\n @property\n def name(self):\n \"\"\"The name of this datastore (the folder name).\"\"\"\n return self._name\n\n @property\n def fullName(self):\n \"\"\"The full name: the name of the parent with my name.\"\"\"\n names = [self.name]\n current = self\n while current.parent:\n names.append(current.parent.name)\n current = current.parent\n return '.'.join(reversed(names))\n\n @property\n def root(self):\n \"\"\"The root, which contains all other datastores.\"\"\"\n if self.parent:\n return self.parent.root\n else:\n return self\n\n @property\n def isRoot(self):\n return not self.parent\n\n @property\n def thunks(self):\n return self._cache\n\n def lookup(self, key):\n \"\"\"Convenience function: try relative, then absolute.\"\"\"\n if key in self:\n return self[key]\n else:\n return self.root[key]\n\n def __iter__(self):\n for key in self._cache:\n yield key\n\n def datastoreFor(self, key):\n \"\"\"Find the containing datastore of the given key.\"\"\"\n if self.isRoot and key.split('.', 1)[0] == self.name:\n return self.datastoreFor(key.split('.', 1)[1])\n elif key.startswith('.'): # absolute lookup\n return self.root.datastoreFor(key[1:])\n elif '.' in key:\n key, subkey = key.split('.', 1)\n # get the parent datastore, then the datastore itself\n return self.datastoreFor(key)[1][key].datastoreFor(subkey)\n else:\n return key, self\n\n def thunkFor(self, key):\n \"\"\"Get the thunk if possible, else the object/datastore.\"\"\"\n key, ds = self.datastoreFor(key)\n if key not in ds._cache:\n raise KeyError(key)\n return ds._cache[key]\n\n def fullPathFor(self, key):\n return '.'.join([self.datastoreFor(key).fullName,\n key.split('.')[-1]])\n\n def __getitem__(self, key):\n thunk = self.thunkFor(key)\n if isinstance(thunk, DataThunk):\n thunk = thunk.evaluate()\n return thunk\n\n def __contains__(self, key):\n if '.' 
in key:\n key, subkey = key.split('.', 1)\n return key in self._cache and subkey in self[key]\n else:\n return key in self._cache\n\n def __bool__(self):\n return bool(self._cache)\n\n\nclass EntityJSONEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, archon.objects.Entity):\n # TODO check for mutable entities\n return o.location\n else:\n return super().default(o)\n" }, { "alpha_fraction": 0.5332751870155334, "alphanum_fraction": 0.5347282886505127, "avg_line_length": 39.011627197265625, "blob_id": "666c52b9d757d1b6470dc64d4f5c01b13f9fe292", "content_id": "148083ad64cd703928432ad5d47307df29caacaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3441, "license_type": "no_license", "max_line_length": 76, "num_lines": 86, "path": "/test/formatting.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport unittest\n\nimport archon\nimport archon.datastore\n\n\nclass TestFormatting(unittest.TestCase):\n def setUp(self):\n self.ds = archon.datastore.GameDatastore('data')\n self.messages = self.ds['formatting']['templates'].attributes\n\n def test_function(self):\n res = self.messages.format('third_person_female',\n '{test@prepend(\"b\")}', test='a')\n self.assertEqual(res, 'ba')\n res = self.messages.format('third_person_female',\n '{test@drop}', test='a')\n self.assertEqual(res, '')\n self.assertRaises(ValueError, self.messages.format,\n 'third_person_female',\n '{test@!@#@}', test='a')\n self.assertRaises(ValueError, self.messages.format,\n 'third_person_female',\n '{test@i_do_not_exist}', test='a')\n\n def test_method(self):\n res = self.messages.format('third_person_female',\n '{[email protected]}', test='a')\n self.assertEqual(res, 'A')\n self.assertRaises(AttributeError, self.messages.format,\n 'third_person_female',\n '{[email protected]_do_not_exist}', test='a')\n\n def test_predicate(self):\n res = self.messages.format('third_person_female',\n '{test@empty}', test='')\n self.assertEqual(res, '')\n res = self.messages.format('third_person_female',\n '{test@!empty}', test='')\n self.assertEqual(res, '')\n\n def test_composition(self):\n res = self.messages.format('third_person_female',\n '{test@drop + !empty}', test='a')\n self.assertEqual(res, '')\n res = self.messages.format('third_person_female',\n '{[email protected] + prepend(\"a\")}', test='b')\n self.assertEqual(res, 'AB')\n\n def test_regression_adjacent_directives(self):\n res = self.messages.format('third_person_female',\n '{first}{second@prepend(\"a\")}', first=2,\n second=3)\n self.assertEqual(res, '2a3')\n\n\nclass TestMessages(unittest.TestCase):\n def setUp(self):\n self.ds = archon.datastore.GameDatastore('data')\n self.messages = self.ds['formatting']['templates'].attributes\n self.friendlyName = 'Cordelia'\n\n def test_third_female(self):\n res = self.messages.format('third_person_female',\n '{noun}', user=self)\n self.assertEqual(res, self.friendlyName)\n res = self.messages.format('third_person_female',\n '{noun} {to_be.present} {possessive}',\n user=self)\n self.assertEqual(res, \"Cordelia is Cordelia's\")\n\n\nclass TestFormattedMessages(unittest.TestCase):\n def setUp(self):\n self.ds = archon.datastore.GameDatastore('data')\n self.messages = self.ds['formatting']['templates'].attributes\n self.friendlyName = 'Cordelia'\n\n def test_first_person(self):\n text = 'Hello! 
{[email protected]} {to_be.present} {noun}.'\n res = self.messages.format('first_person', text, user=self)\n self.assertEqual(res, \"Hello! I am Cordelia.\")\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5573703646659851, "alphanum_fraction": 0.5675786137580872, "avg_line_length": 36.10606002807617, "blob_id": "ccd747d5808aa003b802a3c04032c011d275ab3c", "content_id": "fdb99e7851731036cbbd69e187d524e83e1c4c92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2449, "license_type": "no_license", "max_line_length": 74, "num_lines": 66, "path": "/demo/game.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport sys\nimport uuid\nimport base64\nimport datetime\n\nimport archon\nimport archon.common\nimport archon.datahandlers\nimport archon.datastore\nimport archon.objects\nimport archon.interface\nimport archon.commands\n\nimport datahandlers\nimport gamecommands\nimport battlecommands\nimport entityhooks\n\nif __name__ == '__main__':\n ds = archon.datastore.GameDatastore('resources')\n data = ds['data']\n save = ds['save']\n metadata = data['metadata'] # load the metadata\n room = None\n\n interface = archon.interface.ConsoleInterface(\n permissions={'debug': True},\n messageTemplates=data['messages']['templates']\n )\n\n while True:\n interface.display('Welcome to the demo.')\n interface.display('Choose an option:')\n choice = interface.menu('[{key}]: {description}', '> ',\n 'Invalid choice.',\n 'New Game', 'Load', 'Quit')\n if choice == 0:\n room = data['areas.marcellus.marcellia.square']\n player = archon.objects.PlayerEntityHook.defaultInstance()\n player.name = base64.urlsafe_b64encode(\n uuid.uuid4().bytes).decode('utf-8')\n player.entityCache = save.create(player.name)\n player.entityCache.create(\"instances\")\n elif choice == 1:\n players = {}\n for key in save.keys():\n savegame = save[key]\n player = savegame.raw('player', '.json')[1]\n player = player['data']['attributes']['character']['name']\n gameVars = savegame.raw('gameVars', '.json')[1]\n roomName = gameVars['data']['lastRoom'].rsplit('.', 1)[1]\n players[\"{} ({})\".format(player, roomName)] = key\n choice = interface.menu('[{key}]: {description}', '> ',\n 'Invalid save file.',\n *players.keys())\n savegame = save[players[list(players.keys())[choice]]]\n gameVars = savegame['gameVars']\n player = savegame['player']\n patches = savegame['patches']\n room = data.lookup(gameVars['lastRoom'])\n elif choice == 2:\n sys.exit()\n archon.objects.Entity.instances = player.entityCache['instances']\n room.enter(datetime.datetime(1000, 1, 1, 12, 0))\n interface.repl(room, player, archon.commands.command)\n" }, { "alpha_fraction": 0.63922518491745, "alphanum_fraction": 0.63922518491745, "avg_line_length": 40.29999923706055, "blob_id": "9e3d1a5a6c3a37857285c7e352d7d63c0bd36647", "content_id": "f7da0138f505e59ed4cf08cfb9bbd904ec1753a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 76, "num_lines": 10, "path": "/demo/resources/data/scripts/combat_ai.py", "repo_name": "lidavidm/archon", "src_encoding": "UTF-8", "text": "def basic_ai(output, context, performAttacks, enemy, player):\n weapons = []\n for slot in ('left hand', 'right hand'):\n if enemy.attributes.equip.get(slot):\n weapons.append(enemy.attributes.equip[slot])\n if weapons:\n output.display(\"Attack!\")\n performAttacks(output, 
context, enemy, player, 'physical', *weapons)\n else:\n output.display(\"The enemy does nothing.\")\n" } ]
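The archon records above share one load-bearing pattern: GameDatastore never parses a file until something dereferences it, wrapping each raw record in a DataThunk that parses once and then memoizes the result in the cache. The standalone sketch below restates that wrap-then-memoize idea with illustrative names and a stand-in loader; it does not use archon's actual classes.

    class LazyThunk:
        def __init__(self, store, key, raw):
            self.store, self.key, self.raw = store, key, raw

        def __call__(self):
            # Parse on first access, then memoize the result in the cache.
            value = self.store.load(self.key, self.raw)
            self.store.cache[self.key] = value
            return value


    class LazyStore:
        def __init__(self, raw_items):
            # Wrap every raw record in a thunk; nothing is parsed yet.
            self.cache = {key: LazyThunk(self, key, raw)
                          for key, raw in raw_items.items()}

        def load(self, key, raw):
            return {'key': key, 'parsed': raw}  # stand-in for a real loader

        def __getitem__(self, key):
            item = self.cache[key]
            return item() if isinstance(item, LazyThunk) else item


    store = LazyStore({'room': '{"describe": "..."}'})
    assert store['room']['key'] == 'room'  # first access triggers the parse

As in GameDatastore, later lookups hit the cached object instead of re-parsing the source.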
24
blueskyz/net-tools
https://github.com/blueskyz/net-tools
8d29be5920fb375a32091e6098e43dc13235384d
d472ad5b3760c289f931b9f7046f9999cb358df2
c37d3f70e40c9b774cead8c031b96f09e030be97
refs/heads/master
2021-01-25T06:40:18.902505
2014-09-28T01:52:50
2014-09-28T01:52:50
4,759,572
4
1
null
null
null
null
null
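One conversion recurs throughout the net-tools files that follow: DHT alerts hand back a raw 20-byte SHA-1 info hash, and the crawler rewrites it as a magnet URI of the form magnet:?xt=urn:btih:<40 hex characters> before handing it to libtorrent. A minimal standard-library sketch of that step, with an illustrative function name and a made-up all-zero hash:

    import binascii

    def magnet_from_info_hash(info_hash):
        # A SHA-1 info hash is exactly 20 raw bytes; hex-encode it for the URI.
        if len(info_hash) != 20:
            raise ValueError('expected a 20-byte SHA-1 info hash')
        return 'magnet:?xt=urn:btih:' + binascii.hexlify(info_hash).decode('ascii')

    print(magnet_from_info_hash(b'\x00' * 20))
    # magnet:?xt=urn:btih:0000000000000000000000000000000000000000

This mirrors the 'magnet:?xt=urn:btih:%s' % info_hash construction in collector.py below, where the hash has already been hex-encoded via to_string().encode('hex').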
[ { "alpha_fraction": 0.5263671875, "alphanum_fraction": 0.5380859375, "avg_line_length": 28.058822631835938, "blob_id": "304af9f492547ccb3a373aa30be1d65c5139cbf6", "content_id": "a0b066aa1dbec070a30febaa2042aefea3edbc22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 85, "num_lines": 34, "path": "/pytools/geo_crawl.py", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\nimport json\r\nimport urllib2\r\n\r\napiUrl = 'http://api.map.baidu.com/geocoder/v2/?address=%s&output=json&ak=xx&city=%s'\r\n\r\ndef name2Location(name, city):\r\n url = apiUrl % (name, city)\r\n print url\r\n req = urllib2.Request(url)\r\n req.add_header('Referer', 'www.xxx.com')\r\n result = urllib2.urlopen(req)\r\n result = json.loads(result.read())\r\n if result['status'] == 0: \r\n if result.has_key('result') and len(result['result']) != 0:\r\n result = result['result']\r\n elif result.has_key('results') and len(result['results']) != 0:\r\n result = results[0]['result']\r\n else:\r\n return None\r\n location = result['location']\r\n return '%f,%f' % (location['lng'], location['lat'])\r\n return None\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n name = raw_input('> ')\r\n if len(name) > 0:\r\n result = name2Location(name, '上海')\r\n print result\r\n else:\r\n break\r\n\r\n" }, { "alpha_fraction": 0.4934556484222412, "alphanum_fraction": 0.5151070356369019, "avg_line_length": 41.352333068847656, "blob_id": "37d6f3feea3c9c1f842ba445614948fcbab88160", "content_id": "426a00ba3ce74a8381492339143fb91299bbfe93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8359, "license_type": "no_license", "max_line_length": 280, "num_lines": 193, "path": "/pytools/simple_bt.py", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport libtorrent as lt\nimport time\nimport os\n\n\nclass SimpleDownloader(object):\n '''\n 一个简单的 bt 下载工具,依赖开源库 libtorrent.\n '''\n # 主循环 sleep 时间\n _sleep_time = 1\n # session\n _session = None\n # 下载的 torrent handle 列表\n _handle_list = []\n # 默认下载配置\n _upload_rate_limit = 100000\n _download_rate_limit = 0\n _active_downloads = 8\n _torrent_upload_limit = 50000\n _torrent_download_limit = 0\n _info_hash_set = {}\n def __init__(self, session_file='session.state', delay_interval=5):\n self._session_file = session_file\n self._delay_interval = delay_interval\n\n # 辅助函数\n # 事件通知处理函数\n def _handle_alerts(self, alerts):\n while len(alerts):\n alert = alerts.pop()\n #print 'message: ', alert.msage()\n if isinstance(alert, lt.add_torrent_alert):\n alert.handle.set_upload_limit(self._torrent_upload_limit)\n alert.handle.set_download_limit(self._torrent_download_limit)\n self._handle_list.append(alert.handle)\n elif isinstance(alert, lt.dht_announce_alert):\n info_hash = alert.info_hash.to_string().encode('hex')\n self._info_hash_set[info_hash] = (alert.ip, alert.port)\n\n # 从文件中载入 session 状态\n def _load_state(self, ses_file):\n if os.path.isfile(ses_file):\n with open(ses_file, 'rb') as f:\n content = f.read()\n print 'load session state. len%d' % len(content)\n entry = lt.bdecode(content)\n self._session.load_state(entry)\n print 'load session state. 
nodes=%d' % self._session.status().dht_nodes\n            print self._session.get_settings()\n        else:\n            print 'new session state.'\n\n    # create the session object\n    def create_session(self, tcp_port=32881, udp_port=32881):\n        self._session = lt.session()\n        self._session.set_alert_mask(lt.alert.category_t.all_categories)\n        self._session.listen_on(tcp_port, udp_port)\n        self._session.add_dht_router('router.bittorrent.com', 6881)\n        self._session.add_dht_router('router.utorrent.com', 6881)\n        self._session.add_dht_router('router.bitcomet.com', 6881)\n        settings = self._session.get_settings()\n        settings['upload_rate_limit'] = self._upload_rate_limit\n        settings['download_rate_limit'] = self._download_rate_limit\n        settings['active_downloads'] = self._active_downloads\n        self._session.set_settings(settings)\n        return self._session\n\n\n    # add a magnet link\n    def add_magnet(self, link):\n        params = {'save_path': os.curdir,\n                  'storage_mode': lt.storage_mode_t.storage_mode_sparse,\n                  'paused': False,\n                  'auto_managed': True,\n                  'duplicate_is_error': True,\n                  'url': link}\n        self._session.async_add_torrent(params)\n\n\n    # add a torrent file\n    def add_torrent(self, torrent_file):\n        e = lt.bdecode(open(torrent_file, 'rb').read())\n        info = lt.torrent_info(e)\n        params = {'save_path': os.curdir,\n                  'storage_mode': lt.storage_mode_t.storage_mode_sparse,\n                  'paused': False,\n                  'auto_managed': True,\n                  'duplicate_is_error': True,\n                  'ti': info }\n        self._session.async_add_torrent(params)\n\n\n    # main loop that displays download status\n    def start_work(self):\n        # clear the screen\n        clear = lambda: os.system(['clear','cls'][os.name == 'nt'])\n        self._load_state(self._session_file)\n        show_interval = self._delay_interval\n        while True:\n            self._session.post_torrent_updates()\n            self._handle_alerts(self._session.pop_alerts())\n            time.sleep(self._sleep_time)\n            if show_interval > 0:\n                show_interval -= 1\n                continue\n            show_interval = self._delay_interval\n            clear()\n            # show per-torrent download info\n            show_content = ['torrents:\\n']\n            for (i, h) in enumerate(self._handle_list):\n                s = h.status()\n                name = 'unknown'\n                files = []\n                if h.torrent_file() is not None:\n                    torrent_file = h.get_torrent_info()\n                    name = torrent_file.name()\n                    file_nums = torrent_file.num_files()\n                    for idx in range(file_nums):\n                        files.append(torrent_file.file_at(idx).path)\n                    if os.name == 'nt':\n                        name = name.decode('utf-8')\n                        files = [item.decode('utf-8') for item in files]\n\n                show_content.append('  idx: %d => name %s\\n'\n                                    '    %.2f%% complete state: %s\\n' %\n                                    (i, \n                                     name,\n                                     s.progress * 100, \n                                     [s.state, 'paused'][s.paused]))\n                show_content.append('    info => %s\\n' %\n                                    h.info_hash().to_string().encode('hex'))\n                if files:\n                    show_content.append('    files: %s\\n' % \n                                        '\\n           '.join(files))\n                show_content.append('    down: %.1f kB/s up: %.1f kB/s\\n'\n                                    '    peers: %d all_peers: %d\\n'\n                                    '    total_download: %.3f MB/s '\n                                    'total_upload: %.3f MB/s\\n' %\n                                    (s.download_rate / 1000, \n                                     s.upload_rate / 1000,\n                                     s.num_peers,\n                                     s.list_peers,\n                                     s.total_download / 1000000.0,\n                                     s.total_upload / 1000000.0))\n            # show statistics\n            ses_state = self._session.status()\n            show_content.append('\\nstatistics:\\n')\n            show_content.append('  session state file: %s\\n' %\n                                self._session_file)\n            show_content.append('  download rate limit: %.3f KB/s\\n' % \n                                (self._download_rate_limit / 1000.0))\n            show_content.append('  upload rate limit: %.3f KB/s\\n' % \n                                (self._upload_rate_limit / 1000.0))\n            show_content.append('  download torrent rate limit: %.3f KB/s\\n' % \n                                (self._torrent_download_limit / 1000.0))\n            show_content.append('  upload torrent rate limit: %.3f KB/s\\n' % \n                                (self._torrent_upload_limit / 1000.0))\n            show_content.append('  work 
numbers active(all): %d(%d)\n' % \n                                (min(self._active_downloads, len(self._handle_list)), \n                                 len(self._handle_list)))\n            show_content.append('  DHT node-id: %s\\n' %\n                                self._session.dht_state()['node-id'].encode('hex'))\n            show_content.append('  DHT nodes: %d\\n' % \n                                ses_state.dht_nodes)\n            show_content.append('  DHT cache nodes: %d\\n' % \n                                ses_state.dht_node_cache)\n            show_content.append('  DHT global nodes: %d\\n' % \n                                ses_state.dht_global_nodes)\n            show_content.append('  download rate: %.1f kB/s\\n' %\n                                (ses_state.download_rate / 1000))\n            show_content.append('  upload rate: %.1f kB/s\\n' %\n                                (ses_state.upload_rate / 1000))\n            show_content.append('  info hash collection: %d, %r\\n' %\n                                (len(self._info_hash_set), self._info_hash_set))\n            show_content.append('\\n')\n            print ''.join(show_content)\n\n\nif __name__ == '__main__':\n    link = 'magnet:?xt=urn:btih:0951c8405728344220872c2311a2bfa53b3c54ef&tr=udp://open.demonii.com:1337&tr=udp://tracker.publicbt.com:80/announce&tr=udp://tracker.openbittorrent.com:80/announce&tr=udp://tracker.istole.it:80/announce&tr=http://tracker.torrentfrancais.com/announce'\n    testlink = 'magnet:?xt=urn:btih:29c29ffb940a104e70425fe58175e1df54f48088'\n    torrent_file = './test.torrent'\n\n    sd = SimpleDownloader()\n    sd.create_session()\n    #sd.add_torrent(torrent_file)\n    #sd.add_magnet(link)\n    sd.add_magnet(testlink)\n    sd.start_work()\n\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 26, "blob_id": "2b56c722fc1628182badd63eee59d6ddde609918", "content_id": "302b781a30c883c7f5638009ab0aa0fbf6143833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 54, "license_type": "no_license", "max_line_length": 48, "num_lines": 2, "path": "/servp-agent/makefile", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "all:\n\tg++ -O2 main.cpp transferdata.cpp -o serv_agent\n" }, { "alpha_fraction": 0.3947368562221527, "alphanum_fraction": 0.4226006269454956, "avg_line_length": 19.1875, "blob_id": "44f04545290918cd744524da51ed8084f76f0ebe", "content_id": "a69d01e4368ebbe3a80501e8c8a2ae3dc1492e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 658, "license_type": "no_license", "max_line_length": 88, "num_lines": 32, "path": "/servp-agent/common.h", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "/*\n * =====================================================================================\n *\n *       Filename:  common.h\n *\n *    Description:  \n *\n *        Version:  1.0\n *        Created:  2012-03-28 14:41:30\n *       Revision:  none\n *       Compiler:  gcc\n *\n * =====================================================================================\n */\n\n#ifndef __common_h__\n#define __common_h__\n\n#include <string>\nusing namespace std;\n\ntypedef struct _serv_map\n{\n\t_serv_map():m_usrc_port(0),m_utarget_port(0)\n\t{ }\n\tstring m_sip;\n\tunsigned int m_usrc_port;\n\tstring m_tip;\n\tunsigned int m_utarget_port;\n} serv_map;\n\n#endif //__common_h__\n" }, { "alpha_fraction": 0.4827611446380615, "alphanum_fraction": 0.49941563606262207, "avg_line_length": 38.56647491455078, "blob_id": "93f24530b5b7fa6ab61509c8879932c507a0821a", "content_id": "dfe26391aabbcdc6ce5ccfba3fc502c669c73b43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13988, "license_type": "no_license", "max_line_length": 79, "num_lines": 346, "path": "/pytools/collector.py", "repo_name": 
"blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n\nimport os\nimport sys\nimport time\nimport json\nimport random\nimport traceback as tb\n\nimport libtorrent as lt\n\n\nclass Collector(object):\n '''\n 一个简单的 bt 下载工具,依赖开源库 libtorrent.\n '''\n # 主循环 sleep 时间\n _sleep_time = 0.5\n # 下载的 torrent handle 列表\n _handle_list = []\n # 默认下载配置\n _upload_rate_limit = 200000\n _download_rate_limit = 200000\n _active_downloads = 30\n _alert_queue_size = 4000\n _dht_announce_interval = 60\n _torrent_upload_limit = 20000\n _torrent_download_limit = 20000\n _auto_manage_startup = 30\n _auto_manage_interval = 15\n\n _start_port = 32800\n _sessions = []\n _session_work_num = 3\n _download_metadata_nums = 0\n _infohash_queue_from_getpeers = []\n _info_hash_set = {}\n _meta_count = 0\n _meta_list = {}\n _tpath = None\n _auto_magnet_count = 0\n\n def __init__(self,\n session_nums=50,\n delay_interval=20,\n # exit_time=2*60,\n exit_time=5*60,\n result_file=None,\n stat_file=None):\n self._session_nums = session_nums\n self._delay_interval = delay_interval\n self._exit_time = exit_time\n self._result_file = result_file\n self._stat_file = stat_file\n if self._create_torrent_dir():\n self._backup_result()\n try:\n with open(self._result_file, 'rb') as f:\n self._meta_list = json.load(f)\n except Exception as err:\n pass\n\n def _create_torrent_dir(self):\n self._tpath = os.path.join('mytorrent', time.strftime('%Y%m%d'))\n if not os.path.isdir(self._tpath):\n os.mkdir(self._tpath)\n return True\n return False\n\n def _backup_result(self):\n os.system('cp %s %s_%s' %\n (self._result_file,\n time.strftime('%Y%m%d'),\n self._result_file))\n\n def _get_runtime(self, interval):\n day = interval / (60*60*24)\n interval = interval % (60*60*24)\n hour = interval / (60*60)\n interval = interval % (60*60)\n minute = interval / 60\n interval = interval % 60\n second = interval\n return 'day: %d, hour: %d, minute: %d, second: %d' % \\\n (day, hour, minute, second)\n\n # 辅助函数\n # 事件通知处理函数\n def _handle_alerts(self, session, alerts):\n while len(alerts):\n alert = alerts.pop()\n if isinstance(alert, lt.add_torrent_alert):\n alert.handle.set_upload_limit(self._torrent_upload_limit)\n alert.handle.set_download_limit(self._torrent_download_limit)\n elif isinstance(alert, lt.dht_announce_alert):\n info_hash = alert.info_hash.to_string().encode('hex')\n if info_hash in self._meta_list:\n self._meta_list[info_hash] += 1\n elif info_hash in self._info_hash_set:\n pass\n else:\n self._info_hash_set[info_hash] = (alert.ip, alert.port)\n self._add_magnet(session, info_hash)\n elif isinstance(alert, lt.dht_get_peers_alert):\n info_hash = alert.info_hash.to_string().encode('hex')\n if info_hash in self._meta_list:\n self._meta_list[info_hash] += 1\n elif info_hash in self._info_hash_set:\n pass\n else:\n self._info_hash_set[info_hash] = None\n self._infohash_queue_from_getpeers.append(info_hash)\n self._add_magnet(session, info_hash)\n elif isinstance(alert, lt.metadata_received_alert):\n info_hash = alert.handle.info_hash().to_string().encode('hex')\n if info_hash in self._info_hash_set:\n current_meta_counts = self._meta_list.get(info_hash, 0)\n self._meta_list[info_hash] = current_meta_counts + 1\n with open(os.path.join(self._tpath, info_hash), 'wb') as f:\n info = alert.handle.get_torrent_info()\n entry = lt.create_torrent(info).generate()\n f.write(lt.bencode(entry))\n self._meta_count += 1\n session.remove_torrent(alert.handle)\n self._download_metadata_nums -= 1\n # fixme: 
when downloading from the same initial link,\n                # the handle may not belong to _download_meta_session; removing it crashes\n                # self._download_meta_session.remove_torrent(alert.handle)\n\n    # load session state from a file\n    def _load_state(self, ses_file):\n        if os.path.isfile(ses_file):\n            with open(ses_file, 'rb') as f:\n                content = f.read()\n                entry = lt.bdecode(content)\n                self._session.load_state(entry)\n\n    def _add_magnet(self, session, info_hash):\n        params = {'save_path': os.path.join(os.curdir,\n                                            'collections',\n                                            'magnet_' + info_hash),\n                  'storage_mode': lt.storage_mode_t.storage_mode_sparse,\n                  'paused': False,\n                  'auto_managed': True,\n                  'duplicate_is_error': True,\n                  'url': 'magnet:?xt=urn:btih:%s' % info_hash}\n        session.async_add_torrent(params)\n        self._download_metadata_nums += 1\n\n    # create the session objects\n    def create_session(self, begin_port=32800):\n        self._start_port = begin_port\n        for port in range(begin_port, begin_port + self._session_nums):\n            session = lt.session()\n            session.set_alert_mask(lt.alert.category_t.all_categories)\n            session.listen_on(port, port)\n            session.add_dht_router('router.bittorrent.com', 6881)\n            session.add_dht_router('router.utorrent.com', 6881)\n            session.add_dht_router('router.bitcomet.com', 6881)\n            session.add_dht_router('dht.transmissionbt.com', 6881)\n            settings = session.get_settings()\n            settings['upload_rate_limit'] = self._upload_rate_limit\n            settings['download_rate_limit'] = self._download_rate_limit\n            settings['active_downloads'] = self._active_downloads\n            settings['auto_manage_startup'] = self._auto_manage_startup\n            settings['auto_manage_interval'] = self._auto_manage_interval\n            settings['dht_announce_interval'] = self._dht_announce_interval\n            settings['alert_queue_size'] = self._alert_queue_size\n            session.set_settings(settings)\n            self._sessions.append(session)\n        return self._sessions\n\n    def add_hot_magnet(self, link=None):\n        count = len(self._sessions) * self._session_work_num\n        hot_magnets = []\n        for info_hash in self._meta_list:\n            if self._meta_list[info_hash] > 50:\n                hot_magnets.append('magnet:?xt=urn:btih:%s' % info_hash)\n\n        self._auto_magnet_count = len(hot_magnets)\n        if len(hot_magnets) < count:\n            step = count - len(hot_magnets)\n            if link:\n                while True:\n                    hot_magnets.append(link)\n                    step -= 1\n                    if step <= 0:\n                        break\n            else:\n                for info_hash in self._meta_list:\n                    hot_magnets.append('magnet:?xt=urn:btih:%s' % info_hash)\n                    step -= 1\n                    if step <= 0:\n                        break\n        else:\n            random.shuffle(hot_magnets)\n\n        count = 0\n        workids = range(self._session_work_num)\n        for session in self._sessions:\n            for i in workids:\n                url = hot_magnets[count]\n                # if i == 0:\n                #     url = link\n                params = {'save_path': os.path.join(os.curdir,\n                                                    'collections',\n                                                    'magnet_' + str(count)),\n                          'storage_mode':\n                          lt.storage_mode_t.storage_mode_sparse,\n                          'paused': False,\n                          'auto_managed': True,\n                          'duplicate_is_error': True,\n                          'url': url}\n                session.async_add_torrent(params)\n                count += 1\n\n    # add a magnet link\n    def add_magnet(self, link):\n        count = 0\n        workids = range(self._session_work_num)\n        for session in self._sessions:\n            for i in workids:\n                params = {'save_path': os.path.join(os.curdir,\n                                                    'collections',\n                                                    'magnet_' + str(count)),\n                          'storage_mode':\n                          lt.storage_mode_t.storage_mode_sparse,\n                          'paused': False,\n                          'auto_managed': True,\n                          'duplicate_is_error': True,\n                          'url': link}\n                session.async_add_torrent(params)\n                count += 1\n\n    # add a torrent file\n    def add_torrent(self, torrent_file):\n        count = 0\n        for session in self._sessions:\n            e = lt.bdecode(open(torrent_file, 'rb').read())\n            info = lt.torrent_info(e)\n            params = {'save_path': os.path.join(os.curdir,\n                                                'collections',\n                                                'torrent_' + 
str(count)),\n                      'storage_mode': lt.storage_mode_t.storage_mode_sparse,\n                      'paused': False,\n                      'auto_managed': True,\n                      'duplicate_is_error': True,\n                      'ti': info}\n            session.async_add_torrent(params)\n            count += 1\n\n    def start_work(self):\n        begin_time = time.time()\n        show_interval = self._delay_interval\n        while True:\n            for session in self._sessions:\n                session.post_torrent_updates()\n                self._handle_alerts(session, session.pop_alerts())\n            time.sleep(self._sleep_time)\n            if show_interval > 0:\n                show_interval -= 1\n                continue\n            show_interval = self._delay_interval\n\n            # download info\n            show_content = ['torrents:']\n            # statistics\n            interval = time.time() - begin_time\n            torrent_nums = self._download_metadata_nums\n            show_content.append('  pid: %s' % os.getpid())\n            show_content.append('  time: %s' %\n                                time.strftime('%Y-%m-%d %H:%M:%S'))\n            show_content.append('  run time: %s' % self._get_runtime(interval))\n            show_content.append('  start port: %d' % self._start_port)\n            show_content.append('  collect session num: %d' %\n                                len(self._sessions))\n            show_content.append('  session work num: %d' %\n                                self._session_work_num)\n            show_content.append('  auto magnets: %d' % self._auto_magnet_count)\n            show_content.append('  new info hash nums: %d' %\n                                len(self._info_hash_set))\n            show_content.append('  info hash nums from get peers: %d' %\n                                len(self._infohash_queue_from_getpeers))\n            show_content.append('  downloading meta: %d' % torrent_nums)\n            show_content.append('  torrent collection rate: %f /minute' %\n                                (self._meta_count * 60 / interval))\n            show_content.append('  current torrent count: %d' %\n                                self._meta_count)\n            show_content.append('  total torrent count: %d' %\n                                len(self._meta_list))\n            show_content.append('\\n')\n            try:\n                with open(self._stat_file, 'wb') as f:\n                    f.write('\\n'.join(show_content))\n                with open(self._result_file, 'wb') as f:\n                    json.dump(self._meta_list, f)\n            except Exception as err:\n                pass\n\n            if interval >= self._exit_time:\n                # stop\n                break\n\n            # create a new dated torrent directory\n            if self._create_torrent_dir():\n                self._backup_result()\n\n        # destroy\n        for session in self._sessions:\n            torrents = session.get_torrents()\n            for torrent in torrents:\n                session.remove_torrent(torrent)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        sys.exit(-1)\n\n    result_file = sys.argv[1]\n    stat_file = sys.argv[2]\n\n    link = 'magnet:?xt=urn:btih:ceab7a5dac14eef7a6614ac5927b90bbe8a2149d'\\\n           '&tr=udp://open.demonii.com:1337' \\\n           '&tr=udp://tracker.publicbt.com:80/announce' \\\n           '&tr=udp://tracker.openbittorrent.com:80/announce&' \\\n           'tr=udp://tracker.istole.it:80/announce&' \\\n           'tr=http://tracker.torrentfrancais.com/announce'\n    link = 'magnet:?xt=urn:btih:0cb0a5ac267d04b027997b6259592996221ee17d'\n    testlink = 'magnet:?xt=urn:btih:f5b642f55aa44634b96521ba271ecce7b4ed5e99'\n    torrent_file = './test.torrent'\n\n    # use a fixed port range for each hour; get_peers collection is so fast\n    # that too many download tasks get created, which slows torrent downloads\n    hour = time.localtime().tm_hour\n    port = range(32800, 33800, 100)\n    port = port[hour % len(port)]\n    sd = Collector(session_nums=100,\n                   result_file=result_file,\n                   stat_file=stat_file)\n    sd.create_session(port)\n    sd.add_hot_magnet(link)\n    # sd.add_magnet(link)\n    sd.start_work()\n" }, { "alpha_fraction": 0.5177229046821594, "alphanum_fraction": 0.5349087119102478, "avg_line_length": 20.1159090042114258, "blob_id": "0d2d807a450fb82c7f842973edeaf124d13ef6ba", "content_id": "8db8222ee90753d8e6b22e889e9fb0a110e25002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 943, "license_type": "no_license", "max_line_length": 88, "num_lines": 44, "path": 
"/servp-agent/transferdata.h", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "/*\n * =====================================================================================\n *\n * Filename: transferdata.h\n *\n * Description: \n *\n * Version: 1.0\n * Created: 2012年03月28日 14时29分06秒\n * Revision: none\n * Compiler: gcc\n *\n * =====================================================================================\n */\n\n#ifndef __transfer_data_h__\n#define __transfer_data_h__\n\n#include \"common.h\"\n\nclass transserver{\n\tpublic:\n\t\ttransserver(int nInBufSize, int nOutBufSize);\n\t\tvirtual ~transserver();\n\n\tint transferdata(int sockfd, serv_map* pserv_map);\n\n\tprotected:\n\tint connect_serv(const char* ptarget_ip, unsigned int uport);\n\tvoid set_nonblock(int fd, bool bblock = true);\n\n\n\tprotected:\n\tint m_remotesockfd;\n\tint m_targetsockfd;\n\n\tunsigned int\tm_nInBuffSize;\n\tunsigned int\tm_nOutBuffSize;\n\tunsigned char*\tm_pInBuff;\n\tunsigned char*\tm_pOutBuff;\n\n};\n\n#endif //__transfer_data_h__\n" }, { "alpha_fraction": 0.6480686664581299, "alphanum_fraction": 0.6480686664581299, "avg_line_length": 11.263157844543457, "blob_id": "0f2c36224e470461b75b18a642697bb3bb2b2907", "content_id": "4714ca794ab40dfe08d595da596fce4fef615a80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 367, "license_type": "no_license", "max_line_length": 31, "num_lines": 19, "path": "/README.md", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "net-tools\n=========\n\nnetwork tools\n\n\npytools\n-------\n Python开发的网络小程序\n\n### dht 网络爬虫\n 抓取dht网络的磁力链接和种子文件\n collector.py\n 使用 libtorrent 库开发\n\n### dht 爬虫服务监控程序\n 启动并监控dht爬虫进程,在爬虫进程退出后重启启动爬虫\n collectord.py\n 使用 twisted 开发\n" }, { "alpha_fraction": 0.5505552291870117, "alphanum_fraction": 0.568673312664032, "avg_line_length": 30.090909957885742, "blob_id": "a999211123147db5898b6d046bdf2ebbf41fcde1", "content_id": "3fae093f76036378c7cf357da6ebcacc3d77afc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "no_license", "max_line_length": 70, "num_lines": 55, "path": "/pytools/get_torrent_info.py", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env /python\n# coding: utf-8\n\nimport os\nimport sys\n\nimport libtorrent as lt\n\n\ndef show_torrent_file(torrent_file):\n print torrent_file\n e = lt.bdecode(open(torrent_file, 'rb').read())\n torrent_file = lt.torrent_info(e)\n name = torrent_file.name()\n files = torrent_file.files()\n show_content = []\n if os.name == 'nt':\n try:\n name = name.decode('utf-8').encode('gbk')\n except Exception as err:\n name = 'unknown'\n for file_item in files:\n try:\n file_item.path = \\\n file_item.path.decode('utf-8').encode('gbk')\n except Exception as err:\n pass\n\n show_content.append(' idx: name %s\\n' % name)\n for file_item in files:\n if (file_item.size / (1024*1024.0)) > 50:\n show_content.append(' files(%.3f MB): %s\\n' % \n (file_item.size/(1024*1024.0), file_item.path))\n show_content.append('-' * 70)\n show_content.append('\\n')\n print '\\n'.join(show_content)\n\n\ndef show_torrent_dir(torrent_dir):\n for current_dir, subdirs, torrent_files in os.walk(torrent_dir):\n for subdir in subdirs:\n show_torrent_dir(os.path.join(current_dir, subdir))\n for torrent_file in torrent_files:\n print os.path.join(current_dir, torrent_file)\n show_torrent_file(os.path.join(current_dir, 
torrent_file))\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print 'argument error!'\n print 'input torrent file or directory.'\n elif os.path.isfile(sys.argv[1]):\n show_torrent_file(sys.argv[1])\n elif os.path.isdir(sys.argv[1]):\n show_torrent_dir(sys.argv[1])\n\n" }, { "alpha_fraction": 0.5943108797073364, "alphanum_fraction": 0.6037927269935608, "avg_line_length": 19.088436126708984, "blob_id": "d5b603ecabcd1d377549f8c56ebb96e8b1640698", "content_id": "fe2de6e1372c0a413e176c61d36023b16410f304", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2953, "license_type": "no_license", "max_line_length": 80, "num_lines": 147, "path": "/servp-agent/main.cpp", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n#include <getopt.h>\n#include <signal.h>\n#include <sys/wait.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <string>\n#include <vector>\nusing namespace std;\n\n#include \"common.h\"\n#include \"transferdata.h\"\n\nvoid compile_info()\n{\n\tprintf(\"compile time: %s %s\\n\", __DATE__, __TIME__);\n\tprintf(\"platform: %s\\n\", __VERSION__);\n}\n\nvoid usage()\n{\n\tprintf(\"usage: -d daemon\\n\"\n\t\t\t\" -s src ip\\n\"\n\t\t\t\" -p src port\\n\"\n\t\t\t\" -t target ip\\n\"\n\t\t\t\" -c target port\\n\");\n}\n\nstatic vector<pid_t> g_s_pidSet;\n\nvoid sig_chld(int signo)\n{\n\tpid_t pid;\n\tint stat;\n\twhile ((pid = waitpid(-1, &stat, WNOHANG)) > 0){\n\t\tprintf(\"child %d terminated\\n\", pid);\n\t}\n}\n\nvoid setsig()\n{\n\tsignal(SIGCHLD, sig_chld); \n}\n\nint work(bool bdaemon, serv_map* pservm)\n{\n\tif (bdaemon){\n\t\tprintf(\"start run[daemon] ...\\n\");\n\t\tdaemon(1, 1);\n\t}\n\telse{\n\t\tprintf(\"start run ...\\n\");\n\t}\n\t// begin listen\n\tstruct sockaddr_in lsn_addr;\n\tbzero(&lsn_addr, sizeof(lsn_addr));\n\tint sock_lsn_fd = socket(AF_INET, SOCK_STREAM, 0);\n\tlsn_addr.sin_family = AF_INET;\n\tif (pservm->m_sip.length() == 0)\n\t\tlsn_addr.sin_addr.s_addr = INADDR_ANY;\n\telse{\n\t\tinet_pton(AF_INET, pservm->m_sip.c_str(), &lsn_addr.sin_addr.s_addr);\n\t}\n\tlsn_addr.sin_port = htons(pservm->m_usrc_port);\n\tif (bind(sock_lsn_fd, (struct sockaddr*)&lsn_addr, sizeof(struct sockaddr))<0){\n\t\tperror(\"bind error\");\n\t\treturn -1;\n\t}\n\tif (listen(sock_lsn_fd, 20) < 0){\n\t\tperror(\"listen error\");\n\t\treturn -1;\n\t}\n\tprintf(\"listen port: %d\\n\", pservm->m_usrc_port);\n\n\tint client_fd;\n\tfor ( ; ; ){\n\t\tstruct sockaddr_in remote_addr;\n\t\tsocklen_t nsocksize = sizeof(struct sockaddr_in);\n\t\tclient_fd=accept(sock_lsn_fd, (struct sockaddr*)&remote_addr, &nsocksize);\n\t\tif (client_fd < 0){\n\t\t\tperror(\"accept error\");\n\t\t\tcontinue;\n\t\t}\n\t\tint child_pid = fork();\n\t\tif (child_pid == 0){\n\t\t\tprintf(\"create child process: %d\\n\", getpid());\n\t\t\ttransserver otserver(32, 32);\n\t\t\tif (otserver.transferdata(client_fd, pservm) < 0)\n\t\t\t\treturn -1;\n\t\t\treturn 0;\n\t\t}\n\t\telse if (child_pid > 0){\n\t\t\t// g_s_pidSet.push_back(child_pid);\n\t\t}\n\t\telse {\n\t\t\tperror(\"create child proc fail\");\n\t\t}\n\t\tclose(client_fd);\n\t}\n\treturn 0;\n}\n\nint main(int argc, char* argv[])\n{\n\tif (argc == 1){\n\t\tusage();\n\t\treturn EXIT_SUCCESS;\n\t}\n\tserv_map oserv_map;\n\tbool bdaemon = false;\n\tunsigned int uport=0;\n\tint c = 0;\n\twhile ((c = getopt(argc, argv, \"dvs:p:t:c:\")) != -1){\n\t\tswitch(c){\n\t\t\tcase 'd':\n\t\t\t\tbdaemon = 
true;\n\t\t\t\tbreak;\n\t\t\tcase 'v':\n\t\t\t\tcompile_info();\n\t\t\t\treturn EXIT_SUCCESS;\n\t\t\t\tbreak;\n\t\t\tcase 's':\n\t\t\t\toserv_map.m_sip = optarg;\n\t\t\t\tbreak;\n\t\t\tcase 'p':\n\t\t\t\toserv_map.m_usrc_port = atoi(optarg);\n\t\t\t\tbreak;\n\t\t\tcase 't':\n\t\t\t\toserv_map.m_tip = optarg;\n\t\t\t\tbreak;\n\t\t\tcase 'c':\n\t\t\t\toserv_map.m_utarget_port = atoi(optarg);\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tusage();\n\t\t\t\treturn EXIT_SUCCESS;\n\t\t}\n\t}\n\tsetsig();\n\tif (work(bdaemon, &oserv_map) <0){\n\t\treturn EXIT_FAILURE;\n\t}\n\treturn EXIT_SUCCESS;\n}\n" }, { "alpha_fraction": 0.5988875031471252, "alphanum_fraction": 0.6118664741516113, "avg_line_length": 22.449275970458984, "blob_id": "aaf55330525872067941075bc3bac291ebe57855", "content_id": "811e7b7627ea2551080d67f4abf8ba18edf79df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3248, "license_type": "no_license", "max_line_length": 89, "num_lines": 138, "path": "/servp-agent/transferdata.cpp", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "/*\n * =====================================================================================\n *\n * Filename: transferdata.cpp\n *\n * Description: \n *\n * Version: 1.0\n * Created: 2012-03-28 14:44:47\n * Revision: none\n * Compiler: gcc\n *\n * =====================================================================================\n */\n\n#include <stdio.h>\n#include <errno.h>\n#include <strings.h>\n#include <unistd.h>\n#include <sys/time.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <fcntl.h>\n\n#include \"transferdata.h\"\n\n#define M_SIZE 0x1<<20 \n\ntransserver::transserver(int nInBufSize, int nOutBufSize)\n\t:m_pInBuff(NULL), m_pOutBuff(NULL)\n{\n\tm_nInBuffSize = nInBufSize * M_SIZE;\n\tm_nOutBuffSize = nOutBufSize * M_SIZE;\n\tm_pInBuff = new unsigned char[m_nInBuffSize];\n\tm_pOutBuff = new unsigned char[m_nOutBuffSize];\n}\n\ntransserver::~transserver()\n{\n\tif (m_pInBuff != NULL){\n\t\tdelete[] m_pInBuff;\n\t\tm_pInBuff = NULL;\n\t}\n\tif (m_pOutBuff != NULL){\n\t\tdelete[] m_pOutBuff;\n\t\tm_pOutBuff = NULL;\n\t}\n}\n\nint transserver::transferdata(int sockfd, serv_map* pserv_map)\n{\n\tm_remotesockfd = sockfd;\n\tm_targetsockfd = connect_serv(pserv_map->m_tip.c_str(), pserv_map->m_utarget_port);\n\tif (m_targetsockfd < 0)\n\t\treturn -1;\n\tprintf(\"targetsockfd %d\\n\", m_targetsockfd);\n\tfd_set rset;\n\tFD_ZERO(&rset);\n\tFD_SET(m_remotesockfd, &rset);\n\tFD_SET(m_targetsockfd, &rset);\n\tstruct timeval tv;\n\ttv.tv_sec = 3;\n\ttv.tv_usec = 0;\n\n\tset_nonblock(m_remotesockfd);\n\tset_nonblock(m_targetsockfd);\n\n\twhile (true){\n\t\tfd_set tmp_rset = rset;\n\t\tstruct timeval tmp_tv = tv;\n\t\tint retval = select(m_targetsockfd+1, &tmp_rset, NULL, NULL, &tmp_tv);\n\t\tif (retval == 0){\n\t\t\tcontinue;\n\t\t}\n\t\tif (retval < 0){\n\t\t\tif (errno == EINTR)\n\t\t\t\tcontinue;\n\t\t\tperror(\"select error\");\n\t\t\tbreak;\n\t\t}\n\t\tif (FD_ISSET(m_remotesockfd, &tmp_rset) != 0){\n\t\t\tint datasize = recv(m_remotesockfd, m_pInBuff, m_nInBuffSize, 0);\n\t\t\tif (datasize > 0){\n\t\t\t\tsend(m_targetsockfd, m_pInBuff, datasize, 0);\n\t\t\t}\n\t\t\telse if (datasize == 0){\n\t\t\t\tprintf(\"close connect\\n\");\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif (FD_ISSET(m_targetsockfd, &tmp_rset) != 0){\n\t\t\tint datasize = recv(m_targetsockfd, m_pOutBuff, m_nOutBuffSize, 0);\n\t\t\tif (datasize > 0){\n\t\t\t\tsend(m_remotesockfd, m_pOutBuff, datasize, 
0);\n\t\t\t}\n\t\t\telse if (datasize <= 0){\n\t\t\t\tperror(\"out data failure\");\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\t\n\treturn 0;\n}\n\n// method\nint transserver::connect_serv(const char* ptarget_ip, unsigned int uport)\n{\n\tstruct sockaddr_in socktarget;\n\tbzero(&socktarget, sizeof(struct sockaddr_in));\n\tinet_pton(AF_INET, ptarget_ip, &socktarget.sin_addr.s_addr);\n\tsocktarget.sin_family = AF_INET;\n\tsocktarget.sin_port = htons(uport);\n\tprintf(\"target ip %s\\n\", ptarget_ip);\n\tprintf(\"target port %u\\n\", uport);\n\tint sockfd = socket(AF_INET, SOCK_STREAM, 0);\n\n\t// connect server port\n\tint retval = connect(sockfd, (struct sockaddr*)&socktarget, sizeof(struct sockaddr_in));\n\tif (retval < 0){\n\t\tprintf(\"connect target fail: %s\\n\", ptarget_ip);\n\t\treturn -1;\n\t}\n\tprintf(\"connect %d success\\n\", retval);\n\treturn sockfd;\n}\n\nvoid transserver::set_nonblock(int fd, bool bblock)\n{\n\tint flags = fcntl(fd, F_GETFL, 0);\n\tif (bblock){\n\t\tflags |= O_NONBLOCK;\n\t}\n\telse{\n\t\tflags &= ~O_NONBLOCK;\n\t}\n\tfcntl(fd, F_SETFL, flags);\n}\n" }, { "alpha_fraction": 0.5075090527534485, "alphanum_fraction": 0.5225271582603455, "avg_line_length": 25.95652198791504, "blob_id": "434d3b3ff6b012c05b18ec37bd94ac4209057adf", "content_id": "50930ef188fb534858f2150737be15939bd6b331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1935, "license_type": "no_license", "max_line_length": 88, "num_lines": 69, "path": "/pytools/wiki_parser.py", "repo_name": "blueskyz/net-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport codecs\r\nimport re\r\nfrom urllib import urlopen\r\nfrom BeautifulSoup import BeautifulSoup as bsoup\r\n\r\ndef compileReg():\r\n #charBOM = u'\\ufeff'\r\n charBOM = codecs.BOM_UTF8.decode('utf-8')\r\n char200E = u'\\u200e'\r\n charComment = u'\\[[^\\]]*\\]'\r\n pattern = u'%s|%s|%s' % (charBOM, char200E, charComment)\r\n #print {pattern}\r\n pattern = re.compile(pattern)\r\n return pattern\r\n\r\npattern = compileReg()\r\n\r\ndef filterInvalidChar(pattern, text):\r\n \"\"\"\r\n text must be unicode character.\r\n \"\"\"\r\n text = pattern.sub('', text)\r\n return text\r\n\r\ndef parseWikiContent(text):\r\n soup = bsoup(text)\r\n # check exists\r\n noarticle = soup('div', {\"class\" : \"noarticletext\"})\r\n if len(noarticle) != 0:\r\n print 'not exist!!!'\r\n return None\r\n pSet = soup('div', {'id' : 'mw-content-text'})[0].findChildren('p', recursive=False)\r\n loops = 3\r\n contents = ''\r\n for p in pSet:\r\n if loops == 0:\r\n break\r\n #print p\r\n content = p.getText()\r\n #print content\r\n if len(content) >= 4 and content[0:6].find(u'坐标') == -1:\r\n content = filterInvalidChar(pattern, content)\r\n contents += content.encode('utf-8') + '\\n'\r\n loops -= 1\r\n if len(contents) > 0:\r\n return contents\r\n else:\r\n return None\r\n\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n name = raw_input('> ')\r\n #name = name.decode('gbk').encode('utf-8')\r\n if len(name) > 0:\r\n url = 'http://zh.wikipedia.org/zh-cn/%s' % (name)\r\n #print url.decode('utf-8').encode('gbk')\r\n print url\r\n text = urlopen(url).read()\r\n content = parseWikiContent(text)\r\n if content is not None:\r\n print content\r\n else:\r\n print \"can't get content!\"\r\n else:\r\n break\r\n\r\n" } ]
11
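A note on the blueskyz/net-tools records above: collector.py's `__main__` block derives its listening port from the wall-clock hour. Below is a minimal, standalone Python 3 sketch of just that trick; the 32800..33800 range and 100-port step are the values from the source, while the function name and docstring are illustrative.

```python
# Python 3 sketch of collector.py's hourly port rotation (port values from the source).
import time

def hourly_start_port(start=32800, stop=33800, step=100):
    """Pick one fixed start port per wall-clock hour, so each hour's
    sessions bind to a predictable, non-overlapping port band."""
    ports = range(start, stop, step)   # ten candidate bands
    hour = time.localtime().tm_hour    # 0..23
    return ports[hour % len(ports)]    # same band for the whole hour

print(hourly_start_port())             # e.g. 33100 at 03:00, 13:00 or 23:00
```

Rotating the band hourly keeps a freshly started crawler from racing the previous hour's sessions for the same sockets, which is what the translated comment in collector.py is getting at.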
satyandrab/Google-coding-exercise
https://github.com/satyandrab/Google-coding-exercise
a4f80dec797e85a0a5abdeb614498dc233e23bba
aa1ee616cfad391035efe1989c285176493258e8
981b9cf9eed053b61a85a54bd7cf41cb4d271642
refs/heads/master
2016-09-02T00:54:55.433306
2013-11-05T07:49:31
2013-11-05T07:49:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5845997929573059, "alphanum_fraction": 0.588652491569519, "avg_line_length": 29.84375, "blob_id": "3a8d505b5af25c844564137df30e783af8c8eb7a", "content_id": "72504f998c85b103bd13e8e359443baaef7ae8aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 987, "license_type": "no_license", "max_line_length": 100, "num_lines": 32, "path": "/satyandra_coding_exercise/sharing_price.py", "repo_name": "satyandrab/Google-coding-exercise", "src_encoding": "UTF-8", "text": "import csv\n\ndef return_max():\n #open csv file\n f = open(\"sharing_price.csv\", \"rb\")\n csv_reader = csv.reader(f)\n \n #create list of header values\n header = csv_reader.next()\n \n # create a dict corresponding for each header ignoring first one, as its 'year' so not required.\n complete_data = {}\n for i in range(2, len(header)):\n complete_data[header[i]] = {}\n \n # read csv line by line, mentioning key as \"Year month\" and share price as values\n for r in csv_reader:\n values = r\n \n for i in range(2, len(values)):\n complete_data[header[i]][r[0]+\" \"+r[1]] = values[i]\n \n # find max values for each company and compare it mont year wise\n max_values ={}\n for key, values in complete_data.iteritems():\n max_value = max(values.iterkeys(), key= (lambda key: int(values[key])))\n max_values[key] = max_value\n \n return max_values\n\nif __name__ == \"__main__\":\n print return_max()\n" } ]
1
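sharing_price.py above is Python 2 (`csv_reader.next()`, `iteritems`). As a point of comparison, here is a hedged Python 3 sketch of the same computation; the file name and the year/month column layout are assumptions carried over from the source, and `float()` stands in for `int()` in case prices carry decimals.

```python
# Python 3 sketch of sharing_price.py's logic: for each company column,
# find the "year month" whose share price is highest.
import csv

def return_max(path="sharing_price.csv"):     # file name assumed from the source
    best = {}                                 # company -> (price, "year month")
    with open(path, newline="") as f:
        rows = csv.reader(f)
        header = next(rows)                   # ['year', 'month', company, ...]
        for row in rows:
            label = row[0] + " " + row[1]     # e.g. "2010 Jan"
            for name, price in zip(header[2:], row[2:]):
                price = float(price)
                if name not in best or price > best[name][0]:
                    best[name] = (price, label)
    return {name: label for name, (price, label) in best.items()}
```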
bibeksubedi11/Conference-Website
https://github.com/bibeksubedi11/Conference-Website
65087caf72aaf8d821852a16e68afc9b2ff98781
6b0dbfc43333ab13e6b091b433cf110d77bfaa1c
9285f99f0c7bd5b09dab3bc9302348e8cf3d5338
refs/heads/master
2020-12-03T11:14:16.768544
2020-01-02T02:47:49
2020-01-02T02:47:49
231,294,833
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6861656904220581, "alphanum_fraction": 0.7015371322631836, "avg_line_length": 32.4571418762207, "blob_id": "56377c1f8a179c2b3b24ec13f87969354235c33c", "content_id": "682bae871274950b1ac7521493e2c3ea604a6e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4684, "license_type": "no_license", "max_line_length": 134, "num_lines": 140, "path": "/conference/website/models.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import datetime\n\n# Create your models here.\n\nclass BannerImage(models.Model):\n image = models.ImageField(null=True, blank=True)\n\n\nclass Counter(models.Model):\n date = models.DateField(default=datetime.now, blank = True)\n time = models.TimeField(default=datetime.now, blank = True)\n\n\nclass Join(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n phone = models.CharField(max_length=100, null=True, blank=True)\n \n def __str__(self):\n return self.name\n\n\nclass DetailOfConferernce(models.Model):\n icon = models.CharField(max_length=100, null=True, blank=True)\n heading = models.CharField(max_length=100, null=True, blank=True)\n description = models.TextField(default='describe here')\n\n def __str__(self):\n return self.heading\n\n\nclass SideImage(models.Model):\n image = models.ImageField(null=True, blank=True)\n\n\nclass ConferenceDay(models.Model):\n day = models.CharField(max_length=100, null=True, blank=True)\n date = models.DateField(default=datetime.now, blank = True)\n\n def __str__(self):\n return self.day\n\n\nclass ConferenceSchedule(models.Model):\n heading = models.CharField(max_length=100, null=True, blank=True)\n about = models.TextField(default='describe hehe')\n description = models.TextField(default='describe hehe')\n image = models.ImageField(null=True, blank=True)\n start_time = models.TimeField(default=datetime.now, blank = True)\n end_time = models.TimeField(default=datetime.now, blank = True)\n date = models.DateField(default=datetime.now, blank = True)\n location = models.CharField(max_length=100, null=True, blank=True)\n posted_by = models.CharField(max_length=100, null=True, blank=True)\n position = models.CharField(max_length=100, null=True, blank=True)\n conference_day = models.ForeignKey(ConferenceDay, on_delete=models.CASCADE, related_name='conference_day', null =True, blank=True)\n\n def __str__(self):\n return self.heading\n\n\nclass Gallery(models.Model):\n image = models.ImageField(null=True, blank=True)\n\n\nclass HappyClients(models.Model):\n image = models.ImageField(null=True, blank=True)\n name = models.CharField(max_length=100, null=True, blank=True)\n details = models.TextField(default='here')\n position = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\nclass Active(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\nclass Pricing(models.Model):\n types = models.CharField(max_length=100, null=True, blank=True)\n price = models.IntegerField()\n about = models.CharField(max_length=100, null=True, blank=True)\n features = models.TextField(default='here')\n active = models.ForeignKey(Active, on_delete=models.CASCADE, related_name='active', null =True, blank=True)\n\n def __str__(self):\n return self.types\n\n\nclass AboutCounter(models.Model):\n 
speaker = models.IntegerField()\n sponsor = models.IntegerField()\n total_seats = models.IntegerField()\n topic = models.IntegerField()\n\n\n\n\nclass Speakers(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n profession = models.CharField(max_length=100, null=True, blank=True)\n image = models.ImageField(null=True, blank=True)\n description = models.TextField(default='here')\n\n def __str__(self):\n return self.name\n\n\nclass BlogSingle(models.Model):\n image = models.ImageField(null=True, blank=True)\n heading = models.CharField(max_length=100, null=True, blank=True)\n description = models.TextField(default='here')\n date = models.DateField(default=datetime.now, blank = True)\n posted_by = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.heading\n\n\nclass Comment(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n message = models.TextField(default='here')\n date = models.DateField(default=datetime.now, blank=True)\n time = models.TimeField(default=datetime.now, blank =True)\n\n def __str__(self):\n return self.name\n\nclass Contact(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n subject = models.CharField(max_length=100, null=True, blank=True)\n message = models.TextField(default='here')\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.48351648449897766, "alphanum_fraction": 0.5686812996864319, "avg_line_length": 19.22222137451172, "blob_id": "cc29e89424a74bd76dec598a03e9b664fa71969e", "content_id": "74694c5ca47949d03929b7853005c2ecd6d32d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/conference/website/migrations/0007_auto_20191205_0759.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-05 07:59\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0006_auto_20191205_0753'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='pricing',\n old_name='heading',\n new_name='types',\n ),\n ]\n" }, { "alpha_fraction": 0.5357828140258789, "alphanum_fraction": 0.5464765429496765, "avg_line_length": 46.98684310913086, "blob_id": "5a94bcabc484d972dd57bddb2c6bcf4017637e3c", "content_id": "a01b5c645f04f83bd9ddb8252a2a4229cb3a1bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7294, "license_type": "no_license", "max_line_length": 114, "num_lines": 152, "path": "/conference/website/migrations/0001_initial.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-02 08:14\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AboutCounter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('speaker', models.IntegerField()),\n ('sponsor', models.IntegerField()),\n ('total_seats', models.IntegerField()),\n ('topic', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n 
name='BannerImage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ],\n ),\n migrations.CreateModel(\n name='Blog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ('heading', models.CharField(blank=True, max_length=100, null=True)),\n ('description', models.TextField(default='here')),\n ('date', models.DateField(blank=True, default=datetime.datetime.now)),\n ('posted_by', models.CharField(blank=True, max_length=100, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ('email', models.CharField(blank=True, max_length=100, null=True)),\n ('message', models.TextField(default='here')),\n ],\n ),\n migrations.CreateModel(\n name='ConferenceDay',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('day', models.CharField(blank=True, max_length=100, null=True)),\n ('date', models.DateField(blank=True, default=datetime.datetime.now)),\n ],\n ),\n migrations.CreateModel(\n name='ConferenceSchedule',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('heading', models.CharField(blank=True, max_length=100, null=True)),\n ('description', models.TextField(default='describe hehe')),\n ('start_time', models.TimeField(blank=True, default=datetime.datetime.now)),\n ('end_time', models.TimeField(blank=True, default=datetime.datetime.now)),\n ('date', models.DateField(blank=True, default=datetime.datetime.now)),\n ('location', models.CharField(blank=True, max_length=100, null=True)),\n ('posted_by', models.CharField(blank=True, max_length=100, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ('email', models.CharField(blank=True, max_length=100, null=True)),\n ('subject', models.CharField(blank=True, max_length=100, null=True)),\n ('message', models.TextField(default='here')),\n ],\n ),\n migrations.CreateModel(\n name='Counter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateField(blank=True, default=datetime.datetime.now)),\n ('time', models.TimeField(blank=True, default=datetime.datetime.now)),\n ],\n ),\n migrations.CreateModel(\n name='DetailOfConferernce',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('icon', models.CharField(blank=True, max_length=100, null=True)),\n ('heading', models.CharField(blank=True, max_length=100, null=True)),\n ('description', models.TextField(default='describe here')),\n ],\n ),\n migrations.CreateModel(\n name='Gallery',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ],\n ),\n migrations.CreateModel(\n name='HappyClients',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ('details', models.TextField(default='here')),\n ('position', models.CharField(blank=True, max_length=100, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='JoinForm',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ('email', models.CharField(blank=True, max_length=100, null=True)),\n ('phone', models.CharField(blank=True, max_length=100, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Pricing',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('price', models.IntegerField()),\n ('about', models.CharField(blank=True, max_length=100, null=True)),\n ('features', models.TextField(default='here')),\n ],\n ),\n migrations.CreateModel(\n name='SideImage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ],\n ),\n migrations.CreateModel(\n name='Speakers',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ('profession', models.CharField(blank=True, max_length=100, null=True)),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ('description', models.TextField(default='here')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8348271250724792, "alphanum_fraction": 0.8348271250724792, "avg_line_length": 36.19047546386719, "blob_id": "7820cd2583b71788765229bfb8eb2a61f7dc4a9d", "content_id": "7bd06cef58a5867a85563a94f448c05e31f4070d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 207, "num_lines": 21, "path": "/conference/website/admin.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BannerImage, Counter, Join, DetailOfConferernce, SideImage, ConferenceDay, ConferenceSchedule, Gallery, HappyClients, Pricing, AboutCounter, Speakers, BlogSingle, Comment, Contact, Active\n\n\n# Register your models here.\nadmin.site.register(BannerImage)\nadmin.site.register(Counter)\nadmin.site.register(Join)\nadmin.site.register(DetailOfConferernce)\nadmin.site.register(SideImage)\nadmin.site.register(ConferenceDay)\nadmin.site.register(ConferenceSchedule)\nadmin.site.register(Gallery)\nadmin.site.register(HappyClients)\nadmin.site.register(Pricing)\nadmin.site.register(AboutCounter)\nadmin.site.register(Speakers)\nadmin.site.register(BlogSingle)\nadmin.site.register(Comment)\nadmin.site.register(Contact)\nadmin.site.register(Active)\n" }, { "alpha_fraction": 0.5620437860488892, "alphanum_fraction": 0.6082724928855896, "avg_line_length": 21.83333396911621, "blob_id": "340e66ea043e142735a3885447f4d6f2b4f36971", "content_id": "5c777c0292e6c1f6a35f585db778332792fb4407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/conference/website/migrations/0004_conferenceschedule_about.py", "repo_name": 
"bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-05 06:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0003_conferenceschedule_image'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='conferenceschedule',\n name='about',\n field=models.TextField(default='describe hehe'),\n ),\n ]\n" }, { "alpha_fraction": 0.7012345790863037, "alphanum_fraction": 0.7061728239059448, "avg_line_length": 39.474998474121094, "blob_id": "2faf1c907db60a90213b155e08e5be218e5fd251", "content_id": "3378073dfac6d6454742182a83d1a3296e1cba1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1620, "license_type": "no_license", "max_line_length": 78, "num_lines": 40, "path": "/conference/conference/urls.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "\"\"\"conference URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom website import views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.IndexView.as_view(), name='index' ),\n path('about', views.AboutView.as_view(), name='about'),\n path('speakers', views.speakers, name='speakers'),\n path('schedule', views.ScheduleView.as_view(), name='schedule'),\n path('blog/<int:pk>', views.BlogSingleView.as_view(), name='blogSingle'),\n path('blog', views.BlogView.as_view(), name='blog'),\n path('comment/<int:pk>',views.commentview, name='comment'),\n path('contact', views.contactview, name ='contact'),\n path('join',views.joinview, name='join'),\n \n\n \n\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n" }, { "alpha_fraction": 0.568129301071167, "alphanum_fraction": 0.6120092272758484, "avg_line_length": 23.05555534362793, "blob_id": "01ad01e43cccb80e8a57770ae54fa43e4af17729", "content_id": "c8440e94450f573e41d23eebddd4442b85d95a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 73, "num_lines": 18, "path": "/conference/website/migrations/0003_conferenceschedule_image.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-05 06:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0002_conferenceschedule_conference_day'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='conferenceschedule',\n name='image',\n field=models.ImageField(blank=True, null=True, upload_to=''),\n ),\n ]\n" }, { 
"alpha_fraction": 0.5280898809432983, "alphanum_fraction": 0.5716292262077332, "avg_line_length": 24.428571701049805, "blob_id": "21cb4c65f6417e95b23e50b05be5579e7d27add0", "content_id": "9b8ebcee5c65a37c30e5211537bbd5e133158c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/conference/website/migrations/0008_auto_20191208_0704.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-08 07:04\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0007_auto_20191205_0759'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Blog',\n new_name='BlogSingle',\n ),\n migrations.AddField(\n model_name='comment',\n name='date',\n field=models.DateField(blank=True, default=datetime.datetime.now),\n ),\n migrations.AddField(\n model_name='comment',\n name='time',\n field=models.TimeField(blank=True, default=datetime.datetime.now),\n ),\n ]\n" }, { "alpha_fraction": 0.5626242756843567, "alphanum_fraction": 0.5874751210212708, "avg_line_length": 31.45161247253418, "blob_id": "ffdd3d6cb58ea854f1404f0efae10c61fb012340", "content_id": "ff349f245726d2853ad6d428788749a13b307cb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1006, "license_type": "no_license", "max_line_length": 148, "num_lines": 31, "path": "/conference/website/migrations/0006_auto_20191205_0753.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-05 07:53\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0005_conferenceschedule_position'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Active',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(blank=True, max_length=100, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='pricing',\n name='heading',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AddField(\n model_name='pricing',\n name='active',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='active', to='website.Active'),\n ),\n ]\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "db63be7e823493a6a692e4e88799f731822a2d61", "content_id": "99353ebbccc1e28d501c860269805f034db485ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/conference/website/apps.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass WebisteConfig(AppConfig):\n name = 'webiste'\n" }, { "alpha_fraction": 0.5630841255187988, "alphanum_fraction": 0.6144859790802002, "avg_line_length": 22.77777862548828, "blob_id": "bd70c5d8c3132accdefe0bcca42e991586df47a2", "content_id": "cc83886f26eee8797a534b94e4a0f02764146272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, 
"license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/conference/website/migrations/0005_conferenceschedule_position.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-05 06:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0004_conferenceschedule_about'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='conferenceschedule',\n name='position',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6676049828529358, "alphanum_fraction": 0.6676049828529358, "avg_line_length": 34.831932067871094, "blob_id": "c92ba67720064d55a0513996bbdf2f26af8b2cc6", "content_id": "76390e097ec3ba83d07f589e308afbbf96679734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4263, "license_type": "no_license", "max_line_length": 199, "num_lines": 119, "path": "/conference/website/views.py", "repo_name": "bibeksubedi11/Conference-Website", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import BannerImage, Counter, Join, DetailOfConferernce, SideImage, ConferenceDay, ConferenceSchedule, Gallery, HappyClients, Pricing, AboutCounter, Speakers, BlogSingle, Comment, Contact\nfrom django.views.generic import TemplateView\nfrom django.template.context import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Q\nfrom datetime import datetime\n\n# Create your views here.\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n context['banner_image'] = BannerImage.objects.last()\n context['detail_of_conference'] = DetailOfConferernce.objects.all()\n context['side_image'] = SideImage.objects.last()\n context['conference_day'] = ConferenceDay.objects.all()\n context['conference_schedule'] =ConferenceSchedule.objects.all()\n context['gallery']= Gallery.objects.all()\n context['happy_clients'] = HappyClients.objects.all()\n context['pricing']= Pricing.objects.all()\n context['blog_single'] = BlogSingle.objects.all()\n \n\n return context\n\nclass AboutView(TemplateView):\n template_name = 'about.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(AboutView, self).get_context_data(**kwargs)\n context['side_image'] = SideImage.objects.last()\n context['about_counter'] = AboutCounter.objects.last()\n context['gallery'] = Gallery.objects.all()\n context['happy_clients'] = HappyClients.objects.all()\n\n\n return context\n\n\ndef speakers(request):\n return render(request, \"speakers.html\") \n\nclass ScheduleView(TemplateView):\n template_name = 'schedule.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ScheduleView, self).get_context_data(**kwargs)\n context['conference_day'] = ConferenceDay.objects.all()\n context['conference_schedule'] =ConferenceSchedule.objects.all()\n\n return context\n\n\n\n\n\nclass BlogView(TemplateView):\n template_name = 'blog.html'\n\n def get_context_data(self, *args, **kwargs):\n context= super(BlogView, self).get_context_data(**kwargs)\n context['blog_single'] = BlogSingle.objects.all()\n\n return context\n\nclass BlogSingleView(TemplateView):\n template_name = 'blogSingle.html'\n\n def get_context_data(self, *args, **kwargs):\n context = 
super(BlogSingleView,self).get_context_data(**kwargs)\n context['blog_single'] = BlogSingle.objects.get(id=kwargs.get('pk'))\n context['comment'] = Comment.objects.all()\n\n return context\n\ndef commentview(request, *args, **kwargs):\n if request.method == 'POST':\n name = request.POST.get(\"name\")\n email = request.POST.get(\"email\")\n message = request.POST.get(\"message\")\n blog = BlogSingle.objects.get(pk=kwargs.get('pk'))\n Comment.objects.create(name=name, email=email, message=message, )\n print(kwargs.get('pk'))\n \n\n return HttpResponseRedirect('/blog/'+ str(kwargs.get('pk')))\n else:\n banner_image = BannerImage.objects.last()\n return render(request,'blogSingle.html',{'banner_image':banner_image})\n\n\ndef contactview(request, *args, **kwargs):\n if request.method == 'POST':\n name = request.POST.get(\"name\")\n email = request.POST.get(\"email\")\n subject = request.POST.get(\"subject\")\n message = request.POST.get(\"message\")\n\n Contact.objects.create(name=name, email=email, subject=subject, message=message)\n\n return HttpResponseRedirect('/')\n else:\n banner_image = BannerImage.objects.last()\n return render(request, 'contact.html', {'banner_image': banner_image}) \n\ndef joinview(request, *args, **kwargs):\n if request.method == 'POST':\n name = request.POST.get(\"name\")\n email= request.POST.get(\"email\")\n phone = request.POST.get(\"phone\")\n Join.objects.create(name=name, phone=phone, email= email)\n\n return HttpResponseRedirect('/')\n else:\n banner_image = BannerImage.objects.last()\n return render(request, 'index.html', {'banner_image':banner_image})" } ]
12
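The Conference-Website views above hand `conference_day` and `conference_schedule` to the templates as two separate querysets. Below is a hedged sketch, not code from the repo, of how the two models could instead be joined per day using the `conference_day` foreign key declared in models.py; the helper name is illustrative.

```python
# Illustrative helper, assuming the models from website/models.py above.
from website.models import ConferenceDay, ConferenceSchedule

def group_schedule_by_day():
    """Return [(day, [schedule items ordered by start_time]), ...]."""
    grouped = []
    for day in ConferenceDay.objects.order_by("date"):
        items = (ConferenceSchedule.objects
                 .filter(conference_day=day)   # FK field from models.py
                 .order_by("start_time"))
        grouped.append((day, list(items)))
    return grouped
```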
viralj/HackerRank
https://github.com/viralj/HackerRank
74666b18286c634dc22c782db5753950db415458
86e9fa18e82ca326bd9ceb0bbb473225c277ad6b
31c50b6c1a24d032ebebc2d3a34050ceaf3a5467
refs/heads/master
2021-07-04T20:30:22.645276
2017-09-25T21:34:59
2017-09-25T21:34:59
104,778,118
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4035714268684387, "alphanum_fraction": 0.4392857253551483, "avg_line_length": 17.66666603088379, "blob_id": "23ce7e6390688a34c8661705288ff5267c5b6536", "content_id": "b79d4cd5cec96642fcde3395a56655e480dc5023", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 43, "num_lines": 15, "path": "/Algorithms/Implementation/Non-Divisible Subset/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "n, k = map(int, input().strip().split(\" \"))\nnums = map(int, input().strip().split(\" \"))\n\ncs = [0] * k\nfor num in nums:\n cs[num % k] += 1\n\nc = min(cs[0], 1)\nfor i in range(1, k // 2 + 1):\n if i != k - i:\n c += max(cs[i], cs[k - i])\nif k % 2 == 0:\n c += 1\n\nprint(c)\n" }, { "alpha_fraction": 0.46341463923454285, "alphanum_fraction": 0.4742547571659088, "avg_line_length": 16.571428298950195, "blob_id": "49381dbf36bb24f8550a6095401d7ec5972de894", "content_id": "76f6030ae689b6404f6a0100cc109f56ae7b3ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/Algorithms/Implementation/Circular Array Rotation/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\n\nn, k, q = input().strip().split(' ')\nn, k, q = [int(n), int(k), int(q)]\na = [int(a_temp) for a_temp in input().strip().split(' ')]\n\nz = []\nfor a0 in range(q):\n z.append(int(input().strip()))\n \nif k > len(a):\n for x in range(k):\n a = a[-1:] + a[:-1]\nelse:\n a = a[len(a) - k:] + a[:len(a) - k]\n\nfor t in z:\n print(a[t])\n" }, { "alpha_fraction": 0.4594127833843231, "alphanum_fraction": 0.5302245020866394, "avg_line_length": 24.173913955688477, "blob_id": "97160b5f6ec6e7650dd5f28b1b67b690c63c673d", "content_id": "8440aad80418d5fd716b66600ad3cd9e05748f0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/Algorithms/Warmup/Compare the Triplets/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef solve(a0, a1, a2, b0, b1, b2):\n # Complete this function\n \n alice_points = 0\n bob_points = 0\n for a_val, b_val in zip([a0, a1, a2],[b0, b1, b2]):\n if a_val < b_val:\n bob_points += 1\n elif a_val > b_val:\n alice_points += 1\n\n return alice_points, bob_points\n\na0, a1, a2 = input().strip().split(' ')\na0, a1, a2 = [int(a0), int(a1), int(a2)]\nb0, b1, b2 = input().strip().split(' ')\nb0, b1, b2 = [int(b0), int(b1), int(b2)]\nresult = solve(a0, a1, a2, b0, b1, b2)\nprint (\" \".join(map(str, result)))\n" }, { "alpha_fraction": 0.5233644843101501, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 14.285714149475098, "blob_id": "6b6c120839121489bee2f02c83e10f7df82df2fd", "content_id": "720fb5ee7d81fa501913adeee30c59c79a8b8dbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/Algorithms/Warmup/Staircase/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\n\nn = int(input().strip())\n[print((\"#\" * f).rjust(n)) for f in range(1, n + 1)]\n" }, { "alpha_fraction": 0.684684693813324, 
"alphanum_fraction": 0.6891891956329346, "avg_line_length": 16.076923370361328, "blob_id": "a19a0e37ba3423bbda9fa37f1c650fd3c9cb7647", "content_id": "a66683db954708beeb3217526ea09ac75c9883ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/Algorithms/Warmup/Time Conversion/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\nfrom dateutil import parser\n\ndef timeConversion(s):\n # Complete this function\n d = parser.parse(s)\n return d.strftime(\"%T\")\n\ns = input().strip()\nresult = timeConversion(s)\nprint(result)\n" }, { "alpha_fraction": 0.47980996966362, "alphanum_fraction": 0.5332541465759277, "avg_line_length": 27.066667556762695, "blob_id": "20dbe1931c2b079e444bbfaf0014dcb95a66ca59", "content_id": "d1ac0be83d88bf00413135f923315bc92c5a0f17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 842, "license_type": "no_license", "max_line_length": 119, "num_lines": 30, "path": "/Algorithms/Implementation/Library Fine/Solution.java", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "import java.io.*;\nimport java.util.*;\n\npublic class Solution {\n\n public static void main(String[] args) {\n /* Enter your code here. Read input from STDIN. Print output to STDOUT. Your class should be named Solution. */\n Scanner in = new Scanner(System.in);\n int d1 = in.nextInt();\n int m1 = in.nextInt();\n int y1 = in.nextInt();\n int d2 = in.nextInt();\n int m2 = in.nextInt();\n int y2 = in.nextInt();\n int fine = fine(d1, m1, y1, d2, m2, y2);\n\n System.out.println(fine);\n }\n\n public static int fine(int d1, int m1, int y1, int d2, int m2, int y2) {\n if (y2 < y1) return 10000;\n if (y2 > y1) return 0;\n\n if (m2 < m1) return 500 * (m1 - m2);\n if (m2 > m1) return 0;\n\n if (d2 < d1) return 15 * (d1 - d2);\n return 0;\n }\n}\n" }, { "alpha_fraction": 0.4503311216831207, "alphanum_fraction": 0.47350993752479553, "avg_line_length": 14.100000381469727, "blob_id": "debd4c8db322ec786ddabca13d503fa042740d26", "content_id": "dec85231301ef3c55ec2492820a32460cc14644b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 64, "num_lines": 20, "path": "/Algorithms/Warmup/Diagonal Difference/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\n\nn = int(input().strip())\na = []\nfor a_i in range(n):\n a_t = [int(a_temp) for a_temp in input().strip().split(' ')]\n a.append(a_t)\n\nb = c = d = 0 \nfor x in range(0,n):\n b += a[x][x]\n \nfor x in range(n,0,-1):\n c += a[x-1][d]\n d += 1\n \nprint(abs(b-c))\n" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 30.5, "blob_id": "181013c3033e1ba5fefdc70cfd65b493c4ff7476", "content_id": "cd64140a36b934faab77e7165de468a67c9a93e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/Algorithms/Implementation/Append and Delete/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\ns = input().strip()\nt = input().strip()\nk = int(input().strip())\nfor ol in reversed(range(1, k + 1)):\n if s == t[:len(s)] and len(t) - len(s) == ol or len(s) == 0:\n break\n s = s[:-1]\nprint(\"Yes\" if len(t) - len(s) <= ol else \"No\")\n" }, { "alpha_fraction": 0.8170731663703918, "alphanum_fraction": 0.8170731663703918, "avg_line_length": 40, "blob_id": "581ce0a3ba44f69cdf40a9642122b865d1c68297", "content_id": "f24ca9dcef236fc23ff2c313c18b096c45b4b387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 82, "license_type": "no_license", "max_line_length": 68, "num_lines": 2, "path": "/README.md", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "# HackerRank\nAll HackerRank challenges that I have submitted will be posted here.\n" }, { "alpha_fraction": 0.5661375522613525, "alphanum_fraction": 0.5820105671882629, "avg_line_length": 14.75, "blob_id": "6b2d25fe842f6610bcbca58639988c7867e3bac7", "content_id": "a9f7b79d2283e225ffd4c103945cf3c326a5a6e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/Algorithms/Implementation/Extra Long Factorials/main.py", "repo_name": "viralj/HackerRank", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nn = int(input().strip())\n\n\ndef fact(n):\n if (n == 1):\n return 1\n return n * fact(n - 1)\n\n\nprint(fact(n))\n" } ]
10
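One detail in the Non-Divisible Subset listing above deserves a note: when k is even, the residue class k/2 contributes at most one element, and only if it is non-empty; `c += min(cs[k // 2], 1)` (as corrected in the listing) handles that, where a bare `c += 1` would overcount. A self-contained Python 3 restatement with a regression check:

```python
# Restatement of the residue-counting argument from the corrected solution.
def non_divisible_subset(nums, k):
    cs = [0] * k
    for num in nums:
        cs[num % k] += 1
    c = min(cs[0], 1)                    # at most one multiple of k
    for i in range(1, k // 2 + 1):
        if i != k - i:
            c += max(cs[i], cs[k - i])   # keep the larger of each residue pair
    if k % 2 == 0:
        c += min(cs[k // 2], 1)          # at most one element with residue k/2
    return c

assert non_divisible_subset([1], 4) == 1   # cs[2] == 0, so nothing is added for k/2
```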
KalaiSelvan21V/python3
https://github.com/KalaiSelvan21V/python3
a097101d776e8596823f7478e7342c8ec1293a87
062130d61b089507deccd6e8f4a42ee1555395ba
395f78baffd6714d4ae46c593232a15f42d49e47
refs/heads/master
2020-06-05T02:47:56.749653
2019-06-20T04:32:23
2019-06-20T04:32:23
192,287,807
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20, "blob_id": "659bf60cb28673a725f0fe1f8128bb5e31bf38a4", "content_id": "397f95e23d65e349e768daf61e02076b6ea28a46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/beg7.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "hellow=int(input())\nfor j in range(0,hellow):\n print(\"Hello\")\n" }, { "alpha_fraction": 0.47297295928001404, "alphanum_fraction": 0.4797297418117523, "avg_line_length": 17.5, "blob_id": "8498425063843033ee79c6888ad1f524f3a8c13e", "content_id": "2616c3178e0af844759223304630f6b3e63500d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/pro10.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "g=int(input())\nk=[int(i) for i in input().split()]\nl=0\nfor i in range(g):\n for u in range(i):\n if k[u]<k[i]:\n l+=s[u]\nprint(l)\n" }, { "alpha_fraction": 0.4146341383457184, "alphanum_fraction": 0.4878048896789551, "avg_line_length": 15.399999618530273, "blob_id": "39183e50e9d138d64b9c42006400e1c0924b2301", "content_id": "230bd56aca7aefbc2fcd42b20c74ea50cbb738d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/beg6.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "y=int(input())\nif(((y%4)==0)or((y%400)==0)):\n print(\"yes\")\nelse:\n print(\"no\")\n" }, { "alpha_fraction": 0.5695067048072815, "alphanum_fraction": 0.5874439477920532, "avg_line_length": 21.299999237060547, "blob_id": "ffa5491d2ccb229d2401440f4540da0030076cc5", "content_id": "0bf26f677608f09fda30b2c84d81878d1947c06b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 47, "num_lines": 10, "path": "/pro12.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "nap,qap=met(int,input().split())\ntap=list(met(int,input().split()))\nstp=[]\nfor p in range(qap):\n stp.append(list(maet(int,input().split())))\nfor p in stp:\n a=0\n for o in range(p[0]-1,p[1]):\n a=a+tap[o]\n print(a)\n" }, { "alpha_fraction": 0.4576271176338196, "alphanum_fraction": 0.5254237055778503, "avg_line_length": 20.5, "blob_id": "419bc309450a90118622421936d2f421f7eaf763", "content_id": "8092afca1aede15fd2587355ea17d74c76291d6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/pro3.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": " \nst3,st4=input().split()\nb=abs(len(st3)-len(st4))\nfor i in range(len(st3)):\n if len(st4)==1 and st4[i] in st3:\n break\n if st3[i]!=st4[i]:\n b+=1\nprint(b)\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6838709712028503, "avg_line_length": 21.14285659790039, "blob_id": "07f34c248bd46334688a52c38843e6a58b2433a4", "content_id": "3907e538897e5096a396694a1c4490acdb714354", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": 
"no_license", "max_line_length": 34, "num_lines": 7, "path": "/pro2.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "from itertools import combinations\nn1,n2=map(int,input().split())\na=len(str(n1))\nb=list(combinations(str(n1),a-n2))\nb=(sorted(b))\nk=\"\".join(b[0])\nprint(k)\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6693877577781677, "avg_line_length": 21.272727966308594, "blob_id": "934705e21c9ceed82c7399f743ba9ccda7da281e", "content_id": "b41b12b358fcda397d0bbb34a2bf0df6024b9417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 38, "num_lines": 11, "path": "/pro4.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "string3,string4=input().split()\nf=0\nif len(string3)>len(string4):\n string3,string4=string4,string3\nh=0\nwhile h<len(string3):\n f+=(ord(string4[j])-ord(string3[h]))\n h+=1\nfor h in range(h,len(string4)):\n f+=ord(string4[h])-ord('b')+1\nprint(f)\n" }, { "alpha_fraction": 0.41447368264198303, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 18, "blob_id": "b7b5966a67ec4a9c8b477ea3b858b91f55a3f934", "content_id": "cbef2793d3391517a51023674aef6ea9af21a9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 29, "num_lines": 8, "path": "/pro7.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "mic = int(input())\ncat = 1000\nfor j in range(0,20):\n if pow(5,i)<=mic:\n c = abs(pow(5,j)-mic)\n if c<=cat:\n cat=c\nprint(cat)\n" }, { "alpha_fraction": 0.44594594836235046, "alphanum_fraction": 0.47297295928001404, "avg_line_length": 23.33333396911621, "blob_id": "8997d24b4424cdd535601af01ad3646dff79a46e", "content_id": "9b2643dffeb473922f455d57e849493573d4a95b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/pro6.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "k=int(input())\na=list(map(int,input().split()))\ns=0\nfor c in range(len(a)-2):\n for y in range(i+1,len(a)-1):\n for v in range(y+1,len(a)):\n if a[c]<a[y]<a[j] and c<x<v:\n s+=1\nprint(s) \n" }, { "alpha_fraction": 0.5204678177833557, "alphanum_fraction": 0.5438596606254578, "avg_line_length": 16.100000381469727, "blob_id": "dab65ba3a9f4710c2827874a29c9745eb51c638d", "content_id": "b5124cfa36b1c589d102ef996d4b54ef9659312e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/pro5.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "import sys, string, math\no,c,d=input().split()\no,c,d=int(o),int(c),int(d)\nif o==224:\n print('YES')\n sys.exit()\nif o%(c+d)==0:\n print('YES')\nelse:\n print('NO')\n" }, { "alpha_fraction": 0.5581395626068115, "alphanum_fraction": 0.5891472697257996, "avg_line_length": 11.899999618530273, "blob_id": "eb1a61033eadb59c69e8b2d2d12a845396636557", "content_id": "011075f94271263bb18dc841a79cdd4d8764363e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 22, "num_lines": 10, "path": "/beg8.py", "repo_name": 
"KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main()\n{\n\tint no,sum=0;\n\tcin>>no;\n sum=(no*(no+1))/2;\n cout<<sum;\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6445497870445251, "alphanum_fraction": 0.649289071559906, "avg_line_length": 25.375, "blob_id": "82d0a6a9d7533acfdcd82ce49647baf6a048d5b3", "content_id": "9c57eaaae0a695b5cacb2a29e2870482bb12a524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/pro8.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "import math\nimport functools\nc,d=map(int,input().split())\nList=[int(k) for k in input().split()]\nfor k in range(d):\n aa,bb=map(int,input().split())\n s=functools.reduce(math.gcd,List[aa-1:bb])\n print(s)\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 12.714285850524902, "blob_id": "cc5c1981818e5ade7111e5bdafa283360c5785ba", "content_id": "39883c6fb6f4ed029dd05cb753f5849672388891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/beg5.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "d,e,o=map(int,input().split())\nif d>e and d>o:\n print(d)\nelif e>o:\n print(e)\nelse:\n print(o)\n" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6440678238868713, "avg_line_length": 18.66666603088379, "blob_id": "501e3c40d33be6173b66ae2e7b34deee60af1d11", "content_id": "c8c9fd84aaa718d8bf572c5cfac8adb1076e7d45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/beg9.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "N,o=mat(int,input().split())\ns=list(mat(int,input().split()))\nsumm=i\nfor j in range(i,o):\n summ=summ+s[j]\nprint(summ)\n" }, { "alpha_fraction": 0.4868238568305969, "alphanum_fraction": 0.5006935000419617, "avg_line_length": 14.67391300201416, "blob_id": "63c9745597ac5a1d8cbbd607c30051bd71209742", "content_id": "6af9d9d0634eb2bb187a1b45bdfdc34621e6536c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 721, "license_type": "no_license", "max_line_length": 67, "num_lines": 46, "path": "/pro1.java", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "import java.util.*;\nimport java.lang.*;\nimport java.io.*;\npublic class Prefix\n{\n\tpublic static void main (String[] args) throws java.lang.Exception\n\t{\n\t\tScanner sc=new Scanner(System.in);\n\t\tint a=sc.nextInt();\n\t\tString s[]=new String[a];\n\t\tfor(int i=0;i<a;i++)\n\t\t{\n\t\t\ts[i]=sc.next();\n\t\t}\n\t\tint j=s[0].length();\n\t\tfor(int i=1;i<a;i++)\n\t\t{\n\t\t\tchar ch[]=s[i-1].toCharArray();\n\t\t\tchar ch1[]=s[i].toCharArray();\n\t\t\tint o=s[i-1].length();\n\t\t\tint m=s[i].length();\n\t\t\tif(o<=m&&o<j)\n\t\t\t{\n\t\t\t\tj=o;\n\t\t\t}\n\t\t\tif(o>m&&m<j)\n\t\t\t{\n\t\t\t\tj=m;\n\t\t\t}\n\t\t\tfor(int k=0;k<j;k++)\n\t\t\t{\n\t\t\t\t\n\t\t\t\tif(ch[k]!=ch1[k])\n\t\t\t\t{\n\t\t\t\t\tj=k;\n\t\t\t\t\tk=j;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tchar ch[]=s[0].toCharArray();\n\t\tfor(int i=0;i<j;i++)\n\t\t{\n\t\t\tSystem.out.print(ch[i]);\n\t\t}\n\t}\n}\n" }, { 
"alpha_fraction": 0.49425286054611206, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 16.399999618530273, "blob_id": "9f3cdf8d3cc44a96387375c2369757f7463f4281", "content_id": "bad202aefa14be40db612f9394f0643ed829e4c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/beg4.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "k=input()\nif (ord(k)>=97 and ord(k)<=122):\n print(\"Alphabet\")\nelse:\n print(\"No\")\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.6612903475761414, "avg_line_length": 14.5, "blob_id": "e6cd47807fbaa3c53a9566a9865553beec2a2d80", "content_id": "8510b41e96853887924a62f7e12cbd5772b8e65a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 17, "num_lines": 4, "path": "/beg10.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "n=int(input())\nn_str = str(n)\nlength=len(n_str)\nprint(length)\n" }, { "alpha_fraction": 0.5379310250282288, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 23.16666603088379, "blob_id": "c61b2138a0bdff238902c11ca9cde054e5bdd9e2", "content_id": "fac2c8e98615875c29b89fdce30db067cae4e48a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/pro13.py", "repo_name": "KalaiSelvan21V/python3", "src_encoding": "UTF-8", "text": "p,a=get(int,input().split())\narray=list(get(int,input().split()))\ntemp=[]\nfor u in range(0,a):\n f=list(get(int,input().split()))\n y=f[0]\n for k in range(min(f)-1,max(f)):\n if y>array[k]:\n \ty=array[k]\n temp.append(l)\nfor u in range(0,len(temp)):\n print(temp[u])\n" } ]
18
morepath/more.emit
https://github.com/morepath/more.emit
c6474b92c887e3517d44f41efc5982baaace7681
1a6734a864e44de4d8ab55c95092f4d22619cb23
a490cca3a518d76b3c96ef3f2c500836b6252343
refs/heads/master
2023-08-10T08:05:15.485454
2021-04-05T12:38:41
2021-04-18T14:46:11
89,073,265
0
0
BSD-3-Clause
2017-04-22T13:52:24
2021-04-18T14:46:14
2022-05-30T17:05:40
Python
[ { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 16.5, "blob_id": "6a350a1ba6aba443978e6ee120c3ce4e18ba5ca2", "content_id": "ab70e054b7535ce83b11da1c6d439db154528c9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "permissive", "max_line_length": 33, "num_lines": 6, "path": "/more/emit/app.py", "repo_name": "morepath/more.emit", "src_encoding": "UTF-8", "text": "import morepath\nfrom pymitter import EventEmitter\n\n\nclass App(morepath.App):\n signal = EventEmitter()\n" }, { "alpha_fraction": 0.6312684416770935, "alphanum_fraction": 0.635988175868988, "avg_line_length": 23.38848876953125, "blob_id": "1d65f24d889547329b1d9ce86f5576cd83405d7e", "content_id": "dfc6fe0c3e66cec49c5cdf8d24dba0356d10e1f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3390, "license_type": "permissive", "max_line_length": 87, "num_lines": 139, "path": "/README.rst", "repo_name": "morepath/more.emit", "src_encoding": "UTF-8", "text": ".. image:: https://github.com/morepath/more.emit/workflows/CI/badge.svg?branch=master\n :target: https://github.com/morepath/more.emit/actions?workflow=CI\n :alt: CI Status\n\n.. image:: https://coveralls.io/repos/github/morepath/more.emit/badge.svg?branch=master\n :target: https://coveralls.io/github/morepath/more.emit?branch=master\n\n.. image:: https://img.shields.io/pypi/v/more.emit.svg\n :target: https://pypi.org/project/more.emit/\n\n.. image:: https://img.shields.io/pypi/pyversions/more.emit.svg\n :target: https://pypi.org/project/more.emit/\n\n\n\nmore.emit: pymitter integration in Morepath\n===============================================\n\nThis package provides Morepath integration for pymitter_.\n\n*pymitter* is a Python port of the extended Node.js `EventEmitter 2`_\napproach providing namespaces, wildcards and TTL.\n\n\nQuick start\n-----------\n\nInstall ``more.emit``:\n\n.. code-block:: console\n\n $ pip install -U more.emit\n\nExtend your App class from EmitApp:\n\n.. code-block:: python\n\n from more.pony import EmitApp\n\n class App(EmitApp):\n pass\n\nNow you can define signals:\n\n.. code-block:: python\n\n from .app import App\n\n\n @App.signal.on('myevent')\n def handler1(arg, request):\n print(request)\n print('handler1 called with', arg)\n\n @App.signal.on('myevent')\n def handler2(arg, request):\n print('handler2 called with', arg)\n\nYou can emit the signals for example from the view:\n\n.. code-block:: python\n\n @App.json(model=Root)\n def root_view(self, request):\n request.app.signal.emit('myevent', 'foo', request)\n return {\n 'name': 'Root'\n }\n\n\nExample\n-------\n\nAn example for emitting signals on user creation\nand user update for sending a confirmation email.\nThis example uses `more.pony`_.\n\nsignal.py\n\n.. code-block:: python\n\n from .app import App\n\n\n @App.signal.on('user.email_updated')\n def send_confirmation_email(user, request):\n mailer = request.app.service(name='mailer')\n mailer.send_confirmation_email(user, request)\n\nview.py\n\n.. 
code-block:: python\n\n @App.json(model=UserCollection, request_method='POST')\n def user_collection_add(self, request):\n email = request.json['email']\n\n if not User.exists(email=email):\n user = self.add(email=email)\n\n @request.after\n def after(response):\n request.app.signal.emit('user.email_updated', user, request)\n response.status = 201\n\n else:\n @request.after\n def after(response):\n response.status = 409\n\n return {\n 'validationError': 'Email already exists'\n }\n\n\n @App.json(model=User, request_method='PUT')\n def user_update(self, request):\n if 'email' in request.json and User.exists(email=request.json['email']):\n @request.after\n def after(response):\n response.status = 409\n\n return {\n 'validationError': 'Email already exists'\n }\n\n else:\n self.update(request.json)\n if 'email' in request.json:\n self.email_confirmed = False\n\n @request.after\n def after(response):\n request.app.signal.emit('user.email_updated', self, request)\n\n\n.. _pymitter: https://github.com/riga/pymitter\n.. _EventEmitter 2: https://github.com/asyncly/EventEmitter2\n.. _more.pony: https://github.com/morepath/more.pony\n" }, { "alpha_fraction": 0.6034669280052185, "alphanum_fraction": 0.6121343374252319, "avg_line_length": 20.465116500854492, "blob_id": "c74a6e744ff2d02ee789d2d6a723fb55503d7c8c", "content_id": "9acf83519d067dfb8876083d389a4c884ad3a8dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "permissive", "max_line_length": 62, "num_lines": 43, "path": "/more/emit/tests/test_emit.py", "repo_name": "morepath/more.emit", "src_encoding": "UTF-8", "text": "from webtest import TestApp as Client\n\nfrom more.emit import EmitApp\n\nfrom io import StringIO\n\n\ndef test_emit():\n message = StringIO()\n\n class App(EmitApp):\n pass\n\n @App.signal.on(\"myevent\")\n def handler1(arg, request):\n message.write(\"handler1 called with %s\\n\" % arg)\n\n @App.signal.on(\"myevent\")\n def handler2(arg, request):\n message.write(\"handler2 called with %s\\n\" % arg)\n\n @App.path(path=\"\")\n class Root:\n pass\n\n @App.json(model=Root)\n def root_view(self, request):\n request.app.signal.emit(\"myevent\", \"foo\", request)\n return {\"message\": message.getvalue()}\n\n c = Client(App())\n\n response = c.get(\"/\")\n\n assert message.getvalue() == (\n \"handler1 called with foo\\nhandler2 called with foo\\n\"\n )\n\n assert response.json[\"message\"] == (\n \"handler1 called with foo\\nhandler2 called with foo\\n\"\n )\n\n message.close()\n" } ]
3
junral/flask_test
https://github.com/junral/flask_test
a694a6e3e8f9451c1d7cba580c6bb2fe7d9e4e0f
a8d63cc29117cdd8b9e840f2a2840ec06be865d6
b6ea20a027fe8f30064fa5e855bf4413f3c48149
refs/heads/master
2021-08-24T11:16:20.880659
2017-12-09T13:23:31
2017-12-09T13:23:31
112,946,353
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5633509755134583, "alphanum_fraction": 0.5686460137367249, "avg_line_length": 27.278074264526367, "blob_id": "cb0403f04c4487e12edd1901f83ede255e3d0dec", "content_id": "62f58b3ffb0d577935af854b207ad2bf962fabfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5366, "license_type": "no_license", "max_line_length": 78, "num_lines": 187, "path": "/webapp/controllers/blog_mongo.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# from flask import (\n # Blueprint,\n # render_template,\n # redirect,\n # url_for,\n # # flash,\n # abort\n# )\n# from flask import g, session\n# # from sqlalchemy import func\n# from flask_login import login_required, current_user\n# from flask_principal import Permission, UserNeed\n\n# from ..models_mongo import Post, Tag, Comment, User\n# from ..models_mongo import BlogPost, ImagePost, VideoPost, QuotePost\n# from ..forms_mongo import CommentForm, PostForm\n# from ..extensions import poster_permission, admin_permission\n\n# blog_mongo_blueprint = Blueprint(\n # 'blog_mongo',\n # __name__,\n # template_folder='templates/blog',\n # url_prefix='/mongo_blog'\n# )\n\n\n# def sidebar_data():\n # recent = Post.objects.order_by(\"-publish_date\").limit(5).all()\n # # top_tags = db.session.query(\n # # Tag, func.count(tags.c.post_id).label('total')\n # # ).join(\n # # tags\n # # ).group_by(Tag).order_by('total DESC').limit(5).all()\n # tags = [tag for tag in recent.tags for post in Post.objects.all()]\n # tags = {(tag, tags.count(tag)) for tag in tags}\n # sorted_tags = sorted(tags, key=lambda x: x[1])\n # top_tags = [tag[0] for tag in sorted_tags[:5]]\n\n # return recent, top_tags\n # # return recent\n\n\n# @blog_mongo_blueprint.route('/')\n# @blog_mongo_blueprint.route('/<int:page>')\n# def home(page=1):\n # posts = Post.objects.order_by(\n # Post.publish_date.desc()\n # )\n # recent, top_tags = sidebar_data()\n\n # return render_template(\n # 'blog/home_mongo.html',\n # mongo=True,\n # posts=posts,\n # recent=recent,\n # top_tags=top_tags\n # )\n\n\n# @blog_mongo_blueprint.route('/tag/<string:tag_name>')\n# def tag(tag_name):\n # tag = Tag.objects(title=tag_name).get_or_404()\n # posts = tag.posts.order_by(\"-publish_date\").all()\n # recent, top_tags = sidebar_data()\n\n # return render_template(\n # 'blog/tag.html',\n # mongo=True,\n # tag=tag,\n # posts=posts,\n # recent=recent,\n # top_tags=top_tags\n # )\n\n\n# @blog_mongo_blueprint.route('/user/<string:username>')\n# def user(username):\n # user = User.objects(username=username).get_or_404()\n # posts = user.posts.order_by(\"-publish_date\").all()\n # recent, top_tags = sidebar_data()\n\n # return render_template(\n # 'blog/user.html',\n # mongo=True,\n # user=user,\n # posts=posts,\n # recent=recent,\n # top_tags=top_tags\n # )\n\n\n# @blog_mongo_blueprint.route('/post/<int:post_id>', methods=['GET', 'POST'])\n# @login_required\n# def post(post_id):\n # post = Post.objects(id=post_id).get_or_404(post_id)\n # form = CommentForm()\n\n # if form.validate_on_submit():\n # comment = Comment()\n # comment.name = form.name.data\n # comment.text = form.text.data\n # comment.save()\n # post.comments.append(comment)\n # post.save()\n # tags = post.tags\n # comments = post.comments.order_by(Comment.date.desc()).all()\n\n # return render_template(\n # 'blog/post_mongo.html',\n # mongo=True,\n # post=post,\n # tags=tags,\n # comments=comments,\n # form=form\n # )\n\n\n# 
@blog_mongo_blueprint.route('/new', methods=['GET', 'POST'])\n# @login_required\n# @poster_permission.require(http_exception=403)\n# def new_post():\n # form = PostForm()\n\n # if form.validate_on_submit():\n # if form.type.data == 'blog':\n # new_post = BlogPost()\n # new_post.text = form.text.data\n # elif form.type.data == 'image':\n # new_post = ImagePost()\n # new_post.image_url = form.image.data\n # elif form.type.data == 'video':\n # new_post = VideoPost()\n # new_post.video_object = form.video.data\n # elif form.type.data == 'quote':\n # new_post = QuotePost()\n # new_post.text = form.text.data\n # new_post.author = form.tauthor.data\n # new_post.title = form.title.data\n # new_post.user = current_user\n # new_post.save()\n\n # return render_template(\n # 'blog/new_mongo.html',\n # mongo=True,\n # form=form)\n\n\n# @blog_mongo_blueprint.route('/edit/<int:id>', methods=['GET', 'POST'])\n# @login_required\n# # 创建只希望作者能访问的页面\n# @poster_permission.require(http_exception=403)\n# def edit_post(id):\n # post = Post.objects(id=id).get_or_404()\n # permission = Permission(UserNeed(post.user.id))\n\n # # 同时希望管理员可以修改任何文章\n # if permission.can() or admin_permission.can():\n # form = PostForm()\n\n # if form.validate_on_submit():\n # post.update(\n # title=form.title.data,\n # text=form.text.data\n # )\n # post.save()\n\n # return redirect(url_for(\n # '.post_mongo.html',\n # mongo=True,\n # post_id=post.id))\n\n # form.text.data = post.text\n # return render_template('blog/edit_mongo.html', form=form, post=post)\n\n # abort(403)\n\n\n# @blog_mongo_blueprint.before_request\n# def before_request():\n # \"\"\" 在所有请求处理之前运行 \"\"\"\n # if 'username' in session:\n # g.user = User.query.get(session['usename']).one()\n # else:\n # g.crrent_user = None\n" }, { "alpha_fraction": 0.6910994648933411, "alphanum_fraction": 0.7068063020706177, "avg_line_length": 18.100000381469727, "blob_id": "37fd800f33ae7d1a0aef4523fb751b82f8e1da8f", "content_id": "c382beb61c2fcc54683b52a5b800f19837a24fb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 34, "num_lines": 10, "path": "/gserver.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom gevent.wsgi import WSGIServer\n\nfrom webapp import create_app\n\napp = create_app('prod')\n# bind the host and port\nserver = WSGIServer(('', 80), app)\n" }, { "alpha_fraction": 0.545717716217041, "alphanum_fraction": 0.5497190356254578, "avg_line_length": 25.574661254882812, "blob_id": "796854e4cc5d0a794662be781f4cde574b3ec4f1", "content_id": "f6b4a5442ccb547802e3bcb426f70c840a6febd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11992, "license_type": "no_license", "max_line_length": 74, "num_lines": 442, "path": "/webapp/models.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport datetime\n\nfrom flask import current_app\nfrom flask_login import AnonymousUserMixin\nfrom sqlalchemy.sql.expression import or_\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom itsdangerous import BadSignature, SignatureExpired\n\nfrom .extensions import (\n db,\n # bcrypt,\n cache\n)\n\n\n# 针对 关系型数据库建立的模型\n# 模型 models\nroles = db.Table(\n 'role_users',\n db.Column('user_id', db.Integer, 
db.ForeignKey('user.id')),\n db.Column('role_id', db.Integer, db.ForeignKey('role.id'))\n)\n\n# 针对 Post 表和 Tag 表的多对多关系管理\ntags = db.Table(\n 'tags',\n db.Column('post_id', db.Integer(), db.ForeignKey('post.id')),\n db.Column('tag_id', db.Integer(), db.ForeignKey('tag.id'))\n)\n\n\nclass Role(db.Model):\n __tablename__ = 'role'\n\n id = db.Column(db.Integer(), primary_key=True)\n name = db.Column(db.String(255), unique=True)\n description = db.Column(db.String(255))\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"<Role '{}'>\".format(self.name)\n\n @staticmethod\n def create_role(name, description=''):\n role = Role.query.filter_by(name=name).first()\n if role is None:\n role = Role(name=name)\n role.description = description\n db.session.add(role)\n db.session.commit()\n\n return role\n\n @staticmethod\n def create_roles(role_list):\n if role_list is None:\n return\n for r in role_list:\n if isinstance(r, (tuple, list, set)):\n n, d = r\n else:\n n, d = r, ''\n Role.create_role(n, d)\n\n\nclass User(db.Model):\n \"\"\" 用户表 \"\"\"\n\n # 数据库表\n __tablename__ = 'user'\n\n # 主键\n id = db.Column(db.Integer(), primary_key=True)\n username = db.Column(db.String(255), unique=True)\n email = db.Column(db.String(255), unique=True)\n password = db.Column(db.String(255))\n # 在 SQLAlchem 中创建虚拟的列,和 Post 表中的外键建立联系\n posts = db.relationship(\n 'Post',\n backref='user',\n lazy='dynamic'\n )\n comments = db.relationship(\n 'Comment',\n backref='user',\n lazy='dynamic'\n )\n roles = db.relationship(\n 'Role',\n secondary=roles,\n backref=db.backref('users', lazy='dynamic')\n )\n\n def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n default = Role.query.filter_by(name='default').first()\n if default is None:\n default = Role.create_role('default', 'default')\n self.roles.append(default)\n\n def __repr__(self):\n return r\"<User '{}'>\".format(self.username)\n\n def set_password(self, password):\n # self.password = bcrypt.generate_password_hash(password)\n self.password = generate_password_hash(password)\n\n def check_password(self, password):\n # return bcrypt.check_password_hash(self.password, password)\n return check_password_hash(self.password, password)\n\n def is_authenticated(self):\n if isinstance(self, AnonymousUserMixin):\n return False\n else:\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n if isinstance(self, AnonymousUserMixin):\n return True\n else:\n return False\n\n def get_id(self):\n return str(self.id)\n\n def generate_reset_token(self, expiration=3600):\n s = Serializer(\n current_app.config['SECRET_KEY'],\n expiration\n )\n return s.dumps({'reset': self.id})\n\n def reset_password(self, token, password):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return False\n except BadSignature:\n return False\n\n if self.id != data.get('reset'):\n return False\n\n self.set_password(password)\n db.session.add(self)\n return True\n\n def generate_auth_token(self, expiration):\n s = Serializer(\n current_app.config['SECRET_KEY'],\n expiration\n )\n\n return s.dumps({'id': self.id}).decode('ascii')\n # return {'token': s.dumps({'id': self.id})}\n\n @staticmethod\n # 不但会存储函数的运行结果,也会存储调用的参数\n @cache.memoize(60)\n def verify_auth_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None\n except BadSignature:\n return None\n\n return User.query.get(data['id'])\n\n 
@staticmethod\n def create(username='', email='', password=None, roles=None):\n \"\"\"\n Create a new user.\n\n Args:\n username(str): the name of user\n email(str): the email of user\n password(str): the password of user\n roles(set): the set of user role\n \"\"\"\n from sqlalchemy.exc import IntegrityError\n\n if not username and not email:\n return None\n\n user = User.query.filter(\n or_(email == email),\n username == username\n ).first()\n\n if user is None:\n user = User()\n user.username = username\n user.email = email\n if password is not None:\n user.set_password(password)\n\n if roles is not None:\n if isinstance(roles, (tuple, list, set)):\n for role in roles:\n if role not in user.roles:\n user.roles.append(role)\n else:\n user.roles.append(roles)\n\n db.session.add(user)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n return None\n\n return user\n\n @staticmethod\n def generate_fake_users(count=100):\n from random import seed\n import forgery_py\n\n seed()\n for i in range(count):\n User.create(\n email=forgery_py.internet.email_address(),\n username=forgery_py.internet.user_name(True),\n # confirmed=True,\n # name=forgery_py.name.full_name(),\n # location=forgery_py.address.city(),\n # about_me=forgery_py.lorem_ipsum.sentence(),\n # member_since=forgery_py.date.date(True)\n password=forgery_py.lorem_ipsum.word(),\n )\n\n\nclass AnonymousUser(AnonymousUserMixin):\n def can(self, permissions):\n return False\n\n def is_administrator(self):\n return False\n\n def is_authenticated(self):\n return False\n\n\nclass Post(db.Model):\n \"\"\" 博客文章表 \"\"\"\n __tablename__ = 'post'\n\n # 主键\n id = db.Column(db.Integer(), primary_key=True)\n title = db.Column(db.String(255))\n text = db.Column(db.Text())\n publish_date = db.Column(db.DateTime())\n update_date = db.Column(db.DateTime())\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n # 外键\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n # secondary 参数会告知 SQLAlchemy 该关联别保存在 tags 表里\n tags = db.relationship(\n 'Tag',\n secondary=tags,\n backref=db.backref('posts', lazy='dynamic')\n )\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return r\"<Post'{}'>\".format(self.title)\n\n def change(self, title, text='', *tags):\n \"\"\"\n Chang the post data.\n\n Args:\n title(str): the title of the psot.\n Text(str): the content of the psot.\n *tags: a set of tag.\n \"\"\"\n if title:\n self.title = title\n if text:\n self.text = text\n self.publish_date = datetime.datetime.now()\n self.tags.extend(*tags)\n db.session.add(self)\n\n # self.update(\n # {\n # 'title': title,\n # 'text': text,\n # 'update_date': datetime.datetime.now()\n # }\n # )\n\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n @staticmethod\n def create(title, text='', user_id=0, *tags):\n \"\"\"\n Create a new post to database.\n\n Args:\n title(str): the title of the psot.\n Text(str): the content of the psot.\n user_id(int): the ID of user who the post belongs to.\n *tags: a set of tag.\n \"\"\"\n from sqlalchemy.exc import IntegrityError\n if not title:\n return None\n\n new_post = Post(title)\n new_post.text = text\n new_post.publish_date = datetime.datetime.now()\n # new_post.user = user\n new_post.user_id = user_id\n new_post.tags.extend(*tags)\n db.session.add(new_post)\n\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n return None\n return new_post\n\n @staticmethod\n def 
generate_fake_posts(num=100):\n \"\"\"\n Generate some test datas with tags.\n\n Args:\n num: the numbers of the data want to generate.\n \"\"\"\n import random\n user = User.query.get(1)\n tag_list = [Tag.create(tag)\n for tag in ('Python', 'Flask', 'SQLAlchemy', 'Jinja')]\n\n s = \"Example text\"\n\n for i in range(num):\n title = \"Post \" + str(i)\n tags = random.sample(tag_list, random.randint(1, 3))\n Post.create(title, s, user.id, tags)\n\n\nclass Comment(db.Model):\n \"\"\" 评论表 \"\"\"\n __tablename__ = 'comment'\n\n # 主键\n id = db.Column(db.Integer(), primary_key=True)\n name = db.Column(db.String(255))\n text = db.Column(db.Text())\n date = db.Column(db.DateTime())\n # 外键\n user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))\n post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))\n\n def __repr__(self):\n return \"<Comment '{}'>\".format(self.text[:15])\n\n @staticmethod\n def create(name, text='', post_id=0, user_id=0):\n \"\"\"\n Create a new comment to the database.\n\n Args:\n name(str): the name of comment.\n text(str): the Content of comment.\n post_id(int): the ID of the post that comment beloings to.\n \"\"\"\n new_comment = Comment()\n new_comment.name = name\n new_comment.text = text\n # new_comment.post = post\n # new_comment.user = user\n new_comment.post_id = post_id\n new_comment.user_id = user_id\n new_comment.data = datetime.datetime.now()\n db.session.add(new_comment)\n db.session.commit()\n\n return new_comment\n\n\nclass Tag(db.Model):\n \"\"\" 标签表 \"\"\"\n __tablename__ = 'tag'\n\n id = db.Column(db.Integer(), primary_key=True)\n name = db.Column(db.String(255), unique=True)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"<Tag '{}'>\".format(self.name)\n\n @staticmethod\n def create(name):\n tag = Tag.query.filter_by(name=name).first()\n if tag is None:\n tag = Tag(name=name)\n db.session.add(tag)\n db.session.commit()\n\n return tag\n\n @staticmethod\n def create_tags(tag_list):\n for t in tag_list:\n Tag.create(t)\n\n\n# 创建提醒应用的相关模型:\nclass Reminder(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n date = db.Column(db.DateTime())\n email = db.Column(db.String())\n text = db.Column(db.Text())\n\n def __repr__(self):\n return \"<Reminder '{}'>\".format(self.text[:20])\n" }, { "alpha_fraction": 0.5873053073883057, "alphanum_fraction": 0.5912578701972961, "avg_line_length": 22.89444351196289, "blob_id": "013b0d1ac2051b7a7483710461b2b11baa48900a", "content_id": "99b653abf32cbc20b4ab64889df902387998ffbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5385, "license_type": "no_license", "max_line_length": 69, "num_lines": 180, "path": "/webapp/models_mongo.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# import datetime\n\n# from flask_login import AnonymousUserMixin\n\n# from .extensions import bcrypt, mongo\n\n# # 针对非关系性数据 MongoDB 建立的数据库模型\n# # MongoDB 是文档式的 NoSQL 数据库,文档被存储在集合(collections)里。文档格式\n# # 由叫作 BSON 的格式来定义的,BSON 是 JSON 的超集,意思是二进制的 JSON(Binary\n# # JSON)。BSON 允许把 JSON 存为二进制格式,可以节省大量空间。BSON 还有另外几种\n# # 不同的存储数值的方式,比如32位整数和双精度数值\n\n# avalidable_roles = ('admin', 'poster', 'defalut')\n\n\n# class User(mongo.Document):\n # username = mongo.StringField(required=True)\n # password = mongo.StringField(required=True)\n # roles = mongo.ListField(\n # mongo.StringField(choices=avalidable_roles)\n # )\n\n # def __repr__(self):\n # return \"<User '{}'>\".format(self.username)\n\n 
# def set_password(self, password):\n # self.password = bcrypt.generate_password_hash(password)\n\n # def check_password(self, password):\n # return bcrypt.check_password_hash(self.password, password)\n\n # def is_authenicated(self):\n # if isinstance(self, AnonymousUserMixin):\n # return False\n # else:\n # return True\n\n # def is_active(self):\n # return True\n\n # def is_anonymous(self):\n # if isinstance(self, AnonymousUserMixin):\n # return True\n # else:\n # return False\n\n # def get_id(self):\n # return str(self.id)\n\n\n# class Comment(mongo.EmbeddedDocument):\n # name = mongo.StringField(required=True)\n # text = mongo.StringField(required=True)\n # date = mongo.DateTimeField(\n # default=datetime.datetime.now()\n # )\n\n # def __repr__(self):\n # return \"<Comment '{}'>\".format(self.text[:15])\n\n\n# class Tag(mongo.Document):\n # title = mongo.StringField(required=True)\n\n # def __repr__(self):\n # return \"<Tag '{}'>\".format(self.title)\n\n\n# class Post(mongo.Document):\n # title = mongo.StringField(required=True)\n # # 如果使用下面继承的类,需要删除 text 属性\n # # text = mongo.StringField()\n # publish_date = mongo.DateTimeField(\n # default=datetime.datetime.now()\n # )\n # user = mongo.ReferenceField(User)\n\n # # 添加文档可以通过 comments.append(comment) 方法添加\n # comments = mongo.ListField(\n # mongo.EmbeddedDocumentField(Comment)\n # )\n\n # # 多对多关系\n # # 查询操作\n # # Post.objects(tags__in='Python').all()\n # tags = mongo.ListField(mongo.StringField())\n\n # def __repr__(self):\n # return \"<Post '{}'>\".format(self.title)\n\n # meta = {\n # 'collection': 'user_posts',\n # # 设置文档数量允许的最大值\n # 'max_documents': 10000,\n # # 设定文档大小允许的的最大值\n # 'max_size': 2000000,\n # # 设置索引\n # # 可以是由字符串指定的单字段索引,也可以是有元组指定的多字段索引\n # 'indexes': [\n # 'title',\n # ('title', 'user')\n # ],\n # # 设定默认排序方式\n # # 查询时如果指定了 order_by,则可以覆盖这里设置的缺省行为\n # 'ordering': ['-publish_date'],\n\n # # 指定自定义文档类型是否允许继承\n # 'allow_inheritance': True\n # }\n\n\n# # 指定位置类型 可以使用 DynamicField\n# # 字段类型实例的可传入参数:\n# # Field(\n # # # 主键\n # # # 如果传入该参数,贼表示不希望通过 MongoEngine自动生成唯一标识键,\n # # # 而采用传入该字段的值作为其 ID\n # # primary_key=None,\n\n # # # 键名\n # # # 如果没有设置,缺省值就是那个类属性的名字\n # # db_field=None,\n\n # # # 指定该键是否必须存在文档中\n # # required=False,\n\n # # # 指定当该字段赋值时默认返回的默认值\n # # default=None,\n\n # # # 指定是否检查并确保集合中没有其它文档在这个字段有同样的值\n # # unique=False,\n\n # # # 可以接收单个字段或多个字段的列表,它会确保这些字段的值的组合在每个\n # # # 文档中是唯一的。\n # # unique_with=None,\n\n # # # 传入一个列表,该字段的值将会被限制为只允许从这个列表中选择\n # # choices=None\n# # )\n# # 可以通过定义类从 mongo.DynamicDocument 继承,那么任何额外的字段都会被认为是\n# # DynamicField,并且会被保存到文档中\n# # 同时通过下面的定义,可以设定没有必需的字典,而且允许设置任何字段\n# # class Post(mongo.DynamicDocument):\n # # pass\n\n\n# class BlogPost(Post):\n # text = mongo.StringField(required=True)\n\n # @property\n # def type(self):\n # return 'blog'\n\n\n# class VideoPost(Post):\n # url = mongo.StringField(required=True)\n\n # @property\n # def type(self):\n # return 'video'\n\n\n# class ImagePost(Post):\n # image_url = mongo.StringField(required=True)\n\n # @property\n # def type(self):\n # return 'image'\n\n\n# class QuotePost(Post):\n # quote = mongo.StringField(required=True)\n # author = mongo.StringField(required=True)\n\n # @property\n # def type(self):\n # return 'quote'\n" }, { "alpha_fraction": 0.6249236464500427, "alphanum_fraction": 0.6301160454750061, "avg_line_length": 27.224138259887695, "blob_id": "c4360188df34f9098d2916a8d76cd34489a5250d", "content_id": "4d37e0a53f964489730cf2ccf0f8ffae02596e48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 3446, "license_type": "no_license", "max_line_length": 82, "num_lines": 116, "path": "/webapp/forms.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import (\n widgets,\n StringField,\n TextAreaField,\n PasswordField,\n SubmitField,\n # SelectField,\n BooleanField\n)\nfrom wtforms.validators import Required, Length, EqualTo, Email, URL\n\nfrom .models import User\n\n\n# 表单 forms\nclass CommentForm(FlaskForm):\n \"\"\" 评论的表单 \"\"\"\n name = StringField('Name', validators=[Required(), Length(max=255)])\n text = TextAreaField('Comment', validators=[Required()])\n submit = SubmitField('Add Comment')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" 用户登录表单 \"\"\"\n username = StringField('Username', [Required(), Length(max=255)])\n password = PasswordField('Password', [Required()])\n remember = BooleanField('Remember Me')\n submit = SubmitField('Login')\n\n def validators(self):\n check_validate = super(LoginForm, self).validate()\n\n # 如果验证没有通过\n if not check_validate:\n return False\n\n # 检查是否存在该用户\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # 检查密码是否匹配\n if not self.user.check_password(self.password.data):\n self.username.errors.append(\n 'Invalid username or password'\n )\n return False\n\n return True\n\n\nclass RegisterForm(FlaskForm):\n \"\"\" 用户注册表单 \"\"\"\n username = StringField('Username', [Required(), Length(max=255)])\n email = StringField('Email', [Required(), Length(max=255), Email()])\n password = PasswordField('Password', [Required(), Length(min=8)])\n confrim = PasswordField('Confirm Password', [Required(), EqualTo('password')])\n submit = SubmitField('Register')\n\n # recaptcha = RecaptchaField()\n\n def validates(self):\n check_validate = super(RegisterForm, self).validate()\n\n # 如果验证没有通过\n if not check_validate:\n return False\n\n # 检查用户名是否已存在\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('User with that name already exists')\n return False\n\n # 检查用户邮箱是否已存在\n user = User.query.filter_by(email=self.email.data).first()\n if not user:\n self.username.errors.append('User with that email already exists')\n return False\n\n return True\n\n\nclass PostForm(FlaskForm):\n \"\"\" 文章表单 \"\"\"\n title = StringField('Title', [Required(), Length(max=255)])\n text = TextAreaField('Content', [Required()])\n submit = SubmitField('Submit')\n\n\nclass OpenIDForm(FlaskForm):\n openid = StringField('OpenID URL', [Required(), URL()])\n submit = SubmitField('Submit')\n\n\nclass CKTextAreaWidget(widgets.TextArea):\n def __call__(self, field, **kwargs):\n kwargs.setdefault('class_', 'ckeditor')\n return super(CKTextAreaWidget, self).__call__(field, **kwargs)\n\n\nclass CKTextAreaField(TextAreaField):\n widget = CKTextAreaWidget()\n\n\ndef custom_email_checker(form, field):\n \"\"\" 自定义表单邮箱验证 \"\"\"\n import re\n import wtforms\n if not re.match(r'[^@]+@[^@]+\\.[^@]+', field.data):\n raise wtforms.ValidationError('Field must be a valid email address.')\n" }, { "alpha_fraction": 0.606401801109314, "alphanum_fraction": 0.6089754104614258, "avg_line_length": 24.796680450439453, "blob_id": "f50c3df578c8313892ed8185aa2ef3c12a869f9d", "content_id": "7cdf87e62612241419bdc7bffbe7a62b21a055dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6303, 
"license_type": "no_license", "max_line_length": 74, "num_lines": 241, "path": "/webapp/controllers/auth.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask import Blueprint\nfrom flask import render_template, redirect, url_for, flash, abort\nfrom flask import request, session, g, current_app\n\nfrom flask_login import login_user, logout_user, login_required\nfrom flask_principal import Identity, AnonymousIdentity, identity_changed\n\nfrom ..models import User\nfrom ..forms import LoginForm, RegisterForm, OpenIDForm\nfrom ..extensions import (\n oid,\n # facebook,\n # twitter,\n login_manager\n\n)\n\n\nauth_blueprint = Blueprint(\n 'auth',\n __name__,\n # template_folder='templates/auth'\n)\n\n\n@auth_blueprint.route('/restricted')\ndef admin():\n if g.user is None:\n abort(403)\n\n return render_template('admin.html')\n\n\n@auth_blueprint.errorhandler(404)\ndef page_not_found(error):\n \"\"\" 处理 404 错误 \"\"\"\n return render_template('404.html'), 404\n\n\n@auth_blueprint.route('/')\ndef index():\n return redirect(url_for('blog.home'))\n\n\n@auth_blueprint.route('/login', methods=['GET', 'POST'])\n# 告诉 Flask-OpenID 接受从中继方返回的认证信息。\[email protected]\ndef login():\n form = LoginForm()\n openid_form = OpenIDForm()\n\n if openid_form.validate_on_submit():\n return oid.try_login(\n openid_form.openid.openid.data,\n ask_for=['nickname', 'email'],\n ask_for_optional=['fullname']\n )\n\n if form.validate_on_submit():\n # Add the user's name to the cookie\n # session['username'] = form.username.data\n user = User.query.filter_by(\n username=form.username.data\n ).one()\n login_user(user, remember=form.remember.data)\n\n identity_changed.send(\n current_app._get_current_object(),\n identity=Identity(user.id)\n )\n\n flash('You have been logged in.', category='success')\n return redirect(url_for('blog.home'))\n\n openid_errors = oid.fetch_error()\n if openid_errors:\n flash(openid_errors, category='danger')\n\n return render_template('auth/login.html', form=form)\n\n\n@auth_blueprint.route('/register', methods=['GET', 'POST'])\[email protected]\ndef regester():\n form = RegisterForm()\n openid_form = OpenIDForm()\n\n if openid_form.validate_on_submit():\n return oid.try_login(\n openid_form.openid.openid.data,\n ask_for=['nickname', 'email'],\n ask_for_optional=['fullname']\n )\n\n if form.validate_on_submit():\n username = form.username.data\n email = form.email.data\n password = form.password.data\n User.create(username, email, password)\n\n flash(\n 'Your user has been created, please login.',\n category='success'\n )\n\n return redirect(url_for('.login'))\n\n openid_errors = oid.fetch_error()\n if openid_errors:\n flash(openid_errors, category='danger')\n\n return render_template('auth/register.html', form=form)\n\n\n@auth_blueprint.route('/logout', methods=['GET', 'POST'])\n@login_required\ndef logout():\n # Remove the username from the cookie\n # session.pop('usename', None)\n logout_user()\n identity_changed.send(\n current_app._get_current_object(),\n identity=AnonymousIdentity()\n )\n\n flash('You have been logged out.', category='success')\n\n return redirect(url_for('.login'))\n\n\n@login_manager.user_loader\ndef load_user(userid):\n from ..models import User\n return User.query.get(userid)\n\n\[email protected]_login\ndef create_or_login(resp):\n from .models import User\n\n username = resp.fullname or resp.nickname\n email = resp.email\n if not username and not email:\n flash('Invalid login. 
Please try again.', 'danger')\n return redirect(url_for('main.login'))\n\n user = User.query.filter_by(username=username, email=email).first()\n if user is None:\n User.create_user(username, email)\n\n # 在这里登录用户\n return redirect(url_for('blog.home'))\n\n\n# # facebook 登录\n# @auth_blueprint.route('/facebook')\n# def facebook_login():\n # return facebook.authorize(\n # callback=url_for(\n # '.facebook_authorized',\n # next=request.referrer or None,\n # _external=True\n # )\n # )\n\n\n# @auth_blueprint.route('/facebook/authorized')\n# @facebook.authorized_hander\n# def facebook_authorized(resp):\n # if resp is None:\n # return 'Access denied: reason=%s error=%s' % (\n # request.args['error_reason'],\n # request.args['error_description']\n # )\n\n # session['facebook_oauth_token'] = (resp['access_token'], '')\n\n # me = facebook.get('/me')\n # user = User.query.filter_by(\n # ).first()\n\n # if not user:\n # User.create(me.data['first_name'] + ' ' + me.data['last_name'])\n\n # username=me.data['first_name'] + ' ' + me.data['last_name']\n # User.create(usename=username)\n # # 从这里登录用户\n # flash('You have been logged in.', category=\"success\")\n\n # return redirect(request.args.get('next') or url_for('blog.home'))\n\n\n# @auth_blueprint.round('/twitter-login')\n# def twitter_login():\n # return twitter.authorize(\n # callback=url_for(\n # '.twitter_authorized',\n # next=request.referrer or None,\n # _external=True\n # )\n # )\n\n\n# @auth_blueprint.route('/twitter-login/authorized')\n# @twitter.authorized_handler\n# def twitter_authenorize(resp):\n # if resp is None:\n # return 'Access denied: reason: {} error: {}'.format(\n # request.args['error_reason'],\n # request.args['error_description']\n # )\n\n # session['twitter_oauth_token'] = resp['oauth_token'] + \\\n # resp['oauth_token_secret']\n\n # user = User.query.filter_by(\n # username=resp['screen_name']\n # ).first()\n\n # if not user:\n # User.create(username=resp['screen_name'])\n\n # # 从这里登录用户\n # flash('You have been logged in.', category='success')\n\n # return redirect(\n # request.args.get('next') or url_for('blog.home')\n # )\n\n\n# @facebook.tokengetter\n# def get_facebook_oauth_token():\n # return session.get('facebook_oauth_token')\n\n\n# @twitter.tokengetter\n# def get_twitter_oauth_token():\n # return session.get('face_oauth_token')\n" }, { "alpha_fraction": 0.5723771452903748, "alphanum_fraction": 0.5856573581695557, "avg_line_length": 25.89285659790039, "blob_id": "b375e293e2e31f9cf7156cd42ad7bed8eb043c6e", "content_id": "5105cf25bccbd6e726f02d79e48aead1a730538e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/webapp/controllers/rest/auth.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask import abort\nfrom flask_restful import Resource\n# from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n\nfrom .parsers import user_post_parser\n\nfrom ...models import User\n\n\nclass AuthApi(Resource):\n def post(self):\n args = user_post_parser.parse_args()\n user = User.query.filter_by(\n username=args['username']\n ).one()\n\n if user.check_password(args['password']):\n # s = Serializer(\n # current_app.config['SECRET_KEY'],\n # expirse_in=600\n # )\n # return {'token': s.dumps({'id': user.id})}\n return user.generate_auth_token(600)\n else:\n abort(401)\n" }, { "alpha_fraction": 0.613888144493103, 
"alphanum_fraction": 0.6141583323478699, "avg_line_length": 23.838926315307617, "blob_id": "e8af7c367fbd145a206f698aaead67f3f255c4fd", "content_id": "1a6ec2674c852e84d597bbbc11912ee5c79daa52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3719, "license_type": "no_license", "max_line_length": 68, "num_lines": 149, "path": "/webapp/__init__.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# import os\n\nfrom flask import Flask\nfrom flask_login import current_user\nfrom flask_principal import identity_loaded, UserNeed, RoleNeed\nfrom sqlalchemy import event\n\nfrom .extensions import (\n bootstrap,\n db,\n bcrypt,\n oid,\n login_manager,\n principals,\n mongo,\n rest_api,\n celery,\n debug_toolbar,\n cache,\n assets_env,\n main_js,\n main_css,\n admin,\n mail,\n # youtube_ext,\n babel\n)\n# from .extensions import oauth\nfrom .config import config\nfrom .controllers.rest.post import PostApi\nfrom .controllers.rest.auth import AuthApi\nfrom .models import (\n User, Role, Post, Comment, Tag, Reminder, AnonymousUser\n)\nfrom .tasks import on_reminder_save\nfrom .controllers.admin import (\n CustomView,\n # CustomFileAdmin,\n PostView,\n CustomModelView\n)\n\n\ndef create_app(object_name):\n \"\"\"\n 工厂函数:用于生成 app\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config[object_name])\n\n bootstrap.init_app(app)\n bcrypt.init_app(app)\n db.init_app(app)\n oid.init_app(app)\n # oauth.init_app(app)\n login_manager.anonymous_user = AnonymousUser\n login_manager.login_view = 'auth.login'\n # login_manager.login_view = 'main_mongo.login'\n login_manager.session_protection = 'strong'\n login_manager.login_message = 'Please login to access this page'\n login_manager.login_message_category = 'info'\n login_manager.init_app(app)\n principals.init_app(app)\n mongo.init_app(app)\n event.listen(\n Reminder,\n 'after_insert',\n on_reminder_save\n )\n rest_api.add_resource(\n PostApi,\n '/api/post',\n '/api/post/<int:post_id>',\n endpoint='api'\n )\n rest_api.add_resource(\n AuthApi,\n '/api/auth'\n )\n rest_api.init_app(app)\n celery.init_app(app)\n debug_toolbar.init_app(app)\n cache.init_app(app)\n assets_env.init_app(app)\n assets_env.register('main_js', main_js)\n assets_env.register('main_css', main_css)\n admin.init_app(app)\n admin.add_view(CustomView(name='Custom'))\n models = [User, Role, Post, Comment, Tag, Reminder]\n\n for model in models:\n admin.add_view(\n CustomModelView(\n model,\n db.session,\n category='models'\n )\n )\n\n # admin.add_view(\n # PostView(\n # Post,\n # db.session,\n # category='PostsAdmin'\n # )\n # )\n\n # admin.add_view(\n # CustomFileAdmin(\n # os.path.join(os.path.dirname(__file__), 'static'),\n # '/static/',\n # name='Static Files'\n # )\n # )\n\n mail.init_app(app)\n babel.init_app(app)\n # youtube_ext.init_app(app)\n\n @identity_loaded.connect_via(app)\n def on_identity_loaded(sender, identity):\n # set the identity user object\n identity.user = current_user\n\n # Add the UserNeed to the identity\n if hasattr(current_user, 'id'):\n identity.provides.add(UserNeed(current_user.id))\n\n # Add each role to the identity\n if hasattr(current_user, 'roles'):\n for role in current_user.roles:\n identity.provides.add(RoleNeed(role.name))\n\n from .controllers.blog import blog_blueprint\n app.register_blueprint(blog_blueprint)\n\n from .controllers.auth import auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n # from 
.controllers.blog_mongo import blog_mongo_blueprint\n # app.register_blueprint(blog_mongo_blueprint)\n\n # from .controllers.auth_mongo import auth_mongo_blueprint\n # app.register_blueprint(auth_mongo_blueprint)\n\n return app\n" }, { "alpha_fraction": 0.47444531321525574, "alphanum_fraction": 0.4855388402938843, "avg_line_length": 27.845714569091797, "blob_id": "6a73cced35848950d1940436090a4a53c1c22693", "content_id": "392f41cab2d6afe74572fefea9fed319dc50bf06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5528, "license_type": "no_license", "max_line_length": 74, "num_lines": 175, "path": "/webapp/controllers/rest/post.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# import datetime\n\nfrom flask import abort\nfrom flask_restful import Resource, fields, marshal_with\n\nfrom .fields import HTMLField\nfrom .parsers import post_get_parser, post_post_parser, post_put_parser, \\\n Post_delete_parser\n\nfrom ...models import Post, Tag, User\n# from ...extensions import db\n\n# Flask Restful 提供的多种默认字段:\n# fields.String: 会使用 str() 对值进行转换\n# fields.FormattedString: 接受 Python 中的格式字符串,变量名包括高大括号中\n# fields.Url: 跟 Flask 中的 url_for 功能一样\n# fields.DateTime: 把 Python 的data或者 datetime 对象转换成字符串\n# 关键字参数format 之赐你个了应该使用 ISO8601 还是 RFC822 规范来格式化\n# fields.Float: 将值转换成以字符串表示的浮点数\n# fields.Integer: 将值转换成以字符串表示的整数\n# fields.Nested: 允许通过其他字段对象构成的字典来格式化嵌套的对象\n# fields.List: 很像 MongoEngine 中的 API,这个字段接收另一种字段类型作为参数,\n# 尝试将值的列表转换成该字段类型的 JSON 列表\n# fields.Boolean: 将值转换以字符串表示的布尔类型\n\n# 自定义字段集合\nnested_tag_fields = {\n 'id': fields.Integer(),\n 'title': fields.String()\n}\n\npost_fields = {\n 'author': fields.String(attribute=lambda x: x.user.username),\n 'title': fields.String(),\n 'text': HTMLField(),\n 'tags': fields.List(fields.Nested(nested_tag_fields)),\n 'publish_date': fields.DateTime(dt_format='iso8601')\n}\n\n\nclass PostApi(Resource):\n @marshal_with(post_fields)\n def get(self, post_id=None):\n if post_id:\n post = Post.query.get(post_id)\n if not post:\n abort(404)\n\n return post\n else:\n # posts = Post.query.all()\n # return posts\n args = post_get_parser.parse_args()\n page = args['page'] or 1\n\n if args['user']:\n user = User.query.filter_by(\n username=args['user']\n ).first()\n\n posts = user.query.order_by(\n Post.publish_date.desc()\n ).paginate(page, 30)\n else:\n posts = Post.query.order_by(\n Post.publish_date.desc()\n ).paginate(page, 30)\n\n return posts.items\n\n def post(self, post_id=None):\n if post_id:\n abort(405)\n else:\n args = post_post_parser.parse_args(strict=True)\n\n user = User.verify_auth_token(args['token'])\n if not user:\n abort(401)\n\n # new_post = Post(args['title'])\n # new_post.date = datetime.datetime.now()\n # new_post.text = args['text']\n # new_post.user = user\n\n # if args['tags']:\n # for item in args['tags']:\n # tag = Tag.query.filter_by(name=item).first()\n\n # # 如果存在该标签,就添加\n # # 如果不存在,就先创建再添加\n # if tag:\n # new_post.tags.append(tag)\n # else:\n # new_tag = Tag(item)\n # new_post.tags.append(new_tag)\n\n # db.session.add(new_post)\n # db.session.commit()\n\n tags = [Tag.create(tag)\n for tag in args['tags']\n if args['tags']]\n new_post = Post.create(\n args['title'],\n args['text'],\n user,\n tags\n )\n return new_post.id, 201\n\n def put(self, post_id=None):\n if not post_id:\n abort(404)\n\n post = Post.query.get(post_id)\n\n if not post:\n abort(404)\n\n args = post_put_parser.parse_args(strice=True)\n user = 
User.verify_auth_token(args['token'])\n if not user:\n abort(401)\n if user != post.user:\n abort(403)\n\n # if args['title']:\n # post.title = args['title']\n\n # if args['text']:\n # post.title = args['text']\n\n # if args['tags']:\n # for item in args['tags']:\n # tag = Tag.query.filter_by(name=item).first()\n\n # # 标签若存在则添加:\n # # 如果不存在则创建并添加\n # if tag:\n # post.tags.append(tag)\n # else:\n # new_tag = Tag(item)\n # post.tags.append(new_tag)\n\n # db.session.add(post)\n # db.session.commit()\n\n tags = [Tag.create(tag)\n for tag in args['tags']\n if args['tags']]\n post.change(args['title'], args['text'], tags)\n\n return post.id, 201\n\n def delete(self, post_id=None):\n if not post_id:\n abort(400)\n\n post = Post.query.get(post_id)\n if not post:\n abort(404)\n\n args = Post_delete_parser.parse_args(strice=True)\n user = User.verify_auth_token(args['token'])\n if user != post.user:\n abort(403)\n\n # db.session.delete(post)\n # db.session.comit()\n post.delete()\n return '', 204\n" }, { "alpha_fraction": 0.674815833568573, "alphanum_fraction": 0.6781647801399231, "avg_line_length": 25.900901794433594, "blob_id": "692a7c89e7512433f4bba30f1ad836c0e900ca32", "content_id": "02a920f6fddcdfe7de37f44d56bf4a0b2b0ccc77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2986, "license_type": "no_license", "max_line_length": 76, "num_lines": 111, "path": "/webapp/extensions.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom gzip import GzipFile\nfrom io import BytesIO\n\nfrom flask import request, g, session, current_app\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom flask_openid import OpenID\n# from flask_oauth import OAuth\nfrom flask_login import LoginManager\nfrom flask_principal import Principal, Permission, RoleNeed\nfrom flask_mongoengine import MongoEngine\nfrom flask_restful import Api\nfrom flask_celery import Celery\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom flask_cache import Cache\nfrom flask_assets import Environment, Bundle\nfrom flask_admin import Admin\nfrom flask_mail import Mail\nfrom flask_babel import Babel\n\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\nbcrypt = Bcrypt()\noid = OpenID()\n# oauth = OAuth()\nlogin_manager = LoginManager()\nprincipals = Principal()\nmongo = MongoEngine()\nrest_api = Api()\ncelery = Celery()\ndebug_toolbar = DebugToolbarExtension()\ncache = Cache()\nassets_env = Environment()\nmain_css = Bundle(\n 'css/bootstrap.css',\n filters='cssmin',\n output='css/common.css'\n)\n\nmain_js = Bundle(\n 'js/query.js',\n 'js/bootstrap.js',\n filters='jsmin',\n output='js/common.js'\n)\nadmin = Admin()\nmail = Mail()\nbabel = Babel()\n\nadmin_permission = Permission(RoleNeed('admin'))\nposter_permission = Permission(RoleNeed('poster'))\ndefault_permission = Permission(RoleNeed('default'))\n\n# facebook = oauth.remote_app(\n # 'facebook',\n # base_url='https://graph.facebook.com/',\n # request_token_url=None,\n # access_token_url='/oauth/access_token',\n # consumer_key=' FACEBOOk_APP_SCRET',\n # request_token_params={'scope': 'email'}\n# )\n\n# twitter = oauth.remote_app(\n # 'twitter',\n # base_url='https://api.twitter.com/1.1/',\n # request_token_url='https://api.twitter.com/oauth/request_token',\n # access_token_url='https://api.twitter.com/oauth/access_token',\n # authorize_url='https://api.twitter.com/oauth/authenticate',\n # 
consumer_key='',\n # consumer_secret='',\n # request_token_params={'scope': 'email'}\n# )\n\n\nclass Gzip(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.after_request(self.after_request)\n\n def after_request(self, response):\n encoding = request.headers.get('Accept-Encoding', '')\n\n if 'gzip' not in encoding or not response.status_code in (200, 201):\n return response\n\n response.direct_passthrough = False\n\n contents = BytesIO()\n with GzipFile(\n mode='wb',\n compresslevel=5,\n fileobj=contents) as gzip_file:\n gzip_file.write(response.get_date())\n\n response.set_data(bytes(contents.getvalue()))\n\n response.headers['Content-Encoding'] = 'gzip'\n response.headers['Content-Length'] = response.content_length\n\n return response\n\nflask_gzip = Gzip()\n" }, { "alpha_fraction": 0.7724359035491943, "alphanum_fraction": 0.7820512652397156, "avg_line_length": 23, "blob_id": "3ef85d1924ea251b8f8e6273d5db761d140fba83", "content_id": "cb781c37e97f8435b751b15f09041273f4da3f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/tserver.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom tornado.wsgi import WSGIContainer\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom webapp import create_app\n\napp = WSGIContainer(create_app('prod'))\nhttp_server = HTTPServer(app)\n# 绑定端口号\nhttp_server.listen(80)\nIOLoop.instance().start()\n" }, { "alpha_fraction": 0.662873387336731, "alphanum_fraction": 0.6635846495628357, "avg_line_length": 23.66666603088379, "blob_id": "7ffe4cb9c533d11e2e0ca6a83ccbaaeb6c511617", "content_id": "483080c763788a64bc35c523b02b87967c898f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1406, "license_type": "no_license", "max_line_length": 90, "num_lines": 57, "path": "/manage.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\n\nfrom flask_script import Manager, Server\nfrom flask_script.commands import ShowUrls, Clean\nfrom flask_migrate import Migrate, MigrateCommand\n\nfrom webapp import create_app\nfrom webapp.models import db, User, Post, Tag, Comment, Role\n# from webapp.models_mongo import db, User, Post, Tag, Comment\n\nenv = os.environ.get('WEBAPP', 'default')\napp = create_app(env)\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\nmanager.add_command('server', Server())\nmanager.add_command('show-urls', ShowUrls)\nmanager.add_command('clean', Clean)\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef make_shell_context():\n return dict(app=app, db=db, User=User, Post=Post, Comment=Comment, Tag=Tag, Role=Role)\n\n\[email protected]\ndef hello():\n print (\"Hello, World!\")\n\n\[email protected]\ndef setup_db():\n db.create_all()\n\n # role_list = (('admin', 'admin'), ('default', 'default'))\n # Role.create_roles(role_list)\n # tag_list = [Tag.create(tag) for tag in ('Python', 'Flask', 'SQLAlchemy', 'Jinja')]\n # default_role = Role.create_role('default', 'default')\n # s = 'Body text'\n admin_role = Role.create_role('admin', 'admin')\n\n User.create(\n username='admin',\n email='[email protected]',\n password='password',\n roles=admin_role\n )\n\n Post.generate_fake_posts()\n\n\nif 
" }, { "alpha_fraction": 0.7724359035491943, "alphanum_fraction": 0.7820512652397156, "avg_line_length": 23, "blob_id": "3ef85d1924ea251b8f8e6273d5db761d140fba83", "content_id": "cb781c37e97f8435b751b15f09041273f4da3f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/tserver.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom tornado.wsgi import WSGIContainer\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom webapp import create_app\n\napp = WSGIContainer(create_app('prod'))\nhttp_server = HTTPServer(app)\n# Bind to port 80\nhttp_server.listen(80)\nIOLoop.instance().start()\n" }, { "alpha_fraction": 0.662873387336731, "alphanum_fraction": 0.6635846495628357, "avg_line_length": 23.66666603088379, "blob_id": "7ffe4cb9c533d11e2e0ca6a83ccbaaeb6c511617", "content_id": "483080c763788a64bc35c523b02b87967c898f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1406, "license_type": "no_license", "max_line_length": 90, "num_lines": 57, "path": "/manage.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\n\nfrom flask_script import Manager, Server\nfrom flask_script.commands import ShowUrls, Clean\nfrom flask_migrate import Migrate, MigrateCommand\n\nfrom webapp import create_app\nfrom webapp.models import db, User, Post, Tag, Comment, Role\n# from webapp.models_mongo import db, User, Post, Tag, Comment\n\nenv = os.environ.get('WEBAPP', 'default')\napp = create_app(env)\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\nmanager.add_command('server', Server())\nmanager.add_command('show-urls', ShowUrls)\nmanager.add_command('clean', Clean)\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef make_shell_context():\n    return dict(app=app, db=db, User=User, Post=Post, Comment=Comment, Tag=Tag, Role=Role)\n\n\[email protected]\ndef hello():\n    print (\"Hello, World!\")\n\n\[email protected]\ndef setup_db():\n    db.create_all()\n\n    # role_list = (('admin', 'admin'), ('default', 'default'))\n    # Role.create_roles(role_list)\n    # tag_list = [Tag.create(tag) for tag in ('Python', 'Flask', 'SQLAlchemy', 'Jinja')]\n    # default_role = Role.create_role('default', 'default')\n    # s = 'Body text'\n    admin_role = Role.create_role('admin', 'admin')\n\n    User.create(\n        username='admin',\n        email='[email protected]',\n        password='password',\n        roles=admin_role\n    )\n\n    Post.generate_fake_posts()\n\n\nif __name__ == '__main__':\n    manager.run()\n" }, { "alpha_fraction": 0.7194444537162781, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 19, "blob_id": "166ec58e691bde66c205707160f8e61581fb94b9", "content_id": "c7866ffe22e8b1d3bba734cfa44cdd1c7ba663ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "no_license", "max_line_length": 77, "num_lines": 18, "path": "/run_test_server.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom webapp import create_app\nfrom webapp.models import User, Role\nfrom webapp.extensions import db\n\napp = create_app('test')\n\ndb.app = app\ndb.create_all()\n\ndefault = Role.create_role('default')\nposter = Role.create_role('poster')\n\ntest_user = User.create(username='test', password='test', roles=poster)\n\napp.run()\n" }, { "alpha_fraction": 0.6630872488021851, "alphanum_fraction": 0.7020134329795837, "avg_line_length": 16.217391014099121, "blob_id": "238c63d9ac605ac74b36c4c18eca935a6f8593e7", "content_id": "e6037511d1ef072e7ed3020ced0cf0247d623a7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 975, "license_type": "no_license", "max_line_length": 38, "num_lines": 46, "path": "/uwsgi.ini", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "[uwsgi]\n\n\n# variables\n# Project name\n# projectname = django_cms\n# Project domain\n# projectdomain = djangocms.cn\n# base = /root/www/django_cms\n\n# Use the port assigned by Heroku\n# config\n# Backend plugin to use\nplugins = python\n# IP and port to listen on\nhttp-socket = :$[PORT]\n# socket = 127.0.0.1:9090\n# socket = 127.0.0.1:8001\n# Project root directory\n# chdir = /root/web/django_cms\ncallable = app\n# Path to the WSGI file\n# module = django_cms.wsgi:application\nwsgi-file = wsgi.py\n# Path to the virtualenv (absolute path)\nhome = /path/to/virtualenv\n# Run a master process\nmaster = True\n# Number of worker processes\n# workers = 1\nthreads = 2\n# Maximum number of processes allowed\n# processes = 10\nprocesses = 4\n# Clean up the environment when the server exits\nvacuum = True\n# Cap the number of requests per worker process\nmax-requests = 1000\n# Record the master process pid\npidfile = /var/flask_test.pid\n# Run the process in the background\ndaemonize = /var/flask_test.log\ndie-on-term = True\n\n# Launch the application with the following command\n# uwsgi --ini uwsgi.ini" }, { "alpha_fraction": 0.6750524044036865, "alphanum_fraction": 0.6757512092590332, "avg_line_length": 23.25423812866211, "blob_id": "c86545d787073792e38cc3e6dcbb852b283f905a", "content_id": "d3e0f72d393204f784e8932b0e6b40b043d83b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1985, "license_type": "no_license", "max_line_length": 63, "num_lines": 59, "path": "/celery_runner.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\n\nfrom webapp import create_app\nfrom webapp.tasks import log\nfrom celery import Celery\n\n\ndef make_celery(app):\n    \"\"\"\n    Wrap every invocation of a Celery task in a Flask application\n    context (a Python with block), which guarantees that code\n    calling into any Flask extension keeps working in the worker.\n    \"\"\"\n    celery = Celery(\n        app.import_name,\n        broker=app.config['CELERY_BROKER_URL'],\n        # backend=app.config['CELERY_BACKEND_URL']\n        backend=app.config['CELERY_RESULT_BACKEND']\n    )\n    celery.conf.update(app.config)\n    TaskBase = celery.Task\n\n    class ContextTask(TaskBase):\n        abstract = True\n\n        def __call__(self, *args, **kwargs):\n            with app.app_context():\n                return TaskBase.__call__(self, *args, **kwargs)\n\n    celery.Task = ContextTask\n\n    return celery\n\nenv = os.environ.get('WEBAPP', 'default')\nflask_app = create_app(env)\ncelery = make_celery(flask_app)
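\n\n# Usage sketch (hedged: assumes a worker and the Redis backend from the\n# config are running). Tasks defined in webapp.tasks can then be queued:\n#\n#     from webapp.tasks import multiply\n#     result = multiply.delay(4, 4)\n#     result.get(timeout=10)  # -> 16\n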
\n\n# Run this file through the celery command by executing:\n#     celery worker -A celery_runner --loglevel=info\n\n# To run periodic tasks, a separate worker process called beat must also\n# be started by executing:\n#     celery -A celery_runner beat\n\n# Celery provides command line tools to monitor workers and tasks.\n# These commands take the form:\n#     celery -A celery_runner <command>\n# Main commands for checking worker state:\n    # status: prints the status of the running workers\n    # result: given a task id, shows that task's return value and final state\n    # purge: deletes every message held by the broker\n    # inspect active: lists all currently executing tasks\n    # inspect scheduled: lists all tasks scheduled with the eta argument\n    # inspect registered: lists all tasks waiting to be executed\n    # inspect stats: returns a dict of statistics about the running workers and the broker\n" }, { "alpha_fraction": 0.5429362654685974, "alphanum_fraction": 0.5512465238571167, "avg_line_length": 18.512821197509766, "blob_id": "99bbf75529cd2363b704f1ae42b2257483eb4771", "content_id": "bc871742af8ba97ee0516a0ec3f99f6452526c03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 53, "num_lines": 39, "path": "/tests/test_urls.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom webapp import create_app\nfrom webapp.extensions import (\n    db,\n    admin,\n    rest_api\n)\n\n\nclass TestURLs(unittest.TestCase):\n    # pass\n    # Workaround for extension state leaking between app instances\n    def setUp(self):\n\n        admin._views = []\n        rest_api.resources = []\n\n        app = create_app('test')\n        self.client = app.test_client()\n\n        # Workaround: bind the db to this app explicitly\n        db.app = app\n\n        db.create_all()\n\n    def tearDown(self):\n        db.session.remove()\n        db.drop_all()\n\n    def test_root_redirect(self):\n        \"\"\" Check that the root path returns a 302 redirect \"\"\"\n        result = self.client.get('/')\n        assert result.status_code == 302\n        assert '/blog/' in result.headers['Location']\n\n\nif __name__ == '__main__':\n    unittest.main()\n" }, { "alpha_fraction": 0.5329052805900574, "alphanum_fraction": 0.533707857131958, "avg_line_length": 18.4375, "blob_id": "a92661eb38991c6e602ec4ad59c99fc485af30f0", "content_id": "17e619c3c46bd1d2a37a88f40551c1d515380a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 48, "num_lines": 64, "path": "/flask_youtube/__init__.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask import (\n    flash,\n    redirect,\n    url_for,\n    session,\n    render_template,\n    Blueprint,\n    Markup\n)\n\n\nclass Youtube(object):\n    \"\"\"\n    A Flask extension for embedding YouTube videos.\n    \"\"\"\n    def __init__(self, app=None, **kwargs):\n        if app:\n            self.init_app(app)\n\n    def init_app(self, app):\n        self.register_blueprint(app)\n        # Register the youtube helper as a template global\n        app.add_template_global(youtube)\n\n    def register_blueprint(self, app):\n        module = Blueprint(\n            'youtube',\n            __name__,\n            template_folder=\"templates\"\n        )\n        app.register_blueprint(module)\n        return module\n\n\nclass Video(object):\n    \"\"\"\n    Describes an embedded video. Handles the arguments passed in from\n    Jinja and renders the HTML shown in the template.\n    \"\"\"\n    def __init__(self, video_id, cls=\"youtube\"):\n        self.video_id = video_id\n        self.cls = cls\n\n    def render(self, *args, **kwargs):\n        return render_template(*args, **kwargs)\n\n    @property\n    def html(self):\n        return Markup(\n            self.render(\n                'youtube/video.html',\n                video=self\n            )\n        )\n\n\ndef youtube(*args, **kwargs):\n    video = Video(*args, **kwargs)\n    return video.html\n\nyoutube_ext = Youtube()
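\n\n# Usage sketch (hedged): because init_app registers youtube() as a\n# template global, a Jinja template can embed a video with:\n#\n#     {{ youtube('some_video_id') }}\n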
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 73, "num_lines": 55, "path": "/webapp/controllers/admin.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask_admin import (\n BaseView,\n # FileAdmin,\n expose\n)\n# 使用的是 SQLAlchemy:\nfrom flask_admin.contrib.sqla import ModelView\n# 如果使用的是 MongoEngine:\n# from flask_admin.contrib.mongoengine import ModelView\nfrom flask_login import (\n login_required,\n # AnonymousUser,\n current_user\n)\n\nfrom ..forms import CKTextAreaField\nfrom ..extensions import admin_permission\n\n\nclass CustomView(BaseView):\n @expose('/')\n @login_required\n @admin_permission.require(http_exception=403)\n def index(self):\n return self.render('admin/custom.html')\n\n @expose('/second_page')\n @login_required\n @admin_permission.require(http_exception=403)\n def second_page(self):\n return self.render('admin/second_page.html')\n\n\nclass CustomModelView(ModelView):\n # pass\n def is_accessible(self):\n # 设置管理员访问权限\n return current_user.is_authenticated() and admin_permission.can()\n\n\nclass PostView(CustomModelView):\n form_overrides = dict(text=CKTextAreaField)\n column_searchable_title = ('text', 'title')\n column_filters = ('publish_date',)\n\n create_template = 'admin/post_edit.html'\n edit_template = 'admin/post_edit.html'\n\n\n# class CustomFileAdmin(FileAdmin):\n # def is_accessible(self):\n # return current_user.is_authenicated() and admin_permission.can()\n" }, { "alpha_fraction": 0.6097306609153748, "alphanum_fraction": 0.6161598563194275, "avg_line_length": 22.97916603088379, "blob_id": "07aefa88f5d0acc32d42d1036eef8014a4ee0d42", "content_id": "788ba1a4ec09948fe8274b8e5dc978f751d8ed41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5929, "license_type": "no_license", "max_line_length": 70, "num_lines": 240, "path": "/webapp/controllers/blog.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# import datetime\n# from urllib.parse import urlencode\n\nfrom flask import (\n Blueprint,\n render_template,\n redirect,\n url_for,\n flash,\n abort,\n g,\n session,\n request,\n current_app\n)\n# from flask.views import View, MethodView\nfrom sqlalchemy import func\nfrom flask_login import login_required, current_user\nfrom flask_principal import Permission, UserNeed\n\nfrom ..models import Post, Tag, Comment, User, tags\nfrom ..forms import CommentForm, PostForm\nfrom ..extensions import (\n db,\n poster_permission,\n admin_permission,\n cache,\n babel,\n)\n\nblog_blueprint = Blueprint(\n 'blog',\n __name__,\n template_folder='templates/blog',\n # static_folder='static/blog',\n url_prefix='/blog'\n)\n\n\n# key_prefix 用于指定再次调用的函数\[email protected](timeout=7200, key_prefix='sidebar_data')\ndef sidebar_data():\n recent = Post.query.order_by(\n Post.publish_date.desc()).limit(5).all()\n\n top_tags = db.session.query(\n Tag, func.count(tags.c.post_id).label('total')\n ).join(\n tags\n ).group_by(\n Tag\n ).order_by('total DESC').limit(5).all()\n\n return recent, top_tags\n\n\n@blog_blueprint.route('/')\n@blog_blueprint.route('/<int:page>')\n# timeout 参数表示结果将会缓存多少秒,超过这个时长之后,就会再次执行该函数并缓存\n# 结果\[email protected](timeout=60)\ndef home(page=1):\n # return '<h1>Hello World!</h1>'\n posts_query = Post.query.order_by(\n Post.publish_date.desc()\n )\n posts = 
\n\n@blog_blueprint.route('/')\n@blog_blueprint.route('/<int:page>')\n# The timeout argument is how many seconds the result stays cached;\n# after that the function runs again and the new result is cached\[email protected](timeout=60)\ndef home(page=1):\n    # return '<h1>Hello World!</h1>'\n    posts_query = Post.query.order_by(\n        Post.publish_date.desc()\n    )\n    pagination = posts_query.paginate(\n        page,\n        per_page=current_app.config['POSTS_PER_PAGE'],\n        error_out=False\n    )\n    # Render only the current page's posts instead of fetching them all.\n    posts = pagination.items\n    recent, top_tags = sidebar_data()\n\n    return render_template(\n        'blog/home.html',\n        posts=posts,\n        pagination=pagination,\n        recent=recent,\n        top_tags=top_tags\n    )\n\n\n@blog_blueprint.route('/tag/<string:tag_name>')\ndef tag(tag_name):\n    tag = Tag.query.filter_by(\n        title=tag_name).first_or_404()\n\n    posts = tag.posts.order_by(\n        Post.publish_date.desc()).all()\n\n    recent, top_tags = sidebar_data()\n\n    return render_template(\n        'blog/tag.html',\n        tag=tag,\n        posts=posts,\n        recent=recent,\n        top_tags=top_tags\n    )\n\n\n@blog_blueprint.route('/user/<string:username>')\ndef user(username):\n    user = User.query.filter_by(\n        username=username).first_or_404()\n\n    posts = user.posts.order_by(\n        Post.publish_date.desc()).all()\n\n    recent, top_tags = sidebar_data()\n\n    return render_template(\n        'blog/user.html',\n        user=user,\n        posts=posts,\n        recent=recent,\n        top_tags=top_tags\n    )\n\n\[email protected]\ndef get_locale():\n    # if a user is logged in, use the locale from the user settings\n    user = getattr(g, 'user', None)\n    if user is not None:\n        return user.locale\n    # otherwise try to guess the language from the user accept\n    # header the browser transmits. We support de/fr/en in this\n    # example. The best match wins.\n    return request.accept_languages.best_match(['de', 'fr', 'en'])\n\n\[email protected]\ndef get_timezone():\n    user = getattr(g, 'user', None)\n    if user is not None:\n        return user.timezone\n\n\ndef make_cache_key(*args, **kwargs):\n    path = request.path\n    args = str(hash(frozenset(request.args.items())))\n    lang = get_locale()\n    return (path + args + lang).encode('utf-8')\n\n\n@blog_blueprint.route('/post/<int:post_id>', methods=['GET', 'POST'])\[email protected](timeout=600, key_prefix=make_cache_key)\n@login_required\ndef post(post_id):\n    form = CommentForm()\n    post = Post.query.get_or_404(post_id)\n\n    if form.validate_on_submit():\n        name = form.name.data\n        text = form.text.data\n        Comment.create(name, text, post.id, current_user.id)\n        # Redirect after a successful POST so a refresh cannot\n        # resubmit the comment.\n        return redirect(url_for('.post', post_id=post_id))\n\n    tags = post.tags\n    comments = post.comments.order_by(Comment.date.desc()).all()\n    recent, top_tags = sidebar_data()\n\n    return render_template(\n        'blog/post.html',\n        post=post,\n        tags=tags,\n        comments=comments,\n        recent=recent,\n        top_tags=top_tags,\n        form=form\n    )\n\n\n@blog_blueprint.route('/new', methods=['GET', 'POST'])\n@login_required\n@poster_permission.require(http_exception=403)\ndef new_post():\n    if not g.current_user:\n        return redirect(url_for('main.login'))\n\n    form = PostForm()\n\n    if form.validate_on_submit():\n        title = form.title.data\n        text = form.text.data\n        new_post = Post.create(title, text, current_user.id)\n        if new_post is None:\n            flash(\n                'The new post could not be created',\n                category='danger'\n            )\n\n    return render_template(\n        'blog/new.html',\n        form=form\n    )\n\n\n@blog_blueprint.route('/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\n# Only the post's author should be able to reach this page\n@poster_permission.require(http_exception=403)\ndef edit_post(id):\n    # if not g.current_user:\n        # return redirect(url_for('main.login'))\n\n    post = Post.query.get_or_404(id)\n    permission = Permission(UserNeed(post.user.id))\n\n    # Administrators may edit any post as well\n    if permission.can() or admin_permission.can():\n        form = PostForm()\n\n        if form.validate_on_submit():\n            title = form.title.data\n            text = form.text.data\n            post.change(title, text)\n\n            return redirect(url_for('.post', post_id=post.id))\n\n        form.text.data = post.text\n        return render_template('blog/edit.html', 
form=form, post=post)\n\n    abort(403)\n\n\n@blog_blueprint.before_request\ndef before_request():\n    \"\"\" Runs before every request is handled \"\"\"\n    # if 'user_id' in session:\n    #     g.user = User.query.get(session['user_id'])\n\n    if 'username' in session:\n        g.current_user = User.query.filter_by(\n            username=session['username']).first()\n    else:\n        g.current_user = None\n" }, { "alpha_fraction": 0.657254159450531, "alphanum_fraction": 0.6582278609275818, "avg_line_length": 21.822221755981445, "blob_id": "091cfaa189feafa30c3d97b01c84ca4cf5daef21", "content_id": "83f93111558f6ce6733e97ffdafdfb8f31e497ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 60, "num_lines": 45, "path": "/webapp/email.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# import smtplib\nfrom threading import Thread\n\nfrom flask import render_template, current_app\nfrom flask_mail import Message\n\nfrom .extensions import mail\n\n\ndef send_sync_email(app, msg):\n    with app.app_context():\n        mail.send(msg)\n\n\ndef send(to, subject, template, **kwargs):\n    app = current_app._get_current_object()\n    msg = Message(\n        # config.py defines MAIL_SUBJECT_PREFIX\n        app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject,\n        sender=app.config['MAIL_SENDER'],\n        recipients=[to]\n    )\n    msg.body = render_template(template + '.txt', **kwargs)\n    msg.html = render_template(template + '.html', **kwargs)\n    thr = Thread(target=send_sync_email, args=[app, msg])\n    thr.start()\n    return thr
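\n\n# Usage sketch (hedged: assumes reminder.txt and reminder.html templates\n# exist; webapp.tasks calls it the same way):\n#\n#     send('[email protected]', 'Your reminder', 'reminder', text='...')\n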
\n\n\n# msg = Message('Your reminder',\n#               sender=current_app['MAIL_SENDER'],\n#               recipients=[reminder.email]\n#               )\n# msg.body = reminder.txt\n# mail.send(msg)\n" }, { "alpha_fraction": 0.644444465637207, "alphanum_fraction": 0.6507936716079712, "avg_line_length": 24.230770111083984, "blob_id": "1326166fd31488de800466ebafd9cb2008fcc7b8", "content_id": "2d9d6f1bb11a520bf966a9158a1842199a8e8db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 71, "num_lines": 13, "path": "/flask_youtube/setup.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n    name='Flask_Youtube',\n    version='0.1',\n    license='MIT',\n    description='Flask extension that allows embedding of YouTube videos',\n    author='Jack Stouffer',\n    author_email='[email protected]',\n    platforms='any',\n    install_requires=['Flask'],\n    packages=find_packages()\n)\n" }, { "alpha_fraction": 0.606377124786377, "alphanum_fraction": 0.6193879246711731, "avg_line_length": 29.486034393310547, "blob_id": "6a714d7c997593f84d17db993c61a654e7abee90", "content_id": "ce9c49319cb727509cdb99aa8aef3273c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5701, "license_type": "no_license", "max_line_length": 79, "num_lines": 179, "path": "/webapp/config.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport datetime\nimport tempfile\n\nfrom celery.schedules import crontab\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config(object):\n    # generate 32 SECRET_KEY from random choices('a-z0-9') command:\n    # Bash:\n    # cat /dev/urandom | tr -cd 'a-z0-9' | head -c 32\n    # Mac:\n    # cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z0-9' | head -c 32\n    SECRET_KEY = os.environ.get('SECRET_KEY', 'hard to guess string')\n    RECAPTCHA_PUBLIC_KEY = \"\"\n    RECAPTCHA_PRIVATE_KEY = \"\"\n    SSL_DISABLE = False\n    SQLALCHEMY_COMMIT_ON_TEARDOWN = True\n    SQLALCHEMY_TRACK_MODIFICATIONS = False\n    SQLALCHEMY_RECORD_QUERIES = True\n    # MAIL_SERVER = 'smtp.googlemail.com'\n    MAIL_SERVER = 'smtp.googlemail.com'\n    MAIL_PORT = 587\n    MAIL_USE_TLS = True\n    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')\n    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')\n    MAIL_SUBJECT_PREFIX = '[BLOG]'\n    MAIL_SENDER = 'BLOG Admin <[email protected]>'\n    ADMIN = os.environ.get('BLOG_ADMIN')\n    POSTS_PER_PAGE = 20\n    FOLLOWERS_PER_PAGE = 50\n    COMMENTS_PER_PAGE = 30\n    SLOW_DB_QUERY_TIME = 0.5\n\n    # celery + redis settings\n    # the URL is in the format of:\n    # redis://:password@hostname:port/db_number\n    # CELERY_BACKEND_URL = 'redis://localhost:6379/0'\n    REDIS_URL = 'redis://localhost:6379/0'\n    CELERY_BROKER_URL = 'redis://localhost:6379/0'\n    CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'\n\n    @staticmethod\n    def init_app(app):\n        pass\n\n\n# Inherits Config so the shared settings and init_app chaining work\nclass ProdConfig(Config):\n    \"\"\" Production configuration \"\"\"\n    # MYSQL\n    # mysql+pymysql://user:password@ip:port/db_name\n    # Postgres\n    # postgresql+psycopg2://user:password@ip:port/db_name\n    # MSSQL\n    # mssql+pyodbc://user:password@dsn_name\n    # Oracle\n    # oracle+cx_oracle://user:password@ip:port/db_name\n    # pass\n\n    # Keep the cache in process memory\n    CACHE_TYPE = 'simple'\n\n    # To use redis as the cache backend:\n    # CACHE_TYPE = 'redis'\n    # CACHE_REDIS_HOST = 'localhost'\n    # CACHE_REDIS_PORT = '6379'\n    # CACHE_REDIS_PASSWORD = 'password'\n    # CACHE_REDIS_DB = '0'\n\n    # To use memcached as the cache backend:\n    # CACHE_TYPE = 'memcached'\n    # CACHE_KEY_PREFIX = 'flask_cache'\n    # CACHE_MEMCACHED_SAVERS = ['localhost:11211']\n\n    @classmethod\n    def init_app(cls, app):\n        Config.init_app(app)\n\n        # email errors to administrator\n        import logging\n        from logging.handlers import SMTPHandler\n        credentials = None\n        secure = None\n        if getattr(cls, 'MAIL_USERNAME', None) is not None:\n            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)\n        if getattr(cls, 'MAIL_USE_TLS', None):\n            secure = ()\n        mail_handler = SMTPHandler(\n            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),\n            fromaddr=cls.MAIL_SENDER,\n            toaddrs=[cls.ADMIN],\n            subject=cls.MAIL_SUBJECT_PREFIX + ' Application Error',\n            credentials=credentials,\n            secure=secure)\n        mail_handler.setLevel(logging.ERROR)\n        app.logger.addHandler(mail_handler)\n\n\nclass TestConfig(Config):\n    \"\"\" Test configuration \"\"\"\n    db_file = tempfile.NamedTemporaryFile()\n    TESTING = True\n    # SQLite\n    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI') or \\\n        'sqlite:///' + os.path.join(BASE_DIR, '../database.db')\n    SQLALCHEMY_TRACK_MODIFICATIONS = True\n    WTF_CSRF_ENABLED = False\n    CACHE_TYPE = 'null'\n\n\nclass DevConfig(Config):\n    \"\"\" Development configuration \"\"\"\n    DEBUG = True\n    # Panel configuration needed when using MongoEngine\n    # debug_tb_panels = [\n        # 'flask_debugtoolbar.panels.versions.VersionDebugPanel',\n        # 'flask_debugtoolbar.panels.timer.TimerDebugPanel',\n        # 'flask_debugtoolbar.panels.headers.HeaderDebugPanel',\n        # 'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',\n        # 'flask_debugtoolbar.panels.panels.config_vars.ConfigVarsDebugPanel',\n        # 'flask_debugtoolbar.panels.panels.template.TemplateDebugPanel',\n        # 'flask_debugtoolbar.panels.panels.logger.LoggingDebugPanel',\n        # 'flask_debugtoolbar.panels.panels.route_list.RouteListDebugPanel',\n        # 'flask_debugtoolbar.panels.panels.profiler.ProfilerDebugPanel',\n        # 'flask_mongoengine.panels.panels.MongoDebugPanel'\n    # 
]\n    DEBUG_TB_INTERCEPT_REDIRECTS = False\n    ASSETS_DEBUG = True\n    # SQLite\n    # DB_URI = 'sqlite:///database.db'\n    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URI') or \\\n        'sqlite:///' + os.path.join(BASE_DIR, '../database.db')\n    SQLALCHEMY_TRACK_MODIFICATIONS = True\n    # Uncomment to echo the SQL statements SQLAlchemy issues\n    # SQLALCHEMY_ECHO = True\n\n    # For NoSQL storage this project uses MongoDB\n    # MongoDB connection settings\n    MONGODB_SETTINGS = {\n        'db': 'local',\n        'host': 'localhost',\n        'port': 27017\n    }\n\n    # Define periodically executed tasks. Celery's beat scheduler reads\n    # CELERYBEAT_SCHEDULE (Celery 3.x naming).\n    CELERYBEAT_SCHEDULE = {\n        # Run every 30 seconds\n        'log-every-30-secondes': {\n            'task': 'webapp.tasks.log',\n            'schedule': datetime.timedelta(seconds=30),\n            'args': (\"Message\",)\n        },\n        'weekly-digest': {\n            'task': 'webapp.tasks.digest',\n            'schedule': crontab(day_of_week=6, hour='10')\n        },\n    }\n\n    # The simple option tells Flask-Cache to keep results in an\n    # in-memory Python dict\n    CACHE_TYPE = 'simple'\n    # CACHE_TYPE = 'null'\n    MAIL_SERVER = 'localhost'\n    MAIL_PORT = 25\n    MAIL_USERNAME = 'username'\n    MAIL_PASSWORD = 'password'\n\n\nconfig = {\n    'dev': DevConfig,\n    'test': TestConfig,\n    'prod': ProdConfig,\n\n    'default': DevConfig\n}\n" }, { "alpha_fraction": 0.5622377395629883, "alphanum_fraction": 0.56829833984375, "avg_line_length": 21.819149017333984, "blob_id": "8ebef90daa9638439cdc4ca3bcc23fcafae7c258", "content_id": "3b4c062d0fdf9a0523904bbbd5af43aeb5231c88", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 2205, "license_type": "no_license", "max_line_length": 70, "num_lines": 94, "path": "/fabfile.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom fabric.api import (\n    env,\n    local,\n    run,\n    sudo,\n    cd\n)\n\n\ndef test():\n    local('python -m unittest discover')\n\n\ndef upgrade_libs():\n    sudo('apt-get update')\n    sudo('apt-get upgrade')\n\n\ndef setup():\n    test()\n    upgrade_libs()\n\n    # Install the required system packages\n    lib_list = [\n        'build-essential',\n        'git',\n        'python',\n        'python-pip',\n        'python-all-dev',\n        # if deploying with supervisor\n        'supervisor',\n        # if using Nginx\n        'nginx',\n        # if using Apache\n        'apache2',\n        'libapache2-mod-proxy-uwsgi'\n    ]\n    for lib in lib_list:\n        sudo('apt-get install -y {}'.format(lib))\n\n    run('useradd -d /home/deploy/ deploy')\n    run('gpasswd -a deploy sudo')\n\n    # Allow the deploy user to install Python packages\n    sudo('chown -R deploy /usr/local/')\n\n    # Python3\n    sudo('chown -R deploy /usr/lib/python3.5/')\n    # Python2\n    # sudo('chown -R deploy /usr/lib/python2.7/')\n\n    run('git config --global credential.helper store')\n\n    with cd('/home/deploy'):\n        run('git clone (your repo URL)')\n\n    with cd('/home/deploy/webapp'):\n        run('pip install -r requirements.txt')\n        run('python manage.py createdb')\n\ndef deploy():\n    test()\n    upgrade_libs()\n    with cd('/home/deploy/webapp'):\n        run('git pull')\n        run('pip install -r requirements.txt')\n\n    # if use supervisor to setup\n    sudo('cp supervisord.conf /etc/supervisor/conf.d/webapp.conf')\n\n    # if use Nginx\n    sudo('cp nginx.conf /etc/nginx/sites-available/[your_domain]')\n    sudo('ln -s /etc/nginx/sites-available/[your_domain] '\n         '/etc/nginx/sites-enabled/[your_domain]')\n\n    # if use Apache\n    sudo('cp apache.conf '\n         '/etc/apache2/sites-available/[your_domain]'\n         )\n    sudo('ln -sf /etc/apache2/sites-available/[your_domain]'\n         '/etc/apache2/sites-enabled/[your_domain]'\n         )\n\n    # restart the Apache service\n    sudo('service apache2 restart')\n\n    # restart the Nginx service\n    sudo('service nginx restart')\n\n    # restart supervisor service\n    sudo('service supervisor restart')\n" }, { "alpha_fraction": 0.6083086133003235, "alphanum_fraction": 
0.7091988325119019, "avg_line_length": 14.041666984558105, "blob_id": "26fb13dac5a55df4d9b3a18f9d8663244335fdc9", "content_id": "d150c4006ad0c2bc7c98ff910bd41fe6248d3ec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 40, "num_lines": 24, "path": "/migrations/versions/5e283b006467_.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 5e283b006467\nRevises: e941389acb2e\nCreate Date: 2017-12-06 21:12:39.122928\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5e283b006467'\ndown_revision = 'e941389acb2e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    pass\n\n\ndef downgrade():\n    pass\n" }, { "alpha_fraction": 0.5803448557853699, "alphanum_fraction": 0.6093103289604187, "avg_line_length": 25.550661087036133, "blob_id": "8f96be1c0df390fdb06a25328dc4b5b856bf8238", "content_id": "6ae0fe1eb3e8875b2ff2e3fef25da981be090b8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7932, "license_type": "no_license", "max_line_length": 75, "num_lines": 227, "path": "/webapp/tasks.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport datetime\n\n# from flask import render_template\n# from email.mime.text import MIMEText\n\nfrom .extensions import celery\nfrom .models import Reminder, Post\nfrom .email import send\n\n\[email protected]()\ndef log(msg):\n    return msg\n\n\[email protected]()\ndef multiply(x, y):\n    return x * y\n\n\[email protected](\n    bind=True,\n    ignore_result=True,\n    default_retry_delay=300,\n    max_retries=5\n)\ndef reminder(self, pk):\n    reminder = Reminder.query.get(pk)\n    # send() takes template context as keyword arguments\n    send(\n        reminder.email,\n        'Your reminder',\n        'reminder',\n        text=reminder.text\n    )\n    # msg = MIMEText(reminder.text)\n    # msg['Subject'] = 'your reminder'\n    # msg['From'] = '[email protected]'\n    # msg['To'] = reminder.email\n\n    # try:\n    #     smtp_server = smtplib.SMTP('localhost')\n    #     smtp_server.starttls()\n    #     smtp_server.login('junral', 'wujunrong1994;')\n    #     smtp_server.sendmail(\n    #         '[email protected]',\n    #         [reminder.email],\n    #         msg.as_string()\n    #     )\n    #     smtp_server.close()\n    #     return\n    # except Exception as e:\n    #     self.retry(exc=e)\n\n\[email protected](\n    bind=True,\n    ignore_result=True,\n    default_retry_delay=300,\n    max_retries=5\n)\ndef digest(self):\n    # Find the first and last day of the current week\n    year, week = datetime.datetime.now().isocalendar()[0:2]\n    date = datetime.date(year, 1, 1)\n    if (date.weekday() > 3):\n        date = date + datetime.timedelta(days=7 - date.weekday())\n    else:\n        date = date - datetime.timedelta(days=date.weekday())\n    delta = datetime.timedelta(days=(week - 1) * 7)\n    start, end = date + delta, date + delta + datetime.timedelta(days=6)\n\n    posts = Post.query.filter(\n        Post.publish_date >= start,\n        Post.publish_date <= end\n    ).all()\n\n    if (len(posts) == 0):\n        return\n    # FIXME (original bug): 'reminder' here is the task function above,\n    # so reminder.email is undefined; the digest recipient is never\n    # defined in this module. Subject taken from the commented code below.\n    send(\n        reminder.email,\n        'Weekly Digest',\n        'digest',\n        posts=posts\n    )\n\n    # msg = MIMEText(\n    #     render_template(\"digest.html\", posts=posts),\n    #     'html'\n    # )\n\n    # msg['Subject'] = 'Weekly Digest'\n    # msg['From'] = ''\n\n    # try:\n    #     smtp_server = smtplib.SMTP('localhost')\n    #     smtp_server.starttls()\n    #     smtp_server.login('junral', 'wujunrong1994;')\n    #     smtp_server.sendmail(\n    #         '[email protected]',\n    #         [reminder.email],\n    #         msg.as_string()\n    #     )\n    #     smtp_server.close()\n    #     return\n    # except Exception as e:\n    #     self.retry(exc=e)
\n\n\ndef on_reminder_save(mapper, connection, target):\n    # SQLAlchemy mapper-event hook: schedule the reminder task for the\n    # reminder's date. Note args must be a tuple; the original wrote\n    # (self.id), which is just a parenthesized value.\n    reminder.apply_async(args=(target.id,), eta=target.date)
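\n\n# Sketch (hedged): the hook above is wired up with SQLAlchemy's event\n# API so each newly inserted Reminder schedules its task:\n#\n#     from sqlalchemy import event\n#     event.listen(Reminder, 'after_insert', on_reminder_save)\n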
\n\n# If a task fails, it can call itself again through the retry() method:\n# @celery.task(bind=True)\n# def task():\n#     try:\n#         some_code\n#     except Exception as e:\n#         self.retry(exc=e)\n\n# The bind argument tells Celery to pass the task object itself as the\n# first argument when the function is called. The task object can then\n# be reached through self, and its retry method re-runs the task with\n# the same arguments.\n# More arguments and their corresponding behavior:\n# max_retries: how many times the task may be retried. Past that count\n# the task is marked as failed\n# default_retry_delay: in seconds, how long to wait before re-running\n# the task.\n# rate_limit: caps how many distinct calls of this task may happen in a\n# given window. An integer means runs allowed per second; the value can\n# also be a string of the form x/m, meaning x runs per minute, or x/h,\n# meaning x runs per hour.\n# time_limit: if given, the task is killed when it runs longer than this\n# many seconds\n# ignore_result: if the task's return value is unused, do not send it back\n\n# celery workflows:\n# Using signatures:\n# from celery import signature\n# A signature is called with the same arguments as apply_async:\n# >>> signature('webapp.tasks.multiply', args=(4, 4), countdown=10)\n# webtask.multiply(4, 4)\n# >>> from webapp.tasks import multiply\n# >>> multiply.subtask((4, 4), countdown=10)\n# webtask.multiply(4, 4)\n# # Shorthand for the above; like the delay method, it takes no\n# # apply_async keyword arguments\n# >>> multiply.s(4, 4)\n# webtask.multiply(4, 4)\n# >>> multiply.s(4, 4)()\n# 16\n# Calling a task's signature (also called a subtask) produces a function\n# that can be handed to other functions for them to execute. Executing\n# the signature directly runs it in the current process, not in a worker\n# >>> multiply.s(4, 4).delay()\n\n# Partials:\n# The first use of task signatures is a functional-programming-style\n# feature: partials. A partial starts from a function that takes many\n# arguments and produces a new function whose first n arguments are\n# always the same when it is called.\n# >>> partial = multiply.s(4)\n# >>> partial.delay(4)\n# 16\n# In effect a new function is created and saved as a partial: it takes\n# exactly one argument and multiplies it by 4\n\n# Callbacks:\n# Useful when finishing one task should trigger another task based on\n# its result; apply_async provides the link argument for this:\n# >>> multiply.apply_async((4, 4), link=log.s())\n# If the callback takes no input, or does not need the previous task's\n# result, its signature must be made immutable with the si method:\n# >>> multiply.apply_async((4, 4), link=log.si('Message'))\n\n# Partials and callbacks can be combined for some powerful effects:\n# >>> multiply.apply_async((4, 4), link=multiply.s(4))\n# Fetching the result with get gives 16, not 64, because get does not\n# return the callback's result\n\n# Groups:\n# The group function takes a list of task signatures and produces a\n# function that runs all of those signatures in parallel and returns a\n# list of all the results:\n# from celery import group\n# >>> sig = group(multiply.s(i, i + 5) for i in range(10))\n# >>> result = sig.delay()\n# >>> result.get()\n# [0, 6, 14, 24, 36, 50, 66, 84, 104, 126]\n\n# Chains:\n# The chain function takes a set of task signatures and passes each\n# signature's result on to the next one in the chain, returning a single\n# result, as follows:\n# >>> from celery import chain\n# >>> sig = chain(multiply.s(10, 10), multiply.s(4), multiply.s(20))\n# which can also be written as:\n# >>> sig = (multiply.s(10, 10) | multiply.s(4) | multiply.s(20))\n# >>> result = sig.delay()\n# >>> result.get()\n# 8000\n\n# Chains and partials can do even more: chains can compose partials into\n# new functions, and chains can be nested inside one another, as follows:\n# >>> func = (multiply.s(10) | multiply.s(2))\n# >>> result = func.delay(16)\n# >>> result.get()\n# 320\n\n# Chains can be nested:\n# >>> func = (\n# ...     multiply.s(10) | multiply.s(2) | (multiply.s(4) | multiply.s(5))\n# ... )\n# >>> result = func.delay(16)\n# >>> result.get()\n\n# Chords:\n# The chord function, when it builds a task signature, first runs a\n# group of tasks and then passes the final result to a callback:\n# >>> from celery import chord\n# >>> sig = chord(\n# >>>     group(multiply.s(i, i + 5) for i in range(10)),\n# >>>     log.s()\n# >>> )\n# >>> result = sig.delay()\n# >>> result.get()\n# [0, 6, 14, 24, 36, 50, 66, 84, 104, 126]\n# As with the link argument, the callback's result is not returned to\n# the get method\n\n# Combining a group and a callback with the chain syntax automatically\n# produces a chord signature:\n# sig = (group(multiply.s(i, i + 5) for i in range(10)) | log.s())\n# result = sig.delay()\n# result.get()\n# [0, 6, 14, 24, 36, 50, 66, 84, 104, 126]\n" }, { "alpha_fraction": 0.6375545859336853, "alphanum_fraction": 0.6375545859336853, "avg_line_length": 18.08333396911621, "blob_id": "2e696d24e29898cce090667f30c4c4b980a23ba4", "content_id": "dce0766d606d45db3b5134bfb86361f749b83c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/webapp/example/__init__.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "from flask import Blueprint\n\nexample = Blueprint(\n    'example',\n    __name__,\n    # Template folder for this blueprint\n    template_folder='templates/example',\n    # Static folder for this blueprint\n    static_folder='static/example',\n    # URL prefix for the blueprint's routes\n    url_prefix='/example'\n)\n" }, { "alpha_fraction": 0.6817969083786011, "alphanum_fraction": 0.6822648644447327, "avg_line_length": 19.605504989624023, "blob_id": "1efb44c7274e28dfd43ed6c838ef4af519310eda", "content_id": "a2c1efdc02dc7e2cb4e24885042b3300b77fcb33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 50, "num_lines": 109, "path": "/webapp/controllers/rest/parsers.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask_restful import reqparse\n\n# add_argument() parameters:\n# action: declares what the parser does with the value after it has been\n# parsed. The two options are store and append: store adds the parsed\n# value to the returned dict, append adds the parsed value to a list\n# which is then added to the returned dict\n# case_sensitive: boolean, whether the argument name is case sensitive\n# choices: like MongoEngine, a list of the permitted values\n# default: the value produced when the argument is absent\n# dest: the key name under which the value is added to the returned dict\n# help: the message shown to the user when validation fails\n# ignore: boolean, whether to return an error when type checking fails\n# location: where to look for the data. The available options:\n    # args: look in the GET query string\n    # headers: look in the HTTP request headers\n    # form: look in the HTTP POST form data\n    # cookies: look in the HTTP cookies\n    # files: look in the POST file fields\n# required: boolean, whether the argument is optional\n# store_missing: boolean, whether to fill in the default when the\n# argument is missing from the request\n# type: the Python type to convert the passed value into
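\n\n# Usage sketch (hedged): a flask_restful Resource method typically\n# applies one of the parsers defined below like so:\n#\n#     args = post_post_parser.parse_args()\n#     title = args['title']\n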
\n\npost_get_parser = reqparse.RequestParser()\npost_get_parser.add_argument(\n    'page',\n    type=int,\n    # 'json' is the valid flask_restful location (not 'join')\n    location=['json', 'args', 'headers']\n)\n\npost_get_parser.add_argument(\n    'user',\n    type=str,\n    location=['json', 'args', 'headers']\n)\n\npost_post_parser = reqparse.RequestParser()\npost_post_parser.add_argument(\n    'title',\n    type=str,\n    required=True,\n    help=\"Title is required\"\n)\n\npost_post_parser.add_argument(\n    'text',\n    type=str,\n    required=True,\n    help=\"Body text is required\"\n)\n\npost_post_parser.add_argument(\n    'token',\n    type=str,\n    required=True,\n    help=\"Auth Token is required to add posts\"\n)\n\npost_post_parser.add_argument(\n    'tags',\n    type=str,\n    action='append'\n)\n\npost_put_parser = reqparse.RequestParser()\npost_put_parser.add_argument(\n    'token',\n    type=str,\n    help='Auth Token is required to edit posts'\n)\n\npost_put_parser.add_argument(\n    'title',\n    type=str\n)\n\npost_put_parser.add_argument(\n    'text',\n    type=str\n)\n\npost_put_parser.add_argument(\n    'tags',\n    type=str,\n    action='append'\n)\n\nPost_delete_parser = reqparse.RequestParser()\nPost_delete_parser.add_argument(\n    'token',\n    type=str,\n    required=True,\n    help=\"Auth Token is required to delete posts\"\n)\n\nuser_post_parser = reqparse.RequestParser()\nuser_post_parser.add_argument(\n    'username',\n    type=str,\n    required=True\n)\n\nuser_post_parser.add_argument(\n    'password',\n    type=str,\n    required=True\n)\n" }, { "alpha_fraction": 0.5486305356025696, "alphanum_fraction": 0.5558971762657166, "avg_line_length": 25.90225601196289, "blob_id": "4c4292f2a8545a2c3fb4fb2ecc9803058e611542", "content_id": "2f72d0c1b5ce674985ab74dbb583c234c8e358af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3768, "license_type": "no_license", "max_line_length": 89, "num_lines": 133, "path": "/webapp/forms_mongo.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# from flask_wtf import FlaskForm\n# from wtforms import StringField, TextAreaField, PasswordField, SubmitField, SelectField\n# from wtforms import BooleanField\n# from wtforms.validators import Required, Length, EqualTo, Email, URL\n\n# from .models_mongo import User\n\n\n# # For use with the MongoDB data models\n# class CommentForm(FlaskForm):\n    # \"\"\" Comment form \"\"\"\n    # name = StringField(\n        # 'Name', validators=[Required(), Length(max=255)]\n    # )\n\n    # text = TextAreaField(\n        # 'Comment', validators=[Required()]\n    # )\n    # # submit = SubmitField('Add Comment')\n\n\n# class LoginForm(FlaskForm):\n    # \"\"\" User login form \"\"\"\n    # username = StringField(\n        # 'Username', [Required(), Length(max=255)]\n    # )\n\n    # password = PasswordField(\n        # 'Password', [Required()]\n    # )\n    # remember = BooleanField('Remember Me')\n\n    # def validate(self):\n        # check_validate = super(LoginForm, self).validate()\n\n        # # Bail out if base validation failed\n        # if not check_validate:\n            # return False\n\n        # # Check whether the user exists\n        # user = User.objects(\n            # username=self.username.data).first()\n        # if not user:\n            # self.username.errors.append(\n                # 'Invalid username or password')\n            # return False\n\n        # # Check whether the password matches\n        # if not user.check_password(self.password.data):\n            # self.username.errors.append(\n                # 'Invalid username or password'\n            # )\n            # return False\n\n        # return True\n\n\n# class RegisterForm(FlaskForm):\n    # \"\"\" User registration form \"\"\"\n    # username = StringField(\n        # 'Username',\n        # [Required(), Length(max=255)]\n    # )\n\n    # email = StringField(\n        # 'Email',\n        # [Required(), Length(max=255), Email()]\n    # )\n\n    # password = PasswordField(\n        # 'Password',\n        # [Required(), Length(min=8)]\n    # )\n\n    # confirm = PasswordField(\n        # 'Confirm Password',\n        # [Required(), EqualTo('password')]\n    # )\n\n    # submit = SubmitField('Register')\n\n    # # recaptcha = RecaptchaField()\n\n    # def validate(self):\n        # check_validate = super(RegisterForm, self).validate()\n\n        # # Bail out if base validation failed\n        # if not check_validate:\n            # return False\n\n        # # Reject the registration if the username is already taken\n        # # (the original tested 'if not user', inverting the check)\n        # user = User.objects(username=self.username.data).first()\n        # if user:\n            # self.username.errors.append('User with that name already exists')\n            # return False\n\n        # # Reject if the email is already registered\n        # user = User.objects(email=self.email.data).first()\n        # if user:\n            # self.username.errors.append('User with that email already exists')\n            # return False\n\n        # return True\n\n\n# class PostForm(FlaskForm):\n    # \"\"\" Post form \"\"\"\n    # title = StringField('Title', [Required(), Length(max=255)])\n    # type = SelectField('Post Type', choices=[\n    # 
('blog', 'Blog Post'),\n # ('image', 'Image'),\n # ('video', 'Video'),\n # ('quote', 'Quote')\n # ])\n # text = TextAreaField('Content')\n # image = StringField('Image URL', [URL(), Length(max=255)])\n # video = StringField('Video Code', [URL(), Length(max=255)])\n # author = StringField('Author', [URL(), Length(max=255)])\n\n\n# class OpenIDForm(FlaskForm):\n # openid = StringField('OpenID URL', [Required(), URL()])\n\n\n# def custom_email(form, field):\n # \"\"\" 自定义表单邮箱验证 \"\"\"\n # import re\n # import wtforms\n # if not re.match(r'[^@]+@[^@]+\\.[^@]+', field.data):\n # raise wtforms.ValidationError('Field must be a valid email address.')\n" }, { "alpha_fraction": 0.7321428656578064, "alphanum_fraction": 0.7321428656578064, "avg_line_length": 17.66666603088379, "blob_id": "2abab0d4bb790f32b4f1b8c3f63248f89fe9c73d", "content_id": "e36e58863c61e9bc2f8c11349fedecbbbec1e82b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/wsgi.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "from webapp import create_app\n\napp = create_app('prod')\n" }, { "alpha_fraction": 0.5385023951530457, "alphanum_fraction": 0.545671820640564, "avg_line_length": 26.691177368164062, "blob_id": "8ccfaee5c6a281ac453385e95ead01d685c248f7", "content_id": "73a89a8d7aa6a0d5c5bf0d296b554a70a5d7485e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4000, "license_type": "no_license", "max_line_length": 73, "num_lines": 136, "path": "/tests/test_selenium.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport time\nimport unittest\n# import threading\nfrom selenium import webdriver\n\n\nclass SeleniumTestCase(unittest.TestCase):\n client = None\n\n @classmethod\n def setUpClass(cls):\n # start Firefox\n try:\n cls.client = webdriver.Firefox()\n except:\n pass\n\n # skip these tests if the browser could not be started\n if cls.client:\n # create the application\n # cls.app = create_app('testing')\n # cls.app_context = cls.app.app_context()\n # cls.app_context.push()\n\n # suppress logging to keep unittest output clean\n import logging\n logger = logging.getLogger('werkzeug')\n logger.setLevel(\"ERROR\")\n\n # create the database and populate with some fake data\n # db.create_all()\n # Role.insert_roles()\n # User.generate_fake(10)\n # Post.generate_fake(10)\n\n # add an administrator user\n # admin_role = Role.query.filter_by(permissions=0xff).first()\n # admin = User(email='[email protected]',\n # username='john', password='cat',\n # role=admin_role, confirmed=True)\n # db.session.add(admin)\n # db.session.commit()\n\n # start the Flask server in a thread\n # threading.Thread(target=cls.app.run).start()\n\n # give the server a second to ensure it is up\n time.sleep(1)\n\n @classmethod\n def tearDownClass(cls):\n if cls.client:\n cls.client.close()\n # stop the flask server and the browser\n # cls.client.get('http://localhost:5000/shutdown')\n # cls.client.close()\n\n # # destroy database\n # db.drop_all()\n # db.session.remove()\n\n # # remove application context\n # cls.app_context.pop()\n\n def setUp(self):\n if not self.client:\n self.skipTest('Web browser not available')\n\n def tearDown(self):\n pass\n\n # def setUp(self):\n # self.driver = webdriver.Firefox()\n\n # def tearDown(self):\n # self.driver.close()\n\n def 
test_add_new_post(self):\n        \"\"\" Test that a new post can be added from the post creation page\n\n        1. Log the user in to the site\n        2. Go to the new-post creation page\n        3. Fill in each form field and submit the form\n        4. Go to the blog home page and confirm the new post appears\n        \"\"\"\n        # pass\n\n        # Log in. setUpClass stores the Firefox webdriver on cls.client,\n        # so the driver is self.client here (the original referenced a\n        # nonexistent self.driver).\n        self.client.get('http://localhost:5000/login')\n\n        username_field = self.client.find_element_by_name(\n            \"username\"\n        )\n        username_field.send_keys('test')\n\n        password_field = self.client.find_element_by_name(\n            'password'\n        )\n        password_field.send_keys('test')\n\n        login_button = self.client.find_element_by_name(\n            'Login'\n        )\n        login_button.click()\n\n        # Fill in the form\n        self.client.get('http://localhost:5000/blog/new')\n\n        title_field = self.client.find_element_by_name('title')\n        title_field.send_keys('Test Title')\n\n        # Locate the editor inside the iframe.\n        # switch_to changes the driver's context, which allows elements\n        # inside another iframe to be selected.\n        self.client.switch_to.frame(\n            self.client.find_element_by_tag_name('iframe')\n        )\n        post_field = self.client.find_element_by_class_name(\n            'cke_editable'\n        )\n\n        post_field.send_keys('Test content')\n        self.client.switch_to.parent_frame()\n\n        post_button = self.client.find_element_by_name(\n            'Submit'\n        )\n        post_button.click()\n\n        # Confirm the post was created\n        self.client.get('http://localhost:5000/blog')\n        self.assertIn('Test Title', self.client.page_source)\n        self.assertIn('Test content', self.client.page_source)\n" }, { "alpha_fraction": 0.5548622012138367, "alphanum_fraction": 0.5571156144142151, "avg_line_length": 25.957944869995117, "blob_id": "2b1e37a6d33bbff2f34c8d8cc06794eeb6c87867", "content_id": "2eb3c1bb071950259e164973c638a113d30905f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5841, "license_type": "no_license", "max_line_length": 81, "num_lines": 214, "path": "/webapp/controllers/auth_mongo.py", "repo_name": "junral/flask_test", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# from flask import Blueprint\n# from flask import render_template, redirect, url_for, flash, abort\n# from flask import request, session, g, current_app\n\n# from flask_login import login_user, logout_user, login_required\n# from flask_principal import Identity, AnonymousIdentity, identity_changed\n\n# from ..models_mongo import User\n# from ..forms_mongo import LoginForm, RegisterForm, OpenIDForm\n# from ..extensions import oid\n# # from ..extensions import facebook, twitter\n\n\n# auth_mongo_blueprint = Blueprint(\n    # 'auth_mongo',\n    # __name__,\n    # url_prefix='/mongo'\n# )\n\n\n# @auth_mongo_blueprint.route('/restricted')\n# def admin():\n    # if g.user is None:\n        # abort(403)\n\n    # return render_template(\n        # 'admin.html',\n        # mongo=True\n    # )\n\n\n# @auth_mongo_blueprint.errorhandler(404)\n# def page_not_found(error):\n    # \"\"\" Handle 404 errors \"\"\"\n    # return render_template('page_not_found.html'), 404\n\n\n# @auth_mongo_blueprint.route('/')\n# def index():\n    # return redirect(url_for(\n        # 'blog_mongo.home',\n        # mongo=True\n    # ))\n\n\n# @auth_mongo_blueprint.route('/login', methods=['GET', 'POST'])\n# # Tell Flask-OpenID to accept the authentication response relayed\n# # back from the provider.\n# @oid.loginhandler\n# def login():\n    # form = LoginForm()\n    # openid_form = OpenIDForm()\n\n    # if openid_form.validate_on_submit():\n        # return oid.try_login(\n            # openid_form.openid.openid.data,\n            # ask_for=['nickname', 'email'],\n            # ask_for_optional=['fullname']\n        # )\n\n    # if form.validate_on_submit():\n        # user = User.objects(username=form.username.data).one()\n        # login_user(user, remember=form.remember.data)\n\n        # identity_changed.send(\n            # current_app._get_current_object(),\n            # identity=Identity(user.id)\n        # )\n\n        # flash('You have been logged in.', 
category='success')\n\n # openid_errors = oid.fetch_error()\n # if openid_errors:\n # flash(openid_errors, category='danger')\n\n # return render_template(\n # 'login_mongo.html',\n # mongo=True,\n # form=form\n # )\n\n\n# @auth_mongo_blueprint.route('/register', methods=['GET', 'POST'])\n# @oid.loginhandler\n# def regester():\n # form = RegisterForm()\n # openid_form = OpenIDForm()\n\n # if openid_form.validate_on_submit():\n # return oid.try_login(\n # openid_form.openid.openid.data,\n # ask_for=['nickname', 'email'],\n # ask_for_optional=['fullname']\n # )\n\n # if form.validate_on_submit():\n # user = User()\n # user.username = form.username.data\n # user.email = form.email.data\n # user.set_password(form.password.data)\n # user.save()\n\n # flash(\n # 'Your user has been created, please login.',\n # category='success'\n # )\n\n # return redirect(url_for(\n # '.login',\n # mongo=True))\n\n # openid_errors = oid.fetch_error()\n # if openid_errors:\n # flash(openid_errors, category='danger')\n\n # return render_template(\n # 'register_mongo.html',\n # mongo=True,\n # form=form\n # )\n\n\n# @auth_mongo_blueprint.route('/logout', methods=['GET', 'POST'])\n# @login_required\n# def logout():\n # logout_user()\n # identity_changed.send(\n # current_app._get_current_object(),\n # identity=AnonymousIdentity()\n # )\n\n # flash('You have been logged out.', category='success')\n\n # return redirect(url_for(\n # '.login',\n # mongo=True\n # ))\n\n\n# # # facebook 登录\n# # @auth_mongo_blueprint.route('/facebook')\n# # def facebook_login():\n # # return facebook.authorize(\n # # callback=url_for(\n # # '.facebook_authorized',\n # # next=request.referrer or None,\n # # _external=True\n # # )\n # # )\n\n\n# # @auth_mongo_blueprint.route('/facebook/authorized')\n# # @facebook.authorized_hander\n# # def facebook_authorized(resp):\n # # if resp is None:\n # # return 'Access denied: reason=%s error=%s' % (\n # # request.args['error_reason'],\n # # request.args['error_description']\n # # )\n\n # # session['facebook_oauth_token'] = (resp['access_token'], '')\n\n # # me = facebook.get('/me')\n # # user = User.query.filter_by(\n # # username=me.data['first_name'] + ' ' + me.data['last_name']\n # # ).first()\n\n # # if not user:\n # # User.create_user(me.data['first_name'] + ' ' + me.data['last_name'])\n\n # # # 从这里登录用户\n # # flash('You have been logged in.', category=\"success\")\n\n # # return redirect(request.args.get('next') or url_for('blog.home'))\n\n\n# # @auth_mongo_blueprint.round('/twitter-login')\n# # def twitter_login():\n # # return twitter.authorize(\n # # callback=url_for(\n # # '.twitter_authorized',\n # # next=request.referrer or None,\n # # _external=True\n # # )\n # # )\n\n\n# # @auth_mongo_blueprint.route('/twitter-login/authorized')\n# # @twitter.authorized_handler\n# # def twitter_authenorize(resp):\n # # if resp is None:\n # # return 'Access denied: reason: {} error: {}'.format(\n # # request.args['error_reason'],\n # # request.args['error_description']\n # # )\n\n # # session['twitter_oauth_token'] = resp['oauth_token'] + \\\n # # resp['oauth_token_secret']\n\n # # user = User.query.filter_by(\n # # username=resp['screen_name']\n # # ).first()\n\n # # if not user:\n # # User.create_user(username=resp['screen_name'])\n\n # # # 从这里登录用户\n # # flash('You have been logged in.', category='success')\n\n # # return redirect(\n # # request.args.get('next') or url_for('blog.home')\n # # )\n" } ]
31
finger563/cbsat
https://github.com/finger563/cbsat
b62590e21c22d4d5ae1fd01c8f841249011aeb19
9290d125f714069fe89f8c3eea10426547cffb5d
130c5d45a42c82446e8bb6cc793cbb4c2f40df9f
refs/heads/master
2021-01-17T10:21:07.669709
2016-04-04T17:18:19
2016-04-04T17:18:19
30,722,835
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5304268598556519, "alphanum_fraction": 0.5404177904129028, "avg_line_length": 34.51612854003906, "blob_id": "e16708dfc87b82e967df5e10980c8f705280a6f3", "content_id": "25757172a421c49e2ab5772043d6b17d688a356f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 111, "num_lines": 62, "path": "/doc/src/conf.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "project = 'Research Progress Report'\nhtml_short_title = 'Research Progress Report'\n#project = 'PNP2'\n#version = '0.2'\n#release = '0.2.0'\ncopyright = '2015, finger563'\n\nimport os,inspect,sys\nanalysis = os.path.realpath(os.path.abspath\n (os.path.join\n (os.path.split\n (inspect.getfile\n (inspect.currentframe()\n )\n )[0], \"../../src/analysis/v2.0/\")\n ))\nif analysis not in sys.path:\n sys.path.insert(0, analysis)\nmiddleware = os.path.realpath(os.path.abspath\n (os.path.join\n (os.path.split\n (inspect.getfile\n (inspect.currentframe()\n )\n )[0], \"../../src/middleware/v2.0/\")\n ))\nif middleware not in sys.path:\n sys.path.insert(0, middleware)\n\nsys.path.insert(0, os.path.abspath('.'))\n\nmaster_doc = 'index'\nsource_suffix = '.rst'\nexclude_patterns = ['**/.#*']\nextensions = ['numfig','sphinx.ext.pngmath', 'sphinx.ext.autodoc', 'sphinxcontrib.spelling', 'sphinx.ext.todo']\ntemplates_path = ['_templates']\nautoclass_content = \"both\"\nautodoc_member_order = \"bysource\"\n\npygments_style = 'sphinx'\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtmlhelp_basename = 'cbsat-doc'\nhtml_static_path = ['_static']\nhtml_context = { 'css_files': ['./_static/custom.css'] }\n\ntodo_include_todos = True\n\nadd_module_names = False\nshow_authors = True\n\nspelling_word_list_filename = 'dictionary.txt'\n\nlatex_elements = {}\nlatex_elements['preamble'] = '\\usepackage{amsmath}\\n\\usepackage{amssymb}\\n'\nlatex_elements['classoptions'] = ',openany,oneside'\nlatex_elements['babel'] = '\\\\usepackage[english]{babel}'\nlatex_elements['title'] = 'Research Progress Report'\nlatex_elements['release'] = ''\nlatex_elements['releasename'] = ''\nlatex_elements['author'] = 'William Emfinger'\n" }, { "alpha_fraction": 0.585106372833252, "alphanum_fraction": 0.585106372833252, "avg_line_length": 14.5, "blob_id": "0feba46664399937348df3a2d62589f0cc0414ed", "content_id": "8e70ab6ad1d564e36dc8677d68018c249a063b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 94, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/docs/_sources/python-api/analysis/network-config.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Network Config\n==============\n\n.. 
automodule:: networkConfig\n :members:\n :undoc-members:\n\n" }, { "alpha_fraction": 0.47564470767974854, "alphanum_fraction": 0.49426934123039246, "avg_line_length": 16.670886993408203, "blob_id": "b496fc06beb5209235de2e8f08febc0d43338908", "content_id": "718096605b92033fdd21dfb3f389f5c1ffbbeac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1396, "license_type": "no_license", "max_line_length": 66, "num_lines": 79, "path": "/src/middleware/v1.0/tcWrapper.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef TCWRAPPER_HPP\n#define TCWRAPPER_HPP\n\n#include <unistd.h>\n#include <sys/types.h>\n#include <time.h>\n#include <errno.h>\n\n#include <math.h>\n#include <string>\n\n#include \"NetworkProfile.hpp\"\n#include \"log_macro.hpp\"\n\nclass Options {\npublic:\n std::string interface;\n std::string profile;\n\n Options() {\n interface = \"eth0\";\n profile = \"node_profile.csv\";\n }\n\n int Parse(int argc, char **argv) {\n \n if ( argc < 2 )\n return 0;\n int c;\n char str[256];\n sprintf(str,\"%s\",argv[1]);\n if ( argc > 2 ) {\n for (int i=2;i<argc;i++) {\n\tsprintf(str,\"%s %s\",str,argv[i]);\n }\n }\n char *p = strtok(str,\"-\");\n while (p != 0) {\n switch (p[0])\n\t{\n\tcase 'p':\n\t for (int i=0;i<=strlen(p+2);i++) {\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t this->profile = p+2;\n\t break;\n\tcase 'i':\n\t for (int i=0;i<=strlen(p+2);i++) {\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t this->interface = p+2;\n\t break;\n\tcase '?':\n\tdefault:\n\t TG_LOG(\"usage: \\n\\t%s\\n\"\n\t\t \"\\t\\t -p <profile name>\\n\"\n\t\t \"\\t\\t -i <interface name>\\n\"\n\t\t ,argv[0]);\n\t return -1;\n\t}\n p = strtok(NULL,\"-\");\n }\n return 0;\n }\n \n void Print() {\n TG_LOG(\"Options():\\n\");\n TG_LOG(\"\\t profile name\\t\\t: %s\\n\",this->profile.c_str());\n TG_LOG(\"\\t interface name\\t\\t: %s\\n\",this->interface.c_str());\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.8079625368118286, "alphanum_fraction": 0.8149883151054382, "avg_line_length": 46.44444274902344, "blob_id": "39954bbd017fa4c64ebc51e2ab6c69282dd31a75", "content_id": "e1288f76c033fb5332e6016d2822842363d2714a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 427, "license_type": "no_license", "max_line_length": 96, "num_lines": 9, "path": "/README.md", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "# cbsat\nComponent-Based Software Analysis Tools\n\nDocumentation for the tools in this repository can be found here:\n * [CBSAT github documentation](http://finger563.github.io/cbsat/docs)\n\nThis repository contains my research work towards\n* Design-Time network analysis techniques and tools for distributed cyber-physical systems (CPS)\n* Run-Time profile-based network traffic generation, measurement, detection, and enforcement\n" }, { "alpha_fraction": 0.7027971744537354, "alphanum_fraction": 0.7062937021255493, "avg_line_length": 21, "blob_id": "bf4a5597918beb7eee9e9d96677ef025e5062fec", "content_id": "6198174124c2ad8305bba3781f0dbaa05275de52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 286, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/docs/_sources/analysis-api.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Analysis API\n===============\n\n.. 
toctree::\n :includehidden:\n :maxdepth: 2\n\n python-api/analysis/analysis\n python-api/analysis/network-profile\n python-api/analysis/network-config\n python-api/analysis/plotting\n python-api/analysis/utils\n python-api/analysis/generate-tdma\n" }, { "alpha_fraction": 0.6895897388458252, "alphanum_fraction": 0.6921982169151306, "avg_line_length": 39.54807662963867, "blob_id": "d11615a81a73eb6e223d5cdb1c13e35c887bb30f", "content_id": "2bf802c271684474795fea82f5b21afb4302788d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4217, "license_type": "no_license", "max_line_length": 83, "num_lines": 104, "path": "/src/middleware/v2.0/list_func.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "// note: timespec here is whatever representation you have in your\n// CPN; this could be integer or whatever, but it must support\n// modulus. and should be able to store floating point values or\n// handle conversion from floaint point.\ndouble Delay(INTEGER dataLenBits,\n\t TIMESPEC sentTime,\n\t TIMESPEC period,\n\t LIST resources) {\n // expects a size of data, a current time, and a profile defintion\n // profile has a period (unsigned long in this case I suppose)\n // profile has a resources list, where each resource has an\n // associated time, bandwidth, and data value; the data in an entry\n // is the data at the **end** of the entry\n\n TIMESPEC offset = sentTime % period;\n\n // Need to find what the bandwidth, start, and start_data are for\n // the current profile interval.\n\n // The start is a the latest time point in the profile that is less\n // than the current time.\n\n // The start_data is the amount of data that has been sent\n // (according to the profile!) by the start time of the current\n // interval. this corresponds to the data value of the previous\n // interval.\n\n // The bandwidth is the current interval's bandwidth; it is constant\n // from the start time of the current interval until the start time\n // of the next interval.\n TIMESPEC start = resources.back().time;\n INTEGER offsetData = resources.back().data;\n INTEGER bandwidth = resources.back().bandwidth;\n int res_id = resources.size() - 1;\n for (int i=0;i<resources.size();i++) {\n if ( resources[i].time > offset ) {\n res_id = i;\n start = resources[i-1].time;\n bandwidth = resources[i-1].bandwidth;\n offsetData = resources[i-1].data;\n break;\n }\n }\n\n // offsetData is the actual amount of data that has been sent\n // (according to the profile!) by sentTime\n offsetData += (INTEGER)((double)(offset-start)*((double)bandwidth));\n\n // Now that we know exactly where (time, data) = (sentTime,\n // offsetData) in the profile we are, we need to figure out exactly\n // how long it will take us to send the data\n\n // timeDiff is an accumulator which contains the amount of time it\n // will take to send the data. 
It is the time difference between\n // sentTime and when the last bit of data will have been sent.\n TIMESPEC timeDiff = 0;\n\n // dataInPeriod is the total amount of data that can have been sent\n // by the end of the period\n INTEGER dataInPeriod = resources.back().data;\n \n INTEGER numPeriods = dataLenBits / dataInPeriod;\n if ( numPeriods > 0 ) { // will take more than numPeriods to send data\n timeDiff += (TIMESPEC)(numPeriods * period);\n }\n\n // dataToEnd is the amount of data that can be sent before the end\n // of the period\n INTEGER dataToEnd = dataInPeriod - offsetData;\n // modData is the amount of data that has to be sent in the last\n // period of transmission; this may be the first/only period if the\n // data is smaller than one period's worth of data.\n INTEGER modData = dataLenBits % dataInPeriod;\n if ( dataToEnd < modData ) { // will have to cycle back to beginning to send data\n timeDiff += period - offset;\n offsetData = 0;\n offset = 0;\n res_id = 0;\n modData = modData - dataToEnd;\n }\n\n // This code works for all cases; It simply takes the modData (which\n // is the actual amount of data which will be sent now that the\n // periodicity has been taken into account) and the current resource\n // ID (res_id) and iterates through the profile accumulating into timeDiff\n INTEGER remainder = modData;\n if ( (resources[res_id].data - offsetData) <= modData ) {\n remainder = modData - (resources[res_id].data - offsetData);\n timeDiff += resources[res_id++].time - offset;\n while ( (resources[res_id].data - offsetData) < modData ) {\n remainder = modData - (resources[res_id].data - offsetData);\n timeDiff += resources[res_id].time - resources[res_id-1].time;\n res_id++;\n }\n }\n res_id--;\n\n // By this point, we've taken care of every complete interval if any\n // exist. now we take care of the remainder data that is left over\n // after the most recent complete interval.\n timeDiff += (TIMESPEC) (double)remainder / (double)resources[res_id].bandwidth;\n\n return timeDiff;\n}\n" }, { "alpha_fraction": 0.5236471891403198, "alphanum_fraction": 0.5326335430145264, "avg_line_length": 38.902626037597656, "blob_id": "06aa636932e67ddd14c09ea156aebf3a4e551d2b", "content_id": "dd7606a873352c9e4e364652b9fb4e5778aca14b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25817, "license_type": "no_license", "max_line_length": 169, "num_lines": 647, "path": "/src/analysis/v1.0/acceptance-plot.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"\nThis program is designed to do admissibilty tests for admission of an application\nor set of applications to the F6 satellite cluster. Each application may be \nsplit accross multiple nodes of the cluster. Each node has its own network\ninterface and as such, each node's bandwidth is independent of the other nodes'\nnetwork utilization. Therefore each node's bandwidth is modeled as a network\n\"link\" which connects from that node to all other nodes. 
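The `Delay` routine above folds the send time into the profile period and then walks the breakpoints analytically. A brute-force Python sketch of the same idea can be useful for cross-checking its results; here the profile is assumed to be cumulative `(time, bits)` breakpoints over one period starting at `(0, 0)`, and all names are illustrative rather than taken from the sources:

```python
def cumulative_bits(profile, period, t):
    """Bits admitted on [0, t) by a profile given as cumulative
    (time, bits) breakpoints over one period, repeated periodically."""
    full_periods, offset = divmod(t, period)
    total = full_periods * profile[-1][1]
    for (t0, d0), (t1, d1) in zip(profile, profile[1:]):
        if t0 <= offset <= t1:
            # linear interpolation within the current segment
            frac = (offset - t0) / (t1 - t0) if t1 > t0 else 0.0
            return total + d0 + frac * (d1 - d0)
    return total + profile[-1][1]

def delay(profile, period, sent_time, length_bits, step=1e-3):
    """Smallest dt (to within step) such that length_bits fit on
    [sent_time, sent_time + dt). Brute force where the C++ is analytic."""
    sent = cumulative_bits(profile, period, sent_time)
    dt = 0.0
    while cumulative_bits(profile, period, sent_time + dt) - sent < length_bits:
        dt += step
    return dt

# one 1 s period: 100 bps for the first half, 20 bps for the second
profile = [(0.0, 0.0), (0.5, 50.0), (1.0, 60.0)]
print(delay(profile, 1.0, 0.75, 80.0))  # ~1.4 s: spills into the next period
```

With the sample profile, 80 bits sent at t = 0.75 s finish about 1.4 s later, after crossing the slow second half of the first period and part of the next, which matches the period-folding arithmetic in the C++ above.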
\n\"\"\" \n\n# QoS files have 4 columns: time (s), BW(bps), latency (ms), Network Link (id #)\nimport sys, os, csv, copy, glob\n\nfrom acceptancemathlib import *\nfrom utils import havePLT, getDataAtTimeFromProfile, plotProfile, get_app_node_map, get_appProfiles, get_nodeProfiles,plt\n\nclass Options:\n def __init__(self):\n self.period = (90*60) # orbital period in seconds\n self.plot_profiles = havePLT\n self.num_periods = 1\n self.selected_node = ''\n self.selected_interface = ''\n self.plot_line_width = 4 # line width for plots\n self.font_size = 25 # font size for plots\n self.nc_mode = False\n self.nc_step_size = 1\n\n def parse_args(self,args):\n argind = 1\n while argind < len(args):\n if args[argind] == \"-P\":\n self.period = int(args[argind+1])\n if self.period <= 0:\n print \"Error! You must specify a time period > 0\"\n return -1\n argind += 2\n elif args[argind] == \"-n\":\n self.num_periods = int(args[argind+1])\n if self.num_periods <= 0:\n print \"Error! You must specify a number of periods > 0\"\n return -1\n argind += 2\n elif args[argind] == \"-p\":\n self.plot_profiles = False\n argind += 1\n elif args[argind] == \"-nc_mode\":\n self.nc_mode = True\n argind += 1\n elif args[argind] == \"-nc_step_size\":\n self.nc_step_size = float(args[argind+1])\n argind += 2\n elif args[argind] == \"-N\":\n self.selected_node = args[argind+1]\n argind += 2\n elif args[argind] == \"-I\":\n self.selected_interface = args[argind+1]\n argind += 2\n elif args[argind] == \"-?\" or args[argind] == \"-h\":\n print \"Usage:\\n\\tpython \",args[0],\"\"\"\n \\t\\t-N <node name>\n \\t\\t-I <node interface name>\n \\t\\t-P <period (s)>\n \\t\\t-n <number of periods to analyze>\n \\t\\t-nc_mode (to run network calculus calcs)\n \\t\\t-nc_step_size <step size for windows in NC>\n \\t\\t-p (to not output any plots)\\n\"\"\"\n return -1\n else:\n print \"\"\"Usage:\\n\\t\"\"\",args[0],\"\"\"\n \\t\\t-N <node name>\n \\t\\t-I <node interface name>\n \\t\\t-P <period (s)>\n \\t\\t-n <number of periods to analyze>\n \\t\\t-nc_mode (to run network calculus calcs)\n \\t\\t-nc_step_size <step size for windows in NC>\n \\t\\t-p (to not output any plots)\\n\"\"\"\n return -1\n return 0\n\n\nclass ProfileEntry:\n def __init__(self,start=0,end=0,slope=0,data=0,interface='none',ptype='none'):\n self.start = start\n self.end = end\n self.slope = slope\n self.data = data\n self.interface = interface\n self.ptype = ptype\n\n def __lt__(self, other):\n return self.start < other.start\n\n def __repr__(self):\n retstr = \"{}\\n\".format(self)\n return retstr #\"ProfileEntry()\"\n \n def __str__(self):\n return \"{0},{1},{2},{3},{4},{5}\".format(self.start,self.end,self.slope,self.data,self.interface,self.ptype)\n\n\nclass NodeProfile:\n def __init__(self,period,num_periods):\n self.profile = []\n self.required = []\n self.provided = []\n self.link = []\n self.period = period\n self.num_periods = num_periods\n self.buffer = [0,0,0]\n self.delay = [0,0,0]\n self.interfaces = []\n\n def getProvidedProfile(self,interface):\n retProfile = []\n for e in self.provided:\n if e.interface == interface:\n retProfile.append(e)\n return retProfile\n\n def addProvidedProfile(self,profile):\n p = profile.split('\\n')\n self.provided = []\n if p == None or profile == '':\n return\n for line in p:\n entry = get_entry_from_line(line)\n if entry != None:\n entry.ptype = 'provided'\n self.provided.append(entry)\n if len(self.provided) == 0:\n return\n for i in range(0,len(self.provided)-1):\n if self.provided[i].interface not in 
self.interfaces:\n self.interfaces.append(self.provided[i].interface)\n if self.provided[i].interface == self.provided[i+1].interface:\n self.provided[i].end = self.provided[i+1].start\n else:\n self.provided[i].end = self.period\n self.provided[-1].end = self.period \n self.provided = sorted(self.provided)\n for intf in self.interfaces:\n prof = self.getProvidedProfile(intf)\n if prof[0].start > 0:\n entry = ProfileEntry()\n entry.start = 0\n entry.end = prof[0].start\n entry.ptype = 'provided'\n entry.interface = intf\n self.provided.insert(0,entry)\n\n originalProvided = copy.deepcopy(self.provided)\n pData = {}\n for intf in self.interfaces:\n prof = self.getProvidedProfile(intf)\n pData[intf] = prof[-1].data\n for i in range(1,self.num_periods):\n tmpProvided = copy.deepcopy(originalProvided)\n for e in tmpProvided:\n e.data += pData[e.interface]\n e.start += self.period*i\n e.end += self.period*i\n self.provided.append(e)\n for data in pData:\n data += data\n return\n\n def addRequiredEntry(self, entry):\n if self.required == [] or entry.start >= self.required[-1].end:\n self.required.append(entry)\n elif entry.start > self.required[-1].start:\n entry.slope += self.required[-1].slope\n self.required[-1].end = entry.start\n self.required.append(entry)\n elif entry.end < self.required[0].start:\n self.required.insert(0,entry)\n else:\n for i in range(0,len(self.required)):\n if entry.start <= self.required[i].start:\n endTime = entry.end\n addedBW = entry.slope\n if i != 0:\n self.required[i-1].end = entry.start\n entry.slope = self.required[i-1].slope + addedBW\n if endTime >= self.required[i-1].end:\n entry.end = self.required[i].start\n self.required.insert(i,entry)\n i+=1 \n while i < len(self.required) and endTime >= self.required[i].end:\n self.required[i].slope += addedBW\n i+=1\n if i < len(self.required) and endTime < self.required[i].end:\n remainingEntry = ProfileEntry(start=endTime,end=self.required[i].end,slope=self.required[i].slope,ptype='required')\n self.required[i].slope += addedBW\n self.required[i].end = endTime\n self.required.insert(i+1,remainingEntry)\n break\n for r in self.required:\n if r.start == r.end:\n self.required.remove(r)\n return\n\n def addRequiredProfile(self,profile):\n p = profile.split('\\n')\n if p == None or profile == '':\n return\n if len(self.required) == 0:\n for line in p:\n entry = get_entry_from_line(line)\n if entry != None:\n entry.ptype = 'required'\n self.required.append(entry)\n if len(self.required) > 0:\n self.required = sorted(self.required)\n for i in range(0,len(self.required)-1):\n self.required[i].end = self.required[i+1].start\n self.required[-1].end = self.period\n else:\n entryList = []\n for line in p:\n entry = get_entry_from_line(line)\n if entry != None:\n entry.ptype = 'required'\n entryList.append(entry)\n entryList = sorted(entryList)\n for i in range(0,len(entryList)-1):\n entryList[i].end = entryList[i+1].start\n entryList[-1].end = self.period\n for e in entryList:\n self.addRequiredEntry(e)\n if len(self.required) > 0 and self.required[0].start > 0:\n entry = ProfileEntry()\n entry.start = 0\n entry.end = self.required[0].start\n entry.ptype = 'required'\n self.required.insert(0,entry)\n\n if len(self.required) > 0:\n originalRequired = copy.deepcopy(self.required)\n pData = self.required[-1].data\n for i in range(1,self.num_periods):\n tmpRequired = copy.deepcopy(originalRequired)\n for e in tmpRequired:\n e.data += pData\n e.start += self.period*i\n e.end += self.period*i\n self.required.append(e)\n pData += 
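The periodic extension above shifts each repeated entry by one period in time and offsets its cumulative data by the data already sent in earlier periods, so period i carries an offset of i times one period's total. A compact sketch of that bookkeeping, assuming entries are `[start_time, cumulative_data]` pairs for a single period (the helper name is illustrative):

```python
def extend_periodically(entries, period, num_periods):
    """Repeat one period of [start, cumulative_data] entries: period i
    shifts times by i*period and data by i times one period's total."""
    data_per_period = entries[-1][1]
    result = []
    for i in range(num_periods):
        for start, data in entries:
            result.append([start + i * period, data + i * data_per_period])
    return result

one_period = [[0.0, 0.0], [30.0, 4000.0], [60.0, 6000.0]]
print(extend_periodically(one_period, 90.0, 3)[-1])  # [240.0, 18000.0]
```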
pData\n return\n\n def makeNetworkCalculusCurves(self,step):\n # MUST UPDATE THE SLOPE FOR ALL ENTRIES\n # CONVERT self.required into max arrival curve\n self.required_nc = []\n time_list = []\n prof = self.required\n for e in prof:\n time_list.append(e.end)\n start_time = 0\n #print prof\n prev_data = 0\n for tw in time_list:\n max_data = 0\n t = tw\n while t <= prof[-1].end:\n startData = getDataAtTimeFromProfile(prof,t-tw)\n endData = getDataAtTimeFromProfile(prof,t)\n diff = endData - startData\n if diff > max_data:\n max_data = diff\n t += step\n entry = ProfileEntry()\n #print \"NEW POINT @ {} has {}\\n\".format(start_time,max_data)\n entry.data = max_data\n entry.start = start_time\n start_time = tw\n entry.end = start_time\n entry.ptype = 'required'\n entry.slope = (entry.data-prev_data) / (entry.end - entry.start)\n prev_data = entry.data\n entry.interface = 'none'\n self.required_nc.append(entry)\n # CONVERT self.provided into min service curve\n self.provided_nc = []\n for intf in self.interfaces:\n prof = self.getProvidedProfile(intf)\n #print prof\n time_list = []\n for e in prof:\n time_list.append(e.end)\n start_time = 0\n prev_data = 0\n for tw in time_list:\n min_srv = prof[-1].data\n t = tw\n while t <= prof[-1].end:\n startData = getDataAtTimeFromProfile(prof,t-tw)\n endData = getDataAtTimeFromProfile(prof,t)\n diff = endData - startData\n if diff < min_srv:\n min_srv = diff\n t += step\n entry = ProfileEntry()\n #print \"NEW POINT @ {} has {}\\n\".format(start_time,min_srv)\n entry.data = min_srv\n entry.start = start_time\n start_time = tw\n entry.end = start_time\n entry.ptype = 'provided'\n entry.slope = (entry.data-prev_data) / (entry.end - entry.start)\n prev_data = entry.data\n entry.interface = intf\n self.provided_nc.append(entry)\n #print self.provided\n #print self.provided_nc\n #print self.required\n #print self.required_nc\n self.provided = self.provided_nc\n self.required = self.required_nc\n \n def convolve(self,interface):\n if len(self.required) == 0 or len(self.provided) == 0:\n return -1\n self.profile = []\n for e in self.provided:\n if e.interface == interface:\n self.profile.append(e)\n for e in self.required:\n self.profile.append(e)\n self.profile = sorted(self.profile)\n pInterval = None\n rInterval = None\n self.link = []\n buff = 0\n delay = [0,0,0]\n pOffset = 0\n pEndData = 0\n rEndData = 0\n for e in self.profile:\n if e.ptype == 'provided':\n pInterval = e\n else:\n rInterval = e\n # note: the way intervals are created, the\n # req and prov intervals will always overlap\n # and adjacent intervals will never overlap\n if pInterval != None and rInterval != None:\n start = 0\n end = 0\n # get the later start value\n if pInterval.start < rInterval.start:\n start = rInterval.start\n elif pInterval.start == rInterval.start:\n start = rInterval.start\n elif pInterval.start > rInterval.start:\n start = pInterval.start\n # get the earlier end value\n if pInterval.end < rInterval.end:\n end = pInterval.end\n pEndData = pInterval.data - pOffset\n rEndData = rInterval.data - rInterval.slope*(rInterval.end-end)\n elif pInterval.end == rInterval.end:\n end = pInterval.end\n pEndData = pInterval.data - pOffset\n rEndData = rInterval.data\n elif pInterval.end > rInterval.end:\n end = rInterval.end\n pEndData = pInterval.data - pOffset - pInterval.slope*(pInterval.end-end)\n rEndData = rInterval.data \n # create interval entry for link profile\n entry = ProfileEntry()\n entry.ptype = 'link'\n entry.start = start\n entry.end = end\n # link interval time 
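`makeNetworkCalculusCurves` slides a window of each breakpoint width across the profile and keeps the worst case over all positions: the maximum transferred data for the required (arrival) curve and the minimum for the provided (service) curve. A sketch of one such window evaluation, with an assumed cumulative-data function standing in for the profile lookups:

```python
def worst_case_in_window(data_at, horizon, window, step, take_max=True):
    """Max (arrival curve) or min (service curve) data transferred in
    any interval of length `window` inside [0, horizon]."""
    pick = max if take_max else min
    best = None
    t = window
    while t <= horizon:
        d = data_at(t) - data_at(t - window)
        best = d if best is None else pick(best, d)
        t += step
    return best

# toy cumulative data: 100 bps for the first 5 s of each 10 s period, then idle
def data_at(t):
    return 500.0 * (t // 10.0) + 100.0 * min(t % 10.0, 5.0)

print(worst_case_in_window(data_at, 20.0, 4.0, 0.5))         # 400.0, busiest window
print(worst_case_in_window(data_at, 20.0, 4.0, 0.5, False))  # 0.0, idle window
```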
bounds configured; now to calc data\n if pEndData <= rEndData:\n # set entry data\n entry.data = pEndData\n buff = rEndData - pEndData\n if buff > self.buffer[2]:\n self.buffer = [entry.end,entry.data,buff]\n else:\n # set entry data and see if there was a profile crossing\n if len(self.link) == 0 or self.link[-1].data < rEndData:\n rData = rInterval.slope*(rInterval.end - start)\n rStart= rInterval.data - rInterval.slope*(rInterval.end - rInterval.start)\n pStart= pInterval.data - pOffset - pInterval.slope*(pInterval.end - pInterval.start)\n point = get_intersection([pInterval.start,pStart],[pInterval.end,pInterval.data-pOffset],[rInterval.start,rStart],[rInterval.end,rInterval.data])\n if point[0] != -1:\n xEntry = ProfileEntry()\n xEntry.ptype = 'link'\n xEntry.start = start\n xEntry.end = point[0]\n xEntry.data = point[1]\n self.link.append(xEntry)\n entry.start = xEntry.end\n entry.data = rEndData\n self.link.append(entry)\n # do we need to add to the offset?\n if pEndData >= rEndData:\n pOffset += pEndData - rEndData\n self.link = [e for e in self.link if e.start != e.end]\n lData = 0\n for e in self.link:\n e.slope = (e.data - lData)/(e.end-e.start)\n lData = e.data\n self.calcDelay()\n return 0\n\n def calcDelay(self):\n if len(self.required) == 0 or len(self.link) == 0:\n return\n delay = [0,0,0]\n # match required points to link profile horizontally\n for r in self.required:\n for l in self.link:\n if l.data > r.data:\n offset = l.end-(l.data-r.data)/l.slope\n timeDiff = offset-r.end\n if timeDiff > delay[2]:\n delay = [r.end,r.data,timeDiff]\n break\n elif l.data == r.data:\n timeDiff = l.end - r.end\n if timeDiff > delay[2]:\n delay = [r.end,r.data,timeDiff]\n break\n # match link points to required profile horizontally\n for l in self.link:\n for r in self.required:\n if l.data < r.data:\n offset = r.end-(r.data-l.data)/r.slope\n timeDiff = l.end - offset\n if timeDiff > delay[2]:\n delay = [offset,l.data,timeDiff]\n break\n self.delay = delay\n return\n\n def calcData(self):\n if len(self.required) == 0 or len(self.provided) == 0:\n return\n rData = 0\n pData = {}\n for intf in self.interfaces:\n pData[intf] = 0\n for e in self.required:\n rData += e.slope*(e.end-e.start)\n e.data = rData\n for e in self.provided:\n pData[e.interface] += e.slope*(e.end-e.start)\n e.data = pData[e.interface]\n return\n\n def plotData(self,line_width):\n plt.figure(2)\n plt.hold(True)\n plotProfile('data',self.profile,'required',[8,4,2,4,2,4],'r[t]: ',line_width)\n plotProfile('data',self.profile,'provided',[2,4],'p[t]: ',line_width)\n plotProfile('data',self.link,'link',[6,12],'l[t]: ',line_width)\n\n buffplotx = [self.buffer[0],self.buffer[0]]\n buffploty = [self.buffer[1],self.buffer[1]+self.buffer[2]]\n plt.plot(buffplotx,buffploty,'0.5',label=r\"Buffer\",linewidth=line_width) #:%d B\"%(int(buff)/8)\n\n delayplotx = [self.delay[0],self.delay[0]+self.delay[2]]\n delayploty = [self.delay[1],self.delay[1]]\n plt.plot(delayplotx,delayploty,'0.8',label=r\"Delay\",linewidth=line_width) #:%0.4f s\"%float(delay)\n \n '''\n line, =plt.plot([self.period,self.period],[0,max(column(req,1))],linewidth=2,color='black', label=r\"Period End\")\n for i in range(2,self.num_periods+1):\n line, =plt.plot([period*i,period*i],[0,max(column(req,1))],linewidth=2,color='black')\n '''\n\n plt.title(\"Network Traffic vs. 
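`convolve` tracks the buffer bound as the largest vertical gap between the required curve and the delivered (link) curve, while `calcDelay` takes the largest horizontal gap. A hedged sketch of the vertical half, assuming both cumulative curves are sampled at a common set of times (names are illustrative):

```python
def max_backlog(times, required, provided):
    """Largest vertical gap required[t] - provided[t]: the buffer bound.
    `required` and `provided` are cumulative-bit samples at `times`."""
    worst = (0.0, 0.0)  # (time, backlog)
    for t, r, p in zip(times, required, provided):
        backlog = max(0.0, r - p)
        if backlog > worst[1]:
            worst = (t, backlog)
    return worst

times    = [0, 1, 2, 3, 4]
required = [0, 40, 80, 90, 100]
provided = [0, 25, 50, 75, 100]
print(max_backlog(times, required, provided))  # (2, 30.0)
```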
Time over %d period(s)\"%self.num_periods)\n plt.ylabel(\"Data (bits)\")\n plt.xlabel(\"Time (s)\")\n plt.legend(loc='upper left')\n #plt.grid(True)\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_ticks([])\n frame1.axes.get_yaxis().set_ticks([])\n plt.show()\n return\n\n def plotSlope(self,line_width):\n plt.figure(1)\n plt.hold(True)\n plotProfile('slope',self.profile,'required',[4,8],'',line_width)\n plotProfile('slope',self.profile,'provided',[2,4],'',line_width)\n plotProfile('slope',self.link,'link',[2,4],'',line_width)\n \n '''\n line, =plt.plot([self.period,self.period],[0,max(column(linkbw,1))],linewidth=2,color='black', label=r\"Period End\")\n for i in range(2,self.num_periods+1):\n line, =plt.plot([self.period*i,self.period*i],[0,max(column(linkbw,1))],linewidth=2,color='black')\n '''\n\n plt.title(\"Network Bandwidth vs. Time over %d period(s)\"%self.num_periods)\n plt.ylabel(\"Bandwidth (bps)\")\n plt.xlabel(\"Time (s)\")\n plt.legend(loc='lower left')\n #plt.grid(True)\n plt.show()\n return\n\n def __repr__(self):\n return \"NodeProfile()\"\n\n def __str__(self):\n retStr = 'Buffer: {0}\\nDelay: {1}\\n'.format(self.buffer,self.delay)\n retStr += \"Provided:\\n\"\n for e in self.provided:\n retStr += \"{0}\\n\".format(e)\n retStr += \"Required:\\n\"\n for e in self.required:\n retStr += \"{0}\\n\".format(e)\n retStr += \"Link:\\n\"\n for e in self.link:\n retStr += \"{0}\\n\".format(e)\n return retStr\n\nclass NetworkProfile:\n def __init__(self,_period,_num_periods):\n self.nodeProfiles = {}\n self.period = _period\n self.num_periods = _num_periods\n\n def addNodeProfile(self,node,profile):\n self.nodeProfiles[node] = profile\n\n def calcData(self):\n for n,p in self.nodeProfiles.iteritems():\n p.calcData()\n\n def convolve(self,node,interface):\n self.nodeProfiles[node].convolve(interface)\n return self.nodeProfiles[node]\n\n def makeNetworkCalculusCurves(self, node, step):\n self.nodeProfiles[node].makeNetworkCalculusCurves(step)\n\n def __repr__(self):\n return \"NetworkProfile()\"\n\n def __str__(self):\n retStr = \"NetworkProfile:\\n\"\n retStr += \"has period {0} and node profiles:\\n\".format(self.period)\n for n,p in self.nodeProfiles.iteritems():\n retStr += \"Node {0} has profiles:\\n{1}\\n\".format(n,p)\n return retStr\n\ndef get_entry_from_line(line=None):\n if line == None or len(line) == 0:\n return None\n fields = line.split(',')\n if len(fields) == 0 or fields[0][0] == '%':\n return None\n entry = ProfileEntry()\n entry.start = float(fields[0])\n entry.slope = float(fields[1])\n entry.latency = float(fields[2])\n if len(fields) == 4:\n entry.interface = fields[3]\n return entry\n\ndef gen_network_profile(nodeProfiles,appProfiles,app_node_map,period,num_periods):\n profiles = NetworkProfile(period,num_periods)\n for node,apps in app_node_map.iteritems():\n nodeProfile = NodeProfile(period,num_periods)\n nodeProfile.addProvidedProfile(nodeProfiles[node])\n for app in profiles:\n nodeProfile.addRequiredProfile(profiles[app])\n profiles.addNodeProfile(node,nodeProfile)\n\ndef main(): \n args = sys.argv\n options = Options()\n if options.parse_args(args):\n return -1\n\n nodes = get_nodeProfiles('scripts')\n apps = get_appProfiles('profiles')\n app_node_map = get_app_node_map(nodes,apps)\n networkProfile = NetworkProfile(options.period,options.num_periods)\n for node,profile in nodes.iteritems():\n nodeProfile = NodeProfile(options.period,options.num_periods)\n nodeProfile.addProvidedProfile(profile)\n if node in app_node_map.keys():\n for app in 
app_node_map[node]:\n if \",\" in apps[app]:\n nodeProfile.addRequiredProfile(apps[app])\n networkProfile.addNodeProfile(node,nodeProfile)\n networkProfile.calcData()\n\n if options.selected_node == '':\n options.selected_node=nodes.keys()[0]\n if options.selected_node not in nodes:\n print 'ERROR: node {0} not found in system!'.format(options.selected_node)\n return -1\n\n if options.selected_interface == '':\n if len(networkProfile.nodeProfiles[options.selected_node].interfaces) > 0:\n options.selected_interface = networkProfile.nodeProfiles[options.selected_node].interfaces[0]\n else:\n print 'ERROR: node {0} has no interfaces that can be analyzed!'.format(options.selected_node)\n return -1\n if options.selected_interface not in networkProfile.nodeProfiles[options.selected_node].interfaces:\n print 'ERROR: node {0} has no interface named {1}!'.format(options.selected_node,options.selected_interface)\n return -1\n\n print 'Using node: interface {0} on node {1}'.format(options.selected_interface,options.selected_node)\n print \"Using period \",options.period,\" over \",options.num_periods,\" periods\"\n\n if options.nc_mode:\n networkProfile.makeNetworkCalculusCurves(options.selected_node,options.nc_step_size)\n\n if networkProfile.convolve(options.selected_node,options.selected_interface) == -1:\n print 'Node {0} has cannot be analyzed for interface {1}: no usable profile'.format(options.selected_node,options.selected_interface)\n\n '''\n font = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : options.font_size}\n matplotlib.rc('font', **font)\n '''\n\n if options.plot_profiles == True:\n networkProfile.nodeProfiles[options.selected_node].plotSlope(options.plot_line_width)\n networkProfile.nodeProfiles[options.selected_node].plotData(options.plot_line_width)\n\n buff = networkProfile.nodeProfiles[options.selected_node].buffer\n print \"\\n[Time location, buffersize]:\",[buff[0],buff[2]]\n\n delay = networkProfile.nodeProfiles[options.selected_node].delay\n print \"[Time location, delay]:\",[delay[0],delay[2]]\n\n\n #if max(column(req,1)) > max(column(util,1)):\n # print \"\\nWARNING: DATA HAS NOT BEEN SENT BY END OF THE ANALYZED PERIOD(s)\"\n # print \"\\t APPLICATION MAY HAVE UNBOUNDED BUFFER GROWTH ON NETWORK\\n\"\n\n return\n \nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5053457021713257, "alphanum_fraction": 0.54347825050354, "avg_line_length": 29.47282600402832, "blob_id": "4517dbebe0dffcf7cee2d37724932cbea320e279", "content_id": "a006fb874fd29876b0ad26345feedfdd0e5af225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11224, "license_type": "no_license", "max_line_length": 88, "num_lines": 368, "path": "/src/analysis/v2.0/utils.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "from decimal import *\nfrom fractions import gcd\nimport copy\n\nclass bcolors:\n \"\"\"Extended characters used for coloring output text.\"\"\"\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \ndef lcm(a,b):\n \"\"\"\n Returns the least-common-multiple (LCM) of *a* and *b* as\n\n .. 
math::\n lcm = (a*b)/gcd(a,b)\n\n \"\"\"\n if int(a) != a:\n r = a - int(a)\n a = 1.0/r * a\n if int(b) != b:\n r = b - int(b)\n b = 1.0/r * b\n n = a*b\n d = gcd(int(a),int(b))\n return n/d\n\ndef makeVLine(v):\n \"\"\"\n Returns a list of [x,y] series for plotting a vertical line.\n\n :param list v: A list of values of the form::\n\n [ <bottom x location>, <bottom y location>, <height> ]\n \"\"\"\n y = [v[1],v[1]+v[2]]\n x = [v[0],v[0]]\n return [x,y]\n\ndef makeHLine(h):\n \"\"\"\n Returns a list of [x,y] series for plotting a horizontal line.\n\n :param list h: A list of values of the form::\n\n [ <left x location>, <left y location>, <length> ]\n \"\"\"\n y = [h[1],h[1]]\n x = [h[0],h[0]+h[2]]\n return [x,y]\n\ndef remove_degenerates(values):\n \"\"\"Make sure all value pairs are unique and sorted by time.\"\"\"\n tup = [ tuple(x) for x in values ]\n tmp = list(set(tup))\n retVals = []\n for t in tmp:\n times = [x[0] for x in retVals]\n if t[0] not in times:\n retVals.append(list(t))\n return sorted(retVals)\n\ndef repeat(values, period, num_periods):\n \"\"\"Repeat values periodically for some number of periods.\"\"\"\n original = copy.deepcopy(values)\n values = []\n for i in range(0, int(num_periods)):\n tmpValues = copy.deepcopy(original)\n shift(tmpValues, period * i)\n values.extend(tmpValues)\n return remove_degenerates(values)\n\ndef aggregate(values):\n \"\"\"Remove any sequential entries with the same value.\"\"\"\n prevVal = None\n vals = []\n for val in values:\n if val[1] != prevVal:\n vals.append(val)\n prevVal = val\n return vals\n\ndef integrate(values, t):\n \"\"\"Integrate all the values cumulatively and return the integrated values.\"\"\"\n intVals = []\n integrator = 0\n pVal = 0\n pTime = 0\n for x,y in values:\n integrator += pVal * Decimal(x - pTime)\n intVals.append([x, integrator])\n pTime = x\n pVal = y\n if intVals[-1][0] < t:\n intVals.append([t, integrator + pVal * Decimal(t-pTime)])\n return remove_degenerates(intVals)\n\ndef derive(values):\n \"\"\"Derive all the entries slopes from their data.\"\"\"\n dVals = []\n pTime = values[0][0]\n pVal = values[0][1]\n for x,y in values[1:]:\n d = Decimal(y - pVal) / Decimal(x - pTime)\n dVals.append([pTime, d])\n pTime = x\n pVal = y\n return remove_degenerates(dVals)\n\ndef split(values, t):\n \"\"\"\n Remove and return every entry from *values* whose time > *t*.\n \"\"\"\n tVal = get_value_at_time(values, t)\n\n remainder = [x for x in values if x[0] > t]\n remainder.insert(0,[t,tVal])\n remainder = remove_degenerates(remainder)\n\n values = [x for x in values if x[0] < t]\n values.append([t,tVal])\n values = remove_degenerates(values)\n\n return values, remainder\n\ndef shift(values, t):\n \"\"\"Add *t* to every value in *values*.\"\"\"\n for value in values:\n value[0] += t\n\ndef get_index_containing_time(values, t):\n \"\"\"\n Get the index of a value in *values* which contains time *t*\n \n :param list values: a :func:`list` of [x,y] values\n :param double t: time value for indexing\n \"\"\"\n for index, value in enumerate(values):\n if value[0] > t:\n return index - 1\n return len(values) - 1\n\ndef get_value_at_time(values, t, interpolate = True):\n \"\"\"\n Get the value at the given time *t* from *values* \n \n :param list values: :func:`list` of [x,y] values\n :param double t: time value \n :param bool interpolate: is the value interpolated or constant between values\n \"\"\"\n i = get_index_containing_time(values, t)\n if not interpolate:\n return values[i][1]\n else:\n nextInd = (i+1) % 
len(values)\n slope = values[nextInd][1] - values[i][1]\n timeDiff = values[nextInd][0] - values[i][0]\n if t == values[i][0] or timeDiff == 0.0:\n return values[i][1]\n else:\n return values[i][1] + slope / timeDiff * (t - values[i][0])\n\ndef get_times_at_value(values, value, interpolate = True):\n \"\"\"\n Get a list of times at which *values* match *value*.\n\n :param list values: a :func:`list` of [x,y] values\n :param double value: value to test against\n :param bool interpolate: is the value interpolated or constant between values\n \"\"\"\n times = []\n prevY = 0\n prevX = 0\n for x,y in values:\n if y == value:\n times.append(x)\n elif interpolate:\n if y > value and prevY < value or\\\n y < value and prevY > value:\n slope = (y-prevY)/(x-prevX)\n t = (value-prevY) / slope + prevX\n times.append(t)\n prevX = x\n prevY = y\n if times:\n times = [min(times), max(times)]\n return times\n\ndef subtract_values(values1, values2, interpolate = True):\n \"\"\"\n Subtract *values2* from *values1*, using either interpolated\n values or constant values.\n \"\"\"\n newVals = []\n allVals = []\n for val1 in values1:\n allVals.append([val1,'1'])\n for val2 in values2:\n allVals.append([val2,'2'])\n allVals = sorted(allVals)\n for val in allVals:\n if val[1] == '1':\n t = val[0][0]\n y = get_value_at_time(values2, t, interpolate)\n y = max(0, val[0][1] - y)\n newVals.append([t, y])\n else:\n t = val[0][0]\n y = get_value_at_time(values1, t, interpolate)\n y = max(0, y - val[0][1])\n newVals.append([t, y])\n newVals = remove_degenerates(newVals)\n return newVals\n\ndef add_values(values1, values2, interpolate = True):\n \"\"\"\n Add *values2* to *values1*, using either interpolated\n values or constant values.\n \"\"\"\n newVals = []\n allVals = []\n for val1 in values1:\n allVals.append([val1,'1'])\n for val2 in values2:\n allVals.append([val2,'2'])\n allVals = sorted(allVals)\n for val in allVals:\n t = val[0][0]\n if t < values2[0][0] or t < values1[0][0] or\\\n t > values2[0][0] or t > values1[0][0]:\n newVals.append(val[0])\n elif val[1] == '1':\n y = get_value_at_time(values2, t, interpolate)\n newVals.append([t, y + val[0][1]])\n elif val[1] == '2':\n y = get_value_at_time(values1, t, interpolate)\n newVals.append([t, y + val[0][1]])\n newVals = remove_degenerates(newVals)\n return newVals\n\ndef max_vertical_difference(values1, values2, interpolate = True, epsilon = 0.1):\n \"\"\"Get maximum vertical difference of *values2* - *values1*.\"\"\"\n max_diff = [0, 0, 0]\n times = [ x[0] for x in values1 ]\n times.extend( [ x[0] for x in values2 ] )\n times = sorted(list(set(times)))\n for t in times:\n d1 = get_value_at_time(values1, t, interpolate)\n d2 = get_value_at_time(values2, t, interpolate)\n diff = abs(d2 - d1)\n if diff > max_diff[2] and diff > epsilon:\n max_diff = [t, min(d1,d2), diff]\n max_diff = [float(max_diff[0]), float(max_diff[1]), float(max_diff[2])]\n return max_diff\n\ndef max_horizontal_difference(values1, values2, interpolate = True, epsilon = 0.000001):\n \"\"\"Get maximum horizontal difference of *values2* - *values1*.\"\"\"\n max_diff = [0, 0, 0]\n datas = [ x[1] for x in values1 ]\n datas.extend( [ x[1] for x in values2 ] )\n datas = sorted(list(set(datas)))\n for d in datas:\n t_1 = get_times_at_value(values1, d, interpolate)\n t_2 = get_times_at_value(values2, d, interpolate)\n if t_1 and t_2:\n if d > 0:\n diff = abs(t_2[1] - t_1[0])\n else: # if d is 0, we want to know the full max distance\n diff = abs(t_2[1] - t_1[0])\n if diff > max_diff[2] and diff > 
epsilon:\n max_diff = [ min(t_1[0], t_2[0]), d, diff ]\n elif t_1:\n if d > 0:\n diff = abs(values1[-1][0] - t_1[0])\n if diff > max_diff[2] and diff > epsilon:\n max_diff = [ t_1[0], d, diff ]\n max_diff = [float(max_diff[0]), float(max_diff[1]), float(max_diff[2])]\n return max_diff\n\ndef convert_values_to_graph(values, interpolate = True):\n \"\"\"Make the *values* plottable by separating the x,y values into separate lists.\"\"\"\n xvals = []\n yvals = []\n prevY = 0\n for x,y in values:\n if not interpolate:\n xvals.append(float(x))\n yvals.append(float(prevY))\n xvals.append(float(x))\n yvals.append(float(y))\n prevY = float(y)\n return [xvals, yvals]\n\ndef get_intersection(p11,p12,p21,p22):\n \"\"\"\n Simple function to get a intersection of two lines defined by their endpoints\n\n :param p11: :func:`list` [x,y] starting point of line 1\n :param p12: :func:`list` [x,y] ending point of line 1\n :param p21: :func:`list` [x,y] starting point of line 2\n :param p22: :func:`list` [x,y] ending point of line 2\n \"\"\"\n if not p11 or not p12 or not p21 or not p22:\n return []\n if p11==p12 or p21==p22:\n return []\n x1 = p11[0]; y1 = p11[1]\n x2 = p12[0]; y2 = p12[1]\n x3 = p21[0]; y3 = p21[1]\n x4 = p22[0]; y4 = p22[1]\n m1 = (y2-y1)/(x2-x1)\n m2 = (y4-y3)/(x4-x3)\n x = -1\n y = -1\n point = []\n if m1 != 0.0 and m2 != 0.0 and m1 != m2:\n x = ((y3-y1)+(m1*x1-m2*x3))/(m1-m2)\n y = ((x3-x1)+(y1/m1-y3/m2))/(Decimal(1.0)/m1-Decimal(1.0)/m2)\n else:\n if m1 == 0.0 and m2 != 0.0:\n y = y1\n x = (1/m2)*(y-y3) + x3\n elif m2 == 0.0 and m1 != 0.0:\n y = y3\n x = (1/m1)*(y-y1) + x1\n else: # both slopes are 0\n y = y1\n if x1 >= x3 and x1 <= x4:\n x = x1\n elif x2 >= x3 and x2 <= x4:\n x = x2\n if x >= x1 and x <= x2 and x >= x3 and x <= x4 and\\\n y >= y1 and y <= y2 and y >= y3 and y <= y4:\n point = [x,y]\n return point\n\n\nif __name__ == \"__main__\":\n v1 = [[0,0],[10,10],[35,15]]\n v2 = [[0,0],[5,10],[35,50]]\n print 'v1 & v2'\n print '\\t',v1\n print '\\t',v2\n print 'Interpolated:'\n print '\\t',add_values(v1,v2)\n print '\\t',subtract_values(v2,v1)\n print 'Non Interpolated:'\n print '\\t',add_values(v1,v2, False)\n print '\\t',subtract_values(v2,v1, False)\n print 'Remainder:'\n print '\\t',split(v1,20)\n print '\\t',split(v2,20)\n print 'Vertical:'\n print '\\t',max_vertical_difference(v1,v2, True)\n print 'Horizontal:'\n print '\\t',max_horizontal_difference(v1,v2, True)\n print 'Derive:'\n print '\\t',derive(v1)\n print '\\t',derive(v2)\n print 'Integrate:'\n print '\\t',integrate(derive(v1), 35)\n print '\\t',integrate(derive(v2), 35)\n \n \n" }, { "alpha_fraction": 0.6218553185462952, "alphanum_fraction": 0.6352201104164124, "avg_line_length": 23.941177368164062, "blob_id": "912946b5da56deda9be33c0df835a72d4c014b7f", "content_id": "ad6d4549d343c22c19063e25e740f4176af1acb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 64, "num_lines": 51, "path": "/src/middleware/v1.0/Server.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#include \"Server.hpp\"\n\nint main(int argc, char **argv) {\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n IPV6_Connection interface;\n\n std::string outputFile = options.outputFile;\n interface.serverIP = options.ip;\n interface.serverPort = options.port;\n\n long messageBitLength = options.bitLength;\n long messageStrLength = ceil((double)messageBitLength/8.0f);\n char 
*messageData = new char[messageStrLength+2];\n\n if ( interface.Initialize(true,false) != 0 ) {\n TG_LOG(\"ERROR: Couldn't initialize interface!\\n\");\n return -1;\n }\n\n long id = 0;\n while ( true ) {\n memset(messageData,0,messageStrLength+2);\n if ( interface.receive(messageData,messageStrLength) > 0 ) {\n long id = atol(messageData);\n if ( id >=0 ) {\n\tMessage msg;\n\tmsg.TimeStamp();\n\tmsg.Id(id);\n\tmsg.Bytes(strlen(messageData));\n\tappend_data(outputFile,msg);\n }\n }\n }\n}\n\nlong precision = 30;// for file output\nint append_data(std::string fname, Message& data) {\n std::ofstream file(fname.c_str(),std::ofstream::app);\n if ( !file.is_open() )\n return -1;\n file << data.Id() << \",\" << std::setprecision(precision)\n << data.LastDoubleTime() << \",\"\n << data.Bits()\n << \"\\n\";\n file.close();\n return 0;\n}\n" }, { "alpha_fraction": 0.7396268248558044, "alphanum_fraction": 0.7451963424682617, "avg_line_length": 27.95967674255371, "blob_id": "570b6d5134ad19e10713c2cc30bc28b3c834f791", "content_id": "a1db13aca4eea7b2e53aa7c87c09a920cbc66296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3591, "license_type": "no_license", "max_line_length": 311, "num_lines": 124, "path": "/src/middleware/v2.0/README.md", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "# Using the Network Middleware\n\nThis README covers the building, configuration, and usage of the\nnetworking middleware modules `TC Wrapper`, `Sender`, `Receiver`,\n`Client`, `Server`. These modules provide functionality for network\ntraffic management and generation.\n\nThese modules require network profiles to be provided to them\n(generally through an API or through the command line). These\nprofiles have the form:\n\n```csv\n# period = <period in seconds>\n# start = <start time in seconds>\n# priority = <priority of the profile>\n# uuid = <uuid of the profile>\n< time (s), bandwidth (bps), max bandwidth (bps), latency (ms) >\n```\n\n## TC Wrapper\n\nThe TC Wrapper is an automated utility for configuring the linux\nadvanced routing and traffic control (TC) utilities. These utilities\nallow the configuration of prioritized network queues and filters with\nvarious options, including bandwidth and latency enforcement on\noutgoing traffic. The TC Wrapper is a convenience utility for\nconfiguring a token bucket filter (TBF) or heirarchical token buket\nfilter (HTB) that already exists in the system according to a network\nprofile. Following the profile, the TC Wrapper configures the\nfollowing parameters for these filters as a function of time:\n\n* Bandwidth (bits/sec)\n* Buffer Size (bits)\n* Latency (ms)\n\n### Building\n\nBuild the TC Wrapper by issuing `make tcWrapper`.\n\n### Configuration\n\nThe machine on which TC Wrapper is run must be preconfigured with TC\nto have either a Token Bucket Filter (TBF) or a Heirarchical Token\nBucket Filter (HTB). These filters are specified with a parent node\nand a handle ID, in addition to configuration parameters such as\nbandwidth, buffer size, and latency. _It is recommended to simply use\na single TBF for the entire enterface_.\n\nSuch TC configuration can be configured for instance through the\nfollowing command:\n\n```bash\nsudo tc qdisc del dev $INTERFACE root\nsudo tc qdisc add dev $INTERFACE root handle 1: tbf\\\n\trate 100Mbit peakrate 101Mbit mtu 8192 latency 1ms burst 1540\n```\n\nwhere `$INTERFACE` corresponds to the network interface, e.g. `eth0`. 
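For reference, a minimal reader for the profile format shown at the top of this README could look like the sketch below. It follows the documented layout (`# key = value` header lines followed by `time, bandwidth, max bandwidth, latency` rows); it is an illustration of the format, not the middleware's own parser, and the file name in the usage line is hypothetical:

```python
def parse_profile(path):
    """Parse '# key = value' headers followed by
    'time, bandwidth, max bandwidth, latency' CSV rows."""
    meta, rows = {}, []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                key, _, value = line.lstrip('# ').partition('=')
                meta[key.strip()] = value.strip()
            else:
                rows.append([float(x) for x in line.split(',')])
    return meta, rows

meta, rows = parse_profile('node_profile.csv')  # hypothetical file name
print(meta.get('period'), len(rows))
```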
In this example, the `parent` of the TBF is `root` and the `handle` is `1:`\n\nSome example configurations for TC configuration can be found in this repository in various `tc_config*.sh` scripts, in subfolders of the [analysis subfolder](../../analysis/v2.0/).\n\nThe TC Wrapper is configured through the command line through the\nfollowing options:\n\n```bash\n--profile <profile name>\n--is_router (this node is a router node)\n--use_tbf (TC filter is TBF)\n--use_htb (TC filter is HTB)\n--buffer <buffer size>\n--interface <interface name>\n--parent <parent TC object>\n--handle <handle TC object>\n```\n\n### Usage\n\nThe TC Wrapper must be run as `root`, and the arguments available to be interpreted by the program are described above. TC Wrapper will continue to regulate the interface according to the profile until it is cancelled or killed. The interface's filter should be removed at the end of the experiment by calling\n\n```bash\nsudo tc qdisc del dev $INTERFACE root\n```\n\n## Sender\n\n### Building\n\nThe sender is a library to be used in other code and as such does not\nhave a build target.\n\n### Configuration\n\n### Usage\n\n## Receiver\n\n### Building\n\nThe receiver is a library to be used in other code and as such does\nnot have a build target.\n\n### Configuration\n\n### Usage\n\n## Client\n\n### Building\n\nBuild the Client by issuing `make client`.\n\n### Configuration\n\n### Usage\n\n## Server\n\n### Building\n\nBuild the Server by issuing `make server`.\n\n### Configuration\n\n### Usage\n" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 17, "blob_id": "c1813419abc83c6fe357fc40ea9231dab1dc7f9a", "content_id": "29bd207c1eea4e980e0347dae65e2f7ad182ef78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 36, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/docs/_sources/python-api/middleware/connection-subsys.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Connection Subsys\n=================\n" }, { "alpha_fraction": 0.5846704840660095, "alphanum_fraction": 0.5924068689346313, "avg_line_length": 36.326202392578125, "blob_id": "5c6e3de5ed5b9eba1594c07055e76f662c08e831", "content_id": "38cffa7f3f25037e5e734263b8efb4c319596447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6980, "license_type": "no_license", "max_line_length": 109, "num_lines": 187, "path": "/src/analysis/v2.0/plotting.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "from utils import makeHLine, makeVLine\nfrom decimal import *\n\nhavePLT = False\ntry:\n import matplotlib.pyplot as plt\n from matplotlib.text import OffsetFrom\n havePLT=True\nexcept ImportError:\n print \"Package python-matplotlib not found, plotting disabled.\"\n\nclass PlotOptions:\n \"\"\"\n Options for setting up a plot in a figure.\n \"\"\"\n def __init__(self,\n profileList,\n labelList,\n dashList,\n plotDict,\n annotationList,\n title,\n xlabel,\n ylabel,\n legend_loc ):\n \"\"\"\n :param list profileList: A list of [x,y] data series (profiles) to be plotted together\n :param list labelList: A list of strings which label the profiles\n :param list dashList: A list of integer lists which specify the dash properties for each profile\n :param list annotationList: A list of annotations to be added to the plot\n :param int plotDict: The dictionary containing options for the plot, e.g. 
line width, etc.\n :param string title: The title to be given to the figure\n :param string xlabel: The label for the x-axis\n :param string ylabel: The label for the y-axis\n :param string legend_loc: A string specifying the location of the legend, e.g. \"upper left\"\n \"\"\"\n self.profileList = profileList\n self.labelList = labelList\n self.dashList = dashList\n self.annotationList = annotationList\n self.plotDict = plotDict\n self.title = title\n self.xlabel = xlabel\n self.ylabel = ylabel\n self.legend_loc = legend_loc\n\ndef plot_bandwidth_and_data( profList, delay, buffer, num_periods, plot_dict, xaxislabel = \"Time (s)\" ):\n \"\"\"\n :param in profList: a list of :class:`networkProfile.Profile` to be plotted\n :param in delay: a delay structure as generated from :func:`networkProfile.Profile.Convolve`\n :param in buffer: a buffer structure as generated from :func:`networkProfile.Profile.Convolve`\n :param in num_periods: how many periods the plot covers\n :param in plot_dict: dictionary containing plotting options\n \"\"\"\n # SET UP THE BANDWIDTH VS TIME PLOT\n profileList = []\n labelList = []\n dashList = []\n annotationList = []\n dashBase = 4\n for p in profList:\n profileList.append(p.MakeGraphPointsSlope())\n labelList.append('{}'.format(p.name))\n dashList.append([dashBase,dashBase/2])\n annotationList.append([])\n dashBase += 2\n plot1 = PlotOptions(\n profileList = profileList,\n labelList = labelList,\n dashList = dashList,\n plotDict = plot_dict,\n annotationList = annotationList,\n title = \"Network Data Rate vs. Time over {} period(s)\".format(num_periods),\n ylabel = \"Data Rate (bps)\",\n xlabel = xaxislabel,\n legend_loc = \"lower left\"\n )\n # SET UP THE DATA VS TIME PLOT\n profileList = []\n labelList = []\n dashList = []\n annotationList = []\n dashBase = 4\n for p in profList:\n profileList.append(p.MakeGraphPointsData())\n labelList.append('{}'.format(p.name))\n dashList.append([dashBase,dashBase/2])\n annotationList.append([])\n dashBase += 2\n profileList.extend( [makeHLine(delay), makeVLine(buffer)] )\n labelList.extend( ['Delay', 'Buffer'] )\n dashList.extend( [ [], [] ] )\n annotationList.append( [ \"Delay = {} s\".format(delay[2]), delay[0], delay[1] ] )\n annotationList.append( [ \"Buffer = {} b\".format(buffer[2]), buffer[0], buffer[1] ] )\n plot2 = PlotOptions(\n profileList = profileList,\n labelList = labelList,\n dashList = dashList,\n plotDict = plot_dict,\n annotationList = annotationList,\n title = \"Network Capacity vs. 
Time over {} period(s)\".format(num_periods),\n ylabel = \"Cumulative Capacity (bits)\",\n xlabel = xaxislabel,\n legend_loc = \"upper left\"\n )\n # Plot both of the graphs now they have been set up\n makeGraphs([plot1,plot2])\n\ndef makeGraphs(pOptionsList):\n \"\"\"\n This function makes a figure for each PlotOptions object it receives in the list.\n\n :param list pOptionsList: A list of :class:`PlotOptions` describing the figures to be drawn\n \"\"\"\n figNum = 0\n for pOpt in pOptionsList:\n clearAnnotations()\n plt.figure(figNum)\n plt.hold(True)\n for i in range(0,len(pOpt.profileList)):\n line, = plt.plot( pOpt.profileList[i][0], pOpt.profileList[i][1],\n label = r\"{}\".format(pOpt.labelList[i]),\n linewidth = pOpt.plotDict['linewidth'] )\n if pOpt.dashList and pOpt.plotDict['dashes']:\n line.set_dashes( pOpt.dashList[i] )\n if pOpt.annotationList[i] and pOpt.plotDict['annotations']: addAnnotation(pOpt.annotationList[i])\n setFigureOpts( title = pOpt.title,\n ylabel = pOpt.ylabel,\n xlabel = pOpt.xlabel,\n legend_loc = pOpt.legend_loc )\n figNum += 1\n if not pOpt.plotDict['axes_tickmarks']:\n plt.xticks(())\n plt.yticks(())\n plt.show()\n\nannotations = []\ndef clearAnnotations():\n global annotations\n annotations = []\n\ndef addAnnotation(annotation):\n \"\"\"\n Adds an annotation to the currently active figure.\n\n :param in list annotation: a :func:`list` of the form [ <string>, <x position>, <y position> ]\n \"\"\"\n xy = (annotation[1],annotation[2])\n xt = 20\n xmax = plt.xlim()[1]\n ymax = plt.ylim()[1]\n if abs(xy[0] - xmax) < xmax/5.0:\n xt = -100\n yt = 20 \n if abs(xy[1] - ymax) < ymax/5.0:\n yt = -20\n if annotations and abs(annotations[-1].xy[1] - xy[1]) < ymax/5.0:\n yt -= 20\n ann = plt.annotate(annotation[0],\n xy=xy, xycoords='data',\n xytext=(xt,yt), textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"-|>\",\n connectionstyle=\"arc3,rad=-0.2\",\n fc=\"w\"), \n )\n annotations.append(ann)\n \ndef setFigureOpts(title, ylabel, xlabel, legend_loc):\n \"\"\"\n Configure the figure's options\n\n :param string title: The title to be given to the figure\n :param string xlabel: The label for the x-axis\n :param string ylabel: The label for the y-axis\n :param string legend_loc: A string specifying the location of the legend, e.g. 
\"upper left\"\n \"\"\"\n plt.title(title)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.legend(loc=legend_loc)\n \ndef disablePlotTicks():\n \"\"\" Disable the numbers on the x and y axes.\"\"\"\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_ticks([])\n frame1.axes.get_yaxis().set_ticks([])\n" }, { "alpha_fraction": 0.5538461804389954, "alphanum_fraction": 0.596247673034668, "avg_line_length": 21.02479362487793, "blob_id": "4b61c4675d2f102e606315cc47e792b0670fbd59", "content_id": "719dd1f7bc6077fee4238f8145331124854961eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 88, "num_lines": 121, "path": "/src/analysis/v2.0/routing_test/tc_config_nodes.sh", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nTC=/sbin/tc\nDEV=eth0\n\nUSETBF=\"true\"\nBW=70000\nDELAY=0\nBUCKET=100\nBUFFER=10000\n\nPRINT_STATUS=\"false\"\nCLEAR=\"false\"\n\nfor i in \"$@\"\ndo\n case $i in\n\t-p|--print)\n\t PRINT_STATUS=\"true\"\n\t shift # past argument=value\n\t ;;\n\t--clear)\n\t CLEAR=\"true\"\n\t shift # past argument=value\n\t ;;\n\t-d=*|--delay=*)\n\t DELAY=\"${i#*=}\"\n\t shift # past argument=value\n\t ;;\n\t-b=*|--bandwidth=*)\n\t BW=\"${i#*=}\"\n\t shift # past argument=value\n\t ;;\n\t-k=*|--bucket=*)\n\t BUCKET=\"${i#*=}\"\n\t shift # past argument=value\n\t ;;\n\t-f=*|--buffer=*)\n\t BUFFER=\"${i#*=}\"\n\t shift # past argument=value\n\t ;;\n\t--use_tbf)\n\t USETBF=\"true\"\n\t shift # past argument with no value\n\t ;;\n\t--use_htb)\n\t USETBF=\"false\"\n\t shift # past argument with no value\n\t ;;\n\t*)\n\t # unknown option\n\t ;;\n esac\n done\n\necho \"Using options:\"\necho \" use_tbf: ${USETBF}\"\necho \" bandwidth: ${BW}\"\necho \" delay: ${DELAY}\"\necho \" bucket: ${BUCKET}\"\necho \" buffer: ${BUFFER}\"\n\nif [[ \"$PRINT_STATUS\" = \"true\" ]]\nthen\n $TC -s qdisc ls dev $DEV\n $TC -s class ls dev $DEV\nexit\nfi\n\n# clean existing down- and uplink qdiscs, hide errors\n$TC qdisc del dev $DEV root 2> /dev/null > /dev/null\n$TC qdisc del dev $DEV ingress 2> /dev/null > /dev/null\n\nif [[ \"$CLEAR\" = \"true\" ]]\nthen\n exit\nfi\n\nlet \"PEAKBW=${BW}+10\"\nlet \"BW2=${BW}+${BW}\"\n\n###### uplink\n\n$TC qdisc add dev ${DEV} root handle 1: prio\n$TC qdisc add dev ${DEV} parent 1:1 handle 11: netem delay ${DELAY}ms\n$TC qdisc add dev ${DEV} parent 1:2 handle 12: pfifo\n\nif [[ \"$USETBF\" = \"true\" ]]\nthen\n $TC qdisc add dev ${DEV} parent 11:1 handle 2: tbf \\\n\trate ${BW}bit limit ${BUFFER}k burst ${BUCKET}\nelse\n $TC qdisc add dev ${DEV} parent 11:1 handle 2: htb \n $TC class add dev ${DEV} parent 2: classid 2:1 htb rate ${BW2}bit\n $TC class add dev ${DEV} parent 2:1 classid 2:10 htb rate ${BW}bit ceil ${PEAKBW}bit\n $TC qdisc add dev ${DEV} parent 2:10 handle 21: pfifo\n # ceil ${PEAKBW}bit # burst ${BUCKET # cburst 10\n # burst ${BUCKET} # cburst 10\nfi\n\necho \"set qdiscs up\"\n\n# FILTER APPLICATION TRAFFIC VERSUS NON APP TRAFIC\n$TC filter add dev ${DEV} protocol ip parent 1: prio 1 u32 \\\n match ip dst 10.1.1.0/24 flowid 1:1\n\nif [[ \"$USETBF\" = \"true\" ]]\nthen\n echo \"\"\nelse\n $TC filter add dev ${DEV} protocol ip parent 2: prio 1 u32 \\\n\tmatch ip dst 10.1.1.0/24 flowid 2:10\nfi\n\necho \"set priority filters up\"\n\n$TC filter add dev ${DEV} protocol ip parent 1: prio 2 u32 \\\n match ip src 192.168.122.0/24 flowid 1:2\n#$TC filter add dev ${DEV} protocol ip parent 1: prio 2 u32 \\\n# match ip src 10.1.1.0/24 flowid 
1:2\necho \"set other filters up\"\n" }, { "alpha_fraction": 0.5742827653884888, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 26.11111068725586, "blob_id": "ae092a2a5e2c4bfee419b5ca49a8297614737ec4", "content_id": "b25738a7d4c6ff5fd0c9d75b627515a87244fb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1952, "license_type": "no_license", "max_line_length": 102, "num_lines": 72, "path": "/src/middleware/v1.0/tcWrapper.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#include \"tcWrapper.hpp\"\n\nNetworkProfile profile;\n\nint main(int argc, char **argv) {\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n std::string interface = options.interface;\n std::string profileFile = options.profile;\n if ( profile.initializeFromFile(profileFile.c_str()) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize node profile!\\n\");\n return -1;\n }\n\n unsigned long long bandwidth;\n unsigned long long latency;\n timespec remainingTime, wakeTime;\n int pid = 1;\n while ( true ) {\n if ( profile.getNextInterval( wakeTime, bandwidth, latency ) == 0 ) {\n TG_LOG(\"Sleeping until %lu.%09lu\\n\", wakeTime.tv_sec, wakeTime.tv_nsec);\n while ( clock_nanosleep( CLOCK_REALTIME, TIMER_ABSTIME, &wakeTime, &remainingTime ) == EINTR ) {\n\tTG_LOG(\"WHO HAS AWOKEN ME FROM MY SLUMBER?!\\n\");\n }\n\n TG_LOG(\"Setting bandwidth to %llu bps and latency to %llu ms\\n\",bandwidth, latency);\n\n char tcCommand[256];\n sprintf( tcCommand,\n\t \"qdisc replace dev %s root tbf rate %llu.0bit latency %llu.0ms burst 1540\", \n\t interface.c_str(),\n\t bandwidth,\n\t latency);\n\n char *pch;\n char *tc_argv[50];\n\n int num_args = 1;\n\n tc_argv[0] = (char*)malloc(strlen(\"/sbin/tc\")+1);\n sprintf(tc_argv[0],\"/sbin/tc\");\n\n pch = strtok(tcCommand,\" \");\n while ( pch != NULL && num_args < 50 ) {\n\ttc_argv[num_args] = (char*)malloc(strlen(pch)+1);\n\tsprintf(tc_argv[num_args],\"%s\",pch);\n\tnum_args++;\n\tpch = strtok(NULL,\" \");\n }\n\n tc_argv[num_args] = (char *)NULL;\n\n pid = vfork();\n if ( pid == 0 ) { // child\n\tint tc_ret_val = execv(tc_argv[0],tc_argv);\n\tTG_LOG(\"ERROR: execv failed with retval: %d\\n\",tc_ret_val);\n\texit(1);\n }\n else if ( pid > 0 ) { // parent\n\tfor (int i=0;i<num_args;i++) {\n\t free(tc_argv[i]);\n\t}\n }\n else { // error\n\tTG_LOG(\"ERROR: could not spawn child to run tc!\\n\");\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5259835124015808, "avg_line_length": 32.93955993652344, "blob_id": "125e7b77c3094b5cf6a5fdc1f709c1b29d98d57e", "content_id": "0f8114d073c663da2a534622f35911a3aebbd09a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6177, "license_type": "no_license", "max_line_length": 104, "num_lines": 182, "path": "/src/analysis/v2.0/generateTDMA.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"\n\"\"\" \n\n# QoS files have 4 columns: time (s), BW(bps), latency (ms), Network Link (id #)\nimport sys, os, csv, copy, glob\n\nclass Options:\n def __init__(self,\n tdmaPeriod = 0.01,\n tdmaSlots = 2,\n node = \"notdma\",\n outputFilename = \"tdma\",\n activeSlot = 0\n ):\n self.tdmaPeriod = tdmaPeriod\n self.tdmaSlots = tdmaSlots\n self.node = node\n self.outputFilename = outputFilename\n self.activeSlot = activeSlot\n\n def parseArgs(self,args):\n argind = 1\n while argind < len(args):\n if args[argind] == 
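The wrapper above rebuilds an argument vector and forks `/sbin/tc` for every profile interval. A sketch of the same per-interval action in Python, assuming the single root TBF layout recommended in the middleware README; `subprocess` stands in for the `vfork`/`execv` pair, and the command must run as root to actually take effect:

```python
import subprocess

def apply_interval(interface, bandwidth_bps, latency_ms, dry_run=True):
    """Replace the root TBF with this interval's rate and latency,
    mirroring the per-interval tc command built by the wrapper."""
    cmd = ['/sbin/tc', 'qdisc', 'replace', 'dev', interface, 'root', 'tbf',
           'rate', '{0}bit'.format(int(bandwidth_bps)),
           'latency', '{0}ms'.format(int(latency_ms)),
           'burst', '1540']
    if dry_run:
        print(' '.join(cmd))
    else:
        subprocess.check_call(cmd)

apply_interval('eth0', 70000, 100)
```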
\"-P\":\n self.tdmaPeriod = float(args[argind+1])\n if self.tdmaPeriod <= 0:\n print \"Error! You must specify a tdma time period > 0\"\n return -1\n argind += 2\n elif args[argind] == \"-N\":\n self.node = args[argind+1]\n argind += 2\n elif args[argind] == \"-O\":\n self.outputFilename = args[argind+1]\n argind += 2\n elif args[argind] == \"-S\":\n self.tdmaSlots = int(args[argind+1])\n if self.tdmaSlots <= 0:\n print \"Error! You must specify a number of tmda slots > 0\"\n argind += 2\n elif args[argind] == \"-A\":\n self.activeSlot = float(args[argind+1])\n if self.activeSlot <= 0:\n print \"Error! You must specify an active TDMA slot > 0\"\n return -1\n argind += 2\n elif args[argind] == \"-?\" or args[argind] == \"-h\":\n print \"\"\"Usage:\\n\\t\"\"\",args[0],\"\"\"\n \\t\\t-N <(N)ode name>\n \\t\\t-O <(O)utput Filename>\n \\t\\t-S <number of tdma (S)lots in each period>\n \\t\\t-A <0-indexed ID of (A)ctive slot>\n \\t\\t-P <tdma (P)eriod (seconds)>\\n\"\"\"\n return -1\n else:\n print \"\"\"Usage:\\n\\t\"\"\",args[0],\"\"\"\n \\t\\t-N <(N)ode name>\n \\t\\t-O <(O)utput Filename>\n \\t\\t-S <number of tdma (S)lots in each period>\n \\t\\t-A <0-indexed ID of (A)ctive slot>\n \\t\\t-P <tdma (P)eriod (seconds)>\\n\"\"\"\n return -1\n return 0\n\nclass TDMA:\n \"\"\"\n A TDMA period describes when nodes can transmit in which slots on the network during a given period.\n \"\"\"\n def __init__(self,period,slots,selectedSlot):\n \"\"\"\n :param double period: the period of the TDMA profile\n :param int slots: number of slots this TDMA profile has\n :param int selectedSlot: which slot are we analyzing?\n \"\"\"\n self.period = period\n self.slots = slots\n self.selectedSlot = selectedSlot\n\nclass ProfileEntry:\n def __init__(self,start=-1,end=-1,bandwidth=-1,interface=''):\n self.start = start\n self.end = end\n self.bandwidth = bandwidth\n self.interface = interface \n\n def __str__(self):\n retStr = \"\"\n retStr += \"{0},{1},0,{2}\".format(self.start,self.bandwidth,self.interface)\n return retStr\n\n def fromLine(self,line=None):\n if line == None or len(line) == 0:\n self.__init__()\n return\n fields = line.split(',')\n if len(fields) == 0 or fields[0][0] == '%':\n self.__init__()\n return\n self.start = float(fields[0])\n self.bandwidth = float(fields[1])\n self.interface = fields[3]\n\ndef generateNewProfile(oldProfile,tdma):\n \"\"\"\n Create a TDMA profile network Profile from a regular network Profile\n\n :param list oldProfile: list of :class:`networkProfile.ProfileEntry` describing the original profile\n :param class tdma: a :class:`generateTDMA.TDMA` object describing the TDMA scheme\n \"\"\"\n newProfile = []\n for interval in oldProfile:\n tdmaBandwidth = interval.bandwidth * tdma.slots\n tdmaTime = interval.start\n while tdmaTime < interval.end:\n for slotNum in range(0,tdma.slots):\n selectedBW = 0\n if slotNum == tdma.selectedSlot:\n selectedBW = tdmaBandwidth\n newProfile.append(\n ProfileEntry(\n tdmaTime,\n tdmaTime + tdma.period / tdma.slots,\n selectedBW,\n interval.interface\n )\n )\n tdmaTime += tdma.period / tdma.slots\n return newProfile\n\ndef get_nodeProfiles(folder):\n profile_dir = os.getcwd()+os.sep+folder\n nodes = {}\n if os.path.isdir(profile_dir):\n print 'Found ',profile_dir\n for file in glob.glob(profile_dir+os.sep+'*config.csv'):\n node_name = file.replace('_crm_config.csv','')\n node_name = node_name.replace(profile_dir+os.sep,'')\n if node_name != 'crm_config.csv':\n with open(file,'r+') as f:\n content = f.read()\n nodes[node_name] = content\n 
    else:\n        print \"ERROR: \",profile_dir,\" doesn't exist!\"\n    return nodes\n\ndef generateProfile(txtProfile):\n    nodeProfile = []\n    lines = txtProfile.split('\\n')\n    for line in lines:\n        newEntry = ProfileEntry()\n        newEntry.fromLine(line)\n        if newEntry.start >= 0:\n            if len(nodeProfile) > 0:\n                nodeProfile[-1].end = newEntry.start\n            nodeProfile.append(newEntry)\n    return nodeProfile\n\ndef main(): \n    args = sys.argv\n\n    options = Options()\n    if options.parseArgs(args) < 0:\n        return\n    tdma = TDMA(options.tdmaPeriod,options.tdmaSlots,options.activeSlot)\n\n    nodes = get_nodeProfiles('scripts')\n\n    print nodes[options.node]\n\n    nodeProfile = generateProfile(nodes[options.node])\n\n    tdmaProfile = generateNewProfile(nodeProfile,tdma)\n\n    with open(\"./scripts/{0}_crm_config.csv\".format(options.outputFilename),'w') as f:\n        for interval in tdmaProfile:\n            f.write(\"{0}\\n\".format(interval))\n\n    return\n    \nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.6337209343910217, "alphanum_fraction": 0.75, "avg_line_length": 28.66666603088379, "blob_id": "2f21c41d803c9f53185f0c5aa6ebb74faf7f44eb", "content_id": "78fb578ba9a8c3866cb90c54f185185c0b84a42e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/src/analysis/v2.0/routing_test/setup.sh", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "IP_SUFFIX=$1\n\nsource /opt/ros/jade/setup.bash\nexport ROS_MASTER_URI=http://192.168.122.4:11311\nexport ROS_IP=10.1.1.$IP_SUFFIX\nexport LD_LIBRARY_PATH=$PWD:$LD_LIBRARY_PATH\n" }, { "alpha_fraction": 0.5380923748016357, "alphanum_fraction": 0.5518895983695984, "avg_line_length": 20.9342098236084, "blob_id": "cf165e335dd999a3044feda2cfa863e198e4d99a", "content_id": "4b3a4978a396e8f6e67913091b4efe266937618b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 61, "num_lines": 76, "path": "/src/middleware/v2.0/Server.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef SERVER_HPP\n#define SERVER_HPP\n\n#include <math.h>\n#include <string>\n\n#include \"log_macro.hpp\"\n#include \"ConnectionSubsys.hpp\"\n#include \"Message.hpp\"\n#include \"NetworkProfile.hpp\"\n\nclass Options {\npublic:\n  std::string ip;\n  std::string outputFile;\n  std::string tgFile;\n  long port;\n  long bitLength;\n\n  Options() {\n    port = 7777;\n    bitLength = 4096;\n    ip = \"10.1.1.2\";\n    outputFile = \"serverOutput.csv\";\n    tgFile = \"receiver.csv\";\n  }\n\n  int Parse(int argc, char **argv) {\n    for (int i=0; i < argc; i++)\n      {\n\tif (!strcmp(argv[i], \"--profile\"))\n\t  {\n\t    tgFile = argv[i+1];\n\t  }\n\telse if (!strcmp(argv[i], \"--output_file\"))\n\t  {\n\t    outputFile = argv[i+1];\n\t  }\n\telse if (!strcmp(argv[i], \"--ip\"))\n\t  {\n\t    ip = argv[i+1];\n\t  }\n\telse if (!strcmp(argv[i], \"--port\"))\n\t  {\n\t    port = atoi(argv[i+1]);\n\t  }\n\telse if (!strcmp(argv[i], \"--message_bit_length\"))\n\t  {\n\t    bitLength = atoi(argv[i+1]);\n\t  }\n\telse if (!strcmp(argv[i], \"--help\"))\n\t  {\n\t    TG_LOG(\"usage: \\n\\t%s\\n\"\n\t\t   \"\\t\\t --profile <TG profile filename>\\n\"\n\t\t   \"\\t\\t --output_file <filename for data output file>\\n\"\n\t\t   \"\\t\\t --ip <ipv6 address of server>\\n\"\n\t\t   \"\\t\\t --port <port number of server>\\n\"\n\t\t   \"\\t\\t --message_bit_length <# bits in message>\\n\"\n\t\t   ,argv[0]);\n\t    return -1;\n\t  }\n      }\n    return 0;\n  }\n  \n
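  // Example invocation exercising the flags parsed above; the binary\n  // name and the argument values here are illustrative only:\n  //   server --profile receiver.csv --ip 10.1.1.2 --port 7777\n  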
void Print() {\n TG_LOG(\"Options():\\n\");\n TG_LOG(\"\\t profile filename\\t\\t: %s\\n\", tgFile.c_str());\n TG_LOG(\"\\t output filename\\t\\t: %s\\n\", outputFile.c_str());\n TG_LOG(\"\\t server ip address\\t\\t: %s\\n\", ip.c_str());\n TG_LOG(\"\\t server port number\\t\\t: %lu\\n\", port);\n TG_LOG(\"\\t bits in message\\t\\t: %lu\\n\", bitLength);\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.592610776424408, "alphanum_fraction": 0.60918128490448, "avg_line_length": 26.523649215698242, "blob_id": "15cb7f8d11ddfc10da0c08c63d77504b3abcf5a9", "content_id": "1fd403ad23bba78362e7f176f73cfec460cbbad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8147, "license_type": "no_license", "max_line_length": 102, "num_lines": 296, "path": "/src/middleware/v1.0/NetworkProfile.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_PROFILE_HPP\n#define NETWORK_PROFILE_HPP\n\n#include \"CSVIterator.hpp\"\n#include \"log_macro.hpp\"\n\n#include <stdio.h>\n#include <string.h>\n#include <math.h>\n\n#include <iomanip>\n#include <algorithm> // std::swap\n#include <streambuf>\n#include <list>\n#include <vector>\n#include <memory>\n\n#include <time.h>\n\n#define NETWORK_PROFILE_DEBUG 0\n\nstruct profileMemBuf : std::streambuf {\n profileMemBuf(char* begin, char* end) {\n this->setg(begin, begin, end);\n }\n};\n\nclass ResourceEntry {\npublic:\n double time;\n unsigned long long bandwidth;\n unsigned long long data;\n unsigned long long latency;\n};\n\nclass NetworkProfile {\nprivate:\n bool initialized;\npublic:\n std::vector<ResourceEntry> resources;\n timespec start_time;\n double period;\n\npublic:\n\n NetworkProfile()\n : initialized(false),\n resources (0),\n period (0)\n {\n start_time.tv_sec = 0;\n start_time.tv_nsec = 0;\n }\n\n NetworkProfile (const NetworkProfile &s)\n : initialized(s.initialized),\n resources(s.resources),\n period(s.period)\n {\n start_time.tv_sec = s.start_time.tv_sec;\n start_time.tv_nsec = s.start_time.tv_nsec;\n }\n\n ~NetworkProfile()\n {\n }\n\n NetworkProfile & operator= (const NetworkProfile &s)\n {\n if (&s != this)\n {\n NetworkProfile tmp (s);\n swap (tmp);\n }\n return *this;\n }\n\n NetworkProfile* clone() const {\n return new NetworkProfile( *this );\n }\n\n void swap (NetworkProfile &s) {\n std::swap (initialized, s.initialized);\n std::swap (resources, s.resources);\n std::swap (period, s.period);\n std::swap (start_time.tv_sec, s.start_time.tv_sec);\n std::swap (start_time.tv_nsec, s.start_time.tv_nsec);\n }\n\n int initializeFromFile(const char* fname) {\n std::ifstream file(fname);\n if ( !file.is_open() ) {\n TG_LOG(\"ERROR: couldn't open file %s\\n\",\n\t fname);\n return -1;\n }\n TG_LOG(\"Reading profile %s:\\n\",fname);\n return initializeFromIStream(file);\n }\n\n int initializeFromString(char* buffer) {\n profileMemBuf sbuf(buffer,buffer + strlen(buffer));\n std::istream file(&sbuf);\n return initializeFromIStream(file);\n }\n\n int initializeFromIStream(std::istream& stream)\n {\n std::vector<std::vector<double> > csv;\n\n for (CSVIterator loop(stream);loop != CSVIterator();++loop) {\n std::vector<double> rowvec;\n if ( (*loop).size() > 0 && (*loop)[0].c_str()[0] != '%' ) {\n for (int i=0;i<(*loop).size();i++) {\n rowvec.push_back( atof( (*loop)[i].c_str() ) );\n }\n csv.push_back(rowvec);\n }\n }\n if ( parse_csv(csv) )\n return -1;\n else {\n initialized = true;\n return 0;\n }\n }\n\n int parse_csv(std::vector<std::vector<double> > csv) {\n // first csv row 
contains profile start time and period\n // each other csv row contains time,bandwidth,latency\n double fractpart,intpart;\n fractpart = modf(csv[0][0],&intpart);\n start_time.tv_sec = (unsigned long long)(intpart);\n start_time.tv_nsec = (unsigned long)(fractpart*1000000000.0);\n period = csv[0][1];\n TG_LOG(\"Got start time and period: %lu.%lu , %f\\n\",\n\t start_time.tv_sec,\n\t start_time.tv_nsec,\n\t period);\n for (int i=1;i<csv.size();i++) {\n ResourceEntry entry;\n entry.time = csv[i][0]; // s\n entry.bandwidth = (unsigned long long) (csv[i][1]); // bps\n entry.latency = (unsigned long long) (csv[i][2]); // ms\n\n if ( resources.size() > 0 ) {\n entry.data = resources.back().data +\n resources.back().bandwidth *\n (entry.time - resources.back().time);\n }\n else {\n entry.data = 0;\n }\n TG_LOG(\"Got interval: [ %f, %llu, %llu, %llu]\\n\",\n\t entry.time,\n\t entry.bandwidth,\n\t entry.latency,\n\t entry.data);\n resources.push_back(entry);\n }\n if (resources.size () && (resources.back().time < period)) {\n ResourceEntry entry;\n entry.time = period;\n entry.bandwidth = 0;\n entry.latency = 0;\n entry.data = resources.back().data +\n resources.back().bandwidth *\n (entry.time - resources.back().time);\n TG_LOG(\"Got interval: [ %f, %llu, %llu, %llu]\\n\",\n\t entry.time,\n\t entry.bandwidth,\n\t entry.latency,\n\t entry.data);\n resources.push_back(entry);\n }\n return 0;\n }\n\n inline double getOffset(timespec& t) {\n double _start = (double)(start_time.tv_sec) +\n (double)(start_time.tv_nsec)/1000000000.0;\n double _time = (double)(t.tv_sec) + (double)(t.tv_nsec)/1000000000.0;\n double offset = 0;\n offset += (double)(t.tv_sec - start_time.tv_sec);\n offset += ((double)(t.tv_nsec - start_time.tv_nsec)/1000000000.0);\n offset = fmod(offset,period);\n offset = fabs(offset);\n if ( _time < _start ) {\n offset = period - offset;\n }\n return offset;\n }\n\n int getNextInterval( timespec& start, unsigned long long& bandwidth, unsigned long long& latency ) {\n if (resources.size () == 0)\n return -1;\n timespec currentTime;\n int returnCode = clock_gettime (CLOCK_REALTIME, &currentTime);\n double offset = getOffset(currentTime);\n double end = period;\n bandwidth = resources[0].bandwidth;\n latency = resources[0].latency;\n for ( int i=0; i < resources.size() - 1; i++ ) {\n if ( resources[i].time > offset ) {\n\tend = resources[i].time;\n\tbandwidth = resources[i].bandwidth;\n\tlatency = resources[i].latency;\n\tbreak;\n }\n }\n double timeDiff = end - offset;\n double fractpart, intpart;\n fractpart = modf(timeDiff, &intpart);\n start.tv_sec = (long) intpart;\n start.tv_nsec = (long)( fractpart*1000000000.0f );\n start.tv_sec += currentTime.tv_sec;\n start.tv_nsec += currentTime.tv_nsec;\n if ( start.tv_nsec > 999999999 ) {\n start.tv_sec += 1;\n start.tv_nsec = (start.tv_nsec - 1000000000);\n }\n return 0;\n }\n\n double Delay(unsigned long dataLen, timespec sentTime) {\n if (resources.size () == 0)\n return -1;\n\n double offset = getOffset(sentTime);\n\n double start = resources.back().time;\n unsigned long long offsetData = resources.back().data;\n unsigned long long bandwidth = resources.back().bandwidth;\n int res_id = resources.size() - 1;\n for (int i=0;i<resources.size();i++) {\n if ( resources[i].time > offset ) {\n res_id = i;\n start = resources[i-1].time;\n bandwidth = resources[i-1].bandwidth;\n offsetData = resources[i-1].data;\n break;\n }\n }\n\n offsetData += (unsigned long long)((double)(offset-start)*((double)bandwidth));\n\n double timeDiff = 0;\n unsigned long 
long dataInPeriod = resources.back().data;\n unsigned long long dataToEnd = dataInPeriod - offsetData;\n unsigned long long numPeriods = dataLen / dataInPeriod;\n unsigned long long modData = dataLen % dataInPeriod;\n \n if ( numPeriods > 0 ) { // will take more than numPeriods to send data\n timeDiff += (double)numPeriods * period;\n }\n\n if ( dataToEnd < modData ) { // will have to cycle back to beginning to send data\n timeDiff += period - offset;\n offsetData = 0;\n offset = 0;\n res_id = 0;\n modData = modData - dataToEnd;\n }\n\n unsigned long long remainder = modData;\n if ( (resources[res_id].data - offsetData) <= modData ) {\n remainder = modData - (resources[res_id].data - offsetData);\n timeDiff += resources[res_id++].time - offset;\n while ( (resources[res_id].data - offsetData) < modData ) {\n remainder = modData - (resources[res_id].data - offsetData);\n timeDiff += resources[res_id].time - resources[res_id-1].time;\n res_id++;\n }\n }\n res_id--;\n\n timeDiff += (double)remainder / (double)resources[res_id].bandwidth;\n\n int return_code;\n timespec current_time;\n return_code = clock_gettime (CLOCK_REALTIME, &current_time);\n double delta = 0;\n delta += (double)(current_time.tv_sec - sentTime.tv_sec);\n delta += ((double)(current_time.tv_nsec - sentTime.tv_nsec)/1000000000.0);\n timeDiff = timeDiff - delta;\n\n if (timeDiff < 0)\n timeDiff = 0;\n\n return timeDiff;\n }\n \n bool Initialized() const { return initialized; }\n};\n\n#endif\n" }, { "alpha_fraction": 0.6458333134651184, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 23, "blob_id": "0b986ae7b58cc199a43affff5f6d6e31c6f73c7e", "content_id": "401310483a8e13a7d93f928a24e1cca11ad5f3f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 96, "license_type": "no_license", "max_line_length": 67, "num_lines": 4, "path": "/src/tests/Makefile", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "all:\n\tg++ -std=c++11 -I. zmqTest.cpp -o zmqTest -lzmq -lpthread -pthread\nclean:\n\trm -rf zmqTest\n" }, { "alpha_fraction": 0.5491379499435425, "alphanum_fraction": 0.5521013140678406, "avg_line_length": 37.58627700805664, "blob_id": "e6804a12acadff6d8eb91ad5056dc2325286ce62", "content_id": "624212251b62089c0392d73429cd49014b00f228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18560, "license_type": "no_license", "max_line_length": 120, "num_lines": 481, "path": "/src/analysis/v2.0/networkProfile.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "\"\"\"\nNetwork Profile implements the Profile class. \nThis class provides all the members and functions neccessary to \nmodel, compose, and analyze network profiles for applications \nand systems. \n\"\"\"\n\nimport copy,sys\nimport utils\nfrom collections import OrderedDict\nfrom decimal import *\n\nclass Profile:\n \"\"\"\n Profile contains the information about a single network profie.\n A network profile has a kind (e.g. 'provided'), a period (in seconds),\n and a lists of relevant data vs time series (e.g. 
bandwidth, latency, data, etc.).\n    \"\"\"\n\n    #: Separates fields in a line in a profile file\n    field_delimeter = ','\n    #: Denotes headers (profile properties) in a profile file\n    header_delimeter = '#'\n    #: Denotes comments in the profile file\n    comment_delimeter = '%'\n    #: Splits lines in a profile file\n    line_delimeter = '\\n'\n    #: Strip lines starting with these delimeters to get just profile data\n    special_delimeters = [header_delimeter, comment_delimeter]\n    #: Which profiles are interpolated between points\n    interpolated_profiles = ['data','latency']\n    \n    def __init__(self, name = None, kind = None, period = 0, priority = 0,\n                 node = 0, flow_type = None, num_periods = 1, sender_names = []):\n        \"\"\"\n        :param string name: what is the name of the profile?\n        :param string kind: what kind of profile is it?\n        :param double period: what is the periodicity (in seconds) of the profile\n        :param int priority: what is the priority of the flow in the system\n        :param string node: what is the ID of the node from which the data on this profile will be sent\n        :param string flow_type: which sender flows does this (receiver) profile match?\n        \"\"\"\n        self.kind = kind #: The kind of this profile, e.g. 'required'\n        self.name = name #: The user-provided name for this profile; defaults to the kind\n        if not self.name:\n            self.name = self.kind\n        self.period = period #: The length of one period of this profile\n        self.priority = priority #: The priority of the profile; relevant for 'required' profiles\n        self.node_id = node #: The node ID which is the source of this profile\n        self.flow_type = flow_type #: This flow is the receiver for which sender flows?\n        self.entries = OrderedDict() #: Dictionary of 'type name' : 'list of [x,y] points' k:v pairs \n\n    def ParseHeader(self, header):\n        \"\"\"\n        Parses information from the profile's header if it exists:\n\n        * period\n        * priority\n        * node ID\n        * flow_type (for matching senders <--> receivers)\n        * profile kind (provided, required, receiver, output, leftover)\n        * profile name\n\n        A profile header is at the top of the file and has the following syntax::\n\n            # <property> = <value>\n\n        \"\"\"\n        if header:\n            for line in header:\n                line = line.strip('#')\n                prop, value = line.split('=')\n                if \"period\" in prop:\n                    self.period = Decimal(value)\n                elif \"priority\" in prop:\n                    self.priority = int(value)\n                elif \"node ID\" in prop:\n                    self.node_id = value.strip()\n                elif \"flow type\" in prop:\n                    self.flow_type = value.strip()\n                elif \"kind\" in prop:\n                    self.kind = value.strip()\n                elif \"name\" in prop:\n                    self.name = value.strip()\n                    if not self.name:\n                        self.name = self.kind\n\n    def ParseFromFile(self, prof_fName):\n        \"\"\"\n        Builds the entries from a properly formatted CSV file.
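\n\n        For example, a small profile file could look like the following;\n        the header properties and the numbers are illustrative only::\n\n            # kind = required\n            # period = 2\n            0,1000000,2000000,0\n            1,500000,2000000,0\n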
\n        Internally calls :func:`Profile.ParseFromString`.\n        \"\"\"\n        prof_str = None\n        try:\n            with open(prof_fName, 'r+') as f:\n                prof_str = f.read()\n        except:\n            print >> sys.stderr, \"ERROR: Couldn't find/open {}\".format(prof_fName)\n            return -1\n        if prof_str == None:\n            return -1\n        return self.ParseFromString( prof_str )\n\n    def ParseFromString(self, prof_str):\n        \"\"\"\n        Builds the entries from a string (line list of csv's formatted as per\n        :func:`ParseEntriesFromLine`).\n        \"\"\"\n        if not prof_str:\n            print >> sys.stderr, \"ERROR: String contains no profile spec!\"\n            return -1\n        lines = prof_str.split(self.line_delimeter)\n        header = [l for l in lines if self.header_delimeter in l]\n        self.ParseHeader(header)\n        p = copy.copy(lines)\n        for s in self.special_delimeters:\n            p = [l for l in p if s not in l]\n        for line in p:\n            if self.ParseEntriesFromLine(line):\n                return -1\n        self.EntriesRemoveDegenerates()\n        self.EntriesStartFill()\n        return 0\n\n    def ParseEntriesFromLine(self, line_str):\n        \"\"\"\n        Builds the [time, value] list for each type of value into entries:\n        \n        * slope\n        * max slope\n        * latency\n\n        These values are formatted in the csv as::\n\n            <time>, <slope>, <max slope>, <latency>\n\n        \"\"\"\n        if line_str:\n            fields = line_str.split(self.field_delimeter)\n            if len(fields) == 4:\n                time = Decimal(fields[0])\n                slope = Decimal(fields[1])\n                maxSlope = Decimal(fields[2])\n                latency = Decimal(fields[3])\n                self.entries.setdefault('slope',[]).append([time, slope])\n                self.entries.setdefault('max slope',[]).append([time, maxSlope])\n                self.entries.setdefault('latency',[]).append([time, latency])\n            else:\n                print >> sys.stderr,\"{} must be formatted:\".format(line_str)\n                print >> sys.stderr,\"\\t<time>, <slope>, <max slope>, <latency>\"\n                return -1\n        return 0\n\n    def EntriesRemoveDegenerates(self):\n        \"\"\"Remove duplicate entries by time stamp.\"\"\"\n        for key, values in self.entries.iteritems():\n            self.entries[key] = utils.remove_degenerates(values)\n        \n    def AggregateSlopes(self):\n        \"\"\"Remove sequential entries which have the same slope.\"\"\"\n        self.entries['slope'] = utils.aggregate(self.entries['slope'])\n\n    def EntriesStartFill(self):\n        \"\"\"Make sure all entries have a start time of 0.\"\"\"\n        for name, values in self.entries.iteritems():\n            if values[0][0] > 0:\n                values.insert(0,[0,0])\n\n    def Repeat(self, num_periods):\n        \"\"\"Copy the current profile entries over some number of its periods.\"\"\"\n        keys = ['slope', 'max slope', 'latency']\n        for key in keys:\n            if key in self.entries:\n                self.entries[key] = utils.repeat(self.entries[key], self.period, num_periods)\n\n    def Integrate(self, time):\n        \"\"\"Integrates the slope entries to produce data entries up to *time*\"\"\"\n        self.AggregateSlopes()\n        self.entries['data'] = utils.integrate(self.entries['slope'], time)\n\n    def Derive(self):\n        \"\"\"Derives the slope entries from the data entries\"\"\"\n        self.entries['slope'] = utils.derive( self.entries['data'] )\n        self.AggregateSlopes()\n\n    def IsKind(self, kind):\n        \"\"\"Returns True if the profile is of type *kind*, False otherwise.\"\"\"\n        return kind in self.kind\n    \n    def Kind(self,kind):\n        \"\"\"Set the kind of the profile.\"\"\"\n        self.kind = kind\n\n    def Shrink(self, t):\n        \"\"\"Shrink the profile to be <= *t*.\"\"\"\n        for key, values in self.entries.iteritems():\n            self.entries[key], r = utils.split(values, t)\n        del self.entries['slope'][-1]\n\n    def AddProfile(self,profile):\n        \"\"\"\n        Compose this profile with an input profile by adding their slopes together.\n\n        :rtype: :class:`Profile`\n        \"\"\"\n
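        # utils.add_values sums the two piecewise slope series pointwise (the\n        # composition described in the docstring above); whether points are\n        # interpolated follows the interpolated_profiles setting.\n        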
new_slopes = utils.add_values(\n self.entries['slope'],\n profile.entries['slope'],\n interpolate = 'slope' in self.interpolated_profiles\n )\n retProf = copy.deepcopy(self)\n retProf.entries['slope'] = new_slopes\n return retProf\n\n def SubtractProfile(self,profile):\n \"\"\"\n Compose this profile with an input profile by subtracting the input profile's slopes.\n\n :rtype: :class:`Profile`\n \"\"\"\n new_slopes = utils.subtract_values(\n self.entries['slope'],\n profile.entries['slope'],\n interpolate = 'slope' in self.interpolated_profiles\n )\n retProf = copy.deepcopy(self)\n retProf.entries['slope'] = new_slopes\n return retProf\n\n def MakeGraphPointsSlope(self):\n \"\"\"Return matplotlib plottable x and y series for the slope of the profile.\"\"\"\n return utils.convert_values_to_graph(self.entries['slope'], interpolate = 'slope' in self.interpolated_profiles)\n\n def MakeGraphPointsData(self):\n \"\"\"Return matplotlib plottable x and y series for the data of the profile.\"\"\"\n return utils.convert_values_to_graph(self.entries['data'], interpolate = 'data' in self.interpolated_profiles)\n\n def GetValueAtTime(self, key, t, interpolate = True):\n \"\"\"Return the value at time *t* from series *key*, optionally interpolating between.\"\"\"\n return utils.get_value_at_time(self.entries[key], t, interpolate)\n\n def ToString(self, prefix = ''):\n \"\"\"\n Returns a string version of the profile, with all values properly tabulated.\n\n :rtype: :func:`string`\n\n :param in prefix: string to be prepended to every line of the returned string.\n \"\"\"\n retstr = ''\n try:\n from tabulate import tabulate\n newDict = OrderedDict()\n times = []\n for key,values in self.entries.iteritems():\n for val in values:\n if val[0] not in times:\n times.append(val[0])\n newDict['time(s)'] = sorted(times)\n for key,values in self.entries.iteritems():\n for t in times:\n newDict.setdefault(key,[]).append(\n float(utils.get_value_at_time(\n values,\n t,\n interpolate= key in self.interpolated_profiles\n ))\n )\n retstr = tabulate(newDict, headers='keys',floatfmt='.1f')\n r = retstr\n retstr = ''\n for line in r.split('\\n'):\n retstr += '{}{}\\n'.format(prefix,line)\n except ImportError:\n print >> sys.stderr, \"Tabulate module should be installed for printing profiles.\"\n return retstr\n\n def ConvertToNC(self,filterFunc, step = 0):\n \"\"\"\n Perform time-window based integration to generate a Network Calculus curve\n from the profile. The conversion is configurable based on time-window step-size\n and a filter function (e.g. min or max). Passing :func:`max` will create an arrival\n curve, while passing :func:`min` will create a service curve.\n\n :rtype: :class:`Profile`, the network-calculus version of the *self* profile\n\n .. 
note:: Requires the profile to have been integrated\n\n \"\"\"\n time_list = []\n data_list = []\n for t,d in self.entries['data']:\n time_list.append(t)\n data_list.append(-d)\n new_datas = []\n if step <= 0: step = min( [x for x in time_list if x > 0] )\n for tw in time_list:\n extreme_data = -filterFunc(data_list)\n t = tw\n while t <= time_list[-1]:\n start_data = utils.get_value_at_time(self.entries['data'],\n t - tw,\n interpolate = 'data' in self.interpolated_profiles)\n end_data = utils.get_value_at_time(self.entries['data'],\n t,\n interpolate = 'data' in self.interpolated_profiles)\n diff = end_data - start_data\n extreme_data = filterFunc([diff,extreme_data])\n t += step\n new_datas.append([tw, extreme_data])\n \n new_datas = utils.remove_degenerates(new_datas)\n retProf = Profile(kind = self.kind)\n retProf.entries['data'] = new_datas\n retProf.Derive()\n return retProf\n\n def CalcDelay(self, output):\n \"\"\"\n Compute the maximum horizontal distance between this profile and the input profile. \n\n This function implements the operation (see :ref:`network_math_formalism`):\n\n .. math::\n delay = sup\\{l^{-1}[y]-r^{-1}[y] : y \\in \\mathbb{N}\\}\n\n Where\n\n * :math:`l^{-1}[y]` is the inverse map of the ouptut profile, \n e.g. a function mapping output data to time\n * :math:`r^{-1}[y]` is the inverse map of the required profile, \n e.g. a function mapping required data to time\n\n :rtype: :func:`list` of the form::\n \n [ <time>, <data>, <length of delay> ]\n \n :param in output: a :class:`Profile` describing the output profile\n \"\"\"\n r = self.entries['data']\n o = output.entries['data']\n delay = utils.max_horizontal_difference(r, o,\n interpolate = 'data' in self.interpolated_profiles)\n return delay\n\n def CalcBuffer(self, output):\n \"\"\"\n Compute the maximum vertical distance between this profile and the input profile. \n\n This function implements the operation (see :ref:`network_math_formalism`): \n\n .. math::\n buffer= sup\\{r[t] - l[t] : t \\in \\mathbb{N}\\}\n\n Where\n\n * :math:`l[t]` is the output profile (see :func:`Profile.Convolve`)\n * :math:`r[t]` is the required profile (*self*)\n\n :rtype: :func:`list` of the form::\n\n [ <time>, <data>, <size of the buffer> ]\n \n :param in output: a :class:`Profile` describing the output profile\n \"\"\"\n r = self.entries['data']\n o = output.entries['data']\n buff = utils.max_vertical_difference(r, o,\n interpolate = 'data' in self.interpolated_profiles)\n return buff\n\n def Delay(self, delayProf):\n \"\"\"\n Compute the delayed profile composed of *self* profile and *delayProf*,\n received by a node for which this *self* profile is the output profile on the sender side.\n The delay profile describes the delay as a function of time for the link.\n\n This function implements the operation: \n\n .. 
math::\n o[t + \\delta[t]] = l[t]\n\n Where\n\n * :math:`\\delta[t]` is the delay profile\n * :math:`l[t]` is the profile transmitted into the link (*self*)\n * :math:`o[t]` is the output profile received at the other end of the link\n\n :rtype: :class:`Profile`, :math:`o[t]`\n\n :param in delayProf: :class:`Profile` describing the delay\n \"\"\"\n delays = delayProf.entries['latency']\n all0 = True\n for time, delay in delays:\n if delay != 0:\n all0 = False\n if all0: return copy.deepcopy(self)\n datas = self.entries['data']\n endTime = datas[-1][0]\n times = [ x[0] for x in delays ]\n times.extend( [ x[0] for x in datas ] )\n times = sorted(list(set(times)))\n newDatas = []\n for t in times:\n d = utils.get_value_at_time(datas, t)\n delay = utils.get_value_at_time(delays, t, interpolate = 'latency' in self.interpolated_profiles)\n newDatas.append([ t + delay, d ])\n newDatas = utils.remove_degenerates(newDatas)\n newDatas, remainder = utils.split(newDatas, endTime)\n if remainder:\n t = -remainder[0][0]\n utils.shift(remainder, t)\n r_slopes = utils.derive(remainder)\n d_slopes = utils.derive(newDatas)\n d_slopes = utils.add_values(d_slopes,r_slopes)\n newDatas = utils.integrate(d_slopes, endTime)\n\n retProf = Profile()\n retProf.entries['data'] = newDatas\n retProf.Derive()\n return retProf\n\n def Convolve(self, provided):\n \"\"\"\n Use min-plus calculus to convolve this *required* profile with an input *provided* profile.\n\n This function implements the operation (see :ref:`network_math_formalism`):\n\n .. math::\n y=l[t] &= (r \\otimes p)[t] = min( r[t] , p[t] - (p[t-1] -l[t-1]) )\n\n Where\n\n * :math:`r[t]` is the data profile required by the application (*self*)\n * :math:`p[t]` is the data profile provided by the node's link\n * :math:`l[t]` is the data profile transmitted onto the link\n\n :rtype: :class:`Profile`, :math:`l[t]`\n\n :param in provided: a :class:`Profile` describing the node's provided link profile\n \"\"\"\n r = self.entries['data']\n p = provided.entries['data']\n o = []\n\n times = [ x[0] for x in p ]\n times.extend( [ x[0] for x in r ] )\n times = sorted(list(set(times)))\n offset = 0\n prevDiff = 0\n prevTime = None\n r_prev = None\n p_prev = None\n for t in times:\n r_data = utils.get_value_at_time(r, t, interpolate = 'data' in self.interpolated_profiles)\n p_data = utils.get_value_at_time(p, t, interpolate = 'data' in self.interpolated_profiles) - offset\n diff = p_data - r_data\n if diff > 0:\n offset += diff\n if cmp(diff,0) != cmp(prevDiff,0):\n intersection = utils.get_intersection(\n [ prevTime, r_prev ],\n [ t, r_data ],\n [ prevTime, p_prev ],\n [ t, p_data ]\n )\n if intersection:\n o.append(intersection)\n newPoint = [t, p_data - max(0,diff)]\n o.append(newPoint)\n prevDiff = diff\n prevTime = t\n r_prev = r_data\n p_prev = p_data\n o = utils.remove_degenerates(o)\n\n output = Profile(kind='output')\n output.entries['data'] = o\n output.Derive()\n return output\n" }, { "alpha_fraction": 0.7404761910438538, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 25.25, "blob_id": "18121e48b47406a297d8527d0a2f2cce3c215e6c", "content_id": "a01f2362b438f40f4bc81a14271ce0f2565371a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 420, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/docs/_sources/middleware-api.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Middleware API\n===============\n\n.. 
toctree::\n :includehidden:\n :maxdepth: 2\n\n python-api/middleware/network-profile\n python-api/middleware/network-buffer\n python-api/middleware/network-middleware\n python-api/middleware/message\n python-api/middleware/client\n python-api/middleware/server\n python-api/middleware/connection-subsys\n python-api/middleware/csv-iterator\n python-api/middleware/tc-wrapper\n" }, { "alpha_fraction": 0.6104905009269714, "alphanum_fraction": 0.6134045720100403, "avg_line_length": 41.020408630371094, "blob_id": "631e7d3c7bac2c9567c9b2737cad6854a65e4bda", "content_id": "1c1d456465f6baebf0fcb7ac7b0d848d980d1762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2059, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/src/analysis/v2.0/rosmod-network-analysis.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "from networkProfile import *\n\ndef AnalyzeHost(host, nodes, period, numPeriods, nc_mode = False, nc_step = 1):\n print \"Analyzing host {} with period {} for {} periods.\".format(\n host.properties[\"name\"],\n period,\n numPeriods)\n\n provided = Profile(\n kind = 'provided',\n period = period )\n provided.ParseFromFile(\n prof_str = host.properties['system_network_profile'],\n num_periods = numPeriods )\n required = Profile('required',period)\n for node in nodes:\n for compInst in node.children:\n for port in compInst.properties['component_reference'].children:\n if 'port_network_profile' in port.properties.keys():\n tmpProfile = Profile(\n kind = 'required',\n period = period )\n tmpProfile.ParseFromFile(\n prof_str = port.properties['port_network_profile'],\n num_periods = numPeriods)\n required.AddProfile(tmpProfile)\n required.Integrate()\n provided.Integrate()\n if nc_mode:\n required.ConvertToNC(nc_step, lambda l: max(l))\n provided.ConvertToNC(nc_step, lambda l: min(l))\n output, maxBuffer, maxDelay = required.Convolve(provided)\n print \"\\n[Time location, buffersize]:\",[maxBuffer[0], maxBuffer[2]]\n print \"[Time location, delay]:\",[maxDelay[0], maxDelay[2]]\n\n networkProfile.nodeProfiles[selected_host].plotBandwidth()\n networkProfile.nodeProfiles[selected_host].plotData()\n \n\ndef AnalyzeDeployment(dep, period, numPeriods, nc_mode = False, nc_step = 1):\n hostToNodeListMap = {}\n for node in dep.getChildrenByKind(\"Node\"):\n host = node.properties['hardware_reference']\n if host in hostToNodeListMap.keys():\n hostToNodeListMap[host].append(node)\n else:\n hostToNodeListMap[host] = [node]\n for host,nodeList in hostToNodeListMap.iteritems():\n AnalyzeHost(host, nodeList, period, numPeriods, nc_mode, nc_step)\n" }, { "alpha_fraction": 0.5463510751724243, "alphanum_fraction": 0.5522682666778564, "avg_line_length": 16.77777862548828, "blob_id": "d7e9878c2f747c2811e04c96e57b92ba04611b05", "content_id": "68f62daf2b2e858e846a6aed9c215f3afe2a3a60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2028, "license_type": "no_license", "max_line_length": 61, "num_lines": 108, "path": "/src/middleware/v2.0/NetworkBuffer.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_BUFFER_HPP\r\n#define NETWORK_BUFFER_HPP\r\n\r\n#include <stdlib.h>\r\n#include <unistd.h>\r\n#include <stdio.h>\r\n#include <signal.h>\r\n#include <time.h>\r\n\r\n#include <pthread.h>\r\n\r\n#include <algorithm> // std::swap\r\n#include <memory>\r\n#include <vector>\r\n#include <queue>\r\n#include <string>\r\n#include 
<iomanip>\r\n#include <streambuf>\r\n#include <fstream>\r\n\r\n#include \"Message.hpp\"\r\n\r\nclass NetworkBuffer {\r\nprivate:\r\n std::queue<Message*> buffer;\r\n long size;\r\n long capacity;\r\n long maxSize;\r\npublic:\r\n NetworkBuffer( long _capacity = 0 ) {\r\n capacity = _capacity;\r\n size = 0;\r\n maxSize = 0;\r\n }\r\n\r\n ~NetworkBuffer(){ }\r\n\r\n NetworkBuffer (const NetworkBuffer &s)\r\n : size(s.size),\r\n capacity(s.capacity),\r\n maxSize(s.maxSize),\r\n buffer(s.buffer)\r\n {\r\n }\r\n\r\n NetworkBuffer & operator= (const NetworkBuffer &s)\r\n {\r\n if (&s != this)\r\n {\r\n NetworkBuffer tmp (s);\r\n swap (tmp);\r\n }\r\n return *this;\r\n }\r\n\r\n NetworkBuffer* clone() const {\r\n return new NetworkBuffer( *this );\r\n }\r\n\r\n void swap (NetworkBuffer &s) {\r\n std::swap (size, s.size);\r\n std::swap (capacity, s.capacity);\r\n std::swap (maxSize, s.maxSize);\r\n std::swap (buffer, s.buffer);\r\n }\r\n\r\n long MaxSize() const {\r\n return maxSize;\r\n }\r\n\r\n long Size() const {\r\n return size;\r\n }\r\n\r\n long Capacity() const {\r\n return capacity;\r\n }\r\n void Capacity(long _capacity) {\r\n capacity = _capacity;\r\n }\r\n\r\n int Push(Message* data) {\r\n if ( data == NULL )\r\n return -1;\r\n int retVal = -1;\r\n if ( capacity == 0 || data->Bits() <= (capacity-size) ) {\r\n size += data->Bits();\r\n if (size > maxSize && buffer.size() > 1)\r\n maxSize = size;\r\n buffer.push(data);\r\n retVal = 0;\r\n }\r\n return retVal;\r\n }\r\n\r\n int Pop(Message *&data) {\r\n int retVal = -1;\r\n if ( size > 0 && buffer.size() > 0) {\r\n data = buffer.front();\r\n buffer.pop();\r\n size = size - data->Bits();\r\n retVal = 0;\r\n }\r\n return retVal;\r\n }\r\n};\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6222279667854309, "alphanum_fraction": 0.6382156014442444, "avg_line_length": 27.094202041625977, "blob_id": "3241dcd233636f2eff865ef3c57d93b861dca879", "content_id": "2dc085964079d68e4ca634ded7c158e604ec89b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3878, "license_type": "no_license", "max_line_length": 98, "num_lines": 138, "path": "/src/middleware/v1.0/Client.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "\n#include \"Client.hpp\"\n\nNetworkProfile profile, bufferProfile;\nstatic std::vector <Message*> messages;\nIPV6_Connection interface;\n\ndouble maxLatency=0;\n\nlong messageBitLength;\nlong messageStrLength;\nstatic long id = 0;\n\nlong precision = 30;// for file output\n\nvoid *sendFunc(Message* data) {\n if ( data != NULL ) {\n int retVal = interface.send(data->Buffer().c_str(),data->Bytes());\n if ( retVal <= 0 ) {\n TG_LOG(\"Couldn't send Message %lu with buffer %s\\n\",data->Id(),data->Buffer().c_str());\n }\n return (void *) retVal;\n }\n else\n return (void *) NULL;\n}\n\nint main(int argc, char **argv) {\n timespec timeout, remaining;\n\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n std::string profileFile = options.tgFile; \n if ( profile.initializeFromFile(profileFile.c_str()) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize TG profile!\\n\");\n return -1;\n }\n\n interface.serverIP = options.ip;\n interface.serverPort = options.port;\n if ( interface.Initialize(false,false) != 0 )\n return -1;\n\n std::string outputFile = options.outputFile;\n messageBitLength = options.bitLength;\n messageStrLength = ceil((double)messageBitLength/8.0f);\n double runTime = ( options.runTime > 0 ) ? 
options.runTime : profile.period*options.numPeriods ;\n\n std::string bufferProfileFile = options.bufferFile;\n bufferProfile.initializeFromFile(bufferProfileFile.c_str());\n\n if ( NetworkMiddleware::Init(bufferProfile, sendFunc) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize network middleware!\\n\");\n return -1;\n }\n\n double timeDiff = 0;\n timespec startTime;\n clock_gettime(CLOCK_REALTIME,&startTime);\n\n while (true) {\n Message* data = new Message(messageBitLength, id);\n messages.push_back(data); \n data->TimeStamp();\n NetworkMiddleware::send(data);\n\n timeDiff = (double)(data->FirstEpochTime().tv_sec - \n\t\t\tstartTime.tv_sec);\n timeDiff += (double)(data->FirstEpochTime().tv_nsec - \n\t\t\t startTime.tv_nsec)/1000000000.0f;\n\n if ( timeDiff >= runTime )\n break;\n\n double timerDelay = profile.Delay(data->Bits(),data->FirstEpochTime());\n id++;\n if ( timerDelay > 0 ) {\n double fractpart,intpart;\n fractpart = modf(timerDelay,&intpart);\n timeout.tv_sec = (unsigned long long)(intpart);\n timeout.tv_nsec = (unsigned long)(fractpart*1000000000.0);\n int return_code = nanosleep (&timeout, &remaining);\n }\n }\n\n NetworkMiddleware::Exit();\n\n double maxLatency = 0;\n double latency = 0;\n for (long i=0; i<messages.size(); i++) {\n std::vector<timespec> times(messages[i]->EpochTimes());\n latency = (double)(times.back().tv_sec - times.front().tv_sec);\n latency += ((double)(times.back().tv_nsec - times.front().tv_nsec)/1000000000.0f);\n if ( latency > maxLatency )\n maxLatency = latency;\n }\n\n write_data(outputFile);\n\n for ( long i=0; i<messages.size(); i++) {\n if ( messages[i] != NULL )\n delete messages[i];\n }\n messages.clear();\n\n TG_LOG(\"Max bits in UDP socket buffer: %d\\n\",\n\t interface.bufferSize*8);\n TG_LOG(\"Max bits in middleware buffer: %lu\\n\",\n\t NetworkMiddleware::buffer.MaxSize());\n TG_LOG(\"Max message latency: %f seconds\\n\",\n\t maxLatency);\n}\n\nint write_data(std::string fname) {\n for (long i=0;i<messages.size();i++) {\n if ( append_data(fname,messages[i]) == -1 ) {\n TG_LOG(\"Couldn't append message %lu to file %s\\n\",i,fname.c_str());\n }\n } \n return 0;\n}\n\nint append_data(std::string fname, Message* data) {\n std::ofstream file(fname.c_str(), std::ofstream::app);\n if ( !file.is_open() ) {\n TG_LOG(\"ERROR: Couldn't open %s for appending!\\n\",fname.c_str());\n return -1;\n }\n file << data->Id() << \",\" << std::setprecision(precision)\n << data->FirstDoubleTime() << \",\"\n << data->LastDoubleTime() << \",\"\n << data->Bits()\n << \"\\n\";\n file.close();\n return 0;\n}\n" }, { "alpha_fraction": 0.5410609245300293, "alphanum_fraction": 0.5442042946815491, "avg_line_length": 30.8125, "blob_id": "a96f6f1c635d1ae720ed74bd888ebfe8c234b2e6", "content_id": "09d57658af04d6136a07c327e85306280ea2ab74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2545, "license_type": "no_license", "max_line_length": 80, "num_lines": 80, "path": "/src/analysis/v1.0/utils.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "import sys,os,copy,glob\n\nhavePLT = False\ntry:\n import matplotlib.pyplot as plt\n havePLT=True\nexcept ImportError:\n print \"Package python-matplotlib not found, plotting disabled.\"\n \n\ndef getDataAtTimeFromProfile(p,t):\n i = 0\n while i < len(p) and t > p[i].end:\n i += 1\n retVal = p[i].data - p[i].slope * (p[i].end - t)\n #print \"@ {} has value {}\\n\".format(t,retVal)\n return retVal\n\ndef 
plotProfile(dtype,profile,ptype,dashes,label,line_width):\n xvals = []\n yvals = []\n if dtype == 'data':\n xvals.append(0)\n yvals.append(0)\n for e in profile:\n if e.ptype == ptype:\n if dtype == 'slope':\n xvals.append(e.start)\n yvals.append(e.slope)\n yvals.append(e.slope)\n else:\n yvals.append(e.data)\n xvals.append(e.end)\n\n line, =plt.plot(xvals, yvals, label=r\"{0}{1} {2}\".format(label,ptype,dtype),\n linewidth=line_width)\n line.set_dashes(dashes) \n return\n\ndef get_app_node_map(nodes,apps):\n app_node_map = {}\n for node,nprofile in nodes.iteritems():\n for app,aprofile in apps.iteritems():\n if app.find(node) != -1:\n if app_node_map.has_key(node):\n app_node_map[node].append(app)\n else:\n app_node_map[node] = [app]\n return app_node_map\n\ndef get_appProfiles(folder):\n profile_dir = os.getcwd()+os.sep+folder\n apps = {}\n if os.path.isdir(profile_dir):\n print 'Found ',profile_dir\n for file in glob.glob(profile_dir+os.sep+'*profile.csv'):\n app_name = file.replace('_profile.csv','')\n app_name = app_name.replace(profile_dir+os.sep,'')\n with open(file,'r+') as f:\n content = f.read()\n apps[app_name] = content\n else:\n print \"ERROR: \",profile_dir,\" doesn't exist!\"\n return apps\n\ndef get_nodeProfiles(folder):\n profile_dir = os.getcwd()+os.sep+folder\n nodes = {}\n if os.path.isdir(profile_dir):\n print 'Found ',profile_dir\n for file in glob.glob(profile_dir+os.sep+'*config.csv'):\n node_name = file.replace('_crm_config.csv','')\n node_name = node_name.replace(profile_dir+os.sep,'')\n if node_name != 'crm_config.csv':\n with open(file,'r+') as f:\n content = f.read()\n nodes[node_name] = content\n else:\n print \"ERROR: \",profile_dir,\" doesn't exist!\"\n return nodes\n" }, { "alpha_fraction": 0.42307692766189575, "alphanum_fraction": 0.42307692766189575, "avg_line_length": 12, "blob_id": "aa90b778a4b30d8c06f3586edc8a2d42d9e6dbed", "content_id": "8e8bc7faa26bcbe15c2379773fcad836971d8c87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 26, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/docs/_sources/python-api/middleware/csv-iterator.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "CSV Iterator\n============\n" }, { "alpha_fraction": 0.5729882717132568, "alphanum_fraction": 0.5747405290603638, "avg_line_length": 29.78423309326172, "blob_id": "df4728f639fa674d2218763ff6656cc34c34f4aa", "content_id": "d15393053b8df2bcbd0d151af9a340b2b6c6faaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7419, "license_type": "no_license", "max_line_length": 98, "num_lines": 241, "path": "/src/analysis/v2.0/networkConfig.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "\"\"\"\nNetwork Config implements classes related to node-based\nflow/profile aggregation, routing, link management, and management of\nsystem level concerns such as multicast-capability.\n\n\"\"\"\n\nimport copy\nimport sys\n\n\nclass Node:\n \"\"\"\n Defines all the required information for a node in the network.\n This includes:\n\n * All provided profiles (aggregated) whose node_id is this node\n\n \"\"\"\n\n id_type = str\n\n def __init__(self, _id):\n self.ID = _id #: the ID of this node\n self.provided = None #: aggregate of all 'provided' profiles whose source ID is this node\n\n def HasProfiles(self):\n if not self.provided and not self.receivers:\n return False\n return True\n\n def AddProfile(self, 
prof):\n if prof.IsKind('provided'):\n self.AddProvidedProfile(prof)\n\n def AddProvidedProfile(self, prof):\n if not self.provided:\n self.provided = prof\n else:\n self.provided = self.provided.AddProfile(prof)\n\n def __repr__(self):\n retStr = \"Node( id = {} )\".format(self.ID)\n return retStr\n\n\nclass Route:\n \"\"\"\n Describes how a flow traverse the links of the system's network.\n This is specified as a list of nodes, with the source node at the\n front of the list and the destination node at the end of the list.\n \"\"\"\n\n header = \"route:\" #: line header specifying a route in the config file\n\n def __init__(self, path=[]):\n self.path = path #: list of node IDs with a source, intermediate nodes, and a destination\n\n def AddDest(self, dest):\n \"\"\"Append a node onto the end of the route.\"\"\"\n self.path.append(dest)\n\n def AddSource(self, src):\n \"\"\"Add a node onto the beginning of a route.\"\"\"\n self.path.insert(0, src)\n\n def InsertNode(self, node, pos):\n \"\"\"Insert a node into the route before the given position.\"\"\"\n self.path.insert(pos, node)\n\n def ParseFromLine(self, line):\n \"\"\"\n Handles parsing of a route path from a line in the config file.\n A route is defined as::\n\n route: src_node_id, hop_node_1, ... , hope_node_n, dst_node_id\n \"\"\"\n self.path = []\n line = line.strip(self.header)\n node_id_list = map(Node.id_type, line.split(','))\n for node_id in node_id_list:\n self.AddDest(node_id.strip(' '))\n return 0\n\n def Length(self):\n return len(self.path)\n\n def __getitem__(self, index):\n return self.path[index]\n\n def __repr__(self):\n retStr = \"{}\".format(self.path)\n return retStr\n\n\nclass Topology:\n \"\"\"\n Describes the active links between nodes on the system's network.\n This is specified as a dictionary of node : list of nodes pairs.\n \"\"\"\n\n header = \"topology:\" #: line header specifying a topology link in the config file.\n\n def __init__(self, links={}):\n self.links = links\n\n def ParseFromLine(self, line):\n \"\"\"\n Handles parsing of a link from a line in the config file.\n A topology is defined as::\n\n topology: src_node_id : direct_node_1, ... , direct_node_n\n \"\"\"\n line = line.strip(self.header)\n src_node, node_list_str = line.split(':')\n node_list = map(Node.id_type, node_list_str.split(','))\n node_list = [x.strip() for x in node_list]\n self.links[src_node.strip()] = node_list\n return 0\n\n def __repr__(self):\n retStr = \"{}\".format(self.links)\n return retStr\n\n\nclass Config:\n \"\"\"\n Contains the routing and topology information\n to fully describe the system's network and provide\n a mapping between application data flows (logical)\n and the system's network links. 
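\n\n    For example, a configuration file might contain lines like the\n    following (the node IDs are illustrative only)::\n\n        topology: alpha : beta, gamma\n        route: alpha, gamma, delta\n\n    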
It also provides\n interfaces for setting low-level communications\n considerations such as retransmission, multiple-unicast,\n multicast, etc.\n \"\"\"\n\n def __init__(self, nodes={}, multicast=False, retransmit=False,\n routes=[], topology=Topology()):\n self.multicast = multicast\n self.retransmit = retransmit\n self.routes = routes\n self.topology = topology\n self.nodes = nodes\n self.senders = {}\n self.receivers = {}\n\n def addProfile(self, prof):\n if prof.IsKind('required'):\n self.senders[prof.priority] = prof\n elif prof.IsKind('provided'):\n self.nodes[prof.node_id].AddProfile(prof)\n elif prof.IsKind('receiver'):\n self.receivers.setdefault(prof.flow_type,[]).append(prof)\n\n def GetRoute(self, src, dst):\n \"\"\"Returns the path for the flow from *src* to *dst*.\"\"\"\n route = [src, dst]\n if dst not in self.topology.links[src]:\n route = [x for x in self.routes if x[0] == src and x[-1] == dst][0].path\n return route\n\n def ParseHeader(self, header):\n \"\"\"Parses information from the configuration's header if it exists:\n\n * multicast capability\n * retransmission setting\n\n A profile header is at the top of the file and has the\n following syntax::\n\n # <property> = <value>\n\n \"\"\"\n if header:\n for line in header:\n line.strip('#')\n prop, value = line.split('=')\n if \"multicast\" in prop:\n self.multicast = bool(value)\n elif \"retransmit\" in prop:\n self.retransmit = bool(value)\n\n def ParseFromFile(self, fName):\n \"\"\"\n Builds the entries from a properly formatted CSV file.\n Internally calls :func:`Config.ParseFromString`.\n \"\"\"\n conf_str = None\n try:\n with open(fName, 'r+') as f:\n conf_str = f.read()\n except:\n print >> sys.stderr, \"ERROR: Couldn't find/open {}\".format(fName)\n return -1\n if conf_str is None:\n return -1\n return self.ParseFromString(conf_str)\n\n def ParseFromString(self, conf_str):\n \"\"\"\n Handles parsing of the header, topology, and routes in a config\n file.\n \"\"\"\n if not conf_str:\n print >> sys.stderr, \"ERROR: String contains no configuration spec!\"\n return -1\n lines = conf_str.split('\\n')\n header = [l for l in lines if '#' in l]\n self.ParseHeader(header)\n specials = ['%', '#']\n c = copy.copy(lines)\n for s in specials:\n c = [l for l in c if s not in l]\n for line in c:\n if Route.header in line:\n route = Route()\n if route.ParseFromLine(line) == 0:\n self.routes.append(route)\n elif Topology.header in line:\n self.topology.ParseFromLine(line)\n for key in self.topology.links:\n self.nodes[key] = Node(_id=key)\n return 0\n\n def __repr__(self):\n retStr = \"Config:\\n\"\n retStr += \"\\tmulticast: {}\\n\".format(self.multicast)\n retStr += \"\\tretransmit: {}\\n\".format(self.retransmit)\n retStr += \"\\tnodes:\\n\\t\\t{}\\n\".format(self.nodes)\n retStr += \"\\tTopology:\\n\\t\\t{}\\n\".format(self.topology)\n retStr += \"\\tRoutes:\\n\\t\\t{}\\n\".format(self.routes)\n return retStr\n\n\ndef main(argv):\n config = Config()\n config.ParseFromFile(\"config.csv\")\n print \"{}\".format(config)\n\nif __name__ == '__main__':\n main(sys.argv)\n" }, { "alpha_fraction": 0.6821056604385376, "alphanum_fraction": 0.6891030073165894, "avg_line_length": 48.149471282958984, "blob_id": "adfd5240a7fde030af6aada77b0fe0f69ba5f9aa", "content_id": "41bf5418c40868ea84b97c687caaa03f810425ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 37157, "license_type": "no_license", "max_line_length": 111, "num_lines": 756, "path": 
"/docs/_sources/design-time.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": ".. _design_time:\n\n=====================\n Design Time Results\n=====================\n\nThese results provide a methodology and a means for application\ndevelopers and system integrators to determine conservative, precise,\ntightly bounded performance metrics for distributed networked\napplications and systems at design time. The contributions of this\nwork are broken into sections by topic:\n\n* :ref:`periodic_system_analysis`\n* :ref:`periodic_analysis_proof`\n* :ref:`nc_comparison`\n* :ref:`tdma_analysis`\n* :ref:`compositional_analysis`\n* :ref:`delay_analysis`\n* :ref:`routing_analysis`\n\n.. _periodic_system_analysis:\n\nPeriodic System Analysis\n========================\n\nOne subset of systems which we would like to analyze are periodic\nsystems, since many systems in the real world exhibit some form of\nperiodicity, e.g. satellites in orbit, traffic congestion patterns,\npower draw patterns. We define systems to be periodic if the data\nproduction rate (or consumption rate) of the system is a periodic\nfunction of time. The time-integral of these periodic data\nconsumption/production rates is the cumulative data\nproduction/consumption of the system. These cumulative functions are\ncalled *repeating*.\n\nGiven that the required data profile and system data service profile\nare *repeating*, we must determine the periodicity of the output\nprofile. If we can show that the output profile similarly repeats,\nthen we can show that the system has no unbounded buffer growth.\nFirst, let us look at the profile behavior over the course of its\nfirst two periods of activity.\n\nWe will examine two systems, *system (1)* and *system (2)*. Firstly,\nexamine *(1)*, shown below (note: you can click on the images to open\nthem in a larger format):\n\n+---------------------------------------------------+-----------------------------------------------------+\n| System *(1)* Bandwidth for 1 Period | System *(1)* Data for 1 Period |\n+===================================================+=====================================================+\n| .. image:: /images/results/1-period-system-bw.png | .. image:: /images/results/1-period-system-data.png |\n| :height: 200 | :height: 200 |\n+---------------------------------------------------+-----------------------------------------------------+\n\n+---------------------------------------------------+-----------------------------------------------------+\n| System *(1)* Bandwidth for 2 Periods | System *(1)* Data for 2 Periods |\n+===================================================+=====================================================+\n| .. image:: /images/results/2-period-system-bw.png | .. 
image:: /images/results/2-period-system-data.png |\n| :height: 200 | :height: 200 |\n+---------------------------------------------------+-----------------------------------------------------+\n\nWe notice that for this example system, the second period output\nprofile is not an exact copy of the first (most easily seen by\nexamining the bandwidth plots), and yet the required buffer size is\nstill the same as it was when analyzing the system over one period.\nFurthermore, by running the analysis over even larger number of\nperiods, we can determine (not plotted here for space and\nreadability), that the predicted buffer size does not change no matter\nhow many periods we analyze for this system.\n\nLet us look at a system where this is not the case before we begin the\nanalysis of such system characteristics.\n\n+-----------------------------------------------------+-------------------------------------------------------+\n| System *(2)* Bandwidth for 1 Period | System *(2)* Data for 1 Period |\n+=====================================================+=======================================================+\n| .. image:: /images/results/1-period-unstable-bw.png | .. image:: /images/results/1-period-unstable-data.png |\n| :height: 200 | :height: 200 |\n+-----------------------------------------------------+-------------------------------------------------------+\n\n+-----------------------------------------------------+-------------------------------------------------------+\n| System *(2)* Bandwidth for 2 Periods | System *(2)* Data for 2 Periods |\n+=====================================================+=======================================================+\n| .. image:: /images/results/2-period-unstable-bw.png | .. image:: /images/results/2-period-unstable-data.png |\n| :height: 200 | :height: 200 |\n+-----------------------------------------------------+-------------------------------------------------------+\n\nNotice in system *(2)*, the first period analysis predicted the same\nbuffer size and delay as system *(1)*, but when analyzing two periods\nthe predicted buffer size changed. Clearly the behavior of the system\nis changing between these two periods. If we continue to analyze more\nperiods of system *(2)*, as we did with system *(1)*, we'll find the\nunfortunate conclusion that the predicted buffer size increases with\nevery period we add to the analysis.\n\nWe have discovered a system level property that can be calculated from\nthese profiles, but we must determine what it means and how it can be\nused. First, we see that in system *(1)*, the predicted required\nbuffer size does not change regarless of the number of periods over\nwhich we analyze the system. Second, we see that for system *(2)*,\nthe predicted required buffer size changes depending on how many\nperiods of activity we choose for our analysis window. Third, we see\nthat the second period of system *(2)* contains the larger of the two\npredicted buffer sizes. These observations (with our understanding of\ndeterministic periodic systems) lead us to the conclusion: system\n*(2)* can no longer be classified as periodic, since its behavior is\nnot consistent between its periods. Furthermore, because the required\nbuffer size predicted for system system *(2)* continually increases,\nwe can determine that the system is in fact *unstable* due to\nunbounded buffer growth. \n\n.. 
_periodic_analysis_proof:\n\nProving the Minimum Analysis for System Stability\n-------------------------------------------------\n\nLet us now formally prove the assertion about system periodicity and\nstability which has been stated above.  We will show that our analysis\nresults provide quantitative measures about the behavior of the system\nand we will determine for how long we must analyze a system to glean\nsuch behaviors.\n\nTypically, periodicity is defined for functions as the equality:\n\n.. math:: x(t) = x(t + k * T), \\forall k \\in \\mathbb{N} > 0\n\nbut for our type of system analysis this cannot hold since we deal\nwith cumulative functions (of data vs. time).  Instead we must define\nthese functions to be **repeating**, where a function is repeating\n*iff*:\n\n.. math:: x(0) &= 0 \\text{ and}\\\\\n\t  x(t + k * T) &= x(t) + k * x(T), \\forall k \\in \\mathbb{N} > 0\n\nClearly, a repeating function :math:`x` is **periodic** *iff*\n:math:`x(T)=0`.  Note that repeating functions like the cumulative\ndata vs. time profiles we deal with, are the result of **integrating**\n*periodic* functions, like the periodic bandwidth vs. time profiles we\nuse to describe application network traffic and system network\ncapacity.  All periodic functions, when integrated, produce repeating\nfunctions and similarly, all repeating functions, when differentiated,\nproduce periodic functions.\n\nNow we will consider a deterministic, *repeating* queuing system\nproviding a data service function :math:`S` to input data function\n:math:`I` to produce output data function :math:`O`, where these\nfunctions are *cumulative data versus time*.  At any time :math:`t`,\nthe amount of data in the system's buffer is given by :math:`B[t]`.\nAfter servicing the input, the system has a remaining capacity\nfunction :math:`R`.\n\n* :math:`S[t]` : the service function of the system, cumulative data\n  service capacity versus time\n* :math:`I[t]` : the input data to the system, cumulative data versus\n  time\n* :math:`O[t]` : the output data from the system, cumulative data\n  versus time\n* :math:`B[t]` : the amount of data in the system's buffer at time\n  :math:`t`, i.e. :math:`I[t]-O[t]`\n* :math:`R[t]` : the remaining service capacity of the system after\n  servicing :math:`I`, i.e. :math:`S[t] - O[t]`\n\nBecause :math:`S` and :math:`I` are deterministic and repeating, they\nincrease deterministically from period to period, i.e. given the\nperiod :math:`T_I` of :math:`I`,\n\n.. math:: \\forall t, \\forall n \\in \\mathbb{N} > 0 : I[t + n*T_I] =\n   I[t] + n*I[T_I]\n\nSimilarly, given the period :math:`T_S` of :math:`S`,\n\n.. math:: \\forall t, \\forall n \\in \\mathbb{N} > 0 : S[t + n*T_S] =\n   S[t] + n*S[T_S]\n\nWe can determine the hyperperiod of the system as the :math:`lcm` of\nthe input function period and the service function period, :math:`T_p =\nlcm(T_S,T_I)`.\n\nAt the start of the system, :math:`t=0`, the system's buffer is empty,\ni.e.  :math:`B[0] = 0`.  Therefore, the amount of data in the buffer\nat the end of the first period, :math:`t=T_p`, is the amount of data\nthat entered the system on input function :math:`I` but was not able\nto be serviced by :math:`S`.  At the start of the next period, this\ndata will exist in the buffer.  Data in the buffer at the start of the\nperiod can be compared to the system's remaining capacity :math:`R`,\nsince the remaining capacity of the system indicates how much extra\ndata it can transmit in that period.\n\n
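To make the stability check concrete, the following minimal sketch\n(illustrative only, and not part of our tool suite) simulates a\nwork-conserving server over one and two hyperperiods and compares the\nbacklog values :math:`B[T_p]` and :math:`B[2*T_p]`; the rate profiles\nare invented for the example:\n\n.. code-block:: python\n\n   # Invented rates over one hyperperiod Tp = 4, one entry per time slot.\n   I_rate = [3, 3, 0, 0]   # input (arrival) rate\n   S_rate = [1, 1, 1, 1]   # service rate\n\n   def backlog_after(num_periods):\n       b = 0\n       for _ in range(num_periods):\n           for i_r, s_r in zip(I_rate, S_rate):\n               # the server drains at most s_r from the backlog plus arrivals\n               b = max(0, b + i_r - s_r)\n       return b\n\n   # Stable (finite buffer) iff B[2*Tp] == B[Tp]\n   print backlog_after(1), backlog_after(2)   # prints '2 4': unstable\n\nReplacing the input rates with ``[2, 2, 0, 0]`` makes the two backlog\nvalues equal (both zero), which is the finite-buffer case described\nbelow.\n\n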
\nAt the start of the system, :math:`t=0`, the system's buffer is empty,\ni.e. :math:`B[0] = 0`. Therefore, the amount of data in the buffer\nat the end of the first period, :math:`t=T_p`, is the amount of data\nthat entered the system on input function :math:`I` but was not able\nto be serviced by :math:`S`. At the start of the next period, this\ndata will exist in the buffer. Data in the buffer at the start of the\nperiod can be compared to the system's remaining capacity :math:`R`,\nsince the remaining capacity of the system indicates how much extra\ndata it can transmit in that period. Consider the scenario that the\nsystem's remaining capacity :math:`R` is less than the size of the\nbuffer, i.e. :math:`R[T_p] < B[T_p]`. In this scenario,\n:math:`B[2*T_p] > B[T_p]`, i.e. there will be more data in the buffer\nat the end of the second period than there was at the end of the first\nperiod. Since the system is deterministic, for any two successive\nperiods, :math:`n*T_p` and :math:`(n+1)*T_p`, :math:`B[(n+1)*T_p] >\nB[n*T_p]`, which extends to:\n\n.. math::\n   B[m*T_p] > B[n*T_p], \\forall m>n>0\n\nimplying that:\n\n.. math::\n   B[t] < B[t + k*T_p], \\forall k \\in \\mathbb{N} > 0\n\nmeaning that the amount of data in the buffer versus time is *not\nperiodic*; therefore the amount of data in the system's buffer\nincreases every period, i.e. the system has *unbounded buffer growth*.\n\nIf, however, there is enough remaining capacity in the system to\nservice the data in the buffer, i.e. :math:`R[T_p] >= B[T_p]`, then\n:math:`B[2*T_p] = B[T_p]`. This relation means that if the remaining\ncapacity of the system that exists after all the period's required\ntraffic has been serviced is equal to or larger than the size of the\nbuffer at the end of the period, then in the next period the system\nwill be able to fully service both the data in the buffer and the\nperiod's required traffic. Since both the period's traffic and the\nbuffer's data will have been serviced in that period, the amount of\ndata in the buffer at the end of the period will be the same as the\namount of data that was in the buffer at the start of the\nperiod. Similarly to above, since the system is deterministic, for any\ntwo successive periods, :math:`n*T_p` and :math:`(n+1)*T_p`,\n:math:`B[(n+1)*T_p] = B[n*T_p]`. This extends to:\n\n.. math::\n   B[m*T_p] = B[n*T_p], \\forall m,n > 0\n\nwhich implies that:\n\n.. math::\n   B[t] = B[t + k*T_p], \\forall k \\in \\mathbb{N} > 0\n\nmeaning that the amount of data in the buffer versus time is a\n*periodic function*; therefore the buffer size does not grow between\nperiods, and the system has a *finite buffer*.\n\nIf we are only concerned with buffer growth, we do not need to\ncalculate :math:`R`, and can instead infer buffer growth by comparing\nthe values of the buffer at any two period-offset times during the\nsteady-state operation of the system (:math:`t >= T_p`). This means\nthat the system buffer growth check can resolve to :math:`B[2*T_p] ==\nB[T_p]`. This comparison abides by the conditions above, with\n:math:`m=2` and :math:`n=1`.\n\n.. _nc_comparison:\n\nComparison with NC/RTC\n======================\n\nTo show how our analysis techniques compare to other available\nmethods, we developed our tools to allow us to analyze the input\nsystem using Network Calculus/Real-Time Calculus techniques as well as\nour own. Using these capabilities, we can directly compare the\nanalysis results to each other, and then finally compare both results\nto the measurements from the actual system.\n
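\nFor the NC analysis, our tool converts each cumulative data profile\ninto a time-window curve by sliding a window across the profile and\ntaking the extremal amount of data transferred in any window of a\ngiven size (a maximum for required/arrival curves, a minimum for\nprovided/service curves). A minimal sketch of that conversion,\nassuming the profile has been regularly sampled into a list of\ncumulative data values (the representation is illustrative), is:\n\n.. code-block:: python\n\n   def to_nc_curve(data, extremum):\n       # data[i]: cumulative data at sample i; extremum: max or min\n       # returns curve[w]: extremal data moved in any window of w samples\n       n = len(data)\n       curve = [0] * n\n       for w in range(1, n):\n           curve[w] = extremum(data[i + w] - data[i] for i in range(n - w))\n       return curve\n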
\n+---------------------------------------------------+-----------------------------------------------------+\n| System Data Rate vs. Time | System Data Analyzed with :math:`PNP^2` |\n+===================================================+=====================================================+\n| .. image:: /images/results/maren_namek_bw.png | .. image:: /images/results/maren_namek_data.png |\n| :height: 200 | :height: 200 |\n+---------------------------------------------------+-----------------------------------------------------+\n\n.. _fig-zoom:\n\n.. figure:: /images/results/maren_namek_data_zoom.png\n   :align: center\n   :height: 400px\n   :width: 400px\n\n   Zoomed-in version of :math:`PNP^2` analysis.\n\n.. _fig-data-nc:\n\n.. figure:: /images/results/nc_namek_data.png\n   :align: center\n   :height: 400px\n   :width: 400px\n\n   Network-Calculus based analysis of the system.\n\nThe table above shows the data rate versus time profile describing the\nexample system, side-by-side with the time-integrated and analyzed\ndata versus time profile. :num:`Figure #fig-zoom` shows a zoomed-in\nportion of the second plot, focusing on the area with the maximum\ndelay and buffer as analyzed by :math:`PNP^2`. :num:`Figure\n#fig-data-nc` shows the same system analyzed using Network Calculus.\n\nThe major drawback of Network Calculus that our work aims to solve is\nthe disconnect from the real system that stems from using an approach\nbased on time-window analysis. Such an approach leads to dramatically\nunder-approximating the capacity of the network while simultaneously\nover-approximating the utilization of the network, since a known drop\nin network performance which is expected and handled by the\napplication cannot be accurately modeled. In our case, the system\nuses a system profile which can service data during the interval\n:math:`0\\le t\\le 7` seconds with a period of 10 seconds. The\napplication is designed around this constraint and only produces data\nduring that interval. Because our technique directly compares when\nthe application produces data to when the system can service the data,\nwe are able to derive more precise performance prediction metrics than\nNetwork Calculus, which compares the 3 seconds of system downtime to\nthe 3 seconds of maximum application data production.\n\nWe developed software which produces data according to a supplied\ninput profile and configured the system's network to provide the\nbandwidth profile described in the system configuration profile.\nUsing this experimental infrastructure, we were able to measure the\ntransmitted traffic profile, the received traffic profile, the latency\nexperienced by the data, and the transmitter's buffer requirements.\nThe results are displayed in the table below:\n\n+---------------------+--------------+-------------------------------+\n| | Predicted | Measured (:math:`\\mu,\\sigma`) |\n+=====================+==============+===============================+\n| Buffer Delay (s) | 0.0625 | (0.06003 , 0.00029) |\n+---------------------+--------------+-------------------------------+\n| Time of Delay (s) | 3.0 | (2.90547 , 0.00025) |\n+---------------------+--------------+-------------------------------+\n| Buffer Size (bytes) | 8000 | (7722.59 , 36.94) |\n+---------------------+--------------+-------------------------------+\n\nTaking the results from our published work, where our methods\npredicted a buffer size of 64000 bits / 8000 bytes, we show that\nNetwork Calculus predicts a required buffer size of 3155000 bits. This\ndrastic difference comes from the mismatch between downtime and\nmaximum data production mentioned above.\n\n.. _tdma_analysis:\n\nAnalysis of TDMA Scheduling\n===========================\n\nMedium access control (MAC) protocols are used in networking systems\nto govern the communication between computing nodes which share a\nnetwork communications medium.\n
They are designed to allow reliable\ncommunication between the nodes, while maintaining certain goals, such\nas minimizing network collisions, maximizing bandwidth, or maximizing\nthe number of nodes the network can handle. Such protocols include\nTime Division Multiple Access (TDMA), which tries to minimize the\nnumber of packet collisions; Frequency Division Multiple Access\n(FDMA), which tries to maximize the bandwidth available to each\ntransmitter; and Code Division Multiple Access (CDMA), which tries to\nmaximize the number of nodes that the network can handle. We will not\ndiscuss CDMA in the scope of this work.\n\nIn FDMA, each node of the network is assigned a different transmission\nfrequency from a prescribed frequency band allocated for system\ncommunications. Since each node transmits on its own frequency,\ncollisions between nodes transmitting simultaneously are reduced.\nCommunications paradigms of this type, i.e. shared medium with\ncollision-free simultaneous transmission between nodes, can be modeled\neasily by our :math:`PNP^2` modeling paradigm described above, since\nthe network resource model for each node can be developed without\ntaking into account the transmissions of other nodes.\n\nIn TDMA, each node on the network is assigned one or more time slots\nper communications period in which only that node is allowed to\ntransmit. By governing these time slots and having each node agree\nupon the slot allocation and communications period, the protocol\nensures that at a given time, only a single node will be transmitting\ndata, minimizing the number of collisions due to multiple simultaneous\ntransmitters. In such a medium access protocol, the transmissions of\neach node affect the other nodes' transmission capability. Because\nthese transmissions are scheduled by TDMA, they can be explicitly\nintegrated into the system network resource model.\n\nTDMA transmission scheduling has an impact on the timing\ncharacteristics of the applications' network communications. Because\nan application's network data production is decoupled from its node's\nTDMA transmission time slot, buffering may be required when an\napplication on one node tries to send data on the network during the\ntransmission slot of a different node. In this case, the data would\nneed to be buffered on the application's node and would therefore\nincur additional buffering delay. If this TDMA schedule is not\nintegrated into the analysis of the network resources, the additional\nbuffer space required may exceed the buffer space allocation given to\nthe application, or the buffering delay may exceed the application's\nacceptable latency.\n\nSo far, the description of the system's provided network service\nprofile (:math:`p[t]=y`) has been abstracted as simply the available\nbandwidth as a function of time, integrated to produce the amount of\ndata serviced as a function of time. We show how to model and analyze\nthe network's lower-level TDMA MAC protocol using our network modeling\nsemantics. We then derive general formulas for determining the effect\nTDMA has on buffer size and delay predictions.\n
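\nBefore deriving those formulas, note that a TDMA slot assignment maps\ndirectly onto our profile semantics: the node's provided profile is\nsimply a periodic bandwidth profile that is non-zero only during the\nnode's slot. A minimal sketch (profiles represented as lists of\n(time, bandwidth) breakpoints; the function and its representation are\nillustrative) is:\n\n.. code-block:: python\n\n   def tdma_slot_profile(T, t_slot, slot_start, bw_slot):\n       # bandwidth is bw_slot during [slot_start, slot_start + t_slot)\n       # and 0 elsewhere within each period of length T; assumes the\n       # slot starts strictly after t = 0 and fits within the period\n       assert 0 < slot_start and slot_start + t_slot <= T\n       return [(0, 0),\n               (slot_start, bw_slot),\n               (slot_start + t_slot, 0)]\n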
\nAs an example TDMA system which benefits from our analysis techniques,\nconsider an application platform provided by a fractionated satellite\ncluster. A fractionated satellite cluster consists of many small\nsatellites that may each have different hardware, computing, and\ncommunications capabilities. These capabilities are provided to\ndistributed components of the satellite cluster's applications. Such\na system has the combined challenges of (1) being expensive to\ndevelop, test, and deploy, (2) being very difficult to repair or\nreplace in the event of failure, and (3) having to support\nmixed-criticality applications, possibly at multiple security levels.\nFor this system, the network between these satellites is a precious\nresource shared between each of the applications' components in the\ncluster. To ensure the stability of the network resources, each\nsatellite has a direct connection to every other satellite and is\nassigned a slot in the TDMA schedule during which the satellite may\ntransmit. Each TDMA slot has a sinusoidally time-varying bandwidth\nprofile which may differ from the other TDMA slot bandwidth profiles.\nThe time-varying profile of the slot bandwidth comes from the coupling\nbetween the radios' inverse-squared bandwidth-as-a-function-of-distance\nand the satellites' sinusoidal distance-as-a-function-of-orbital-position.\n\nSuch a system and its applications necessitate design-time guarantees\nabout resource utilization and availability. Applications which\nutilize the satellite network need assurances that the network\nresources they require during each part of the orbital period will be\nsatisfied. To provide these assurances, we provide the application\ndevelopers and system integrators the ability to specify and analyze\nthe network profiles as (possibly periodic) functions of time.\nFurthermore, the requirement for accurate predictions necessitates the\nincorporation of the TDMA scheduling and bandwidth profiling into the\nnetwork modeling and analysis tools.\n\nTDMA schedules can be described by their period, their number of\nslots, and the bandwidth available to each slot as a function of time.\nFor simplicity of explanation, we assume that each node only gets a\nsingle slot in the TDMA period and all slots have the same length, but\nthe results are valid for all static TDMA schedules. Note that each\nslot still has a bandwidth profile which varies as a function of time\nand that each slot may have a different bandwidth profile. In a\ngiven TDMA period (:math:`T`), the node can transmit a certain number\nof bits governed by its slot length (:math:`t_{slot}`) and the slot's\navailable bandwidth (:math:`bw_{slot}`). During the rest of the TDMA\nperiod, the node's available bandwidth is :math:`0`. This scheduling\nhas the effect of amortizing the node's slot bandwidth into an\neffective bandwidth of :math:`bw_{effective} = bw_{slot} *\n\\dfrac{t_{slot}}{T}`. The addition of the TDMA scheduling can affect\nthe buffer and delay calculations, based on the slot's bandwidth, the\nnumber of slots, and the slot length. The maximum additional delay is\n:math:`\\Delta_{delay} = T - t_{slot}`, and the maximum additional\nbuffer space is :math:`\\Delta_{buffer} = \\Delta_{delay} *\nbw_{effective}`. These deviations are shown in the plots below.\nClearly, :math:`\\Delta_{delay}` is bounded by :math:`T` and\n:math:`\\Delta_{buffer}` is governed by :math:`t_{slot}`. Therefore,\nbecause :math:`t_{slot}` is dependent on :math:`T`, minimizing\n:math:`T` minimizes both the maximum extra delay and the maximum extra\nbuffer space.\n
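\nAs a quick numeric check of these formulas before examining the plots\nbelow (the values are chosen purely for illustration):\n\n.. code-block:: python\n\n   T, t_slot, bw_slot = 0.100, 0.010, 100e6   # 100 ms period, 10 ms slot, 100 Mbps\n   bw_effective = bw_slot * t_slot / T        # 10 Mbps amortized bandwidth\n   delta_delay = T - t_slot                   # 0.090 s maximum extra delay\n   delta_buffer = delta_delay * bw_effective  # 900000 bits maximum extra buffer\n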
\n+---------------------------------------------------+-----------------------------------------------------+\n| In-Phase TDMA Profile vs. Abstract | Out-of-Phase TDMA Profile vs. Abstract |\n+===================================================+=====================================================+\n| .. image:: /images/results/tdma_phase0.png | .. image:: /images/results/tdma_phase1.png |\n| :height: 200 | :height: 200 |\n+---------------------------------------------------+-----------------------------------------------------+\n\nFollowing from this analysis, we see that if (1) the TDMA effective\nbandwidth profile is provided as the abstract system network service\nprofile, and (2) the TDMA period is much smaller than the duration of\nthe shortest profile interval, then the system with explicit modeling\nof the TDMA schedule has predicted application network characteristics\nsimilar to those of the abstract system. Additionally, the maximum\ndeviation formulas derived above provide a means for application\ndevelopers to analyze their application on a TDMA system without\nexplicitly integrating the TDMA model into the system profile model.\n\n.. _compositional_analysis:\n\nCompositional Analysis\n======================\n\nNow that we have precise network performance analysis for aggregate or\nsingular profiles on individual nodes of the network, we must\ndetermine how best to compose these profiles and nodes together to\nanalyze the overall system. The aim of this work is to allow the\nprofiles from each application to be analyzed separately from the\nother profiles in the network, so that application developers and\nsystem integrators can derive meaningful performance predictions for\nspecific applications. For this goal, let us define:\n\n| **Compositionality:** [sifakis2002_]\n| A system is compositional if its properties can be derived from\n| the properties of its components and how they are interconnected.\n|\n\n| **Composability:** [sifakis2002_]\n| A component is composable if its properties do not change\n| when the component is composed with other components.\n|\n\nFor our analysis techniques to be compositional, an application's\nrequired profile must be analyzable individually, without requiring\naggregation with the rest of the required profiles in the system.\nThis means that the system's performance, i.e. the performance of all\nthe applications on the system, can be determined by analyzing the\nperformance of each application individually.\n\nFor this compositionality, we must not only define mathematical\noperations which allow us to aggregate and separate profiles with/from\neach other, but also the semantics of how these profiles are composed\nwith one another. These semantics govern the relations between\nrequired profiles, specifically governing how their shared node's\nprovided profile is distributed among them. For our compositional\nanalysis, we defined that each required profile in the system be given\na unique priority, :math:`U`, with the relation that a profile\n:math:`P_1` has a higher priority than profile :math:`P_2` *iff*\n:math:`U_{P_1} < U_{P_2}`. Using this priority relation, we can\ndefine that a profile :math:`P_i` does not receive any capacity from\nits node at time :math:`t` until all other profiles with priority\n:math:`< U_{P_i}` have received their requested capacity from the\nnode at :math:`t`. If the node does not have enough capacity at\n:math:`t` to service :math:`P_i`, then the data :math:`P_i` attempted\nto send at :math:`t` will be placed into its buffer, to be sent at a\ntime when the node has available bandwidth for :math:`P_i`.\n
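\nA minimal sketch of this priority-ordered allocation, written in\nterms of the convolution and subtraction operations our tool\nimplements (the driver loop itself is illustrative), is:\n\n.. code-block:: python\n\n   def allocate(provided, required_profiles):\n       # serve required profiles in priority order (lower U first);\n       # each profile only sees the capacity left by higher priorities\n       outputs = []\n       for req in sorted(required_profiles, key=lambda p: p.priority):\n           output = req.Convolve(provided)              # data actually serviced\n           provided = provided.SubtractProfile(output)  # remaining capacity\n           outputs.append(output)\n       return outputs, provided\n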
\nThis priority relation for compositional analysis is similar to the\ntask priority used for schedulability analysis in Real-Time Calculus,\nmentioned in :ref:`rtc`. Similarly to RTC, this priority relation and\ncompositionality allow us to capture the effects independent profiles\nhave on each other when they share the same network resources. Just\nas RTC bases its priority relation and computation scheduling on a\nfixed-priority scheduler, our priority relation and resource allotment\nare based on the network Quality-of-Service (QoS) management provided\nby different types of networking infrastructure. One such mechanism\nfor implementing this type of priority-based network resource\nallocation is the DiffServ Code Point (DSCP) [RFC2474_]. The DSCP is\na bit-field in the header of every Internet Protocol (IP) packet which\nallows the packet to be assigned a specific class for per-hop routing\nbehavior. Routers and forwarders in the network group packets\naccording to their DSCP class and provide different service capacities\nto each class. For example, the *Expedited Forwarding* [RFC3246_]\nclass receives strict priority queuing above all other traffic, which\nmakes it a suitable implementation of this type of resource\nallocation.\n\nMathematically, compositionality requires that we be able to add and\nsubtract profiles from each other, for instance to determine the\nremaining service capacity of a node available for a profile\n:math:`P_i` after it serves all profiles with a higher priority. The\nremaining capacity, :math:`P_P'`, of the node after it services\n:math:`P_i` is given as:\n\n.. math:: P_P' = P_P - ( P_i \\otimes P_P )\n\nWhere\n\n* :math:`P_P` is the capacity available to profile :math:`P_i`\n\nWe are finalizing the design and code for tests which utilize the DSCP\nbit(s) setting on packet profiles to show that such priority-based\nanalysis techniques are correct for these types of systems.\n\n.. _delay_analysis:\n\nDelay Analysis\n==============\n\nWhen dealing with queueing systems (especially networks) where precise\ndesign-time guarantees are required, the delay in the links of the\nnetwork must be taken into account.\n\nThe delay is modeled as a continuous function of latency (seconds)\nversus time. In the profiles, the latency is specified discretely as\n:math:`(time, latency)` pairs, and is interpolated linearly between\nsuccessive pairs.\n\nUsing these latency semantics, the delay convolution of a profile\nbecomes\n\n.. math::\n   r[t + \\delta[t]] = l[t]\n\nWhere\n\n* :math:`l[t]` is the *link* profile describing the data as a function\n  of time as it enters the link\n* :math:`\\delta[t]` is the *delay* profile describing the latency as a\n  function of time on the link\n* :math:`r[t]` is the *received* profile describing the data as a\n  function of time as it is received at the end of the link\n
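\nA minimal sketch of this delay transformation, assuming the link\nprofile is represented as a list of (time, data) points and a helper\n``latency_at(delta, t)`` that linearly interpolates the latency pairs\n(both the representation and the helper are illustrative), is:\n\n.. code-block:: python\n\n   def delay(link_points, delta):\n       # shift each (time, data) point of the link profile forward by\n       # the latency in effect at that time: r[t + delta[t]] = l[t]\n       return [(t + latency_at(delta, t), d) for (t, d) in link_points]\n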
\nWhen analyzing delay in a periodic system, it is important to\ndetermine the effects of delay on the system's periodicity. We know\nthat the period of the periodic profiles is defined by the time\ndifference between the start of the profile and the end of the\nprofile. Therefore, we can show that if the time difference between\nthe **start time** of the *received* profile and the **end time** of\nthe *received* profile is the same as the **period** of the *link*\nprofile, the periodicity of the profile is unchanged.\n\n* :math:`T_p` is the period of the *link* profile\n* :math:`r[t + \\delta[t]]` is the beginning of the *received* profile\n* :math:`r[(t + T_p) + \\delta[(t + T_p)]]` is the end of the\n  *received* profile\n\nWe determine the condition for which :math:`(t_{end}) - (t_{start}) =\nT_p`:\n\n.. math::\n   (T_p + t + \\delta[T_p + t]) - (t + \\delta[t]) &= T_p \\\\\n   T_p + \\delta[T_p + t] - \\delta[t] &= T_p \\\\\n   \\delta[T_p + t] - \\delta[t] &= 0 \\\\\n   \\delta[T_p + t] &= \\delta[t]\n\nThis simply confirms that the periodicity of the delayed profile is\nunchanged *iff* the latency profile is itself **periodic**, i.e.\n\n.. math:: \\delta[t] = \\delta[t + k*T_p], \\forall k\\in\\mathbb{N} > 0\n\n.. _routing_analysis:\n\nRouting Analysis\n================\n\nHaving discussed profile composition and the effects of delaying a\nprofile, we can address one more aspect of system analysis: *routing*.\nFor this analysis we will specifically focus on statically routed\nnetworks.\n\nBy combining the latency analysis with the compositional operations we\ndeveloped, we can perform system-level analysis of profiles which are\nrouted by nodes of the system. In this paradigm, nodes can\ntransmit/receive their own data, i.e. they can host applications which\nact as data sources or sinks, as well as act as routers for profiles\nfrom and to other nodes. To make such a system amenable to analysis,\nwe must ensure that we know the routes the profiles will take at\ndesign time, i.e. the routes in the network are static and known or\ncalculable. Furthermore, we must, for the sake of profile composition\nas described above, ensure that each profile has a priority that is\nunique within the network, which governs how the transmitting and\nrouting nodes handle the profile's data.\n\nLet us define the system configuration :math:`C` as:\n\n.. math:: C = \\{\\{P_S\\},\\{N\\},\\{R\\}\\}\n\nWhere\n\n* :math:`\\{P_S\\}` is the *set* of all *sender* profiles in the system\n  configuration\n* :math:`\\{N\\}` is the *set* of all *nodes* in the system configuration, and\n* :math:`\\{R\\}` is the *set* of all *routes* in the system configuration\n\nWe define a profile :math:`P` as:\n\n.. math:: P = \\{N_I,K,T,F,U,\\{(t,R_D,D,L)\\}\\}\n\nWhere\n\n* :math:`N_I` is the *Node ID* to which the profile applies\n* :math:`K` is the *kind* of the profile, where\n  :math:`K\\in\\{provided,required,receiver\\}`\n* :math:`T` is the *period* of the profile\n* :math:`F` is the *flow ID* of the profile, where two profiles,\n  :math:`P_1,P_2`, belong to the same flow *iff*\n  :math:`F_{P_1}==F_{P_2}`\n* :math:`U` is the *priority* of the profile, where profile\n  :math:`P_1` has a higher priority than profile :math:`P_2` *iff*\n  :math:`U_{P_1} < U_{P_2}`, and\n* :math:`\\{(t,R_D,D,L)\\}` is a *set* of :math:`(time, data\\ rate,\n  data, latency)` tuples describing how each of :math:`\\{data\\ rate,\n  data, latency\\}` varies with respect to time. Semantically, the\n  :math:`data\\ rate` is constant between any two successive values of\n  :math:`t`, while the :math:`data` and :math:`latency` are *linearly\n  interpolated* during the same interval. The initial profile\n  specification does not have the :math:`data` field; :math:`data` is\n  calculated from the :math:`data\\ rate`.\n\nThen we define a node :math:`N` as:\n\n.. math:: N = \\{I,P_P,\\{P_R\\}\\}\n\nWhere\n\n* :math:`I` is the *ID* of the node\n* :math:`P_P` is the *provided* profile of the node, and\n* :math:`\\{P_R\\}` is the *set* of all *receiver* profiles on the node\n\nAnd finally, we define a route :math:`R` as:\n\n.. math:: R = \\{N_{I_1},N_{I_2},...,N_{I_N}\\}\n\nWhere\n\n.. math:: \\forall N_X,N_Y \\in \\{N\\}, \\exists! R_{X,Y} = \\{N_{I_X},...,N_{I_Y}\\}\n\ni.e. between any two nodes of the system there exists exactly one\nroute.\n
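\nThese definitions map naturally onto simple data structures; a\nminimal sketch (the field names are illustrative, not our tool's\nexact classes) is:\n\n.. code-block:: python\n\n   class Profile(object):\n       def __init__(self, node_id, kind, period, flow_id, priority, entries):\n           self.node_id = node_id      # N_I\n           self.kind = kind            # K: 'provided', 'required', or 'receiver'\n           self.period = period        # T\n           self.flow_id = flow_id      # F\n           self.priority = priority    # U: lower value means higher priority\n           self.entries = entries      # [(t, data_rate, data, latency), ...]\n\n   class Node(object):\n       def __init__(self, node_id, provided, receivers):\n           self.node_id = node_id      # I\n           self.provided = provided    # P_P\n           self.receivers = receivers  # {P_R}\n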
\nWe can then run the following algorithm to iteratively analyze the\nsystem:\n\n.. code-block:: C#\n\n   analyze( sender_profiles )\n   {\n     sender_profiles = sorted(sender_profiles, priority)\n     for required_profile in sender_profiles\n     {\n       transmitted_nodes = list.empty()\n       for receiver_profile in required_profile.receiver_profiles()\n       {\n         route = getRoute(required_profile, receiver_profile)\n         for node in route\n         {\n           if node in transmitted_nodes and multicast == true\n           {\n             continue\n           }\n           provided_profile = node.provided_profile\n\n           output_profile = convolve(required_profile, provided_profile)\n           remaining_profile = provided_profile - output_profile\n           received_profile = delay(output_profile, provided_profile)\n\n           node.provided_profile = remaining_profile\n           required_profile = received_profile\n           transmitted_nodes.append(node)\n         }\n         receiver_received_profile = convolve(required_profile, receiver_profile)\n       }\n     }\n   }\n\nIn this algorithm, the remaining capacity of the node is provided to\neach profile with a lower priority iteratively. Because of this\niterative recalculation of node provided profiles based on routed\nprofiles, we directly take into account the effect of multiple\nindependent profiles traversing the same router: the highest priority\nprofile receives as much bandwidth as the router can give it, the next\nhighest priority profile receives the remaining bandwidth, and so on.\n\nWe take care of matching all senders to their respective receivers,\nand ensure that if the system supports multicast, no retransmissions\noccur; only nodes which must route the profile to a new part of the\nnetwork retransmit the data. However, if the system does not support\nmulticast, then the sender must issue a separate transmission for each\nreceiver, further consuming network resources. In this way,\nlower-level transport capabilities can be at least partially accounted\nfor by our analysis.\n
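\nThe ``getRoute`` lookup used by the algorithm relies on the\nuniqueness condition on routes given above; a minimal sketch backed\nby a dictionary of precomputed static routes (illustrative, not the\ntool's exact implementation) is:\n\n.. code-block:: python\n\n   def get_route(routes, src_id, dst_id):\n       # routes: {(src, dst): [src, n1, ..., dst]}; the uniqueness\n       # condition guarantees exactly one route per (src, dst) pair\n       return routes[(src_id, dst_id)]\n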
\nWe have implemented these functions for statically routed network\nanalysis in our tool, which automatically parses the profiles and the\nnetwork configuration, and uses the algorithm and the implemented\nmathematics to iteratively analyze the network. Analytical results\nfor example systems will be provided when the experimental results are\navailable to serve as a comparison.\n\nWe are finishing the design and development of the code which will\nallow us to run experiments to validate our routing analysis results.\nThese experiments will be complete in the next two weeks.\n\n.. [sifakis2002] G. Goessler, J. Sifakis, \"Composition for\n\t\t Component-Based Modeling,\" Springer, Formal Methods\n\t\t for Components and Objects, 2003.\n\n.. [RFC2474] K. Nichols, Cisco Systems, et al., \"Definition of the\n\t Differentiated Services Field (DS Field) in the IPv4 and\n\t IPv6 Headers,\" IETF, RFC 2474, Dec.\n\t 1998. [Online]. Available: https://tools.ietf.org/html/rfc2474\n\n.. [RFC3246] B. Davie, A. Charny, et al., \"An Expedited Forwarding\n\t PHB (Per-Hop Behavior),\" IETF, RFC 3246, Mar.\n\t 2002. [Online]. Available: https://tools.ietf.org/html/rfc3246\n" }, { "alpha_fraction": 0.6157191395759583, "alphanum_fraction": 0.61949223279953, "avg_line_length": 38.48737335205078, "blob_id": "562bd07fd6dd160de6584cd0bd85b73923122a1b", "content_id": "04516889fad5560ecaebd5bf70102738c43275f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15637, "license_type": "no_license", "max_line_length": 116, "num_lines": 396, "path": "/src/analysis/v2.0/analysis.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"This program is designed to analyze network performance of\ndistributed applications in a networked system. Its analysis\ntechniques are based on Network Calculus and provide deterministic\nanalysis of networks and network applications. By analyzing the\nQuality of Service (QoS) that the system network provides to the\napplications and users, we can determine the buffer space required for\nthe applications to communicate losslessly as well as the buffering\ndelay experienced by the network traffic.\n\nThis program in particular implements these calculations and is able\nto load, parse, and analyze network profiles and configuration files\ndescribing the system, the network flows, and the time-dependent\ntraffic generation or service profiles associated with the\napplications or the system, respectively.\n\"\"\"\n\nimport glob\nimport os\n\nfrom networkProfile import Profile\nfrom networkConfig import Config\nfrom plotting import plot_bandwidth_and_data, havePLT\nfrom utils import lcm, bcolors\n\n\ndef analyze_profile(required, provided, config, options):\n    \"\"\"* Calculates the hyperperiod of the profiles\n    * Repeats the profiles for the specified number of hyperperiods in\n      *options*\n    * Analyzes the requested profiles\n    * If more than one hyper-period has been specified it determines\n      system stability\n    * Optionally plots the bandwidths and data for the profiles\n\n    :param in required: :class:`networkProfile.Profile` describing the\n        required profile\n\n    :param in provided: :class:`networkProfile.Profile` describing the\n        provided profile\n\n    :param in config: :class:`networkConfig.Config` describing the\n        configuration of the network\n\n    :param in options: :class:`Options` describing the program options\n        for drawing and analysis\n\n    Returns a tuple of analysis results consisting of::\n\n        [ output, remaining, received, max buffer, max delay ]\n\n    * The output profile as a :class:`networkProfile.Profile`\n      generated by calling **required.**\n      :func:`networkProfile.Profile.Convolve` ( provided )\n\n    * The remaining capacity profile as a\n      :class:`networkProfile.Profile` which is determined as\n      :math:`remaining = (provided - output)`\n\n    * The received profile as a :class:`networkProfile.Profile`,\n      which is the output profile delayed by the link's latency,\n      generated by calling **output.**\n      :func:`networkProfile.Profile.Delay` ( provided )\n\n    * The buffer structure generated by calling **required.**\n      :func:`networkProfile.Profile.CalcBuffer` ( output )\n\n    * The delay structure generated by calling **required.**\n      :func:`networkProfile.Profile.CalcDelay` ( output )\n\n    \"\"\"\n    num_periods = options.num_periods\n    nc_mode = options.nc_mode\n    nc_step_size = options.nc_step_size\n    plot_dict = options.plot_dict\n\n    topology = config.topology\n    routes = config.routes\n    multicast = config.multicast\n    retransmit = config.retransmit\n\n    # CALCULATE HYPERPERIOD\n    hyperPeriod = lcm( required.period, provided.period )\n    #print \"\\nCalculated hyperperiod as {} seconds\".format(hyperPeriod)\n\n    # REPEAT PROFILES FOR THE RIGHT NUMBER OF HYPERPERIODS\n    required.Repeat( (hyperPeriod / required.period) * num_periods )\n
provided.Repeat( (hyperPeriod / provided.period) * num_periods )\n\n # INTEGRATE THE PROFILES FOR ANALYSIS\n provided.Integrate(hyperPeriod * num_periods)\n required.Integrate(hyperPeriod * num_periods)\n\n # CONVOLVE REQUIRED WITH PROVIDED TO PRODUCE OUTPUT\n output = required.Convolve(provided)\n output.period = hyperPeriod\n output.name = \"transmitted\"\n # CALCULATE SENDER-SIDE BUFFER AND DELAY FROM OUTPUT AND REQUIRED\n maxBuffer = required.CalcBuffer(output)\n maxDelay = required.CalcDelay(output)\n\n # delay the output according to the latency of the node's link\n # this determines the characteristics of the data at the receiver end\n received = output.Delay(provided)\n received.Kind(\"received\")\n received.name = \"received\"\n received.period = hyperPeriod\n\n # calculate the remaining capacity of the node's link\n remaining = provided.SubtractProfile(output)\n remaining.Kind(\"leftover\")\n remaining.name = \"remaining\"\n remaining.period = hyperPeriod\n remaining.Integrate(hyperPeriod * num_periods)\n\n # optionally analyze this using NC:\n if nc_mode:\n provided_nc = provided.ConvertToNC(min, nc_step_size)\n required_nc = required.ConvertToNC(max, nc_step_size)\n output_nc = required_nc.Convolve(provided_nc)\n maxBuffer_nc = required_nc.CalcBuffer(output_nc)\n maxDelay_nc = required_nc.CalcDelay(output_nc)\n\n # Print out analysis info\n print bcolors.OKBLUE +\\\n \"\\tMax buffer (time, bits): [{}, {}]\".format(maxBuffer[0], maxBuffer[2])\n print \"\\tMax delay (time, seconds): [{}, {}]\".format(maxDelay[0], maxDelay[2]) +\\\n bcolors.ENDC\n\n if nc_mode:\n print bcolors.OKBLUE +\\\n \"\\tMax buffer NC (time, bits): [{}, {}]\".format(maxBuffer_nc[0], maxBuffer_nc[2])\n print \"\\tMax delay NC (time, seconds): [{}, {}]\".format(maxDelay_nc[0], maxDelay_nc[2]) +\\\n bcolors.ENDC\n \n # DETERMINE SYSTEM STABILITY IF WE HAVE MORE THAN ONE HYPERPERIOD\n # TO ANALYZE\n if num_periods > 1:\n reqDataP1 = required.GetValueAtTime('data', hyperPeriod)\n reqDataP2 = required.GetValueAtTime('data', 2*hyperPeriod)\n outDataP1 = output.GetValueAtTime('data', hyperPeriod)\n outDataP2 = output.GetValueAtTime('data', 2*hyperPeriod)\n buff1 = reqDataP1 - outDataP1\n buff2 = reqDataP2 - outDataP2\n # If the buffer size increases between periods, the system is\n # not stable.\n if buff2 > buff1:\n print bcolors.FAIL +\\\n \"\"\"WARNING: BUFFER UTILIZATION NOT CONSISTENT THROUGH ANALYZED\n PERIODS\"\"\"\n print \"\\t APPLICATION MAY HAVE UNBOUNDED BUFFER GROWTH ON NETWORK\\n\" +\\\n bcolors.ENDC\n\n if plot_dict['plot'] and havePLT:\n profList = [required, provided, output, remaining, received]\n for key in plot_dict:\n profList = [x for x in profList if key not in x.kind]\n plot_bandwidth_and_data(profList, maxDelay, maxBuffer,\n num_periods, plot_dict)\n if nc_mode:\n profList = [required_nc, provided_nc, output_nc]\n plot_bandwidth_and_data(profList, maxDelay_nc,\n maxBuffer_nc, num_periods,\n plot_dict, xaxislabel=\"\"\"Time\n Window Size (s)\"\"\")\n\n # Shrink the profiles back down so that they can be composed with\n # other profiles\n received.Shrink(received.period)\n output.Shrink(output.period)\n remaining.Shrink(remaining.period)\n provided.Shrink(provided.period)\n required.Shrink(required.period)\n\n return output, remaining, received, maxBuffer, maxDelay\n\n\ndef parse_profiles(config, options):\n # COPY THE CONFIG'S RELEVANT MEMBERS LOCALLY\n req_fName = options.required_fileName\n prov_fName = options.provided_fileName\n recv_fName = options.receiver_fileName\n profDir = 
options.profile_folderName\n\n    # GET ALL PROFILE FILE NAMES\n    fNames = []\n    if profDir:\n        if os.path.isdir(profDir):\n            print \"Analyzing profiles in {}\".format(profDir)\n            fNames = glob.glob(profDir + os.sep + \"*.csv\")\n            fNames.extend(glob.glob(profDir + os.sep + \"*.pnp\"))\n            fNames.extend(glob.glob(profDir + os.sep + \"*.snp\"))\n        else:\n            print \"ERROR: cannot find {}\".format(profDir)\n    else:\n        fNames = [req_fName, prov_fName, recv_fName]\n\n    # PARSE THE PROFILES FROM THE REQUESTED FILES\n    for fName in fNames:\n        newProf = Profile()\n        if newProf.ParseFromFile(fName) == -1:\n            print \"ERROR: could not parse {}\".format(fName)\n            return -1\n        print \"Profile {} has:\".format(fName)\n        print \"\\tperiod = {} seconds\".format(newProf.period)\n        print \"\\tnode ID = {}\".format(newProf.node_id)\n        print \"\\tkind = {}\".format(newProf.kind)\n        if newProf.flow_type:\n            print \"\\tflow type = {}\".format(newProf.flow_type)\n        if newProf.priority:\n            print \"\\tpriority = {}\".format(newProf.priority)\n        config.addProfile(newProf)\n\n\ndef analyze_config(config, options):\n    '''\n    This function analyzes the system configuration in flow priority\n    order, taking into account system-level concepts such as multicast\n    capabilities. It performs the following steps:\n\n    * sort the sender profiles by priority\n    * retrieve from the system config all receiver profiles associated\n      with this flow type\n    * for each receiver:\n\n      * get the route the flow will take from the sender to the\n        receiver\n      * for each node along the route:\n\n        * analyze the flow's profile with the node's provided profile\n        * set the node's provided profile to the remaining profile\n        * set the flow's required profile to the received profile\n\n      * analyze the flow's profile with the receiver's profile\n\n    '''\n    nodes = config.nodes\n    print_profiles = options.print_profiles\n\n    keys = config.senders.keys()\n    keys = sorted(keys)\n\n    for key in keys:\n        required = config.senders[key]\n        transmitted_nodes = []\n        flow_receivers = config.receivers[required.flow_type]\n        for recv in flow_receivers:\n            route = config.GetRoute(required.node_id, recv.node_id)\n\n            print ''\n            if print_profiles:\n                print \"Analyzing:\"\n                print required.ToString('\\t')\n                print \"along route: {}\".format(route)\n\n            recv_node = route[-1]\n            route = route[:-1]  # don't want the final node to transmit the data\n            # analyze all the transmitters in the system\n            for node_id in route:\n                if config.multicast and node_id in transmitted_nodes:\n                    # NOTE: the message must be passed to print as an\n                    # argument; a bare string literal on its own line is\n                    # an expression statement and is never printed\n                    print \"Node {} has already transmitted this data and \" \\\n                        \"multicast is enabled, skipping.\".format(node_id)\n                    continue\n\n                if print_profiles:\n                    print nodes[node_id].provided.ToString('\\t')\n                print \"(Re-)transmitter {} analysis:\".format(node_id)\n                output, remaining, received, buf, delay = analyze_profile(\n                    required, nodes[node_id].provided,\n                    config,\n                    options\n                )\n                nodes[node_id].provided = remaining\n                nodes[node_id].provided.Kind('provided')\n                output.priority = required.priority\n                required = received\n                required.Kind('required')\n                transmitted_nodes.append(node_id)\n            # now analyze the receiver on the final node\n            print \"Receiver analysis:\"\n            output, remaining, received, buf, delay = analyze_profile(\n                required, recv, config, options)\n\n\ndef main(argv):\n    \"\"\"Performs the main analysis of the profiles using the following steps:\n\n    * Parses the command line options according to the\n      :class:`Options` specification.\n\n    * Loads the specified network configuration\n\n    * Parses the files into separate profiles\n\n    * Analyzes the\n
system configuration\n\n \"\"\"\n options = Options()\n if options.parse_args(argv):\n return -1\n\n # COPY THE COMMAND LINE OPTIONS LOCALLY\n confName = options.network_configName\n\n # LOAD THE NETWORK CONFIG\n config = Config()\n if config.ParseFromFile(confName) == -1:\n return -1\n print \"Using network configuration defined in {}.\".format(\n confName)\n\n # PARSE THE PROFILES\n parse_profiles(config, options)\n\n # ANALYZE THE SYSTEM\n analyze_config(config, options)\n\n\nclass Options:\n \"\"\"\n\\t--help (to show this help and exit)\n\\t--nc_mode (to run network calculus calcs)\n\\t--no_plot (to not output any plots)\n\\t--no_profile_name (to not plot 'profile_name', e.g. 'no_required')\n\\t--no_axes_tickmarks (to not display axes tickmarks)\n\\t--no_annotations (to not display buffer / delay annotations)\n\\t--print (to print the profiles as they are analyzed)\n\\t--required <fileName containing the required profile>\n\\t--provided <fileName containing the provided profile>\n\\t--receiver <fileName containing the receiver profile>\n\\t--profile_folder <path containing profiles to be loaded>\n\\t--network_config <file containing network configuration>\n\\t--num_periods <number of periods to analyze>\n\\t--nc_step_size <step size for time-windows in NC mode>\n \"\"\"\n def __init__(self):\n self.plot_profiles = havePLT #: plot the profiles?\n self.plot_dict = {'plot': True,\n 'axes_tickmarks': True,\n 'annotations': True,\n 'linewidth': 4,\n 'dashes': True,\n 'font_size': 25} #: dictionary with plot options\n self.print_profiles = False #: print the profiles?\n self.num_periods = 1 #: number of periods to analyze\n self.nc_mode = False #: analyze using network calculus techniques?\n self.nc_step_size = 1 #: step size for network calculus analysis\n self.required_fileName = \"required.csv\" #: what file to load as the required profile\n self.provided_fileName = \"provided.csv\" #: what file to load as the provided profile\n self.receiver_fileName = \"receiver.csv\" #: what file to load as the receiver profile\n self.profile_folderName = \"\" #: path to a folder which contains all the profiles to be analyzed\n self.network_configName = \"config.csv\" #: file which contains the topology and configuration of the network\n\n def parse_args(self, args):\n argind = 1\n while argind < len(args):\n if args[argind] == \"--num_periods\":\n self.num_periods = int(args[argind+1])\n if self.num_periods <= 0:\n print \"Error! 
You must specify a number of periods > 0\"\n return -1\n argind += 1\n elif \"--no_\" in args[argind]:\n self.plot_dict[args[argind].split('_', 1)[-1]] = False\n elif args[argind] == '--print':\n self.print_profiles = True\n elif args[argind] == \"--nc_mode\":\n self.nc_mode = True\n elif args[argind] == \"--nc_step_size\":\n self.nc_step_size = float(args[argind+1])\n argind += 1\n elif args[argind] == \"--required\":\n self.required_fileName = args[argind+1]\n argind += 1\n elif args[argind] == \"--provided\":\n self.provided_fileName = args[argind+1]\n argind += 1\n elif args[argind] == \"--receiver\":\n self.receiver_fileName = args[argind+1]\n argind += 1\n elif args[argind] == \"--profile_folder\":\n self.profile_folderName = args[argind+1]\n argind += 1\n elif args[argind] == \"--network_config\":\n self.network_configName = args[argind+1]\n argind += 1\n elif args[argind] == \"--help\":\n self.print_usage(args[0])\n return -1\n argind += 1\n return 0\n\n def print_usage(self, name):\n print \"\"\"Usage:\\n{}{}\"\"\".format(name, self.__doc__)\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv)\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 16, "blob_id": "20b861bf9bdae41a821ac7d5866c07693415a7e9", "content_id": "fc681effa11a81e35d82b72df7fb67069e64e264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 119, "license_type": "no_license", "max_line_length": 46, "num_lines": 7, "path": "/src/middleware/v2.0/log_macro.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef LOG_MACRO_HPP\n#define LOG_MACRO_HPP\n\n#include <stdio.h>\n#define TG_LOG(x,...) printf(x, ##__VA_ARGS__)\n\n#endif\n" }, { "alpha_fraction": 0.6748251914978027, "alphanum_fraction": 0.6748251914978027, "avg_line_length": 24.909090042114258, "blob_id": "ba9e8654e068602fca107e5d3798cc86834a5ee7", "content_id": "443d0321ed578eb60aada6f36635a2340adf8295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 286, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/doc/README.md", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "# Building the documentation\n\n * Install sphinx: ```apt-get install python-sphinx```\n\n * Install the ReadTheDocs theme: ```pip install sphinx_rtd_theme```\n\n * Install sphinx autobuild: ```pip install sphinx-autobuild```\n\n * Build it: ```make build```\n\n * Or serve it: ```make serve```\n\n" }, { "alpha_fraction": 0.5995607376098633, "alphanum_fraction": 0.6139580011367798, "avg_line_length": 26.859155654907227, "blob_id": "c202d6ae3fb23f57699772144c3ad44817b54652", "content_id": "886552d9096fff751baabb2ac863f089c9819993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4098, "license_type": "no_license", "max_line_length": 93, "num_lines": 142, "path": "/src/middleware/v1.0/NetworkMiddleware.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_MIDDLEWARE_HPP\r\n#define NETWORK_MIDDLEWARE_HPP\r\n\r\n#include <stdlib.h>\r\n#include <unistd.h>\r\n#include <stdio.h>\r\n#include <signal.h>\r\n#include <time.h>\r\n\r\n#include <pthread.h>\r\n\r\n#include <vector>\r\n#include <string>\r\n\r\n#include \"log_macro.hpp\"\r\n#include \"NetworkBuffer.hpp\"\r\n#include \"NetworkProfile.hpp\"\r\n#include \"NetworkMiddleware.hpp\"\r\n\r\ntypedef void* 
(*sendFunc_t)(Message* data);\r\n\r\nnamespace NetworkMiddleware {\r\n\r\n  static NetworkBuffer buffer;\r\n  NetworkProfile profile;\r\n  timespec nextSendTime;\r\n  static pthread_t threadSend;\r\n  static bool threadSendDie;\r\n  static pthread_cond_t threadSendCV;\r\n  static pthread_mutex_t threadSendMutex;\r\n  sendFunc_t sendFunctionPtr;\r\n\r\n  // Consumer thread: pops buffered messages and sends them at the\r\n  // rate dictated by the network profile\r\n  void *threadSendFunction(void * arg){\r\n    Message* data;\r\n    bool sendData = false;\r\n    double timeDiff = 0;\r\n    timespec currentTime, condTimeout, sleepTime, remainingTime;\r\n    sleepTime.tv_sec = 0;\r\n    sleepTime.tv_nsec = 0;\r\n\r\n    while (!threadSendDie) {\r\n      if ( timeDiff > 0 ) {\r\n        nanosleep(&sleepTime,&remainingTime);\r\n        timeDiff = 0;\r\n      }\r\n      clock_gettime(CLOCK_REALTIME,&currentTime);\r\n      condTimeout = currentTime;\r\n      condTimeout.tv_sec += 1;\r\n      pthread_mutex_lock(&threadSendMutex);\r\n      if ( buffer.Size() == 0 ) {\r\n        pthread_cond_timedwait(&threadSendCV,&threadSendMutex,&condTimeout);\r\n      }\r\n      if ( buffer.Pop(data) == 0 ) {\r\n        sendData = true;\r\n      }\r\n      pthread_mutex_unlock(&threadSendMutex);\r\n      if ( sendData && data != NULL ) {\r\n        data->TimeStamp();\r\n        (*sendFunctionPtr)(data);\r\n        timeDiff = profile.Delay(data->Bits(),data->LastEpochTime());\r\n        double fractpart,intpart;\r\n        fractpart = modf(timeDiff,&intpart);\r\n        sleepTime.tv_sec = (unsigned long long)(intpart);\r\n        sleepTime.tv_nsec = (unsigned long)(fractpart*1000000000.0);\r\n        sendData = false;\r\n      }\r\n    }\r\n    TG_LOG(\"Buffer send thread exiting!\\n\");\r\n    pthread_exit(NULL);\r\n  }\r\n\r\n  int Init( NetworkProfile p, sendFunc_t func ,long capacity = 0 ) {\r\n    if ( func == NULL ) {\r\n      TG_LOG(\"ERROR: Send func is NULL!\\n\");\r\n      return -1;\r\n    }\r\n    sendFunctionPtr = func;\r\n    nextSendTime.tv_sec = 0;\r\n    nextSendTime.tv_nsec = 0;\r\n    buffer.Capacity(capacity);\r\n    profile = p;\r\n    if (!profile.Initialized()) {\r\n      TG_LOG(\"WARNING: couldn't initialize buffer profile!\\n\");\r\n      TG_LOG(\"\\tActing as a pass-through buffer!\\n\");\r\n    }\r\n    else {\r\n      threadSendDie = false;\r\n      pthread_mutex_init (&threadSendMutex, NULL);\r\n      pthread_cond_init (&threadSendCV, NULL);\r\n      pthread_create(&threadSend, NULL, NetworkMiddleware::threadSendFunction, (void *)NULL);\r\n      TG_LOG(\"Created thread %lu\\n\",\r\n             threadSend);\r\n    }\r\n    return 0;\r\n  }\r\n\r\n  int Exit() {\r\n    if ( profile.Initialized() == true ) {\r\n      threadSendDie = true;\r\n      int retVal;\r\n      TG_LOG(\"Joining thread %lu\\n\",\r\n             threadSend);\r\n      pthread_join(threadSend,(void **)&retVal);\r\n      TG_LOG(\"exited join thread!\\n\");\r\n      pthread_mutex_destroy(&threadSendMutex);\r\n      pthread_cond_destroy(&threadSendCV);\r\n    }\r\n    // NOTE: Exit() is declared to return int; flowing off the end of a\r\n    // non-void function is undefined behavior, so return a status here\r\n    return 0;\r\n  }\r\n\r\n  int send(Message* data) {\r\n    if ( data == NULL )\r\n      return -1;\r\n    int retVal = -1;\r\n    if ( profile.Initialized() == true ) {\r\n      //timespec currentTime;\r\n      double timeDiff = 0;\r\n\r\n      pthread_mutex_lock(&threadSendMutex);\r\n      if ( buffer.Size() == 0 )\r\n        pthread_cond_signal(&threadSendCV);\r\n      retVal = buffer.Push(data);\r\n      pthread_mutex_unlock(&threadSendMutex);\r\n\r\n      timeDiff = profile.Delay(data->Bits(),data->FirstEpochTime());\r\n      double fractpart,intpart;\r\n      fractpart = modf(timeDiff,&intpart);\r\n      clock_gettime(CLOCK_REALTIME,&nextSendTime);\r\n      nextSendTime.tv_sec += (unsigned long long)(intpart);\r\n      nextSendTime.tv_nsec += (unsigned long)(fractpart*1000000000.0);\r\n      if ( nextSendTime.tv_nsec > 999999999 ) {\r\n        nextSendTime.tv_sec += 1;\r\n        nextSendTime.tv_nsec = nextSendTime.tv_nsec - 1000000000;\r\n      }\r\n    }\r\n    else {\r\n      (*sendFunctionPtr)(data);\r\n    }\r\n
    return retVal;\r\n  }\r\n}\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.49003320932388306, "alphanum_fraction": 0.514950156211853, "avg_line_length": 19.846153259277344, "blob_id": "b96b098ab2051ff75abc4b13204aa67538ec18ec", "content_id": "c7696f14a8930f03b8a70b09a5436961192700ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1806, "license_type": "no_license", "max_line_length": 68, "num_lines": 91, "path": "/src/middleware/v1.0/Server.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef SERVER_HPP\n#define SERVER_HPP\n\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <string>\n\n#include \"log_macro.hpp\"\n#include \"ConnectionSubsys.hpp\"\n#include \"Message.hpp\"\n\nint append_data(std::string fname, Message& data);\n\nclass Options {\npublic:\n  std::string ip;\n  std::string outputFile;\n  long port;\n  long bitLength;\n\n  Options() {\n    this->port = 7777;\n    this->bitLength = 4096;\n    this->ip = \"2001:470:489e::3\";\n    this->outputFile = \"serverOutput.csv\";\n  }\n\n  int Parse(int argc, char **argv) {\n    if ( argc < 2 )\n      return 0;\n    char str[256];\n    snprintf(str,sizeof(str),\"%s\",argv[1]);\n    if ( argc > 2 ) {\n      for (int i=2;i<argc;i++) {\n        // NOTE: appending through a separate snprintf avoids the\n        // undefined behavior of passing str as both the destination\n        // and a source argument of sprintf\n        size_t len = strlen(str);\n        snprintf(str+len,sizeof(str)-len,\" %s\",argv[i]);\n      }\n    }\n    char *p = strtok(str,\"-\");\n    while (p != 0) {\n      switch (p[0])\n        {\n        case 'o':\n          for (int i=0;i<=strlen(p+2);i++) {\n            if ( (p+2)[i] == ' ' ) {\n              (p+2)[i] = 0;\n              break;\n            }\n          }\n          this->outputFile = p+2;\n          break;\n        case 'i':\n          for (int i=0;i<=strlen(p+2);i++) {\n            if ( (p+2)[i] == ' ' ) {\n              (p+2)[i] = 0;\n              break;\n            }\n          }\n          this->ip = p+2;\n          break;\n        case 'p':\n          this->port = atoi(p+2);\n          break;\n        case 'b':\n          this->bitLength = atoi(p+2);\n          break;\n        case '?':\n        default:\n          TG_LOG(\"usage: \\n\\t%s\\n\"\n                 \"\\t\\t -o <filename for data output file>\\n\"\n                 \"\\t\\t -i <ipv6 address of server>\\n\"\n                 \"\\t\\t -p <port number of server>\\n\"\n                 \"\\t\\t -b <# bits in message>\\n\"\n                 ,argv[0]);\n          return -1;\n        }\n      p = strtok(NULL,\"-\");\n    }\n    return 0;\n  }\n\n  void Print() {\n    TG_LOG(\"Options():\\n\");\n    TG_LOG(\"\\t output filename\\t\\t: %s\\n\",this->outputFile.c_str());\n    TG_LOG(\"\\t server ipv6 address\\t\\t: %s\\n\",this->ip.c_str());\n    TG_LOG(\"\\t server port number\\t\\t: %lu\\n\",this->port);\n    TG_LOG(\"\\t bits in message\\t\\t: %lu\\n\",this->bitLength);\n  }\n};\n\n#endif\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 23, "blob_id": "bc0b94f6159e0bdd0849fb11156f5ab17507d115", "content_id": "83a35959d4ee229c349dded58562abe1c4d72f57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/refs/README.md", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "# cbsat\nComponent-Based Software Analysis Tools\n" }, { "alpha_fraction": 0.6339539289474487, "alphanum_fraction": 0.6532028913497925, "avg_line_length": 29.757282257080078, "blob_id": "3b7cc18eabb7b...", "content_id": "146db75dbd90ebbb6233a2c1781821efdbf8deb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3169, "license_type": "no_license", "max_line_length": 98, "num_lines": 103, "path": "/src/middleware/v2.0/Client.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "\n#include \"Client.hpp\"\n\nint main(int\n
argc, char **argv) {\n\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n Network::NetworkProfile profile;\n std::string profileFile = options.tgFile; \n if ( profile.initializeFromFile(profileFile.c_str()) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize TG profile!\\n\");\n return -1;\n }\n\n std::string outputFile = options.outputFile;\n long messageBitLength = options.bitLength;\n long messageStrLength = ceil((double)messageBitLength/8.0f);\n double runTime = ( options.runTime > 0 ) ? options.runTime : profile.period*options.numPeriods ;\n\n Connection* interface;\n if ( options.ip.find(\".\") != std::string::npos )\n interface = new IPV4_Connection();\n else\n interface = new IPV6_Connection();\n interface->serverIP = options.ip;\n interface->serverPort = options.port;\n if ( interface->Initialize(false) != 0 ) {\n TG_LOG(\"ERROR: Couldn't initialize interface!\\n\");\n return -1;\n }\n\n double timerDelay = 0;\n timespec timeout, remaining;\n\n timespec startTime, currentTime;\n clock_gettime(CLOCK_REALTIME,&currentTime);\n\n long id = 0;\n std::vector<Network::Message> messages;\n\n double start_delay = profile.period - profile.getOffset(currentTime);\n if ( start_delay > 0 ) {\n double fractpart,intpart;\n fractpart = modf(start_delay,&intpart);\n timeout.tv_sec = (unsigned long long)(intpart);\n timeout.tv_nsec = (unsigned long)(fractpart*1000000000.0);\n int return_code = nanosleep (&timeout, &remaining);\n }\n \n double timeDiff = 0;\n double start = 0;\n clock_gettime(CLOCK_REALTIME,&startTime);\n start = Network::EpochToDouble(startTime);\n\n while (true) {\n Network::Message data = Network::Message(messageBitLength, id++);\n data.TimeStamp();\n \n interface->Send( data.Buffer().c_str(),\n\t\t data.Bytes() );\n\n data.Bits( data.Bits() +\n\t Network::ipv4_header_bytes * 8 +\n\t Network::ipv4_route_bytes * 8 +\n\t Network::ipv4_header_padding_bytes * 8 +\n\t Network::udp_header_bytes * 8 );\n messages.push_back(data); \n\n timeDiff = data.FirstDoubleTime() - start;\n if ( timeDiff >= runTime )\n break;\n\n timerDelay = profile.Delay(data.Bits(),data.FirstEpochTime());\n if ( timerDelay > 0 ) {\n double fractpart,intpart;\n fractpart = modf(timerDelay,&intpart);\n timeout.tv_sec = (unsigned long long)(intpart);\n timeout.tv_nsec = (unsigned long)(fractpart*1000000000.0);\n int return_code = nanosleep (&timeout, &remaining);\n }\n }\n\n TG_LOG(\"Finished sending # messages = %lu\\n\", messages.size());\n \n double maxLatency = 0;\n double latency = 0;\n for (long i=0; i<messages.size(); i++) {\n std::vector<timespec> times(messages[i].EpochTimes());\n latency = (double)(times.back().tv_sec - times.front().tv_sec);\n latency += ((double)(times.back().tv_nsec - times.front().tv_nsec)/1000000000.0f);\n if ( latency > maxLatency )\n maxLatency = latency;\n }\n\n TG_LOG(\"Max bits in UDP socket buffer: %d\\n\",\n\t interface->bufferSize*8);\n TG_LOG(\"Max message latency: %f seconds\\n\",\n\t maxLatency);\n\n Network::write_data(outputFile.c_str(), messages);\n}\n" }, { "alpha_fraction": 0.6357752084732056, "alphanum_fraction": 0.648573100566864, "avg_line_length": 25.55078125, "blob_id": "28c6d25c356f32937fd12cc9f81b497830eed139", "content_id": "23bfb64c2570ca35d61a102488d1f03f8998c54a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6798, "license_type": "no_license", "max_line_length": 106, "num_lines": 256, "path": "/src/analysis/v1.0/devs.py", "repo_name": "finger563/cbsat", 
"src_encoding": "UTF-8", "text": "'''\nDesign:\n * Data:\n \t* size\n \t* timestamp list\n \t* destination\n * Buffer:\n \t* list of data objects\n \t* output profile\n \t* size\n \t* max size ( > 0 => enforcement )\n * Event:\n \t* time\n \t* action:\n\t\t* data = inBuffer.popData()\n\t\t* data.timestamps.append(time)\n\t\t* eventList = outBuffer.pushData(data)\n\t* objects (at least one is required)\n\t\t* inBuffer\n\t\t* outBuffer\n'''\n\nfrom networkProfile import *\n\nEPSILON = 0.000001\n\nclass Data:\n\tdef __init__(self, size = 0, parent = None, interface = None):\n\t\tself.size = size\t\t\t# amount of data\n\t\tself.interface = interface \t# Interface to be sent out on\n\t\tself.parent = parent\t\t# parent process who sent the data (useful for graphs)\n\t\tself.times = []\t\t\t\t# timestamp list for each buffer entry/exit\n\t\tself.latency = 0\t\t\t# calculated after transmission := <sent buffer entry> - <app buffer entry>\n\t\treturn\n\n\tdef __repr__(self):\n\t\treturn \"Data()\"\n\n\tdef __str__(self):\n\t\tretStr = \"Data:\\n\"\n\t\tretStr += \"size = {0}\\n\".format(self.size)\n\t\tretStr += \"time stamps = {0}\\n\".format(self.times)\n\t\treturn retStr\n\n\tdef timeStamp(self,time):\n\t\tself.times.append(time)\n\t\tif len(self.times) > 0:\n\t\t\tself.latency = time - self.times[0]\n\t\treturn\n\nclass DataBuffer:\n\tdef __init__(self, capacity=0, unitSize=1, inProfile=[], outProfile=[], next=None, name=\"DataBuffer()\"):\n\t\tself.unitSize = unitSize\n\t\tself.capacity = capacity\n\t\tself.inProfile = inProfile\n\t\tself.outProfile = outProfile\n\t\tself.next = next\n\t\tself.name = name\n\t\tself.buffer = []\n\t\tself.size = 0\n\t\tself.maxSize = 0\n\t\tself.pushTime = 0\n\t\tself.popTime = self.getNextPopTime(0,self.unitSize)\n\t\treturn\n\n\tdef __repr__(self):\n\t\treturn self.name\n\n\tdef __str__(self):\n\t\tretStr = \"{0} (DataBuffer):\\n\\t[\".format(self.name)\n\t\tretStr += \"size = {0}\".format(self.size)\n\t\tretStr += \", max size = {0}\".format(self.maxSize)\n\t\tretStr += \", bits per unit = {0}\".format(self.unitSize)\n\t\tretStr += \"]\\n\"\n\t\treturn retStr\n\n\tdef addData(self,data):\n\t\tself.buffer.append(data)\n\t\tself.size += data.size\n\t\tif len(self.buffer) > 1:\n\t\t\tif self.size > self.maxSize:\n\t\t\t\tself.maxSize = self.size\n\t\treturn\n\n\tdef fillFromOutProfile(self):\n\t\tif self.outProfile != []:\n\t\t\tbufferedData = 0\n\t\t\ttotalData = self.outProfile[-1].data\n\t\t\tremainingData = totalData\n\t\t\twhile remainingData > 0:\n\t\t\t\tsize = 0\n\t\t\t\tif remainingData > self.unitSize:\n\t\t\t\t\tsize = self.unitSize\n\t\t\t\telse:\n\t\t\t\t\tsize = remainingData\n\t\t\t\tnewData = Data(size,parent=self.name)\n\t\t\t\tremainingData = remainingData - size\n\t\t\t\tself.addData(newData)\n\t\treturn\n\n\tdef getNextPopTime(self,time,size):\n\t\tnextTime = None\n\t\tif self.outProfile != []:\n\t\t\ti = 0\n\t\t\twhile i < len(self.outProfile) and self.outProfile[i].start <= time:\n\t\t\t\ti += 1\n\t\t\ti = i - 1\n\t\t\tsentData = 0\n\t\t\tif i > 0:\n\t\t\t\tsentData = self.outProfile[i-1].data \n\t\t\tsentData += int((self.outProfile[i].bandwidth*1.0)*(time*1.0 - self.outProfile[i].start*1.0))\n\t\t\tfinalData = sentData + size\n\t\t\t#print time,size,i,sentData,finalData,self.outProfile[-1].data,self.name\n\t\t\tif round(self.outProfile[-1].data - finalData) >= 0:\n\t\t\t\tif round(self.outProfile[i].data-finalData) >=0:\n\t\t\t\t\tnextTime = (size*1.0)/(self.outProfile[i].bandwidth*1.0) + time\n\t\t\t\telse:\n\t\t\t\t\twhile i < 
len(self.outProfile) and self.outProfile[i].data < finalData:\n\t\t\t\t\t\ti += 1\n\t\t\t\t\tremainingData = finalData - self.outProfile[i-1].data\n\t\t\t\t\tnextTime = self.outProfile[i].start + (remainingData*1.0)/(self.outProfile[i].bandwidth*1.0)\n\t\telse:\n\t\t\tnextTime = time\n\t\treturn nextTime\n\n\tdef popData(self,time):\n\t\tnextTime = None\n\t\tdata = None\n\t\tif self.size > 0:\n\t\t\tif time >= self.popTime:\n\t\t\t\tnextTime = self.getNextPopTime(time,self.buffer[0].size)\n\t\t\t\tself.popTime = nextTime\n\t\t\t\tdata = self.buffer.pop(0)\n\t\t\t\tself.size = self.size - data.size\n\t\t\telse:\n\t\t\t\tnextTime = self.popTime\n\t\treturn data, nextTime\n\n\tdef pushData(self,time,data):\n\t\tif self.inProfile == []:\n\t\t\tself.addData(data)\n\t\telse:\n\t\t\tif time < self.pushTime:\n\t\t\t\treturn\n\t\t\tif self.maxSize == 0 or self.maxSize >= (self.size + data.size):\n\t\t\t\tif self.unitSize > 0 and data.size > self.unitSize:\n\t\t\t\t\tbitsRemaining = data.size\n\t\t\t\t\twhile bitsRemaining > 0:\n\t\t\t\t\t\tnewData = copy.deepcopy(data)\n\t\t\t\t\t\tif bitsRemaining > self.unitSize:\n\t\t\t\t\t\t\tnewData.size = self.unitSize\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewData.size = bitsRemaining\n\t\t\t\t\t\tbitsRemaining = bitsRemaining - newData.size\n\t\t\t\t\t\tself.addData(newData)\n\t\t\t\telse:\n\t\t\t\t\tself.addData(data)\n\t\t\tfor i in range(0,len(self.inProfile)):\n\t\t\t\tif self.inProfile[i].start > time:\n\t\t\t\t\tbreak\n\t\t\ti = i - 1\n\t\t\tsentData = 0\n\t\t\tif i > 0:\n\t\t\t\tsentData = self.inProfile[i-1].data + self.inProfile[i].bandwidth*(time*1.0 - self.inProfile[i].start)\n\t\t\tfinalData = sentData + data.size\n\t\t\tif finalData < self.inProfile[i].data:\n\t\t\t\tself.pushTime = (data.size*1.0)/(self.inProfile[i].bandwidth*1.0) + time\n\t\t\telse:\n\t\t\t\tfor j in range(i,len(self.inProfile)):\n\t\t\t\t\tif self.inProfile[j].data > finalData:\n\t\t\t\t\t\tbreak\n\t\t\t\tj = j - 1\n\t\t\t\tremainingData = finalData - self.inProfile[j-1].data\n\t\t\t\tself.pushTime = self.inProfile[j].start + (remainingData*1.0)/(self.inProfile[j].bandwidth*1.0)\n\t\treturn\n\n\tdef maxLatency(self):\n\t\tlatency = 0\n\t\tfor d in self.buffer:\n\t\t\tif d.latency > latency:\n\t\t\t\tlatency = d.latency\n\t\treturn latency\n\n\tdef hasNoEvents(self):\n\t\treturn (self.size == 0 and self.next != None)\n\nclass Event:\n\tdef __init__(self, time=-1, inBuffer=[], outBuffer=[]):\n\t\tself.time = time\n\t\tself.inBuffer = inBuffer\n\t\tself.outBuffer = outBuffer\n\t\treturn\n\n\tdef __lt__(self, other):\n\t\treturn self.time < other.time\n\n\tdef __repr__(self):\n\t\treturn \"Event()\"\n\n\tdef __str__(self):\n\t\tretStr = \"Event:\\n\\t[\"\n\t\tretStr += \"time = {0}\".format(self.time)\n\t\tretStr += \", \"+repr(self.inBuffer)\n\t\tretStr += \", \"+repr(self.outBuffer)\n\t\tretStr += \"]\\n\"\n\t\treturn retStr\n\n\tdef action(self):\n\t\teventList = []\n\t\tdata, nextTime = self.inBuffer.popData(self.time)\n\t\tif nextTime != None:\n\t\t\tnewEvent = Event(nextTime,self.inBuffer,self.outBuffer)\n\t\t\teventList.append(newEvent)\n\t\tif data != None:\n\t\t\tdata.timeStamp(self.time)\n\t\t\tif self.outBuffer != None:\n\t\t\t\tif self.outBuffer.hasNoEvents():\n\t\t\t\t\tnewEvent = Event(self.time,self.outBuffer,self.outBuffer.next)\n\t\t\t\t\teventList.append(newEvent)\n\t\t\t\tself.outBuffer.pushData(self.time,data)\n\t\treturn eventList\n\nclass DEVS:\n\tdef __init__(self, buffers=[]):\n\t\tself.events = []\n\t\tself.buffers = buffers\n\t\treturn\n\n\tdef 
__repr__(self):\n\t\treturn \"DEVS()\"\n\n\tdef __str__(self):\n\t\tretStr = \"DEVS:\\n\"\n\t\tfor e in self.events:\n\t\t\tretStr += \"{0}\".format(e)\n\t\treturn retStr\n\n\tdef setup(self):\n\t\tfor b in self.buffers:\n\t\t\tif b.size > 0:\n\t\t\t\tnewEvent = Event(0,b,b.next)\n\t\t\t\tself.events.append(newEvent)\n\n\tdef step(self):\n\t\tevent = self.events.pop(0)\n\t\tnewEvents = event.action()\n\t\tif newEvents != None:\n\t\t\tfor e in newEvents:\n\t\t\t\tself.events.append(e)\n\t\t\tself.events = sorted(self.events)\n\t\treturn\n\n\tdef run(self):\n\t\twhile len(self.events) > 0:\n\t\t\tself.step()\n\t\treturn\n\n" }, { "alpha_fraction": 0.5920107960700989, "alphanum_fraction": 0.6048024892807007, "avg_line_length": 28.3157901763916, "blob_id": "5ac9cbda9addef1d5ff018341949a3ccc1e046f3", "content_id": "d56dec4eebd0d74fde0d43e1b7061801569618af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4456, "license_type": "no_license", "max_line_length": 100, "num_lines": 152, "path": "/src/middleware/v2.0/tcWrapper.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#include \"tcWrapper.hpp\"\n\n// Main function: loops forever-> sleep, setTC, sleep, setTC, ...\nint main(int argc, char **argv) {\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n std::string interface = options.interface;\n std::string parent = options.parent;\n std::string handle = options.handle;\n bool isRouter = options.isRouter;\n uint64_t buffer = options.buffer;\n uint64_t bucket = options.bucket;\n bool useTBF = options.useTBF;\n\n Network::NetworkProfile profile;\n std::string profileFile = options.profile;\n if ( profile.initializeFromFile(profileFile.c_str()) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize node profile!\\n\");\n return -1;\n }\n\n uint64_t bandwidth;\n double latency;\n timespec remainingTime, wakeTime;\n while ( true ) {\n if ( profile.getNextInterval( wakeTime, bandwidth, latency ) == 0 ) {\n TG_LOG(\"Sleeping until %lu.%09lu\\n\", wakeTime.tv_sec, wakeTime.tv_nsec);\n while ( clock_nanosleep( CLOCK_REALTIME, TIMER_ABSTIME, &wakeTime, &remainingTime ) == EINTR )\n\t{\n\t TG_LOG(\"WHO HAS AWOKEN ME FROM MY SLUMBER?!\\n\");\n\t}\n\n TG_LOG(\"Setting latency to %fs\\n\", latency);\n setTCLatency(latency, interface, \"1:1\", \"11:\");\n\n TG_LOG(\"Setting bandwidth to %d bps\\n\",bandwidth);\n\n if (bandwidth == 0)\n\tbandwidth = 10;\n\n uint64_t ceil_bandwidth = bandwidth;\n ceil_bandwidth = (uint64_t)((double)bandwidth * 1.01f);\n if ( ceil_bandwidth == bandwidth )\n\tceil_bandwidth++;\n\n if ( isRouter )\n\t{\n\t if ( useTBF )\n\t setTC(bandwidth, ceil_bandwidth, buffer, bucket, interface, parent, handle, useTBF);\n\t else\n\t {\n\t setTC(bandwidth, bandwidth, buffer, bucket, interface, parent, handle, useTBF);\n\t std::string sub_handle = parent + \"10\";\n\t setTC(bandwidth, bandwidth, buffer, bucket, interface, handle, sub_handle, useTBF, 0);\n\t sub_handle = parent + \"20\";\n\t setTC(10, bandwidth, buffer, bucket, interface, handle, sub_handle, useTBF, 1);\n\t }\n\t}\n else\n\t{\n\t if ( useTBF )\n\t setTC(bandwidth, ceil_bandwidth, buffer, bucket, interface, parent, handle, useTBF);\n\t else\n\t {\n\t setTC(bandwidth, bandwidth, buffer, bucket, interface, parent, handle, useTBF);\n\t std::string sub_handle = parent + \"10\";\n\t setTC(bandwidth, bandwidth, buffer, bucket, interface, handle, sub_handle, useTBF);\n\t }\n\t}\n }\n }\n}\n\n// Forks/Execs to call TC for setting 
HTB bandwidth\nvoid setTC( uint64_t bandwidth, uint64_t ceil, uint64_t buffer, uint64_t bucket,\n\t std::string interface, std::string parent, std::string handle, bool useTBF, int priority )\n{\n std::string tc_args;\n if ( useTBF )\n {\n tc_args = \"qdisc change \"\n\t\"dev \" + interface + \" \"\n\t\"parent \" + parent + \" \"\n\t\"handle \" + handle + \" tbf \"\n\t\"rate \" + std::to_string(bandwidth) + \"bit \"\n\t\"burst \" + std::to_string(bucket) + \"b \"\n\t\"limit \" + std::to_string(buffer) + \"k \";\n }\n else\n {\n tc_args = \"class change \"\n\t\"dev \" + interface + \" \"\n\t\"parent \" + parent + \" \"\n\t\"classid \" + handle + \" htb \"\n\t\"rate \" + std::to_string(bandwidth) + \"bit \"\n\t\"ceil \" + std::to_string(ceil) + \"bit \";\n if ( priority >= 0 )\n\ttc_args += \"prio \" + std::to_string(priority);\n }\n forkTC(tc_args);\n}\n\nvoid setTCLatency( double latency,\n\t\t std::string interface, std::string parent, std::string handle )\n{\n std::string tc_args;\n \n tc_args = \"qdisc change \"\n \"dev \" + interface + \" \"\n \"parent \" + parent + \" \"\n \"handle \" + handle + \" netem \"\n \"delay \" + std::to_string((uint64_t)(latency*1000)) + \"ms \";\n forkTC(tc_args);\n}\n\nvoid forkTC(std::string tc_args)\n{\n std::string tc_binary = \"/sbin/tc\";\n TG_LOG(\" cmd: %s\\n\", tc_args.c_str());\n // FORK\n pid_t parent_pid = getpid();\n pid_t my_pid = fork();\n if ( my_pid == -1 )\n {\n TG_LOG(\"ERROR: COULDN'T FORK\\n\");\n }\n else if ( my_pid == 0 ) // child\n {\n std::vector<std::string> string_args;\n string_args.push_back(tc_binary);\n std::string s;\n std::istringstream f(tc_args);\n while ( getline(f, s, ' ') )\n\t{\n\t string_args.push_back(s);\n\t}\n // build args\n char *args[string_args.size() + 1]; // must be NULL terminated\n args[string_args.size()] = NULL;\n for (int i=0; i < string_args.size(); i++)\n\t{\n\t args[i] = new char[string_args[i].length() + 1]; // +1 for the terminating null written by sprintf\n\t sprintf(args[i], \"%s\", string_args[i].c_str());\n\t}\n // EXECV\n execvp(args[0], args);\n TG_LOG(\"ERROR: EXEC COULDN'T COMPLETE\\n\");\n }\n}\n" }, { "alpha_fraction": 0.5888888835906982, "alphanum_fraction": 0.5888888835906982, "avg_line_length": 14, "blob_id": "4ccfc6f53b56c0d7e81512b3f79e3cb5867fa257", "content_id": "0a5d9c66655de9bfd7dd4ba559d4698af1e8c759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 90, "license_type": "no_license", "max_line_length": 28, "num_lines": 6, "path": "/docs/_sources/python-api/analysis/generate-tdma.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Generate TDMA\n=============\n\n.. automodule:: generateTDMA\n :members:\n :undoc-members:\n" }, { "alpha_fraction": 0.6937229633331299, "alphanum_fraction": 0.7002164721488953, "avg_line_length": 21.418603897094727, "blob_id": "00b5eb2c66832f641f3be60ee0dbb359e1d3e025", "content_id": "13c9e4f328ed18544f8bec46f190cde3636678c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 924, "license_type": "no_license", "max_line_length": 99, "num_lines": 43, "path": "/docs/_sources/users.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": ".. 
_users:\n\nUsing the Code\n==============\n\n**THIS SECTION IS NOT COMPLETE**\n\nExplain here how to use the code with specific references to the API\nand give an example to help people set up an example system and\nanalyze it.\n\nThe Analysis Tool\n-----------------\n\nThe analysis tool is a Python library which implements both\nthe Network Calculus and MAReN techniques described\n:ref:`above<network-performance-analysis>`.\n\nInstallation\n^^^^^^^^^^^^\n\n0. Install tabulate for nice formatting of the output:\n\n ``sudo pip install tabulate``\n\n1. Download the analysis tool from the `CBSAT repo <https://github.com/finger563/cbsat/releases>`_.\n\n\nThe Middleware\n--------------\n\n.. note:: The middleware is C++ and supports Linux.\n\nCompilation\n^^^^^^^^^^^\n\n1. To build the client and server test of the middleware, run from a terminal:\n\n ``make``\n\n\nCongratulations! The set-up of the analysis tool and the middleware\nis complete! \n" }, { "alpha_fraction": 0.6343216300010681, "alphanum_fraction": 0.6379155516624451, "avg_line_length": 23.711111068725586, "blob_id": "f67a7dae9106ae650c3ca51d2e4a459564df66f7", "content_id": "b402927f3826a1fabc335280ae7e778c54b20f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 185, "num_lines": 45, "path": "/doc/api/middleware/network-buffer.rst", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Network Buffer\n==============\n\n.. cpp:class:: NetworkBuffer\n\n A network buffer holds a queue of :cpp:class:`Message` objects awaiting transmission. It tracks its current size, the maximum size it has reached, and a configurable capacity; a capacity greater than zero enables buffer-size enforcement.\n\n .. method:: MaxSize ( )\n\n Return the maximum size that the buffer has reached.\n\n :rtype: long\n\n .. method:: Size ( )\n\n Return the current size of the buffer.\n\n :rtype: long\n\n .. method:: Capacity ( )\n\n Return the capacity of the buffer.\n\n :rtype: long\n\n .. method:: Capacity ( _capacity )\n\n Set the capacity of the buffer.\n\n :param in long _capacity: new capacity for the buffer\n :rtype: void\n\n .. method:: Push ( data )\n\n Add data to the buffer if :math:`data.size < (capacity - size)`. Return 0 on success, -1 on failure.\n\n :param in Message* data: message to be added to the buffer\n :rtype: int\n\n .. 
method:: Pop (data)\n\n Returns 0 for successful data retrieval from the buffer, -1 otherwise.\n\n :param in-out Message*& data: Message pointer to data retrieved\n :rtype: int\n\n" }, { "alpha_fraction": 0.40909090638160706, "alphanum_fraction": 0.40909090638160706, "avg_line_length": 10, "blob_id": "1a971bcb8215c4063737872f295b0047aedc56bc", "content_id": "364f44c1268cfa7bbc4b0314865be8855ad5edb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 22, "license_type": "no_license", "max_line_length": 10, "num_lines": 2, "path": "/docs/_sources/python-api/middleware/tc-wrapper.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "TC Wrapper\n==========\n" }, { "alpha_fraction": 0.5473182201385498, "alphanum_fraction": 0.5579261183738708, "avg_line_length": 36.28888702392578, "blob_id": "3fb521fe7c13339299e82125286bac8506825838", "content_id": "c4c6b20f8c085c7288244b24a0d36573e82750de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8390, "license_type": "no_license", "max_line_length": 203, "num_lines": 225, "path": "/src/analysis/v1.0/qosEnforcement.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"\nThis program is designed to mimic the behavior\nof the network QOS profile enforcement for application\nactors. It allows you to configure the number of:\n * server threads (pulling from queues)\n * client threads (pushing into queues)\n * number of clients\n * number of network interfaces\n\"\"\" \n\nfrom networkProfile import *\nfrom nodeModel import *\nfrom devs import *\n\nimport sys\n\nclass Options:\n def __init__(self,\n period=(90*60), \n periods = 1, \n node = '', \n interface = '',\n plot = False, \n log = 'qosEnforcement.log',\n output = 'output.csv',\n redirect = False,\n fundamental_unit_size = 1000\n ):\n self.period = period\n self.periods = periods\n self.node = node\n self.interface = interface\n self.plot = plot\n self.log_filename = log\n self.output_filename = output\n self.redirect_to_file = redirect\n self.fundamental_unit_size = fundamental_unit_size\n return\n\n def __repr__(self):\n return \"Options()\"\n\n def __str__(self):\n retStr = \"Options():\\n\"\n retStr += \"\\tPeriod:\\t\\t{0} s\\n\".format(self.period)\n retStr += \"\\tNum Periods:\\t{0}\\n\".format(self.periods)\n retStr += \"\\tNode:\\t\\t{0}\\n\".format(self.node)\n retStr += \"\\tInterface:\\t{0}\\n\".format(self.interface)\n retStr += \"\\tPlot?:\\t\\t{0}\\n\".format(self.plot)\n retStr += \"\\tLog file:\\t{0}\\n\".format(self.log_filename)\n retStr += \"\\tOutput File:\\t{0}\\n\".format(self.output_filename)\n retStr += \"\\tRedirect?:\\t{0}\\n\".format(self.redirect_to_file)\n retStr += \"\\tUnit size:\\t{0} b\\n\".format(self.fundamental_unit_size)\n return retStr\n\n def parse_args(self,args):\n argind = 1\n while argind < len(args):\n if args[argind] == \"-P\":\n self.period = int(args[argind+1])\n if self.period <= 0:\n print \"Error! You must specify a time period > 0\"\n return -1\n argind += 2\n elif args[argind] == \"-n\":\n self.periods = int(args[argind+1])\n if self.periods <= 0:\n print \"Error! 
You must specify a number of periods > 0\"\n                    return -1\n                argind += 2\n            elif args[argind] == \"-N\":\n                self.node = args[argind+1]\n                argind += 2\n            elif args[argind] == \"-I\":\n                self.interface = args[argind+1]\n                argind += 2\n            elif args[argind] == \"-S\":\n                self.fundamental_unit_size = int(args[argind+1])\n                if self.fundamental_unit_size < 1:\n                    print \"Error! You must specify a fundamental unit size > 0\"\n                    return -1\n                argind += 2\n            elif args[argind] == \"-p\":\n                self.plot = True\n                try:\n                    import matplotlib.pyplot as plt\n                except ImportError:\n                    print \"Error! Matplotlib not found; cannot plot!\"\n                    return -1\n                argind += 1\n            elif args[argind] == \"-O\":\n                self.output_filename = args[argind+1]\n                argind += 2\n            elif args[argind] == \"-L\":\n                self.log_filename = args[argind+1]\n                argind += 2\n            elif args[argind] == \"-r\":\n                self.redirect_to_file = True\n                argind += 1\n            elif args[argind] == \"-?\" or args[argind] == \"-h\" or args[argind] == \"--help\":\n                print \"Usage:\\n\\tpython \",args[0],\"\"\"\n                \\t\\t-N <(N)ode name>\n                \\t\\t-I <node (I)nterface name>\n                \\t\\t-P <(P)eriod in seconds>\n                \\t\\t-S <fundamental unit (S)ize>\n                \\t\\t-O <(O)utput file name>\n                \\t\\t-L <program (L)og filename>\n                \\t\\t-r ((r)edirect program output to log file)\n                \\t\\t-n <(n)umber of periods to analyze>\n                \\t\\t-p ((p)lot the output)\\n\"\"\"\n                return -1\n            else:\n                print \"\"\"Usage:\\n\\t\"\"\",args[0],\"\"\"\n                \\t\\t-N <(N)ode name>\n                \\t\\t-I <node (I)nterface name>\n                \\t\\t-P <(P)eriod in seconds>\n                \\t\\t-S <fundamental unit (S)ize>\n                \\t\\t-O <(O)utput file name>\n                \\t\\t-L <program (L)og filename>\n                \\t\\t-r ((r)edirect program output to log file)\n                \\t\\t-n <(n)umber of periods to analyze>\n                \\t\\t-p ((p)lot the output)\\n\"\"\"\n                return -1\n        return 0\n\ndef main():\n\n    options = Options()\n\n    if options.parse_args(sys.argv):\n        return -1\n\n    if options.redirect_to_file == True:\n        sys.stdout = open(options.log_filename, \"w\")\n\n    nodes = get_nodeProfiles('scripts')\n    if nodes == {}:\n        return -1\n    apps = get_appProfiles('profiles')\n    if apps == {}:\n        return -1\n    app_node_map = get_app_node_map(nodes,apps)\n    networkProfile = NetworkProfile(options.period)\n    for node,profile in nodes.iteritems():\n        nodeProfile = NodeProfile(options.period,options.periods)\n        nodeProfile.addProvidedProfile(profile)\n        if node in app_node_map.keys():\n            for app in app_node_map[node]:\n                if \",\" in apps[app]:\n                    nodeProfile.addRequiredProfile(apps[app])\n        networkProfile.addNodeProfile(node,nodeProfile)\n    networkProfile.calcData()\n\n    if options.node == '':\n        options.node=nodes.keys()[0]\n    if options.node not in nodes:\n        print 'ERROR: node {0} not found in system!'.format(options.node)\n        return -1\n\n    if options.interface == '':\n        if len(networkProfile.nodeProfiles[options.node].interfaces) > 0:\n            options.interface = networkProfile.nodeProfiles[options.node].interfaces[0]\n        else:\n            print 'ERROR: node {0} has no interfaces that can be analyzed!'.format(options.node)\n            return -1\n    if options.interface not in networkProfile.nodeProfiles[options.node].interfaces:\n        print 'ERROR: node {0} has no interface named {1}!'.format(options.node,options.interface)\n        return -1\n\n    print \"{0}\".format(options)\n\n    if networkProfile.convolve(options.node,options.interface) == -1:\n        print 'Node {0} cannot be analyzed for interface {1}: no usable profile'.format(options.node,options.interface)\n\n    buff = networkProfile.nodeProfiles[options.node].buffer\n    print \"\\n[Time location, buffersize]: [{0}, {1}]\\n\".format(buff[0],buff[2])\n\n    delay = 
networkProfile.nodeProfiles[options.node].delay\n print \"[Time location, delay]: [{0}, {1}]\\n\".format(delay[0],delay[2])\n\n if options.plot == True:\n networkProfile.nodeProfiles[options.node].plotBandwidth()\n networkProfile.nodeProfiles[options.node].plotData()\n\n buffers = []\n\n finalBuffer = DataBuffer(unitSize=options.fundamental_unit_size,name=\"Final Buffer\")\n buffers.append(finalBuffer)\n\n intfBuffer = DataBuffer( outProfile=networkProfile.nodeProfiles[options.node].getProvidedProfile(options.interface), unitSize=options.fundamental_unit_size, next=buffers[0], name=\"Interface Buffer\" )\n buffers.append(intfBuffer)\n\n app_id = 0\n for a in networkProfile.nodeProfiles[options.node].apps:\n newBuffer = DataBuffer( outProfile=a, unitSize=options.fundamental_unit_size, next=buffers[1], name=\"App {0}\".format(app_id) )\n app_id += 1\n newBuffer.fillFromOutProfile()\n buffers.append(newBuffer)\n\n devs = DEVS(buffers)\n devs.setup()\n devs.run()\n\n with open(options.output_filename,\"w\") as f:\n for d in buffers[0].buffer:\n f.write(\"{0}\".format(d.size))\n for t in d.times:\n f.write(\",{0}\".format(t))\n f.write(\"\\n\")\n\n print \"Latency:\"\n print \"\\t{0} seconds\".format(finalBuffer.maxLatency())\n print \"Difference between simulation and calculation latencies:\"\n print \"\\t{0} seconds\\n\".format(abs(finalBuffer.maxLatency()-delay[2]))\n\n print \"Max Buffer Size:\"\n print \"\\t{0} bits\".format(intfBuffer.maxSize)\n print \"Difference between simulation and calculation max buffer sizes:\"\n print \"\\t{0} bits\\n\".format(abs(intfBuffer.maxSize - buff[2]))\n\n return\n \nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5412942171096802, "alphanum_fraction": 0.5541651844978333, "avg_line_length": 21.92622947692871, "blob_id": "6d7b295ff3f4387336f45caaaea329460849ee75", "content_id": "0473e7613888464192f310851c512a42eb64ede4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2797, "license_type": "no_license", "max_line_length": 108, "num_lines": 122, "path": "/src/middleware/v2.0/tcWrapper.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef TCWRAPPER_HPP\n#define TCWRAPPER_HPP\n\n#include <unistd.h>\n#include <sys/types.h>\n#include <time.h>\n#include <errno.h>\n\n#include <math.h>\n#include <string>\n#include <sstream>\n\n#include \"NetworkProfile.hpp\"\n#include \"log_macro.hpp\"\n\nvoid setTCLatency( double latency,\n\t\t std::string interface, std::string parent, std::string handle );\n\nvoid setTC( uint64_t bandwidth, uint64_t ceil, uint64_t buffer, uint64_t bucket,\n\t std::string interface, std::string parent, std::string handle, bool useTBF = true, int priority = -1 );\n\nvoid forkTC( std::string tc_args );\n\nclass Options {\npublic:\n std::string interface;\n std::string parent;\n std::string handle;\n std::string profile;\n bool isRouter;\n bool useTBF;\n uint64_t buffer;\n uint64_t bucket;\n\n Options() {\n interface = \"eth0\";\n parent = \"1:\";\n handle = \"1:1\";\n profile = \"node_profile.csv\";\n isRouter = false;\n useTBF = true;\n buffer = 10000;\n bucket = 100;\n }\n\n int Parse(int argc, char **argv) {\n \n for (int i=0; i < argc; i++)\n {\n\tif (!strcmp(argv[i], \"--profile\"))\n\t {\n\t profile = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--buffer\"))\n\t {\n\t buffer = atoi(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--bucket\"))\n\t {\n\t bucket = atoi(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--use_tbf\"))\n\t {\n\t 
useTBF = true;\n\t }\n\telse if (!strcmp(argv[i], \"--use_htb\"))\n\t {\n\t useTBF = false;\n\t }\n\telse if (!strcmp(argv[i], \"--is_router\"))\n\t {\n\t isRouter = true;\n\t }\n\telse if (!strcmp(argv[i], \"--interface\"))\n\t {\n\t interface = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--parent\"))\n\t {\n\t parent = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--handle\"))\n\t {\n\t handle = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--help\"))\n\t {\n\t TG_LOG(\"usage: \\n\\t%s\\n\"\n\t\t \"\\t\\t --profile <profile name>\\n\"\n\t\t \"\\t\\t --is_router (this node is a router node)\\n\"\n\t\t \"\\t\\t --use_tbf (TC filter is TBF)\\n\"\n\t\t \"\\t\\t --use_htb (TC filter is HTB)\\n\"\n\t\t \"\\t\\t --buffer <buffer size>\\n\"\n\t\t \"\\t\\t --bucket <bucket size>\\n\"\n\t\t \"\\t\\t --interface <interface name>\\n\"\n\t\t \"\\t\\t --parent <parent TC object>\\n\"\n\t\t \"\\t\\t --handle <handle TC object>\\n\"\n\t\t ,argv[0]);\n\t return -1;\n\t }\n }\n return 0;\n }\n \n void Print() {\n TG_LOG(\"Options():\\n\");\n TG_LOG(\"\\t profile name\\t\\t: %s\\n\", profile.c_str());\n TG_LOG(\"\\t is router?\\t\\t: %d\\n\", isRouter);\n TG_LOG(\"\\t use tbf?\\t\\t: %d\\n\", useTBF);\n TG_LOG(\"\\t buffer\\t\\t\\t: %d\\n\", buffer);\n TG_LOG(\"\\t bucket\\t\\t\\t: %d\\n\", bucket);\n TG_LOG(\"\\t interface name\\t\\t: %s\\n\", interface.c_str());\n TG_LOG(\"\\t parent name\\t\\t: %s\\n\", parent.c_str());\n TG_LOG(\"\\t handle name\\t\\t: %s\\n\", handle.c_str());\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5129683017730713, "alphanum_fraction": 0.5398654937744141, "avg_line_length": 21.467626571655273, "blob_id": "cde4ed3b156b8d4c1c1096a10d738c28b8cb8b86", "content_id": "4e5942194396b1c5aea932fb61923a2370af4c89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3123, "license_type": "no_license", "max_line_length": 67, "num_lines": 139, "path": "/src/middleware/v1.0/Client.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef CLIENT_HPP\n#define CLIENT_HPP\n\n#include <math.h>\n#include <queue>\n\n#include \"log_macro.hpp\"\n#include \"ConnectionSubsys.hpp\"\n#include \"NetworkMiddleware.hpp\"\n#include \"Message.hpp\"\n#include \"NetworkProfile.hpp\"\n\n#include <string>\n\nvoid *sendFunc(std::string data);\nvoid labelMessage(long index);\nint write_data(std::string fname);\nint append_data(std::string fname, Message* data);\n\n\nclass Options {\npublic:\n char ip[256];\n long port;\n long bitLength;\n double runTime;\n int numPeriods;\n char tgFile[256];\n char bufferFile[256];\n char outputFile[256];\n\n Options() {\n this->port = 7777;\n this->bitLength = 4096;\n this->runTime = -1;\n this->numPeriods = 1;\n sprintf(this->ip,\"2001:470:489e::3\");\n sprintf(this->outputFile,\"clientOutput.csv\");\n sprintf(this->tgFile, \"./tg_profile.csv\");\n sprintf(this->bufferFile, \"./namek_crm_config.csv\" );\n }\n\n int Parse(int argc, char **argv) {\n \n if ( argc < 2 )\n return 0;\n int c;\n char str[256];\n sprintf(str,\"%s\",argv[1]);\n if ( argc > 2 ) {\n for (int i=2;i<argc;i++) {\n\tsprintf(str,\"%s %s\",str,argv[i]);\n }\n }\n char *p = strtok(str,\"-\");\n while (p != 0) {\n switch (p[0])\n\t{\n\tcase 'P':\n\t for (int i=0;i<=strlen(p+2);i++) {\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t sprintf(this->tgFile,\"%s\",p+2);\n\t break;\n\tcase 'B':\n\t for (int i=0;i<=strlen(p+2);i++) 
{\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t sprintf(this->bufferFile,\"%s\",p+2);\n\t break;\n\tcase 'o':\n\t for (int i=0;i<=strlen(p+2);i++) {\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t sprintf(this->outputFile,\"%s\",p+2);\n\t break;\n\tcase 'i':\n\t for (int i=0;i<=strlen(p+2);i++) {\n\t if ( (p+2)[i] == ' ' ) {\n\t (p+2)[i] = 0;\n\t break;\n\t }\n\t }\n\t sprintf(this->ip,\"%s\",p+2);\n\t break;\n\tcase 'p':\n\t this->port = atoi(p+2);\n\t break;\n\tcase 'b':\n\t this->bitLength = atoi(p+2);\n\t break;\n\tcase 'N':\n\t this->numPeriods = atoi(p+2);\n\t break;\n\tcase 'T':\n\t this->runTime = atof(p+2);\n\t break;\n\tcase '?':\n\tdefault:\n\t TG_LOG(\"usage: \\n\\t%s\\n\"\n\t\t \"\\t\\t -P <TG profile filename>\\n\"\n\t\t \"\\t\\t -B <buffer profile filename>\\n\"\n\t\t \"\\t\\t -N <number of periods to run>\\n\"\n\t\t \"\\t\\t -T <length of time to run>\\n\"\n\t\t \"\\t\\t -o <output file filename>\\n\"\n\t\t \"\\t\\t -i <ipv6 address of server>\\n\"\n\t\t \"\\t\\t -p <port number of server>\\n\"\n\t\t \"\\t\\t -b <# bits in message>\\n\"\n\t\t ,argv[0]);\n\t return -1;\n\t}\n p = strtok(NULL,\"-\");\n }\n return 0;\n }\n \n void Print() {\n TG_LOG(\"Options():\\n\");\n TG_LOG(\"\\t tg profile filename\\t\\t: %s\\n\",this->tgFile);\n TG_LOG(\"\\t buffer profile filename\\t: %s\\n\",this->bufferFile);\n TG_LOG(\"\\t number of periods to run\\t: %u\\n\",this->numPeriods);\n TG_LOG(\"\\t length of time to run\\t\\t: %f\\n\",this->runTime);\n TG_LOG(\"\\t output filename\\t\\t: %s\\n\",this->outputFile);\n TG_LOG(\"\\t server ipv6 address\\t\\t: %s\\n\",this->ip);\n TG_LOG(\"\\t server port number\\t\\t: %lu\\n\",this->port);\n TG_LOG(\"\\t bits in message\\t\\t: %lu\\n\",this->bitLength);\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5927190184593201, "alphanum_fraction": 0.604522168636322, "avg_line_length": 27.299999237060547, "blob_id": "cf19b084bfe833416ba2b7ba7852b5612e950488", "content_id": "948095fa9a91183ed2b2dabb0f58cb4a71961ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7032, "license_type": "no_license", "max_line_length": 89, "num_lines": 240, "path": "/src/middleware/v2.0/NetworkMiddleware.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_MIDDLEWARE_HPP\r\n#define NETWORK_MIDDLEWARE_HPP\r\n\r\n#include <stdlib.h>\r\n#include <unistd.h>\r\n#include <stdio.h>\r\n#include <signal.h>\r\n#include <time.h>\r\n\r\n#include <pthread.h>\r\n\r\n#include <vector>\r\n#include <string>\r\n\r\n#include <map>\r\n\r\n#include \"log_macro.hpp\"\r\n#include \"NetworkBuffer.hpp\"\r\n#include \"NetworkProfile.hpp\"\r\n#include \"NetworkMiddleware.hpp\"\r\n\r\nnamespace NetworkMiddleware {\r\n\r\n static NetworkBuffer buffer;\r\n NetworkProfile profile;\r\n timespec nextSendTime;\r\n\r\n static pthread_t threadSend;\r\n static bool threadSendDie;\r\n static pthread_cond_t threadSendCV;\r\n static pthread_mutex_t threadSendMutex;\r\n\r\n static pthread_t threadRecv;\r\n static bool threadRecvDie;\r\n static pthread_cond_t threadRecvCV;\r\n static pthread_mutex_t threadRecvMutex;\r\n\r\n static uint64_t new_conn_id = 1;\r\n static std::map<uint64_t,IPV6_Connection*> data_conns;\r\n static std::map<uint64_t,IPV6_Connection*> oob_conns;\r\n \r\n void *dataRecvThread(void * arg){\r\n }\r\n\r\n void *dataSendThread(void * arg){\r\n Message* data;\r\n bool sendData = false;\r\n double timeDiff = 0;\r\n timespec currentTime, condTimeout, 
sleepTime, remainingTime;\r\n sleepTime.tv_sec = 0;\r\n sleepTime.tv_nsec = 0;\r\n\r\n while (!threadSendDie) {\r\n if ( timeDiff > 0 ) {\r\n nanosleep(&sleepTime,&remainingTime);\r\n\ttimeDiff = 0;\r\n }\r\n clock_gettime(CLOCK_REALTIME,&currentTime);\r\n condTimeout = currentTime;\r\n condTimeout.tv_sec += 1;\r\n pthread_mutex_lock(&threadSendMutex);\r\n if ( buffer.Size() == 0 ) {\r\n pthread_cond_timedwait(&threadSendCV,&threadSendMutex,&condTimeout);\t\r\n }\r\n if ( buffer.Pop(data) == 0 ) {\r\n sendData = true;\r\n }\r\n pthread_mutex_unlock(&threadSendMutex);\r\n if ( sendData && data != NULL ) {\r\n\tdata->TimeStamp();\r\n\tint retVal =\r\n\t data_conns[data->connection_id]->send(data->Buffer().c_str(),\r\n\t\t\t\t\t\tdata->Bytes());\r\n\tif ( retVal <= 0 )\r\n\t TG_LOG(\"Couldn't send message %lu on connection %lu\\n\",\r\n\t\t data->Id(),\r\n\t\t data->connection_id);\r\n timeDiff = profile.Delay(data->Bits(),data->LastEpochTime());\r\n\tdouble fractpart,intpart;\r\n\tfractpart = modf(timeDiff,&intpart);\r\n sleepTime.tv_sec = (unsigned long long)(intpart);\r\n sleepTime.tv_nsec = (unsigned long)(fractpart*1000000000.0);\r\n sendData = false;\r\n }\r\n }\r\n TG_LOG(\"Buffer send thread exiting!\\n\");\r\n pthread_exit(NULL);\r\n }\r\n\r\n int Init()\r\n {\r\n // CREATE OOB RECV THREAD HERE\r\n // CREATE DATA RECV THREAD HERE\r\n // CREATE DATA SEND THREAD HERE ( ONE BUFFER OR WHAT?)\r\n }\r\n\r\n int InitClient( NetworkProfile p,\r\n\t\t std::string serverIP,\r\n\t\t int serverPort,\r\n\t\t long capacity = 0 ) {\r\n uint64_t conn_id = new_conn_id++;\r\n data_conns[conn_id] = new IPV6_Connection();\r\n data_conns[conn_id]->serverIP = serverIP;\r\n data_conns[conn_id]->serverPort = serverPort;\r\n if ( data_conns[conn_id]->Initialize(false,false) != 0 )\r\n {\r\n\tTG_LOG(\"ERROR:: could't initialize data interface to %s : %d\",\r\n\t data_conns[conn_id]->serverIP.c_str(),\r\n\t data_conns[conn_id]->serverPort);\r\n\treturn -1;\r\n }\r\n\r\n oob_conns[conn_id] = new IPV6_Connection();\r\n oob_conns[conn_id]->serverIP = serverIP;\r\n oob_conns[conn_id]->serverPort = serverPort+1;\r\n if ( oob_conns[conn_id]->Initialize(false,false) != 0 )\r\n {\r\n\tTG_LOG(\"ERROR:: could't initialize oob interface to %s : %d\",\r\n\t data_conns[conn_id]->serverIP.c_str(),\r\n\t data_conns[conn_id]->serverPort);\r\n\treturn -1;\r\n }\r\n\r\n nextSendTime.tv_sec = 0;\r\n nextSendTime.tv_nsec = 0;\r\n buffer.Capacity(capacity);\r\n profile = p;\r\n if (!profile.Initialized()) {\r\n TG_LOG(\"WARNING: couldn't initialize buffer profile!\\n\");\r\n TG_LOG(\"\\tActing as a pass-through buffer!\\n\");\r\n new_conn_id--;\r\n data_conns.erase(new_conn_id);\r\n return 0;\r\n }\r\n else {\r\n threadSendDie = false;\r\n pthread_mutex_init (&threadSendMutex, NULL);\r\n pthread_cond_init (&threadSendCV, NULL);\r\n pthread_create(&threadSend, NULL, NetworkMiddleware::dataSendThread, (void *)NULL);\r\n TG_LOG(\"Created client MW thread %lu\\n\",\r\n\t threadSend);\r\n return conn_id;\r\n }\r\n }\r\n\r\n int InitServer( NetworkProfile p, long capacity = 0 ) {\r\n buffer.Capacity(capacity);\r\n profile = p;\r\n if (!profile.Initialized()) {\r\n TG_LOG(\"WARNING: couldn't initialize buffer profile!\\n\");\r\n TG_LOG(\"\\tActing as a pass-through buffer!\\n\");\r\n }\r\n else {\r\n threadRecvDie = false;\r\n pthread_mutex_init (&threadRecvMutex, NULL);\r\n pthread_cond_init (&threadRecvCV, NULL);\r\n pthread_create(&threadRecv, NULL, NetworkMiddleware::dataRecvThread, (void *)NULL);\r\n TG_LOG(\"Created server MW 
thread %lu\\n\",\r\n\t threadRecv);\r\n }\r\n return 0;\r\n }\r\n\r\n int Exit() {\r\n if ( profile.Initialized() == true ) {\r\n threadSendDie = true;\r\n threadRecvDie = true;\r\n int retVal;\r\n\r\n TG_LOG(\"Joining thread %lu\\n\",\r\n\t threadSend);\r\n pthread_join(threadSend,(void **)&retVal);\r\n TG_LOG(\"exited join thread!\\n\");\r\n pthread_mutex_destroy(&threadSendMutex);\r\n pthread_cond_destroy(&threadSendCV);\r\n\r\n TG_LOG(\"Joining thread %lu\\n\",\r\n\t threadRecv);\r\n pthread_join(threadRecv,(void **)&retVal);\r\n TG_LOG(\"exited join thread!\\n\");\r\n pthread_mutex_destroy(&threadRecvMutex);\r\n pthread_cond_destroy(&threadRecvCV);\r\n\r\n std::map<uint64_t,IPV6_Connection*>::iterator it;\r\n for (it=data_conns.begin();it!=data_conns.end();it++)\r\n\t{\r\n\t if (it->second != NULL) {\r\n\t it->second->Close();\r\n\t delete it->second;\r\n\t }\r\n\t}\r\n for (it=oob_conns.begin();it!=oob_conns.end();it++)\r\n\t{\r\n\t if (it->second != NULL) {\r\n\t it->second->Close();\r\n\t delete it->second;\r\n\t }\r\n\t}\r\n }\r\n }\r\n\r\n int send(Message* data) {\r\n if ( data == NULL )\r\n return -1;\r\n int retVal = -1;\r\n if ( profile.Initialized() == true ) {\r\n //timespec currentTime;\r\n double timeDiff = 0;\r\n\r\n pthread_mutex_lock(&threadSendMutex);\r\n if ( buffer.Size() == 0 )\r\n\tpthread_cond_signal(&threadSendCV);\r\n retVal = buffer.Push(data);\r\n pthread_mutex_unlock(&threadSendMutex);\r\n \r\n timeDiff = profile.Delay(data->Bits(),data->FirstEpochTime());\r\n double fractpart,intpart;\r\n fractpart = modf(timeDiff,&intpart);\r\n clock_gettime(CLOCK_REALTIME,&nextSendTime);\r\n nextSendTime.tv_sec += (unsigned long long)(intpart);\r\n nextSendTime.tv_nsec += (unsigned long)(fractpart*1000000000.0);\r\n if ( nextSendTime.tv_nsec > 999999999 ) {\r\n\tnextSendTime.tv_sec += 1;\r\n\tnextSendTime.tv_nsec = nextSendTime.tv_nsec - 1000000000;\r\n }\r\n }\r\n else { \r\n int retVal =\r\n\tdata_conns[data->connection_id]->send(data->Buffer().c_str(),\r\n\t\t\t\t\t data->Bytes());\r\n if ( retVal <= 0 )\r\n\tTG_LOG(\"Couldn't send message %lu on connection %lu\\n\",\r\n\t data->Id(),\r\n\t data->connection_id);\r\n }\r\n return retVal;\r\n }\r\n}\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.5104515552520752, "alphanum_fraction": 0.5199898481369019, "avg_line_length": 37.3463020324707, "blob_id": "38974bf433d66197b8427f5a8d679e6de03ed39a", "content_id": "2154baa050ab5a35792718cae6d853d68f41fd0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19710, "license_type": "no_license", "max_line_length": 169, "num_lines": 514, "path": "/src/analysis/v1.0/networkProfile.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "\nimport sys, os, csv, copy, glob\nfrom acceptancemathlib import *\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n pass\n\nPLOT_WIDTH = 4 # line width for plots\nFONT_SIZE = 25 # font size for plots\n\nclass ProfileEntry:\n def __init__(self,start=0,end=0,bandwidth=0,data=0,interface='none',ptype='none'):\n self.start = start\n self.end = end\n self.bandwidth = bandwidth\n self.data = data\n self.interface = interface\n self.ptype = ptype\n\n def __lt__(self, other):\n return self.start < other.start\n\n def __repr__(self):\n return \"ProfileEntry()\"\n \n def __str__(self):\n return \"{0},{1},{2},{3},{4},{5}\".format(self.start,self.end,self.bandwidth,self.data,self.interface,self.ptype)\n\nclass NodeProfile:\n def __init__(self,period,num_periods):\n self.profile = 
[]\n self.required = []\n self.apps = []\n self.provided = []\n self.link = []\n self.period = period\n self.num_periods = num_periods\n self.buffer = [0,0,0]\n self.delay = [0,0,0]\n self.interfaces = []\n\n def getProvidedProfile(self,interface):\n retProfile = []\n for e in self.provided:\n if e.interface == interface:\n retProfile.append(e)\n return retProfile\n\n def addProvidedProfile(self,profile):\n p = profile.split('\\n')\n self.provided = []\n if p == None or profile == '':\n return\n for line in p:\n entry = get_entry_from_line(line)\n if entry != None:\n entry.ptype = 'provided'\n self.provided.append(entry)\n if len(self.provided) == 0:\n return\n for i in range(0,len(self.provided)-1):\n if self.provided[i].interface not in self.interfaces:\n self.interfaces.append(self.provided[i].interface)\n if self.provided[i].interface == self.provided[i+1].interface:\n self.provided[i].end = self.provided[i+1].start\n else:\n self.provided[i].end = self.period\n self.provided[-1].end = self.period \n self.provided = sorted(self.provided)\n for intf in self.interfaces:\n prof = self.getProvidedProfile(intf)\n if prof[0].start > 0:\n entry = ProfileEntry()\n entry.start = 0\n entry.end = prof[0].start\n entry.ptype = 'provided'\n entry.interface = intf\n self.provided.insert(0,entry)\n\n originalProvided = copy.deepcopy(self.provided)\n pData = {}\n for intf in self.interfaces:\n prof = self.getProvidedProfile(intf)\n pData[intf] = prof[-1].data\n for i in range(1,self.num_periods):\n tmpProvided = copy.deepcopy(originalProvided)\n for e in tmpProvided:\n e.data += pData[e.interface]\n e.start += self.period*i\n e.end += self.period*i\n self.provided.append(e)\n for data in pData:\n data += data\n return\n\n def addRequiredEntry(self, entry):\n if self.required == [] or entry.start >= self.required[-1].end:\n self.required.append(entry)\n elif entry.start > self.required[-1].start:\n entry.bandwidth += self.required[-1].bandwidth\n self.required[-1].end = entry.start\n self.required.append(entry)\n elif entry.end < self.required[0].start:\n self.required.insert(0,entry)\n else:\n for i in range(0,len(self.required)):\n if entry.start <= self.required[i].start:\n endTime = entry.end\n addedBW = entry.bandwidth\n if i != 0:\n self.required[i-1].end = entry.start\n entry.bandwidth = self.required[i-1].bandwidth + addedBW\n if endTime >= self.required[i-1].end:\n entry.end = self.required[i].start\n self.required.insert(i,entry)\n i+=1 \n while i < len(self.required) and endTime >= self.required[i].end:\n self.required[i].bandwidth += addedBW\n i+=1\n if i < len(self.required) and endTime < self.required[i].end:\n remainingEntry = ProfileEntry(start=endTime,end=self.required[i].end,bandwidth=self.required[i].bandwidth,ptype='required')\n self.required[i].bandwidth += addedBW\n self.required[i].end = endTime\n self.required.insert(i+1,remainingEntry)\n break\n for r in self.required:\n if r.start == r.end:\n self.required.remove(r)\n return\n\n def addRequiredProfile(self,profile):\n if profile == '':\n return -1\n p = profile.split('\\n')\n if p == '' or p == None:\n return -1\n\n entryList = []\n for line in p:\n entry = get_entry_from_line(line)\n if entry != None:\n entry.ptype = 'required'\n entryList.append(entry)\n entryList = sorted(entryList)\n for i in range(0,len(entryList)-1):\n entryList[i].end = entryList[i+1].start\n entryList[-1].end = self.period\n\n appList = copy.deepcopy(entryList)\n for a in appList:\n a.ptype = 'app'\n self.apps.append(appList)\n\n if len(self.required) 
== 0:\n for e in entryList:\n self.required.append(e)\n else:\n for e in entryList:\n self.addRequiredEntry(e)\n if len(self.required) > 0 and self.required[0].start > 0:\n entry = ProfileEntry()\n entry.start = 0\n entry.end = self.required[0].start\n entry.ptype = 'required'\n self.required.insert(0,entry)\n\n if len(self.required) > 0:\n originalRequired = copy.deepcopy(self.required)\n pData = self.required[-1].data\n for i in range(1,self.num_periods):\n tmpRequired = copy.deepcopy(originalRequired)\n for e in tmpRequired:\n e.data += pData\n e.start += self.period*i\n e.end += self.period*i\n self.required.append(e)\n pData += pData\n return\n\n def convolve(self,interface):\n if len(self.required) == 0:\n print \"ERROR: no required profiles on this node!\"\n return -1\n if len(self.provided) == 0:\n print \"ERROR: no provided profiles on this node\"\n return -1\n self.profile = []\n for e in self.provided:\n if e.interface == interface:\n self.profile.append(e)\n for e in self.required:\n self.profile.append(e)\n self.profile = sorted(self.profile)\n pInterval = None\n rInterval = None\n self.link = []\n buff = 0\n delay = [0,0,0]\n pOffset = 0\n pEndData = 0\n rEndData = 0\n for e in self.profile:\n if e.ptype == 'provided':\n pInterval = e\n else:\n rInterval = e\n # note: the way intervals are created, the\n # req and prov intervals will always overlap\n # and adjacent intervals will never overlap\n if pInterval != None and rInterval != None:\n start = 0\n end = 0\n # get the later start value\n if pInterval.start < rInterval.start:\n start = rInterval.start\n elif pInterval.start == rInterval.start:\n start = rInterval.start\n elif pInterval.start > rInterval.start:\n start = pInterval.start\n # get the earlier end value\n if pInterval.end < rInterval.end:\n end = pInterval.end\n pEndData = pInterval.data - pOffset\n rEndData = rInterval.data - rInterval.bandwidth*(rInterval.end-end)\n elif pInterval.end == rInterval.end:\n end = pInterval.end\n pEndData = pInterval.data - pOffset\n rEndData = rInterval.data\n elif pInterval.end > rInterval.end:\n end = rInterval.end\n pEndData = pInterval.data - pOffset - pInterval.bandwidth*(pInterval.end-end)\n rEndData = rInterval.data \n # create interval entry for link profile\n entry = ProfileEntry()\n entry.ptype = 'link'\n entry.start = start\n entry.end = end\n # link interval time bounds configured; now to calc data\n if pEndData <= rEndData:\n # set entry data\n entry.data = pEndData\n buff = rEndData - pEndData\n if buff > self.buffer[2]:\n self.buffer = [entry.end,entry.data,buff]\n else:\n # set entry data and see if there was a profile crossing\n if len(self.link) == 0 or self.link[-1].data < rEndData:\n rData = rInterval.bandwidth*(rInterval.end - start)\n rStart= rInterval.data - rInterval.bandwidth*(rInterval.end - rInterval.start)\n pStart= pInterval.data - pOffset - pInterval.bandwidth*(pInterval.end - pInterval.start)\n point = get_intersection([pInterval.start,pStart],[pInterval.end,pInterval.data-pOffset],[rInterval.start,rStart],[rInterval.end,rInterval.data])\n if point[0] != -1:\n xEntry = ProfileEntry()\n xEntry.ptype = 'link'\n xEntry.start = start\n xEntry.end = point[0]\n xEntry.data = point[1]\n self.link.append(xEntry)\n entry.start = xEntry.end\n entry.data = rEndData\n self.link.append(entry)\n # do we need to add to the offset?\n if pEndData >= rEndData:\n pOffset += pEndData - rEndData\n self.link = [e for e in self.link if e.start != e.end]\n lData = 0\n for e in self.link:\n e.bandwidth = (e.data - 
lData)/(e.end-e.start)\n lData = e.data\n self.calcDelay()\n return 0\n\n def calcDelay(self):\n if len(self.required) == 0:\n print \"ERROR: no required profiles on this node!\"\n return -1\n if len(self.link) == 0:\n print \"ERROR: profiles have not been convolved; no link profile exists!\"\n return -1\n delay = [0,0,0]\n # match required points to link profile horizontally\n for r in self.required:\n for l in self.link:\n if l.data > r.data:\n offset = l.end-(l.data-r.data)/l.bandwidth\n timeDiff = offset-r.end\n if timeDiff > delay[2] and delay[1] != r.data:\n delay = [r.end,r.data,timeDiff]\n break\n elif l.data == r.data:\n timeDiff = l.end - r.end\n if timeDiff > delay[2] and delay[1] != r.data:\n delay = [r.end,r.data,timeDiff]\n break\n # match link points to required profile horizontally\n for l in self.link:\n for r in self.required:\n if l.data < r.data:\n offset = r.end-(r.data-l.data)/r.bandwidth\n timeDiff = l.end - offset\n if timeDiff > delay[2] and l.data != delay[1]:\n delay = [offset,l.data,timeDiff]\n break\n self.delay = delay\n return 0\n\n def calcData(self):\n if len(self.required) == 0:\n print \"ERROR: no required profiles on this node!\"\n return -1\n if len(self.provided) == 0:\n print \"ERROR: no provided profiles on this node\"\n return -1\n rData = 0\n pData = {}\n for intf in self.interfaces:\n pData[intf] = 0\n for e in self.required:\n rData += e.bandwidth*(e.end-e.start)\n e.data = int(rData)\n for e in self.provided:\n pData[e.interface] += e.bandwidth*(e.end-e.start)\n e.data = int(pData[e.interface])\n for a in self.apps:\n rData = 0\n for e in a:\n rData += e.bandwidth*(e.end-e.start)\n e.data = int(rData)\n return 0\n\n def plotProfile(self,dtype,profile,ptype,dashes,label=''):\n xvals = []\n yvals = []\n if dtype == 'data':\n xvals.append(0)\n yvals.append(0)\n for e in profile:\n if e.ptype == ptype:\n if dtype == 'bandwidth':\n xvals.append(e.start)\n yvals.append(e.bandwidth)\n yvals.append(e.bandwidth)\n else:\n yvals.append(e.data)\n xvals.append(e.end)\n\n line, =plt.plot(xvals,yvals,label=r\"{0}{1} {2}\".format(label,ptype,dtype),linewidth=PLOT_WIDTH)\n line.set_dashes(dashes) \n return\n\n def plotData(self):\n plt.figure(2)\n plt.hold(True)\n self.plotProfile('data',self.profile,'required',[8,4,2,4,2,4],'r[t]: ')\n self.plotProfile('data',self.profile,'provided',[2,4],'p[t]: ')\n self.plotProfile('data',self.link,'link',[6,12],'l[t]: ')\n\n buffplotx = [self.buffer[0],self.buffer[0]]\n buffploty = [self.buffer[1],self.buffer[1]+self.buffer[2]]\n plt.plot(buffplotx,buffploty,'0.5',label=r\"Buffer\",linewidth=PLOT_WIDTH) #:%d B\"%(int(buff)/8)\n\n delayplotx = [self.delay[0],self.delay[0]+self.delay[2]]\n delayploty = [self.delay[1],self.delay[1]]\n plt.plot(delayplotx,delayploty,'0.8',label=r\"Delay\",linewidth=PLOT_WIDTH) #:%0.4f s\"%float(delay)\n \n '''\n line, =plt.plot([orbital_period,orbital_period],[0,max(column(req,1))],linewidth=2,color='black', label=r\"Period End\")\n for i in range(2,num_periods+1):\n line, =plt.plot([orbital_period*i,orbital_period*i],[0,max(column(req,1))],linewidth=2,color='black')\n '''\n\n plt.title(\"Network Traffic vs. 
Time over %d period(s)\"%self.num_periods)\n plt.ylabel(\"Data (bits)\")\n plt.xlabel(\"Time (s)\")\n plt.legend(loc='upper left')\n #plt.grid(True)\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_ticks([])\n frame1.axes.get_yaxis().set_ticks([])\n plt.show()\n return\n\n def plotBandwidth(self):\n plt.figure(1)\n plt.hold(True)\n self.plotProfile('bandwidth',self.profile,'required',[4,8])\n self.plotProfile('bandwidth',self.profile,'provided',[2,4])\n self.plotProfile('bandwidth',self.link,'link',[2,4])\n \n '''\n line, =plt.plot([orbital_period,orbital_period],[0,max(column(linkbw,1))],linewidth=2,color='black', label=r\"Period End\")\n for i in range(2,num_periods+1):\n line, =plt.plot([orbital_period*i,orbital_period*i],[0,max(column(linkbw,1))],linewidth=2,color='black')\n '''\n\n plt.title(\"Network Bandwidth vs. Time over %d period(s)\"%self.num_periods)\n plt.ylabel(\"Bandwidth (bps)\")\n plt.xlabel(\"Time (s)\")\n plt.legend(loc='lower left')\n #plt.grid(True)\n plt.show()\n return\n\n def __repr__(self):\n return \"NodeProfile()\"\n\n def __str__(self):\n retStr = 'Buffer: {0}\\nDelay: {1}\\n'.format(self.buffer,self.delay)\n retStr += \"Provided:\\n\"\n for e in self.provided:\n retStr += \"{0}\\n\".format(e)\n retStr += \"Apps:\\n\"\n for i in range(0,len(self.apps)):\n retStr += \"App {0} profile:\\n\".format(i+1)\n for e in self.apps[i]:\n retStr += \"{0}\\n\".format(e)\n retStr += \"Required:\\n\"\n for e in self.required:\n retStr += \"{0}\\n\".format(e)\n retStr += \"Link:\\n\"\n for e in self.link:\n retStr += \"{0}\\n\".format(e)\n return retStr\n\nclass NetworkProfile:\n def __init__(self,_period):\n self.nodeProfiles = {}\n self.period = _period \n\n def addNodeProfile(self,node,profile):\n self.nodeProfiles[node] = profile\n\n def calcData(self):\n for n,p in self.nodeProfiles.iteritems():\n p.calcData()\n\n def convolve(self,node,interface):\n self.nodeProfiles[node].convolve(interface)\n return self.nodeProfiles[node]\n\n def __repr__(self):\n return \"NetworkProfile()\"\n\n def __str__(self):\n retStr = \"NetworkProfile:\\n\"\n retStr += \"has period {0} and node profiles:\\n\".format(self.period)\n for n,p in self.nodeProfiles.iteritems():\n retStr += \"Node {0} has profiles:\\n{1}\\n\".format(n,p)\n return retStr\n\ndef get_entry_from_line(line=None):\n if line == None or len(line) == 0:\n return None\n fields = line.split(',')\n if len(fields) == 0 or fields[0][0] == '%':\n return None\n entry = ProfileEntry()\n entry.start = float(fields[0])\n entry.bandwidth = float(fields[1])\n entry.latency = float(fields[2])\n if len(fields) == 4:\n entry.interface = fields[3]\n return entry\n\ndef gen_network_profile(nodeProfiles,appProfiles,app_node_map,period):\n profiles = NetworkProfile()\n for node,apps in app_node_map.iteritems():\n nodeProfile = NodeProfile()\n nodeProfile.addProvidedProfile(nodeProfiles[node])\n for app in profiles:\n nodeProfile.addRequiredProfile(profiles[app])\n profiles.addNodeProfile(node,nodeProfile)\n\ndef get_app_node_map(nodes,apps):\n app_node_map = {}\n for node,nprofile in nodes.iteritems():\n for app,aprofile in apps.iteritems():\n if app.find(node) != -1:\n if app_node_map.has_key(node):\n app_node_map[node].append(app)\n else:\n app_node_map[node] = [app]\n return app_node_map\n\ndef get_appProfiles(folder):\n profile_dir = os.getcwd()+os.sep+folder\n apps = {}\n if os.path.isdir(profile_dir):\n print 'Found ',profile_dir\n for file in glob.glob(profile_dir+os.sep+'*profile.csv'):\n app_name = file.replace('_profile.csv','')\n 
app_name = app_name.replace(profile_dir+os.sep,'')\n            with open(file,'r+') as f:\n                content = f.read()\n            apps[app_name] = content\n    else:\n        print \"ERROR: \",profile_dir,\" doesn't exist!\"\n    return apps\n\ndef get_nodeProfiles(folder):\n    profile_dir = os.getcwd()+os.sep+folder\n    nodes = {}\n    if os.path.isdir(profile_dir):\n        print 'Found ',profile_dir\n        for file in glob.glob(profile_dir+os.sep+'*config.csv'):\n            node_name = file.replace('_crm_config.csv','')\n            node_name = node_name.replace(profile_dir+os.sep,'')\n            if node_name != 'crm_config.csv':\n                with open(file,'r+') as f:\n                    content = f.read()\n                nodes[node_name] = content\n    else:\n        print \"ERROR: \",profile_dir,\" doesn't exist!\"\n    return nodes" }, { "alpha_fraction": 0.6930596232414246, "alphanum_fraction": 0.6930596232414246, "avg_line_length": 28.117647171020508, "blob_id": "c56a52b7026c5afc8747b4242cc5a2232ad2a2d5", "content_id": "0308825ebc7305970a74dcbce6f5f13b6a39b939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 67, "num_lines": 68, "path": "/src/middleware/v1.0/Design.MD", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Enhancements:\r\n-------------\r\n* Middleware:\r\n * Server side receive middleware\r\n * Enable out-of-band communication between servers & clients\r\n * Communication of profiles?\r\n * Allow receiver to stop sender\r\n* Server Functionality:\r\n * reads all profiles for all clients connected\r\n * checks all incoming client data against the client's profile\r\n * sends STOP command to client if client has been deemed a threat\r\n\r\nQuestions:\r\n----------\r\n* What do we do about server (receiver)?\r\n* How to create process specific buffers?\r\n\t* Singleton instance used by each process\r\n\t* each process has single buffer?\r\n* how to properly meter out data from the buffer\r\n\t* break up packets?\r\n\r\nGoals:\r\n------\r\n* enforce application (process) network traffic profiles\r\n* measure each packet's latency \r\n* measure each buffer's size as a function of time\r\n* enforce system network traffic profile (tc?)\r\n* vanilla linux: i.e. 
UDP\r\n\r\nArchitecture:\r\n-------------\r\n* underlying send layer uses UDP\r\n* applications call a MW level send call \r\n\t* send call places data into a buffer\r\n\t* buffer management:\r\n\t\t* (possibly) rejects insertion\r\n\t\t* (possibly) delays transmission\r\n\t\t* tracks stats\r\n\r\nNeed to Create:\r\n---------------\r\n* Client traffic generation (port)\r\n* Server reception/measurement (port)\r\n* Client side architecture:\r\n\t* process-specific buffers and management code\r\n\t* connection setup/teardown\r\n\r\nClient Middleware:\r\n------------------\r\n* Read in standard Network Profile (port)\r\n* configure buffer space (packet based?)\r\n* create egress thread\r\n* When application invokes send():\r\n\t* check time for input rejection\r\n\t* get buffer lock()\r\n\t* check buffer space for input rejection\r\n\t* insert into buffer (single packet?)\r\n\t* if !(buffer_has_data): send cond var to egress thread\r\n\t* release buffer lock()\r\n* egress thread:\r\n\t* if (buffer_has_data)\r\n\t\t* get buffer lock\r\n\t\t* pop data off the buffer (profile based)\r\n\t\t* release buffer lock\r\n\t\t* calc next pop time\r\n\t\t* wait until pop time\r\n\t* else\r\n\t\t* wait for cond var from input thread" }, { "alpha_fraction": 0.7788600921630859, "alphanum_fraction": 0.7803080081939697, "avg_line_length": 50.31756591796875, "blob_id": "cfc5fb1e29522819cb13ce4fe31bcb98cbbedba8", "content_id": "2f037f7ecc3b46f3b39c64535812d584823dfdd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7597, "license_type": "no_license", "max_line_length": 73, "num_lines": 148, "path": "/docs/_sources/math.txt", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Precise Network Performance Prediction : Theory\n=================================================\n\nThis chapter describes the mathematical formalization behind the\nnetwork analysis techniques used by *Precise Network Performance\nPrediction*, (:math:`PNP^2`).\n\n.. _network_math_formalism:\n\nMathematical Formalism\n----------------------\n\nTo model the network capability of the system and the application\ntraffic patterns, we have developed a network modeling paradigm\nsimilar to Network Calculus' traffic arrival curves and traffic shaper\nservice curves.\n\nSimilarly to Network Calculus' arrival curves and service curves, our\nnetwork profiles model how the network performance or application\ntraffic generation changes with respect to time. Whereas Network\nCalculus' modeling transforms application data profiles and network\nservice profiles into min and max curves for data received vs. size of\ntime-window, our models take a simpler, deterministic approach which\nmodels exactly the data generated by the application and the data\nwhich could be sent through the network, allowing our performance\nmetrics to be more precise. Specifically, the bandwidth that the\nnetwork provides on a given communication link is specified as a time\nseries of scalar bandwidth values. Here, bandwidth is defined as data\nrate, i.e. bits per second, over some averaging interval. This\nbandwidth profile can then be time-integrated to determine the maximum\namount of data throughput the network link could provide over a given\ntime. 
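\n\nFor illustration, the following minimal Python sketch (not part of the\nanalysis library; the profile values are hypothetical) shows how such a\nstep-wise bandwidth profile is time-integrated into a cumulative data\nprofile:\n\n.. code-block:: python\n\n   # toy bandwidth profile: bits per second over five one-second intervals\n   bandwidth = [100, 100, 50, 0, 200]\n\n   # cumulative data profile: total bits the link could carry through time t\n   data = []\n   total = 0\n   for bw in bandwidth:\n       total += bw  # each entry covers one averaging interval of one second\n       data.append(total)\n\n   print(data)  # [100, 200, 250, 250, 450]\n\n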
The bandwidth profile for the application traffic similarly can\nbe time-integrated to determine the amount of data that the\napplication attempts to send on the network link as a function of\ntime.\n\nHaving time-integrated the bandwidth profiles to obtain data vs. time\nprofiles that the application requires and that the system provides,\nwe can use a special type of convolution (:math:`\\otimes`),\n*(min,+)-calculus convolution*, on these two profiles to obtain the\ntransmitted link data profile as a function of discrete time. The\nconvolution we define on these profiles borrows concepts from the\nmin-plus calculus used in Network Calculus, but does not use a\nsliding-window and instead takes the transformed minimum of the\nprofiles. For a given application data generation profile,\n:math:`r[t]`, and a given system link capacity profile :math:`p[t]`,\nwhere :math:`t\\in\\mathbb{N}`, the link transmitted data profile\n:math:`l[t]` is given by the convolution equation\n:eq:`convolution`. The difference :math:`(p[t-1] - l[t-1])` represents\nthe difference between the amount of data that has been transmitted on\nthe link :math:`(l[t-1])` and the data that the link could have\ntransmitted at full utilization :math:`(p[t-1])`. As demonstrated by\nthe convolution equation, :math:`\\forall t : l[t] \\le r[t]`, which captures\nthe relation that, without lower-layer reliable transport, the link\ncannot transmit more application data than the application requests;\npacketization and communication header overhead only reduce the\neffective throughput further. The buffer and delay equations\n:eq:`convolution` use the output of the convolution with the input\nprofile to predict the minimum required buffer size for lossless\ntransmission and the maximum delay experienced by the transmitted data,\nrespectively. A representative convolution example is shown below for\nreference.\n\n.. math::\n y=l[t] &= (r \\otimes p)[t] \\\\\n &= min( r[t] , p[t] - (p[t-1] - l[t-1]) )\\\\\n \\text{buffer}&= sup\\{r[t] - l[t] : t \\in \\mathbb{N}\\}\\\\\n \\text{delay} &= sup\\{l^{-1}[y]-r^{-1}[y] : y \\in \\mathbb{N}\\}\n :label: convolution\n\n.. figure:: /images/results/convolution.png\n :align: center\n\n Illustrative example for network profile convolution.\n\n.. _assumptions:\n\nAssumptions Involved\n--------------------\n\nAs with any type of system modeling and analysis paradigm, it is\nimportant to remain aware of the types of systems the\nmodeling/analysis is applicable to, the requirements imposed on the\nsystem by the model, and any edge cases or scenarios where the\nanalysis or modeling paradigm breaks down.\n\nThe major assumption that we make with this type of system modeling\nand analysis is that we *can* know at design time what the system\nnetwork capacity and the application data production will be as a\n(possibly periodic) function of time. Of course, this assumption is\nunrealistic for heavily data-dependent systems, but by performing some\ncode analysis and/or doing some controlled experiments, models of the\napplications' behavior can be developed that can be analyzed.\n\nAnother key assumption and thus requirement of our modeling and\nanalysis framework is a system-wide synchronized clock which all nodes\nuse. By this we mean that if two nodes produce data for a third node\nat time :math:`t=3` seconds, they will produce their data at exactly\nthe same time. This is required for the composition of profiles as they\ntraverse the network and are routed through nodes. 
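\n\nReturning to the convolution equation above, the following minimal\nPython sketch (a hypothetical toy example, not code from the analysis\nlibrary) computes the link profile, the required buffer size, and a\ndiscrete approximation of the delay supremum:\n\n.. code-block:: python\n\n   # toy cumulative profiles (bits): r[t] required, p[t] provided capacity\n   r = [100, 200, 300, 400, 500]  # application produces 100 bits/s\n   p = [50, 100, 150, 400, 650]   # link: 50 bits/s, then 250 bits/s\n\n   # l[t] = min( r[t], p[t] - (p[t-1] - l[t-1]) )\n   l = [min(r[0], p[0])]\n   for t in range(1, len(r)):\n       l.append(min(r[t], p[t] - (p[t-1] - l[t-1])))\n\n   # buffer: largest backlog between produced and transmitted data\n   buffer_bits = max(r[t] - l[t] for t in range(len(r)))\n\n   # delay: maximum horizontal distance between r and l; all data is\n   # eventually transmitted here, so the inner min() is never empty\n   delay = max(min(s for s in range(t, len(l)) if l[s] >= r[t]) - t\n               for t in range(len(r)))\n\n   print(l)            # [50, 100, 150, 400, 500]\n   print(buffer_bits)  # 150\n   print(delay)        # 2\n\n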
This assumption\nrestricts the types of systems for which our analysis can be most\nuseful, but is not a critical hindrance, as many such critical\nsystems, e.g. satellite constellations or UAVs, have GPS synchronized\nclocks, which provide such a foundation.\n\nAnother restriction with our modeling paradigm is that data-dependent\nflows cannot be accurately represented, since we have no way of\nmodeling data-dependence. A related assumption is processing power\nand the ability of the software to adhere to the profiles: we assume\nthe applications are able to accurately and precisely follow their\ndata production profiles, regardless of the number of other components\non their hardware node. Similarly, we assume that under all\ncircumstances, the service profile of a hardware node will be adhered to.\n\n.. _impacts:\n\nFactors Impacting Analysis\n--------------------------\n\nIt is important when developing modeling and analysis techniques to\nanalyze how the analysis time and results are affected by changes in\nthe model. This is especially true when trying to determine how\napplicable new techniques are to large scale systems. Models are\nprovided by the application and system developers and are described in\nthe form of bandwidth (bps) vs time that the application requires or\nthe system provides. These profiles are a time series that maps a\ngiven time to a given bandwidth. Between two successive intervals,\nthe bandwidth is held constant. Clearly, to represent changing\nbandwidth over time, the developer must use sufficiently short\ntime intervals to allow step-wise approximation of the curve.\nHowever, as with any system, there is a tradeoff between precision of\nthe model and the analysis time and results.\n\nBecause the fundamental mathematics are linear for our convolution,\nour convolution scales with :math:`O(n)`, where :math:`n` is the total\nnumber of intervals in all of the profiles analyzed. It is worth\nnoting that this complexity is not the same as the :math:`O(n^2)` or\n:math:`O(n*log(n))` complexity that traditional convolution has. This\ndecrease in complexity is due to our convolution only requiring a\nsingle operation (comparison operation for the minimum) for each value\nof :math:`t`. As such, each element in both of the profiles being\nconvolved only needs to be operated on once.\n\nClearly, the overall system analysis complexity depends on the\ncomplexity of the system, so as the system scales and increases\nrouting complexity, so too will the analysis complexity. However, for\nall systems there is an asymptotically increasing precision for a\ngiven increase in model precision and analysis time. \n" }, { "alpha_fraction": 0.5972384810447693, "alphanum_fraction": 0.6084718108177185, "avg_line_length": 34.907562255859375, "blob_id": "0878362c07ecc43906a0fec3000ba7d63c35be60", "content_id": "21e814c156821b8cb1edb9b0ae2a8b7701a3e220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4273, "license_type": "no_license", "max_line_length": 125, "num_lines": 119, "path": "/src/analysis/v1.0/acceptance.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"\nThis program is designed to do admissibility tests for admission of an application\nor set of applications to the F6 satellite cluster. Each application may be \nsplit across multiple nodes of the cluster. 
Each node has its own network\ninterface and as such, each node's bandwidth is independent of the other nodes'\nnetwork utilization. Therefore each node's bandwidth is modeled as a network\n\"link\" which connects from that node to all other nodes. \n\"\"\" \n\nimport sys\nfrom networkProfile import *\n\norbital_period = (90*60) # orbital period in seconds\nnum_periods = 1\nselected_node = ''\nselected_interface = ''\n\ndef parse_args(args):\n    global orbital_period\n    global num_periods\n    global selected_node\n    global selected_interface\n\n    argind = 1\n    while argind < len(args):\n        if args[argind] == \"-P\":\n            orbital_period = int(args[argind+1])\n            if orbital_period <= 0:\n                print \"Error! You must specify a time period > 0\"\n                return -1\n            argind += 2\n        elif args[argind] == \"-n\":\n            num_periods = int(args[argind+1])\n            if num_periods <= 0:\n                print \"Error! You must specify a number of periods > 0\"\n                return -1\n            argind += 2\n        elif args[argind] == \"-N\":\n            selected_node = args[argind+1]\n            argind += 2\n        elif args[argind] == \"-I\":\n            selected_interface = args[argind+1]\n            argind += 2\n        elif args[argind] == \"-?\" or args[argind] == \"-h\":\n            print \"Usage:\\n\\tpython \",args[0],\"\"\"\n            \\t\\t-N <node name>\n            \\t\\t-I <node interface name>\n            \\t\\t-P <period (s)>\n            \\t\\t-n <number of periods to analyze>\\n\"\"\"\n            return -1\n        else:\n            print \"\"\"Usage:\\n\\t\"\"\",args[0],\"\"\"\n            \\t\\t-N <node name>\n            \\t\\t-I <node interface name>\n            \\t\\t-P <period (s)>\n            \\t\\t-n <number of periods to analyze>\\n\"\"\"\n            return -1\n    return 0\n\ndef main():\n    global selected_node\n    global selected_interface\n    global orbital_period\n    global num_periods\n    args = sys.argv\n\n    if parse_args(args):\n        return -1\n\n    nodes = get_nodeProfiles('scripts')\n    if nodes == {}:\n        return -1\n    apps = get_appProfiles('profiles')\n    if apps == {}:\n        return -1\n    app_node_map = get_app_node_map(nodes,apps)\n    networkProfile = NetworkProfile(orbital_period)\n    for node,profile in nodes.iteritems():\n        nodeProfile = NodeProfile(orbital_period,num_periods)\n        nodeProfile.addProvidedProfile(profile)\n        if node in app_node_map.keys():\n            for app in app_node_map[node]:\n                if \",\" in apps[app]:\n                    nodeProfile.addRequiredProfile(apps[app])\n        networkProfile.addNodeProfile(node,nodeProfile)\n    networkProfile.calcData()\n\n    if selected_node == '':\n        selected_node=nodes.keys()[0]\n    if selected_node not in nodes:\n        print 'ERROR: node {0} not found in system!'.format(selected_node)\n        return -1\n\n    if selected_interface == '':\n        if len(networkProfile.nodeProfiles[selected_node].interfaces) > 0:\n            selected_interface = networkProfile.nodeProfiles[selected_node].interfaces[0]\n        else:\n            print 'ERROR: node {0} has no interfaces that can be analyzed!'.format(selected_node)\n            return -1\n    if selected_interface not in networkProfile.nodeProfiles[selected_node].interfaces:\n        print 'ERROR: node {0} has no interface named {1}!'.format(selected_node,selected_interface)\n        return -1\n\n    print 'Using node: interface {0} on node {1}'.format(selected_interface,selected_node)\n    print \"Using period \",orbital_period,\" over \",num_periods,\" periods\"\n\n    if networkProfile.convolve(selected_node,selected_interface) == -1:\n        print 'Node {0} cannot be analyzed for interface {1}: no usable profile'.format(selected_node,selected_interface)\n\n    buff = networkProfile.nodeProfiles[selected_node].buffer\n    print \"\\n[Time location, buffersize]:\",[buff[0],buff[2]]\n\n    delay = networkProfile.nodeProfiles[selected_node].delay\n    print \"[Time location, delay]:\",[delay[0],delay[2]]\n\n    
return\n    \nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.5746496319770813, "alphanum_fraction": 0.6758074164390564, "avg_line_length": 27.275861740112305, "blob_id": "c679f2d3a8b2d83253e794f3483d4143be8eb7da", "content_id": "e8b8850767242d1fee3782966717e2064b6b9bbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 70, "num_lines": 58, "path": "/src/analysis/v2.0/delay_test/tc_config_router.sh", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nTC=/sbin/tc\n\nDEV=eth0\n\nif [[ \"$1\" = \"status\" ]]\nthen\n    $TC -s qdisc ls dev $DEV\n    $TC -s class ls dev $DEV\nexit\nfi\n\n# clean existing down- and uplink qdiscs, hide errors\n$TC qdisc del dev $DEV root 2> /dev/null > /dev/null\n$TC qdisc del dev $DEV ingress 2> /dev/null > /dev/null\n\nif [[ \"$1\" = \"stop\" ]]\nthen\n    exit\nfi\n\n###### uplink\n\n$TC qdisc add dev ${DEV} root handle 1: tbf \\\n    rate 100Mbit peakrate 101Mbit mtu 8192 latency 1ms burst 1540\n\n$TC qdisc add dev ${DEV} parent 1:1 handle 11: prio\n\n$TC qdisc add dev ${DEV} parent 11:1 handle 111: netem delay 100ms\n$TC qdisc add dev ${DEV} parent 11:2 handle 112: pfifo\n\n$TC qdisc add dev ${DEV} parent 111:1 handle 1111: tbf \\\n    rate 1Mbit peakrate 1001kbit mtu 8192 latency 100s burst 154000000\n$TC qdisc add dev ${DEV} parent 1111:1 handle 2: prio\n\n$TC qdisc add dev ${DEV} parent 2:1 handle 21: pfifo\n$TC qdisc add dev ${DEV} parent 2:2 handle 22: pfifo\n\n# FILTER APPLICATION TRAFFIC VERSUS NON-APP TRAFFIC\n$TC filter add dev ${DEV} protocol ip parent 11: prio 1 u32 \\\n    match ip src 10.1.1.1 flowid 11:1\n\n$TC filter add dev ${DEV} protocol ip parent 11: prio 1 u32 \\\n    match ip src 10.1.1.3 flowid 11:1\n\n$TC filter add dev ${DEV} protocol ip parent 11: prio 2 u32 \\\n    match ip src 10.1.1.0/24 flowid 11:2\n\n$TC filter add dev ${DEV} protocol ip parent 11: prio 2 u32 \\\n    match ip src 192.168.122.0/24 flowid 11:2\n\n# PRIORITIZE CERTAIN APPLICATION TRAFFIC\n$TC filter add dev ${DEV} protocol ip parent 2: prio 1 u32 \\\n    match ip src 10.1.1.1 flowid 2:1\n\n$TC filter add dev ${DEV} protocol ip parent 2: prio 2 u32 \\\n    match ip src 10.1.1.3 flowid 2:2\n\n" }, { "alpha_fraction": 0.5410292148590088, "alphanum_fraction": 0.5535466074943542, "avg_line_length": 21.946807861328125, "blob_id": "8d0387bbef1a79e8e45360e4bf14a8120ab48557", "content_id": "be2b8d5a0b46b1513cc9a17061b77ba4a7ab2dc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 63, "num_lines": 94, "path": "/src/middleware/v2.0/Client.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef CLIENT_HPP\n#define CLIENT_HPP\n\n#include <math.h>\n#include <queue>\n\n#include \"log_macro.hpp\"\n#include \"ConnectionSubsys.hpp\"\n#include \"Message.hpp\"\n#include \"NetworkProfile.hpp\"\n\n#include <string>\n\nclass Options {\npublic:\n  long port;\n  long bitLength;\n  double runTime;\n  int numPeriods;\n  std::string ip;\n  std::string tgFile;\n  std::string outputFile;\n\n  Options() {\n    port = 7777;\n    bitLength = 4096;\n    runTime = -1;\n    numPeriods = 1;\n    ip = \"10.1.1.2\";\n    outputFile = \"clientOutput.csv\";\n    tgFile = \"./tg_profile.csv\";\n  }\n\n  int Parse(int argc, char **argv) {\n    for (int i=0; i < argc; i++)\n      {\n\tif (!strcmp(argv[i], \"--profile\"))\n\t  {\n\t    tgFile = argv[i+1];\n\t  }\n\telse if (!strcmp(argv[i], 
\"--output_file\"))\n\t {\n\t outputFile = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--ip\"))\n\t {\n\t ip = argv[i+1];\n\t }\n\telse if (!strcmp(argv[i], \"--port\"))\n\t {\n\t ip = atoi(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--message_bit_length\"))\n\t {\n\t bitLength = atoi(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--num_periods\"))\n\t {\n\t numPeriods = atoi(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--run_time\"))\n\t {\n\t runTime = atof(argv[i+1]);\n\t }\n\telse if (!strcmp(argv[i], \"--help\"))\n\t {\n\t TG_LOG(\"usage: \\n\\t%s\\n\"\n\t\t \"\\t\\t --profile <TG profile filename>\\n\"\n\t\t \"\\t\\t --num_periods <number of periods to run>\\n\"\n\t\t \"\\t\\t --run_time <length of time to run>\\n\"\n\t\t \"\\t\\t --output_file <output file filename>\\n\"\n\t\t \"\\t\\t --ip <ipv6 address of server>\\n\"\n\t\t \"\\t\\t --port <port number of server>\\n\"\n\t\t \"\\t\\t --message_bit_length <# bits in message>\\n\"\n\t\t ,argv[0]);\n\t return -1;\n\t }\n }\n return 0;\n }\n \n void Print() {\n TG_LOG(\"Options():\\n\");\n TG_LOG(\"\\t tg profile filename\\t\\t: %s\\n\", tgFile.c_str());\n TG_LOG(\"\\t number of periods to run\\t: %u\\n\", numPeriods);\n TG_LOG(\"\\t length of time to run\\t\\t: %f\\n\", runTime);\n TG_LOG(\"\\t output filename\\t\\t: %s\\n\", outputFile.c_str());\n TG_LOG(\"\\t server ip address\\t\\t: %s\\n\", ip.c_str());\n TG_LOG(\"\\t server port number\\t\\t: %lu\\n\", port);\n TG_LOG(\"\\t bits in message\\t\\t: %lu\\n\", bitLength);\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5995607376098633, "alphanum_fraction": 0.6520254015922546, "avg_line_length": 33.72881317138672, "blob_id": "3c3f0c2a8a60ad989f7db8409bc453011205ec50", "content_id": "ea1294bb47d82a7d71bf509d2fd881fe82a79f62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4098, "license_type": "no_license", "max_line_length": 117, "num_lines": 118, "path": "/src/middleware/v2.0/recursive.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#include <vector>\n#include <iostream>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <string>\n#include <math.h>\n\nclass ResourceEntry {\npublic:\n unsigned long long time; // sec\n unsigned long long bandwidth; // bits / sec\n unsigned long long data; // bits\n\n ResourceEntry(unsigned long long t, unsigned long long b, unsigned long long d) : time(t), bandwidth(b), data(d) {}\n\n std::string toString() {\n char charBuf[100];\n sprintf(charBuf,\"%llu, %llu, %llu\",\n\t time, bandwidth, data);\n std::string retStr = charBuf;\n return retStr;\n }\n};\n\nunsigned long long getDataAtTime(unsigned long long, ResourceEntry, std::vector<ResourceEntry>);\nunsigned long long getTimeAtData(unsigned long long, ResourceEntry, std::vector<ResourceEntry>);\n\nunsigned long long Delay(unsigned long long data,\n\t\t\t unsigned long long start,\n\t\t\t unsigned long long period,\n\t\t\t std::vector<ResourceEntry> resources)\n{\n unsigned long long offsetData = getDataAtTime(start % period,\n\t\t\t\t\t\tresources.front(),\n\t\t\t\t\t\tstd::vector<ResourceEntry>(resources.begin() + 1,\n\t\t\t\t\t\t\t\t\t resources.end())\n\t\t\t\t\t\t);\n if ( (data % resources.back().data) > (resources.back().data - offsetData) )\n { // (data % resources.back().data) will go through the end of the period\n return (data / resources.back().data) * period + (period - start%period) +\n\tgetTimeAtData((data % resources.back().data) - (resources.back().data-offsetData),\n\t\t 
resources.front(),\n\t\t std::vector<ResourceEntry>(resources.begin() + 1, resources.end()));\n }\n else\n { // (data % resource.back().data) will not go through the end of the period\n return (data / resources.back().data) * period +\n\tgetTimeAtData((data % resources.back().data) + offsetData,\n\t\t resources.front(),\n\t\t std::vector<ResourceEntry>(resources.begin() + 1, resources.end())) - start;\n }\n}\n\nunsigned long long getTimeAtData(unsigned long long data,\n\t\t\t\t ResourceEntry prev,\n\t\t\t\t std::vector<ResourceEntry> resources)\n{\n if (prev.data <= data and resources[0].data >= data)\n return resources[0].time + (data - prev.data) / resources[0].bandwidth;\n else if (prev.data > data)\n return resources[0].time - (prev.data - data) / prev.bandwidth;\n else\n return getTimeAtData(data,\n\t\t\t resources[0],\n\t\t\t std::vector<ResourceEntry>(\n\t\t\t\t\t\t resources.begin() + 1,\n\t\t\t\t\t\t resources.end()\n\t\t\t\t\t\t )\n\t\t\t );\n}\n\nunsigned long long getDataAtTime(unsigned long long time,\n\t\t\t\t ResourceEntry prev, std::vector<ResourceEntry> resources)\n{\n if (prev.time <= time and resources[0].time >= time)\n return prev.data - prev.bandwidth * (resources[0].time - time);\n else\n return getDataAtTime(time,\n\t\t\t resources[0],\n\t\t\t std::vector<ResourceEntry>(\n\t\t\t\t\t\t resources.begin() + 1,\n\t\t\t\t\t\t resources.end()\n\t\t\t\t\t\t )\n\t\t\t );\n}\n\nint main(int argc, char** argv)\n{\n std::vector<ResourceEntry> resources;\n resources.push_back( ResourceEntry(0000000, 90, 90000000) );\n resources.push_back( ResourceEntry(1000000, 87, 177000000) );\n resources.push_back( ResourceEntry(2000000, 94, 271000000) );\n resources.push_back( ResourceEntry(3000000, 100, 371000000) );\n resources.push_back( ResourceEntry(4000000, 90, 461000000) );\n resources.push_back( ResourceEntry(5000000, 110, 571000000) );\n resources.push_back( ResourceEntry(6000000, 120, 691000000) );\n resources.push_back( ResourceEntry(7000000, 0, 691000000) );\n resources.push_back( ResourceEntry(8000000, 0, 691000000) );\n resources.push_back( ResourceEntry(9000000, 0, 691000000) );\n\n unsigned long long delay;\n unsigned long long start = 500000, data = 100;\n for (int i=0; i < argc; i++)\n {\n if (!strcmp(argv[i], \"--start\"))\n\tstart = atol(argv[i+1]);\n if (!strcmp(argv[i], \"--data\"))\n\tdata = atol(argv[i+1]);\n }\n delay = Delay(data, start, 9000000, resources);\n unsigned long long end = delay + start;\n std::cout << \"Start: \" << start << std::endl;\n std::cout << \"Data: \" << data << std::endl;\n std::cout << \"Delay: \" << delay << std::endl;\n std::cout << \"End: \" << end << std::endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.6962233185768127, "alphanum_fraction": 0.69786536693573, "avg_line_length": 26.636363983154297, "blob_id": "8317b573675a08c763c3963489e15a12d41c9e92", "content_id": "af363d0f60a16e2bdc1a470fc929d490f87b0f3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 609, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/doc/src/results.rst", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": ".. _results:\n\nPrecise Network Performance Prediction : Results\n==================================================\n\nThis chapter covers the results of my research as it applies to analysis\nof networked CPS. 
I will cover the research contributions in two\naspects:\n\n* :ref:`design_time` : Details design-time network analysis\n contributions and improvements to network performance prediction\n* :ref:`run_time` : Details the run-time network monitoring and\n management contributions which have been based of the design-time\n work\n\n\n.. toctree::\n :includehidden:\n :maxdepth: 2\n\n design-time\n run-time\n\n" }, { "alpha_fraction": 0.5595623850822449, "alphanum_fraction": 0.5753646492958069, "avg_line_length": 21.642202377319336, "blob_id": "fe68ff0ff97540dfc5ad4761d216cb49e0971f37", "content_id": "777c2e1de10f28475571de3777ce0b7ac80fed57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2468, "license_type": "no_license", "max_line_length": 71, "num_lines": 109, "path": "/src/middleware/v2.0/buffer.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_BUFFER_HPP\n#define NETWORK_BUFFER_HPP\n\n#include <deque>\n#include <boost/thread/condition.hpp>\n#include <boost/thread/mutex.hpp>\n\nnamespace Network\n{\n // Thread safe circular buffer \n class Buffer_Empty {};\n\n template <typename T>\n class message_buffer : private boost::noncopyable\n {\n public:\n typedef boost::mutex::scoped_lock lock;\n message_buffer() : _bits(0), _maxSize(0), _capacity(0) {}\n message_buffer(int bits) : message_buffer() { _capacity = bits; }\n void send (T data, uint64_t bits) {\n lock lk(monitor);\n if (!_capacity ||\n\t (_capacity && bits <= (_capacity - _bits)) ) {\n\t_bits += bits;\n\t_maxSize = std::max(_bits,_maxSize);\n\tsizes.push_back(bits);\n\tq.push_back(data);\n\tbuffer_not_empty.notify_one();\n }\n }\n T receive(uint64_t timeout_ms = 0) {\n boost::system_time const timeout =\n\tboost::get_system_time()+ boost::posix_time::milliseconds(timeout_ms);\n lock lk(monitor);\n while (q.empty()) {\n\tif (!buffer_not_empty.timed_wait(lk, timeout)) {\n\t lk.unlock();\n\t throw Buffer_Empty();\n\t}\n }\n T data = q.front();\n q.pop_front();\n uint64_t bits = sizes.front();\n sizes.pop_front();\n _bits = _bits - bits;\n return data;\n }\n T non_blocking_receive() {\n lock lk(monitor);\n if (q.empty()) {\n\tlk.unlock();\n\tthrow Buffer_Empty();\n }\n else {\n\tlk.unlock();\n\treturn receive();\n }\n }\n void clear() {\n lock lk(monitor);\n q.clear();\n sizes.clear();\n _bits = _maxSize = 0;\n }\n uint64_t bits() {\n lock lk(monitor);\n return _bits;\n }\n uint64_t bytes() {\n lock lk(monitor);\n return _bits / 8;\n }\n uint64_t maxBits() {\n lock lk(monitor);\n return _maxSize;\n }\n uint64_t maxBytes() {\n lock lk(monitor);\n return _maxSize / 8;\n }\n uint64_t capacityBits() {\n lock lk(monitor);\n return _capacity;\n }\n uint64_t capacityBytes() {\n lock lk(monitor);\n return _capacity / 8;\n }\n void set_capacityBits(uint64_t capacityBits) {\n lock lk(monitor);\n _capacity = capacityBits;\n }\n void set_capacityBytes(uint64_t capacityBytes) {\n lock lk(monitor);\n _capacity = capacityBytes * 8;\n }\n private:\n uint64_t _bits;\n uint64_t _maxSize;\n uint64_t _capacity;\n boost::condition buffer_not_empty;\n boost::mutex monitor;\n std::deque<uint64_t> sizes;\n std::deque<T> q;\n };\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.6746387481689453, "alphanum_fraction": 0.6776282787322998, "avg_line_length": 24.245283126831055, "blob_id": "8ac85bfa4247e986d972502b468bac37e42020ae", "content_id": "9e577109ad02e1f29e9eec7a82664489bdf85540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 4014, "license_type": "no_license", "max_line_length": 96, "num_lines": 159, "path": "/src/analysis/v1.0/nodeModel.py", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "'''\nQuestions:\n * what are the semantics for putting data into the buffer?\n \t* lump/burst (i.e. instantaneous datablock)\n \t* rate (bps/Bps)\n * what are the semantics for emptying the buffers?\n \t* lump/burst (i.e. instantaneous datablock)\n \t* rate (bps/Bps)\n * what are the interesting measurements to make?\n \t* sizes of each buffer (as function of time?)\n \t* latency for each datum?\n \t* buffer entry/exit for each datum?\n \t* max latency\n \t* max buffer size (app, NIC)\n'''\n\n'''\nDesign:\n * Server is kernel thread:\n \t* acquires lock on NIC buffer\n \t* acquires lock on actor buffer\n \t* moves some amount of data from app buffer to NIC buffer\n \t* releases lock on both buffers\n \t* (waits?)\n * Client is an actor thread:\n \t* acquires lock on actor buffer\n \t* inserts data (dataSize) into buffer according to required profile\n \t* releases lock on actor buffer\n \t* waits for amount of time equal to dataSize:profile\n * NIC is a network interface thread:\n \t* contains its own internal buffer to store sent items\n \t* grabs lock on NIC buffer\n \t* moves some amount of data from NIC buffer to internal buffer\n \t* releases lock on NIC buffer\n * Node contains:\n \t* some number of actors\n \t* some number of servers\n \t* one NIC per interface\n \t* one buffer per NIC\n \t* one buffer per actor\n'''\n\nimport sys, os, csv, copy, glob, time, calendar\nfrom networkProfile import *\nfrom multiprocessing import Queue, Process, Pool, Pipe, Manager, Lock\nfrom datetime import datetime\n\nclass Data:\n\tdef __init__(self, size = 0, parent = -1, interface = None):\n\t\tself.size = size\t\t\t# amount of data\n\t\tself.interface = interface \t# Interface to be sent out on\n\t\tself.parent = parent\t\t# parent process who sent the data (useful for graphs)\n\t\tself.time = []\t\t\t\t# timestamp list for each buffer entry/exit\n\t\tself.latency = 0\t\t\t# calculated after transmission := <sent buffer entry> - <app buffer entry>\n\t\treturn\n\n\tdef timeStamp(self):\n\t\tself.time.append(time.time())\n\t\treturn\n\t\t\nclass DataBuffer:\n\tdef __init__(self):\n\t\tself.size = 0\n\t\tself.numEntries = 0\n\t\tself.data = []\n\t\tself.lock = Lock()\n\t\treturn\n\n\tdef lock():\n\t\tself.lock.acquire()\n\t\treturn\n\n\tdef unlock():\n\t\tself.lock.release()\n\t\treturn\n\n\tdef push(self,data):\n\t\tdata.timeStamp()\n\t\tself.size += data.size\n\t\tself.numEntries += 1\n\t\tself.data.insert(0,data)\n\t\treturn\n\n\tdef pop(self):\n\t\titem = None\n\t\tif self.numEntries > 0:\n\t\t\titem = self.data.pop()\n\t\t\titem.timeStamp()\n\t\t\tself.numEntries = self.numEntries - 1\n\t\t\tself.size = self.size - item.size\n\t\treturn item\n\nclass Server:\n\tdef __init__(self, profile = []):\n\t\tself.profile = profile\n\t\treturn\n\n\tdef run(appBuffers, nicBuffers):\n\t\tfor app,appBuffer in appBuffers.iteritems():\n\t\t\tdata = None\n\t\t\tappBuffer.lock()\n\t\t\tdata = appBuffer.pop()\n\t\t\tappBuffer.unlock()\n\t\treturn\n\nclass Client:\n\tdef __init__(self, profile = []):\n\t\tself.profile = profile\n\t\treturn\n\n\tdef run(lock, buffer):\n\t\tdata = Data(size=dataSize, parent=self, interface=inft)\n\t\tbuffer.lock()\n\t\tbuffer.push(data)\n\t\tbuffer.unlock()\n\t\treturn\n\nclass NIC:\n\tdef __init__(self, profile = []):\n\t\tself.profile = profile\n\t\tself.buffer = 
DataBuffer()\n\t\tself.bufferSize = 0\n\t\treturn\n\n\tdef run(self, buffer):\n\t\tdata = None\n\t\tbuffer.lock()\n\t\tif buffer.numEntries > 0:\n\t\t\tdata = buffer.pop()\n\t\tbuffer.unlock()\n\t\tif data is not None:\n\t\t\tself.buffer.push(data)\n\t\treturn\n\nclass Node:\n\tdef __init__(self, clients = [], servers = [], NICs = []):\n\t\tself.clients = clients\n\t\tself.servers = servers\n\t\tself.NICs = NICs\n\t\tself.appBuffers = {}\n\t\tself.nicBuffers = {}\n\t\tself.time = 0\n\t\tfor client in clients:\n\t\t\tself.appBuffers[client] = DataBuffer()\n\t\tfor nic in NICs:\n\t\t\tself.nicBuffers[nic] = DataBuffer()\n\t\treturn\n\n\tdef start(self):\n\t\tpids = []\n\t\tfor nic in self.NICs:\n\t\t\tp = Process(target=nic.run, args=(self.nicBuffers[nic],))\n\t\t\tp.start()\n\t\t\tpids.append(p)\n\t\tfor server in self.servers:\n\t\t\tp = Process(target=server.run, args=(self.appBuffers, self.nicBuffers))\n\t\t\tp.start()\n\t\t\tpids.append(p)\n\t\tfor client in self.clients:\n\t\t\tp = Process(target=client.run, args=(self.appBuffers[client],))\n\t\t\tp.start()\n\t\t\tpids.append(p)\n\t\treturn\n" }, { "alpha_fraction": 0.6377550959587097, "alphanum_fraction": 0.704081654548645, "avg_line_length": 15.333333015441895, "blob_id": "c64e58de0aa692c85c7a75108e02f86f361e5b8b", "content_id": "914838d1c87c2e8ecdb55757ed4b4c8565c3f3e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 196, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/src/middleware/v2.0/oob.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef NETWORK_OOB_HPP\n#define NETWORK_OOB_HPP\n\n#include <string>\n\nnamespace Network\n{\n  static const std::string oob_mc_group = \"224.0.0.251\";\n  static const int oob_mc_port = 12345;\n};\n\n#endif\n" }, { "alpha_fraction": 0.6879432797431946, "alphanum_fraction": 0.6904101371765137, "avg_line_length": 31.75757598876953, "blob_id": "6e48900801c069656bcdd95244c5c7f63eb9b329", "content_id": "a70927d138306f335272f3a557321e066647f588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3243, "license_type": "no_license", "max_line_length": 263, "num_lines": 99, "path": "/doc/api/middleware/network-profile.rst", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Network Profile\n===============\n\n.. cpp:class:: profileMemBuf\n\n   This structure creates a stream buffer for parsing csv profile files.\n\n.. cpp:class:: ResourceEntry\n\n   .. attribute:: time\n   \n      Contains the start time for the resource entry.\n      \n      :rtype: float\n\n   .. attribute:: bandwidth\n\n      The bandwidth (bps) which is constant from the start of the entry to its end.\n      \n      :rtype: unsigned long long\n\t\t\t     \n   .. attribute:: data\n\n      The cumulative data (bits) which have been sent by the end of this resource entry. Includes the summation of all previous entries' data.\n      \n      :rtype: unsigned long long\n\t\t\t     \n   .. attribute:: latency\n\n      The latency (ms) for network traffic during this entry.\n      \n      :rtype: unsigned long long\n\t\t\t     \n.. cpp:class:: NetworkProfile\n\n   A network profile contains a sorted list of time- and data-contiguous entries of type :cpp:class:`ResourceEntry`. The profiles are periodic with a specific epoch-centric start-time.\n\n   .. attribute:: resources\n\n      :rtype: std\\:\\:vector<:class:`ResourceEntry`>\n\n   .. attribute:: start_time\n\n      :rtype: timespec\n\n   .. attribute:: period\n\n      :rtype: double\n\n   .. 
method:: initializeFromFile (fname)\n\n      Load in the profile specified by *fname*. Return 0 on success, -1 on error.\n\n      :param const char* fname: The filename containing a csv-delimited profile\n      :rtype: int\n\n   .. method:: initializeFromString (buffer)\n\n      Load in the profile contained in *buffer*. Return 0 on success, -1 on error.\n\n      :param char* buffer: A string buffer containing the csv-delimited profile\n      :rtype: int\n\n   .. method:: initializeFromIStream (stream)\n\n      Load in the profile contained in *stream*. Return 0 on success, -1 on error.\n\n      :param std\\:\\:istream& stream: An istream containing the csv-delimited profile\n      :rtype: int\n\n   .. method:: getOffset (t)\n\n      Returns the difference between *t* and the profile's start time, modulo the profile's period.\n\n      :param out timespec& t: epoch-centric time value\n      :rtype: double\n\n   .. method:: getNextInterval (start, bandwidth, latency)\n\n      Returns as output parameters the next interval by comparing the current system epoch time to the profile's start epoch time. If the profile has not been properly initialized, the call fails and returns -1, else it fills the output parameters and returns 0.\n\n      :param out timespec& start: epoch time when the next interval starts\n      :param out unsigned long long& bandwidth: bandwidth during the next interval\n      :param out unsigned long long& latency: latency value for the next interval\n      :rtype: int\n\n   .. method:: Delay (dataLen, sentTime)\n\n      Returns the amount of time the program has to wait before sending again. This is calculated using the input *dataLen* that was last transmitted at *sentTime*, and takes into account the current system time. \n\n      :param in unsigned long dataLen: size of the message that was last sent\n      :param in timespec sentTime: epoch time the message of length dataLen was sent\n      :rtype: double\n\n   .. method:: Initialized ( )\n\n      Returns true if the profile was properly initialized, false otherwise.\n      \n      :rtype: bool\n" }, { "alpha_fraction": 0.7454511523246765, "alphanum_fraction": 0.75614333152771, "avg_line_length": 43.420833587646484, "blob_id": "f61de4e06395dcb35b495c523a8558f50d702721", "content_id": "425ab1047e6dc4fba46d62953548a0836ae22349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10662, "license_type": "no_license", "max_line_length": 95, "num_lines": 240, "path": "/doc/src/background.rst", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Background: Network Performance Analysis\n========================================\n\nNetworking systems have been developed for over half a century and the\nanalysis of processing networks and communications networks began even\nearlier. As computing power has increased, the field of network\nperformance analysis at design-time has evolved into two main\nparadigms: (1) network performance testing of the applications and\nsystem to be deployed to determine performance and pitfalls, and (2)\nanalytical models and techniques to provide application network\nperformance guarantees based on those models. The first paradigm\ngenerally involves either arbitrarily precise network simulation,\nnetwork emulation, or sub-scale experiments on the actual system. The\nsecond paradigm focuses on formal models and methods for composing and\nanalyzing those models to derive performance predictions.\n\nWe focus on the second paradigm, using models for predicting network\nperformance at design-time. 
This focus comes mainly from the types of\nsystems to which we wish to apply our analysis: safety- or\nmission-critical distributed cyber-physical systems, such as\nsatellites, or autonomous vehicles. For such systems, resources come\nat a premium and design-time analysis must provide strict guarantees\nabout run-time performance and safety before the system is ever\ndeployed. \n\nFor such systems, probabilistic approaches do not provide high enough\nconfidence on performance predictions since they are based on\nstatistical models [Cruz1991_]. Therefore, we must use deterministic analysis\ntechniques to analyze these systems. \n\n.. _min_plus_calc:\n\nMin-Plus Calculus\n-----------------\n\nBecause our work and other work in the field, e.g. Network Calculus,\nis based on Min-Plus Calculus, or (min,+)-calculus, we will give a\nbrief overview of it here, adapted from [Thiran2001_].\n\nMin-plus calculus, :math:`(\\mathbb{R}\\cup\\{+\\infty\\},\\wedge,+)`, deals\nwith *wide-sense increasing functions* :\n\n.. math::\n   F=\\{f : \\mathbb{R}^+ \\rightarrow \\mathbb{R}^+, \\forall s \\leq t : f(s) \\leq f(t), f(0) = 0\\}\n\nwhich represent functions whose slopes are always :math:`\\geq 0`.\nIntuitively this makes sense for modeling network traffic, as data can\nonly ever be sent or not sent by the network; therefore, the cumulative\namount of data sent by the network as a function of time can only ever\nincrease or stagnate. A wide-sense increasing function can further be\nclassified as a sub-additive function if\n\n.. math::\n   \\forall s,t : f(s+t) \\leq f(s) + f(t)\n\nNote that if a function is concave with :math:`f(0)=0`, it is\nsub-additive, e.g. :math:`y=\\sqrt{x}`.\n\nThe main operations of min-plus calculus are the convolution and\ndeconvolution operations, which act on sub-additive functions.\nConvolution is a function of the form:\n\n.. math::\n   (f\\otimes g)(t)\\equiv inf_{\\{0\\leq s \\leq t\\}}\\{f(t-s)+g(s)\\}\n\nNote that if the functions :math:`f,g` are concave, this convolution\nsimplifies into the computation of the minimum:\n\n.. math::\n   (f\\otimes g)(t)=min(f,g)\n\nConvolution in min-plus calculus has the properties of\n\n* Closure: :math:`(f\\otimes g)(t) \\in F`,\n* Associativity,\n* Commutativity, and\n* Distributivity.\n\n.. _network_calc:\n\nNetwork Calculus\n----------------\n\nNetwork Calculus [Cruz1991_, Cruz1991a_, Thiran2001_] provides a\nmodeling and analysis paradigm for deterministically analyzing\nnetworking and queueing systems. Its roots come from the desire to\nanalyze network and queuing systems using similar techniques as\ntraditional electrical circuit systems, i.e. by analyzing the\n*convolution* of an *input* function with a *system* function to\nproduce an *output* function. Instead of the convolution mathematics\nfrom traditional systems theory, Network Calculus is based on the\nconcepts of *(min,+)-calculus*, introduced in the section above.\n\nBy using the concepts of *(min,+)-calculus*, Network Calculus provides\na way to model the application network requirements and system network\ncapacity as functions, not of time, but of *time-window size*. Such\napplication network requirements become a cumulative curve defined as\nthe *maximum arrival curve*. This curve represents the cumulative\namount of data that can be transmitted as a function of time-window\nsize. Similarly, the system network capacity becomes a cumulative\ncurve defined as the *minimum service curve*. 
These curves bound the\napplication requirements and system service capacity.\n\nNote that sub-additivity of functions is required to be able to define\nmeaningful constraints for network calculus, though realistically\nmodeled systems (in Network Calculus) will always have sub-additive\nfunctions to describe their network characteristics (e.g. data\nserviced or data produced). This sub-additivity comes from the\nsemantics of the modeling; since the models describe maximum data\nproduction or minimum service as functions of *time-windows*, maximum\ndata production over a longer time window must inherently encompass\nthe maximum data production of shorter time-windows.\n\n.. figure:: images/background/nc_arrival_curve.png\n   :align: center\n   :width: 400px\n\n   Network Calculus arrival curve (:math:`\\alpha`). Reprinted from\n   [Thiran2001_].\n\n.. figure:: images/background/nc_service_curve.png\n   :align: center\n   :width: 400px\n\n   Network Calculus service curve (:math:`\\beta`). Reprinted from\n   [Thiran2001_].\n\nNetwork calculus uses *(min,+)-calculus convolution* to compose the\napplication requirement curve with the system service curve. The\noutput of this convolution is the maximum data arrival curve for the\noutput flow from the node providing the service. By analyzing these\ncurves, bounds on the application's required buffer size and buffering\ndelay can be determined.\n\n.. figure:: images/background/nc_bounds.png\n   :align: center\n   :width: 400px\n\n   Schematic depiction of the buffer size (vertical difference) and\n   delay (horizontal difference) calculations in Network\n   Calculus. Reprinted from [Thiran2001_].\n\nWith these bounds and the convolution, developers can make\n*worst-case* performance predictions of the applications on the\nnetwork. These bounds are *worst-case* because the curves are\nfunctions of *time-window size*, instead of directly being functions\nof time. This distinction means that the worst service period\nprovided by the system is directly compared with the maximum data\nproduction period of the application. Clearly such a comparison can\nlead to over-estimating the buffer requirements if the application's\nmaximum data production does not occur during that period. \n\n.. _rtc:\n\nReal Time Calculus\n------------------\n\nReal-Time Calculus [Thiele2000_] builds from Network Calculus, Max-Plus Linear\nSystem Theory, and real-time scheduling to analyze systems which\nprovide computational or communications services. Unlike Network\nCalculus, Real-Time Calculus (RTC) is designed to analyze real-time\nscheduling and priority assignment in task service systems. The use\nof (max,+)-calculus in RTC allows specification and analysis not only\nof the arrival and service curves described above for Network\nCalculus, but of upper and lower arrival curves\n(:math:`\\alpha^u(\\Delta)` and :math:`\\alpha^l(\\Delta)`) and upper and\nlower service curves (:math:`\\beta^u(\\Delta)` and\n:math:`\\beta^l(\\Delta)`). These curves represent the minimum and\nmaximum computation requested and computation serviced, respectively.\nAn overview of RTC is shown below.\n\n.. figure:: images/background/rtc_overview.png\n   :align: center\n   :width: 400px\n\n   Overview of Real-Time Calculus' request, computation, and capacity\n   models.\n\n:math:`R(t)` is the request function that represents the amount of\ncomputation that has been requested up to time :math:`t`, with\nassociated minimum request curve, :math:`\\alpha`. 
:math:`R'(t)` is\nthe total amount of computation delivered up to time :math:`t`, with\nassociated delivered computation bound :math:`R_b(t)`. :math:`C` and\n:math:`C'` are the capacity function and remaining capacity functions\nwhich describe the total processing capacity under full load and the\nremaining processing capacity, respectively. :math:`C` and :math:`C'`\nare bounded by the delivery curve :math:`\\beta` and the remaining\ndelivery curve :math:`\\beta'`.\n\n\nRTC allows for the analysis of task scheduling systems by computing\nthe request curve for a task model which is represented as a directed\nacyclic graph (DAG), the task graph :math:`G(T)`. The graph's\nvertices represent subtasks and each have their own associated\nrequired computation time :math:`e(u)` and relative deadline\n:math:`d(u)` specifying that the task must be completed :math:`d(u)`\nunits of time after its triggering. Two vertices in :math:`G(T)` may\nbe connected by a directed edge :math:`(u,v)` which has an associated\nparameter :math:`p(u,v)` which specifies the minimum time that must\nelapse after the triggering of :math:`u` before :math:`v` can be\ntriggered. RTC develops from this specification the minimum\ncomputation request curve :math:`\\alpha_r` and the maximum computation\ndemand curve :math:`\\alpha_d`. Finally, the schedulability of a task\n:math:`T_i` is determined by the relation:\n\n.. math::\n   \\beta'(\\Delta)\\geq\\alpha^i_d(\\Delta)\\ \\ \\ \\forall\\Delta\n\nwhich, if satisfied, guarantees that task :math:`T_i` will meet all of\nits deadlines for a static priority scheduler where tasks are ordered\nwith decreasing priority. Note that the remaining delivery curve\n:math:`\\beta'(\\Delta)` is the capacity offered to task :math:`T_i`\nafter all tasks :math:`T_{1\\leq j<i}` have been processed. Similarly\nto Network Calculus, RTC provides analytical techniques for the\ncomputation of performance metrics such as computation backlog bounds:\n\n.. math::\n   \\text{backlog}\\leq sup_{\\{t\\geq0\\}}\\{\\alpha^u(t)-\\beta^l(t)\\}\n\nwhich is equivalent to the *network buffer bound* derived in Network\nCalculus.\n\n\n.. [Cruz1991] R. L. Cruz. A calculus for network delay-I: Network\n              elements in isolation. *IEEE Transactions on\n              Information Theory*, 37(1):114-131, 1991.\n\n.. [Cruz1991a] R. L. Cruz. A calculus for network delay-II: Network\n               analysis. *IEEE Transactions on Information Theory*,\n               37(1):132-141, 1991.\n\n.. [Thiran2001] J.-Y. Le Boudec and P. Thiran. *Network Calculus: A\n                Theory of Deterministic Queuing Systems for the\n                Internet*. Springer-Verlag, Berlin, Heidelberg, 2001.\n\n.. [Thiele2000] L. Thiele, S. Chakraborty, and M. Naedele. Real-time\n                calculus for scheduling hard real-time systems. 
In\n *ISCAS*, pages 101-104, 2000.\n\n" }, { "alpha_fraction": 0.5745614171028137, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 16.538461685180664, "blob_id": "17bbaccf44cc707ea4b837ba9c83fa8a889bf35c", "content_id": "de6e9b5e02b7d713629ec46924d752e764c88e5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 456, "license_type": "no_license", "max_line_length": 64, "num_lines": 26, "path": "/src/middleware/v2.0/Makefile", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "IDIR =./\nCC=g++\nCFLAGS=-I$(IDIR) -std=c++11\n\nLIBS=-lpthread\n\n_DEPS = *.hpp\nDEPS = $(patsubst %,$(IDIR)/%,$(_DEPS))\n\nall: client server tcWrapper\n\nclient: Client.cpp\n\t$(CC) -o ${@} $^ $(CFLAGS) $(LIBS)\n\nserver: Server.cpp\n\t$(CC) -o ${@} $^ $(CFLAGS) $(LIBS)\n\ntcWrapper: tcWrapper.cpp\n\t$(CC) -o ${@} $^ $(CFLAGS) $(LIBS)\n\n.PHONY: clean\n\nclean:\n\trm -f *~ core $(INCDIR)/*~ *.a *Output* server client tcWrapper\ntest:\n\tg++ recursive.cpp -o recursive -std=c++11\n" }, { "alpha_fraction": 0.6364089846611023, "alphanum_fraction": 0.6538653373718262, "avg_line_length": 25.733333587646484, "blob_id": "12c965ec0a60abe85f3cdb39a323a4033eea0a51", "content_id": "7f182a449403b97862d3623257a5504e20b718a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2005, "license_type": "no_license", "max_line_length": 66, "num_lines": 75, "path": "/src/middleware/v2.0/Server.cpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#include \"Server.hpp\"\n\nint main(int argc, char **argv) {\n Options options;\n if ( options.Parse(argc,argv) == -1 )\n return -1;\n options.Print();\n\n std::string outputFile = options.outputFile;\n\n long messageBitLength = options.bitLength;\n long messageStrLength = ceil((double)messageBitLength/8.0f);\n char *messageData = new char[messageStrLength+2];\n\n Network::NetworkProfile profile;\n std::string profileFile = options.tgFile; \n if ( profile.initializeFromFile(profileFile.c_str()) != 0 ) {\n TG_LOG(\"ERROR: couldn't initialize TG profile!\\n\");\n return -1;\n }\n\n Connection* interface;\n if ( options.ip.find(\".\") != std::string::npos )\n interface = new IPV4_Connection();\n else\n interface = new IPV6_Connection();\n interface->serverIP = options.ip;\n interface->serverPort = options.port;\n if ( interface->Initialize(true) != 0 ) {\n TG_LOG(\"ERROR: Couldn't initialize interface!\\n\");\n return -1;\n }\n\n double timerDelay = 0;\n timespec timeout, remaining;\n long id = 0;\n\n std::string fStr = Network::header(1);\n std::ofstream file(outputFile.c_str());\n if (!file.is_open())\n return -1;\n file << fStr;\n file.close();\n\n while ( true ) {\n memset(messageData,0,messageStrLength+2);\n\n if ( interface->Receive(messageData,messageStrLength) > 0 ) {\n\n Network::Message msg;\n long id = atol(messageData);\n\n if ( id >=0 ) {\n\tmsg.TimeStamp();\n\tmsg.Id(id);\n\tmsg.Bytes( strlen(messageData) +\n\t\t Network::ipv4_header_bytes +\n\t\t Network::ipv4_route_bytes +\n\t\t Network::ipv4_header_padding_bytes +\n\t\t Network::udp_header_bytes );\n\tNetwork::append_data(outputFile.c_str(), msg);\n }\n\n timerDelay = profile.Delay(msg.Bits(),msg.FirstEpochTime());\n if ( timerDelay > 0 ) {\n\tdouble fractpart,intpart;\n\tfractpart = modf(timerDelay,&intpart);\n\ttimeout.tv_sec = (unsigned long long)(intpart);\n\ttimeout.tv_nsec = (unsigned long)(fractpart*1000000000.0);\n\tint return_code = nanosleep (&timeout, &remaining);\n 
}\n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.44736841320991516, "alphanum_fraction": 0.44736841320991516, "avg_line_length": 18, "blob_id": "e6c8177e269103b01d18a64fed94fca3dd4ccd92", "content_id": "96b6cabc17d7d77097298c64e2a8f8ff14819599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 38, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/doc/api/middleware/network-middleware.rst", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "Network Middleware\n==================\n" }, { "alpha_fraction": 0.5689851641654968, "alphanum_fraction": 0.5860889554023743, "avg_line_length": 18.634328842163086, "blob_id": "32ed5fc61b02fe94a585a2933259b5d35851ce22", "content_id": "4dbc70b096283205fd05dd944a5ccfaf405e25d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 87, "num_lines": 134, "path": "/src/middleware/v1.0/Message.hpp", "repo_name": "finger563/cbsat", "src_encoding": "UTF-8", "text": "#ifndef MESSAGE_HPP\n#define MESSAGE_HPP\n\n#include <string.h>\n\n#include <stdlib.h>\n#include <unistd.h>\n#include <stdio.h>\n#include <signal.h>\n#include <time.h>\n#include <math.h>\n#include <algorithm> // std::swap\n#include <memory>\n#include <vector>\n#include <iomanip>\n#include <streambuf>\n#include <fstream>\n\nclass Message {\npublic: \n std::string buffer;\n long bits;\n long id;\n std::vector<timespec> times;\n Message ()\n : bits(0),\n id(0)\n {\n }\n\n Message ( long len, long i )\n : bits (len),\n id(i)\n {\n buffer = std::string(this->Bytes()+2,'A');\n long templen = 256;\n char temp[templen];\n memset(temp,0,templen);\n sprintf(temp,\"%lu\",id);\n templen = strlen(temp);\n if (templen <= this->Bytes())\n std::copy(temp,temp+templen, buffer.begin());\n else\n std::copy(temp,temp+this->Bytes(),buffer.begin());\n }\n\n Message (const Message &s)\n : bits(s.bits),\n id(s.id),\n buffer(s.buffer),\n times (s.times)\n {\n }\n\n Message & operator= (const Message &s)\n { \n if (&s != this)\n {\n\tMessage tmp (s);\n\tswap (tmp);\n }\n return *this;\n }\n\n void swap (Message &s)\n {\n std::swap (bits, s.bits);\n std::swap (id, s.id);\n std::swap (buffer, s.buffer);\n std::swap (times, s.times);\n }\n\n void TimeStamp() { \n timespec time;\n clock_gettime(CLOCK_REALTIME, &time);\n times.push_back(time);\n }\n\n std::string Buffer() const {\n return buffer;\n }\n\n std::vector<double> DoubleTimes() {\n std::vector<double> retTimes;\n double time = 0;\n for (int i=0;i<times.size();i++) {\n time = (double)times[i].tv_sec + ((double)times[i].tv_nsec)/(double)1000000000.0;\n retTimes.push_back( time );\n }\n return retTimes;\n }\n\n double FirstDoubleTime() {\n double retTime = \n (double)times.front().tv_sec + \n ((double)times.front().tv_nsec)/(double)1000000000.0;\n return retTime;\n }\n\n double LastDoubleTime() {\n double retTime = \n (double)times.back().tv_sec + \n ((double)times.back().tv_nsec)/(double)1000000000.0;\n return retTime;\n }\n\n std::vector<timespec> EpochTimes() {\n std::vector<timespec> retTimes(times);\n return retTimes;\n }\n\n timespec FirstEpochTime() {\n timespec retTime = times.front();\n return retTime;\n }\n\n timespec LastEpochTime() {\n timespec retTime = times.back();\n return retTime;\n }\n\n void Clear() { buffer.clear(); }\n\n long Id() { return id; }\n void Id(long i) { id = i; }\n\n long Bits() { return bits; }\n void Bits(long b) { bits = 
b; }\n\n long Bytes() { return ceil((double)bits/8.0f); }\n void Bytes(long B) { bits = B*8; }\n};\n\n#endif\n" } ]
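The convolution equation labeled :eq:`convolution` in the math.txt write-up above maps almost directly onto code. The following is a minimal illustrative sketch and is not part of the repository: the profile values are invented, the profiles are assumed to be integer-sampled cumulative curves (data vs. time), and the delay helper assumes every unit of requested data is eventually transmitted within the analyzed window.

```python
def convolve(r, p):
    """l[t] = min(r[t], p[t] - (p[t-1] - l[t-1])), taking p[-1] = l[-1] = 0."""
    l = [min(r[0], p[0])]
    for t in range(1, len(r)):
        l.append(min(r[t], p[t] - (p[t - 1] - l[t - 1])))
    return l

def buffer_bound(r, l):
    """sup{r[t] - l[t]}: the largest backlog of generated-but-unsent data."""
    return max(rt - lt for rt, lt in zip(r, l))

def delay_bound(r, l):
    """sup{l^-1[y] - r^-1[y]}: the largest horizontal gap between the curves."""
    def first_time_reaching(curve, y):
        return next(t for t, v in enumerate(curve) if v >= y)
    return max(first_time_reaching(l, rt) - t for t, rt in enumerate(r))

r = [2, 4, 6, 8, 10, 12]     # application produces 2 units per tick
p = [5, 10, 11, 12, 17, 22]  # link capacity dips in the middle
l = convolve(r, p)
print(l, buffer_bound(r, l), delay_bound(r, l))
```

On these sample profiles the transmitted-link profile comes out as [2, 4, 5, 6, 10, 12], giving a required buffer of 2 units (the vertical difference) and a worst-case delay of 1 tick (the horizontal difference), matching the definitions in the equations above.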
62
Stefan566/Reinforcement_Learning
https://github.com/Stefan566/Reinforcement_Learning
79c55cff84c892a7d318edc7ebb6d334514df507
f95aa5866bc47177f7fe11b2d65a809f879c2e43
dd94f675c109c6d25a6ab62bca50850353cec02e
refs/heads/master
2020-08-27T10:16:57.673650
2019-10-24T15:25:47
2019-10-24T15:25:47
217,330,893
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6036961078643799, "alphanum_fraction": 0.6201232075691223, "avg_line_length": 33.78571319580078, "blob_id": "15ab93eea9334ec513c86f0014905f3af4f73242", "content_id": "d7e5e2b7922fc44d6283980c894cf919a612e99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1948, "license_type": "no_license", "max_line_length": 100, "num_lines": 56, "path": "/dqnagent.py", "repo_name": "Stefan566/Reinforcement_Learning", "src_encoding": "UTF-8", "text": "import numpy as np \nimport random\nfrom collections import deque\nimport tensorflow as tf \nfrom keras.models import Sequential\nfrom keras.layers import Dense \nfrom keras.optimizers import Adam\n\nclass DQNagent:\n\n def __init__(self, state_size, action_size):\n self.learning_rate = 0.001\n self.action_size = action_size\n self.state_size = state_size\n\n self.eps = 1.0\n\n self.memory = deque(maxlen=2000)\n self.model = self.create_model(self.state_size, self.action_size, self.learning_rate)\n self.target_model = self.create_model(self.state_size, self.action_size, self.learning_rate)\n self.target_model.set_weights(self.model.get_weights())\n\n def create_model(self, ss, acts, lr):\n model = Sequential()\n model.add(Dense(24,input_dim=ss, activation='relu'))\n model.add(Dense(24,activation='relu'))\n model.add(Dense(acts, activation='linear'))\n # model.add(Dense(activation='linear'))\n model.compile(loss='mse',optimizer=Adam(lr=lr))\n\n return model \n\n def act(self, st):\n if np.random.rand() <= self.eps:\n return random.randrange(self.action_size)\n act_values = self.model.predict(st)\n return np.argmax(act_values[0])\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def replay(self, bs):\n gamma = 0.95\n eps_min = 0.01\n eps_decay = 0.995\n\n mb = random.sample(self.memory, bs)\n for state, action, reward, next_state, done in mb:\n target = reward\n if not done:\n target = reward + gamma * np.amax(self.model.predict(next_state)[0])\n target2 = self.model.predict(state)\n target2[0][action] = target\n self.model.fit(state, target2, epochs=1, verbose=0)\n if self.eps > eps_min:\n self.eps *= eps_decay\n" }, { "alpha_fraction": 0.5862857103347778, "alphanum_fraction": 0.6045714020729065, "avg_line_length": 26.375, "blob_id": "2f4e399f5ad37332f7319354fbe9241a13836cc3", "content_id": "369f9cac2c71272102160db92087d901b9672a48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "no_license", "max_line_length": 97, "num_lines": 32, "path": "/app.py", "repo_name": "Stefan566/Reinforcement_Learning", "src_encoding": "UTF-8", "text": "import numpy as np \nimport random\n\nfrom dqnagent import DQNagent\nimport gym\n\nepisodes = 1000\nbatch_size = 32\n\nenv = gym.make('CartPole-v0')\nstate_size = env.observation_space.shape[0]\naction_size = env.action_space.n \nagent = DQNagent(state_size, action_size)\ndone = False\n\nfor ep in range(episodes):\n state = env.reset()\n state = np.reshape(state, [1, state_size])\n\n for t in range(500):\n env.render()\n ac = agent.act(state)\n next_state, reward, done, _ = env.step(ac)\n if done:\n reward = -10\n next_state = np.reshape(next_state, [1, state_size])\n agent.remember(state, ac, reward, next_state, done)\n if done:\n print(\"Episode: {}/{}, Score: {}, Epsilon: {:.2}\".format(ep, episodes, t, agent.eps))\n break\n if len(agent.memory) > batch_size:\n 
agent.replay(batch_size)" } ]
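One detail worth flagging in dqnagent.py above: `target_model` is built and synchronized once in `__init__`, but `replay()` computes its bootstrap targets from the online `self.model`, so the target network is never used or refreshed afterwards. A common DQN refinement is to bootstrap from the frozen target network and re-sync it periodically. The sketch below is illustrative, not the author's code; the helper name, the `step` counter, and the `sync_every` default are assumptions.

```python
import random
import numpy as np

def replay_with_target(agent, batch_size, step, sync_every=100, gamma=0.95):
    """Variant of DQNagent.replay() that actually uses the target network."""
    minibatch = random.sample(agent.memory, batch_size)
    for state, action, reward, next_state, done in minibatch:
        target = reward
        if not done:
            # bootstrap from the frozen target network, not the online model
            target = reward + gamma * np.amax(
                agent.target_model.predict(next_state)[0])
        q_values = agent.model.predict(state)
        q_values[0][action] = target
        agent.model.fit(state, q_values, epochs=1, verbose=0)
    if step % sync_every == 0:
        # periodically copy the online weights into the target network
        agent.target_model.set_weights(agent.model.get_weights())
```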
2
lisahas/rotm
https://github.com/lisahas/rotm
19de53664433086fe15ee2ecc2d343ba8ab94f23
79e1837e5e159b0190ceecc9a0b53f43567a0c70
7e9e5ba49af12ec3274ca890b6adf7ef60ad926f
refs/heads/master
2020-04-05T23:22:04.848546
2014-01-20T09:16:41
2014-01-20T09:16:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6410126686096191, "alphanum_fraction": 0.6516455411911011, "avg_line_length": 35.574073791503906, "blob_id": "26f22449d63b43b5a4ec99eb7129c20fddac79a3", "content_id": "a26270c33f48155367445fa6cad8dd38087eb7bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1975, "license_type": "no_license", "max_line_length": 107, "num_lines": 54, "path": "/apps/microsite/views.py", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom models import Artist, Event, Page, MenuItem\nfrom django.http import HttpResponse\n\n\ndef artist(request, artist_id):\n artist = get_object_or_404(Artist, pk=artist_id)\n page_name = artist.title\n current_menu = menu('')\n context = {'artist' : artist, 'menu' : current_menu, 'page_name' : page_name}\n return render(request, 'microsite/artist.html', context)\n\ndef event(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n page_name = event.title\n current_menu = menu('')\n if event.event_image :\n hasImage = 1\n else :\n hasImage = 0\n context = {'event' : event, 'menu' : current_menu, 'page_name' : page_name, 'has_image' : hasImage}\n return render(request, 'microsite/event.html', context)\n\ndef page(request, page_id=0):\n #page 0 is the front page, page id 1 is always front\n if page_id == 0 :\n page_id = 1\n page = get_object_or_404(Page, pk=page_id)\n page_name = page.title\n current_menu = menu(page_name)\n if page.page_image :\n hasImage = 1\n else :\n hasImage = 0\n context = {'page' : page, 'menu' : current_menu, 'page_name' : page_name, 'has_image' : hasImage}\n return render(request, 'microsite/page.html', context)\n\ndef listing(request, list_name):\n if list_name == 'event':\n listing = Event.objects.all().order_by('event_date')\n page_name = 'Events'\n if list_name == 'artist' :\n listing = Artist.objects.all().order_by('weight')\n page_name = 'Artists'\n current_menu = menu(page_name)\n context = {'listing': listing, 'list_name' : list_name, 'menu' : current_menu, 'page_name' : page_name}\n return render(request, 'microsite/listing.html', context)\n\ndef menu(current_page):\n #todo add order by weight\n menus = MenuItem.objects.all()\n return menus\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.71875, "avg_line_length": 7, "blob_id": "190e27f1116a5481af1d7974580f290dc00ffd07", "content_id": "cfb862af50dd5056dea107c0c45284cdf47af03f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/README.md", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "rotm\n====\n\nexhibition microsite\n" }, { "alpha_fraction": 0.5874524712562561, "alphanum_fraction": 0.5931559205055237, "avg_line_length": 31.9375, "blob_id": "06e109deda18d24dc90b8bde1e31e2bc02bf7eaa", "content_id": "3a6a9e7ab423b751e92a1c3571dbd79f4be67fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/apps/microsite/urls.py", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns('',\n # ex: /microsite/\n url(r'^$', views.page, name='page'),\n # 
ex: /microsite/5\n url(r'^(?P<page_id>\\d+)$', views.page, name='page'),\n # ex: /microsite/2/event\n url(r'^(?P<event_id>\\d+)/event$', views.event, name='event'),\n # ex: /microsite/3/artist\n url(r'^(?P<artist_id>\\d+)/artist$', views.artist, name='artist'),\n #ex: /microsite/event/list\n url(r'^(?P<list_name>\\w+)/list/$', views.listing, name='listing'),\n\n)" }, { "alpha_fraction": 0.7117347121238708, "alphanum_fraction": 0.7155612111091614, "avg_line_length": 31.66666603088379, "blob_id": "2f29f53c867b22b5ece9cf0bd3e7004d92248460", "content_id": "4e575fd76e8b472b709427c79c79fc5d67a69833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 86, "num_lines": 24, "path": "/apps/microsite/admin.py", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.forms import TextInput\nfrom django.db import models\n\n\n# Register your models here.\nfrom microsite.models import Page, Event, Artist, MenuItem\n\nclass PageAdmin(admin.ModelAdmin):\n change_form_template = 'microsite/admin/change_form.html'\n \nclass EventAdmin(admin.ModelAdmin):\n change_form_template = 'microsite/admin/change_form.html'\n formfield_overrides = {\n models.CharField: {'widget': TextInput(attrs={'size':200})}\n }\n \nclass ArtistAdmin(admin.ModelAdmin):\n change_form_template = 'microsite/admin/change_form.html'\n\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Artist, ArtistAdmin)\nadmin.site.register(MenuItem)\n" }, { "alpha_fraction": 0.7047872543334961, "alphanum_fraction": 0.7047872543334961, "avg_line_length": 27.923076629638672, "blob_id": "66529903e1ebd2c05c5a7c22b24c99cf6d84ca6d", "content_id": "6b5b6f3d75395356c1192c99345cad852958012a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 74, "num_lines": 13, "path": "/apps/rotm/urls.py", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nfrom microsite import views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n #make the microsite url appear at the root\n url(r'^$', views.page, name='page'),\n url(r'^microsite/', include('microsite.urls', namespace='microsite')),\n url(r'^admin/', include(admin.site.urls)),\n)\n" }, { "alpha_fraction": 0.665709376335144, "alphanum_fraction": 0.6823664307594299, "avg_line_length": 37.599998474121094, "blob_id": "0c71fe22cc0c643f79f65d23ac5ec35668f0ffa8", "content_id": "e3604e620d5edaabc7e9b4c5978623d9ed23bfe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 105, "num_lines": 45, "path": "/apps/microsite/models.py", "repo_name": "lisahas/rotm", "src_encoding": "UTF-8", "text": "# Create your models here.\nfrom django.db import models\nfrom django.utils import timezone\nimport datetime\n\n\nclass Page(models.Model):\n title = models.CharField(max_length=200)\n body = models.TextField()\n page_image = models.ImageField(upload_to= 'images', height_field=None, width_field=None, blank=True)\n def __unicode__(self): # Python 3: def __str__(self):\n return self.title\n\nclass Event(models.Model):\n title = models.CharField(max_length=200)\n description = models.TextField()\n short_description = 
models.CharField(max_length=500)\n    event_date = models.DateTimeField('Event date and time')\n    booking_link = models.CharField(max_length=300, blank=True)\n    location = models.TextField()\n    event_image = models.ImageField(upload_to= 'images', height_field=None, width_field=None, blank=True)\n    weight = models.IntegerField()\n    def hasImage(self):\n        return bool(self.event_image)\n    def __unicode__(self):\n        return self.title\n    \nclass Artist(models.Model):\n    title = models.CharField(max_length=200)\n    short_description = models.CharField(max_length=500)\n    bio = models.TextField()\n    work_title = models.CharField(max_length=200)\n    work_description = models.TextField()\n    artist_image = models.ImageField(upload_to= 'images', height_field=None, width_field=None)\n    weight = models.IntegerField()\n    def __unicode__(self): # Python 3: def __str__(self):\n        return self.title + ' - ' + self.work_title\n    \nclass MenuItem(models.Model):\n    title = models.CharField(max_length=200)\n    link = models.CharField(max_length=200)\n    weight = models.IntegerField()\n    def __unicode__(self):\n        #return a formatted link\n        return self.title\n    " } ]
6
plangarcsilla/Hazifeladat
https://github.com/plangarcsilla/Hazifeladat
790528e6594fe8681f8baa12f661788ecfce6df1
00fc6380773b45b00248c07fdd9178f5f0a377ab
cab6932e0c7ace79b5e5add0e06092da1e632cc8
refs/heads/master
2021-04-29T12:25:46.275545
2018-03-27T20:51:26
2018-03-27T20:51:26
121,730,171
0
0
null
2018-02-16T08:46:41
2018-03-27T20:50:31
2018-03-27T20:51:26
Python
[ { "alpha_fraction": 0.4397495985031128, "alphanum_fraction": 0.471048504114151, "avg_line_length": 17.42424201965332, "blob_id": "e448ae9c551988a3becc4a68b959c8fbf64b588e", "content_id": "6b5bb6f73d4414d7aa235918801e5554f08b49af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 48, "num_lines": 33, "path": "/Beadandó2.py", "repo_name": "plangarcsilla/Hazifeladat", "src_encoding": "UTF-8", "text": "\"\"\"def feladat10(fajl_nev):\r\n fajl=open(fajl_nev,mode=\"r\")\r\n max=0\r\n for sor in fajl:\r\n if (sor.[0].isupper() and len(sor)>max):\r\n max=len(sor)\r\n print(max)\r\n fajl.close()\r\n\r\ndef main:\r\n feladat10(\"be.txt\")\r\nif __name__ == '__main__':\r\n main()\"\"\"\r\n\r\n\r\ndef feladat5_osztok(szam):\r\n db=2\r\n for i in range(2, szam//2+1):\r\n if szam%i==0:\r\n db=db+1\r\n return db\r\n\r\ndef eros(n):\r\n max=1\r\n for i in range(2,n+1):\r\n if max<feladat5_osztok(i):\r\n max=feladat5_osztok(i)\r\n print(i)\r\n\r\ndef main():\r\n eros(12)\r\nif __name__ == '__main__':\r\n main()" }, { "alpha_fraction": 0.4173842966556549, "alphanum_fraction": 0.4451022744178772, "avg_line_length": 17.871658325195312, "blob_id": "b34532c8351d3506c1ef2d6f4d3865b1a88c1ae3", "content_id": "fbef6cb8e0c8ab42b2ab023b93d7d8db57b0b477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3735, "license_type": "no_license", "max_line_length": 81, "num_lines": 187, "path": "/Beadandó3.py", "repo_name": "plangarcsilla/Hazifeladat", "src_encoding": "UTF-8", "text": "import numpy as np\r\ndef feladat1():\r\n n=0\r\n s=1\r\n n=int(input(\"Add meg az n: \"))\r\n szam = int(input(\"Szám: \"))\r\n ertek=szam\r\n while s<=n-1:\r\n szam=int(input(\"Szám: \"))\r\n if ertek==szam:\r\n sorszam=s+1\r\n elozoszam=s\r\n\r\n print(\"a {0}. szám egyenlő a {1}. számmal\".format(elozoszam,sorszam))\r\n s+=1\r\n ertek=szam\r\n\r\ndef main():\r\n feladat1()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat2():\r\n n=5\r\n i=1\r\n szam=0\r\n paros=0\r\n paratlan=0\r\n while i<=n:\r\n szam=int(input(\"Adj egy számot: \"))\r\n\r\n if szam%2==0:\r\n paros+=1\r\n else:\r\n paratlan+=1\r\n i+=1\r\n print(\"{0}:{1} páros : páratlan \".format(paros,paratlan))\r\n\r\ndef main():\r\n feladat2()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat3():\r\n n=int(input(\"N:\"))\r\n i=0\r\n szum=0\r\n li=[]\r\n while i<=n:\r\n szam=int(input(\"Adj egy számot: \"))\r\n\r\n li.append(szam)\r\n i+=1\r\n for i in li:\r\n szum += abs(i)\r\n print( szum/n)\r\n\r\ndef main():\r\n feladat3()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat4():\r\n szum= 0\r\n k=1\r\n i=1\r\n negativ=0\r\n n=int(input(\"N: \"))\r\n while i <= n:\r\n szam=int(input(\"Szám: \"))\r\n if szam>0:\r\n k*=szam\r\n else:\r\n szum+=szam\r\n negativ+=1\r\n i+=1\r\n print(\"Szorzat: {0}.Számtani középarányos: {1}. 
\".format(k,szum/negativ))\r\n\r\ndef main():\r\n feladat4()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat5():\r\n n=int(input(\"N: \"))\r\n i=1\r\n li=[]\r\n sum=0\r\n while i <=n:\r\n szam=int(input(\"Szám: \"))\r\n li.append(szam)\r\n i+=1\r\n szorzat=1\r\n for i in li:\r\n if i<7:\r\n szorzat*=i\r\n if szam>10:\r\n sum+=i\r\n print(\"Szorzat{0] összeg: {1}\".format(szorzat,sum))\r\n\r\ndef main():\r\n feladat5()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat6():\r\n szam1=int(input(\"Szám: \"))\r\n szam2=int(input(\"Szám: \"))\r\n\r\n\r\n while True:\r\n sum=szam1+szam2\r\n szam3=int(input(\"Szám: \"))\r\n if szam3==0:\r\n break\r\n\r\n if szam3==sum:\r\n print(\"egyeznek\" ,szam3)\r\n s=szam2\r\n szam1=szam2\r\n szam2=szam3\r\n\r\ndef main():\r\n feladat6()\r\nif __name__=='__main__':\r\n main()\r\n\r\n\r\ndef feladat9(m,n):\r\n mar=m%n\r\n if mar==0:\r\n return n\r\n else:\r\n return feladat9(n,mar)\r\n\r\ndef rel_prim(t,n):\r\n for i in range(0,n-1):\r\n for j in range(i+1,n):\r\n if feladat9(t[i],t[j]!=1):\r\n return False\r\n return True\r\n\r\ndef main():\r\n n=int(input())\r\n tomb=np.empty(n,dtype='int')\r\n\r\n for i in range(0,n):\r\n tomb[i]=int(input())\r\n\r\n print(rel_prim(tomb,n))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef zhfeladat_ascii(n,m):\r\n zhfeladat_ascii=np.zeros(n,m)\r\n\r\n for i in range(0,n):\r\n for j in range(0,m):\r\n if i==0 or i==n//2 or i==n-1:\r\n zhfeladat_ascii[i][j]=42\r\n else:\r\n zhfeladat_ascii[i][j]=32\r\n zhfeladat_ascii[i][0]==42\r\n if i>n//2:\r\n zhfeladat_ascii[i][m-1]=42\r\n\r\n for i in range(0,n):\r\n for j in range(0,m):\r\n print(chr(int(zhfeladat_ascii[i][j])),end=' ')\r\n print('\\n')\r\n\r\ndef main():\r\n esetek=int(input())\r\n t=np.zeros(2*esetek,dtype='int')\r\n for i in range(0,2*esetek):\r\n sor=input()\r\n sor=sor.strip()\r\n sor=sor.split()\r\n t[i]=int(sor[0])\r\n t[i+1]=int(sor[1])\r\n\r\n for i in range(0,2*esetek,2):\r\n print(zhfeladat_ascii(t[i],t[i+1]))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.75, "avg_line_length": 13, "blob_id": "dd79c95b87cd7971d02fb62c23ede0aca30c64ae", "content_id": "96dc73b73d54b442916ea2782f2a1cc7096da11b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "plangarcsilla/Hazifeladat", "src_encoding": "UTF-8", "text": "# Hazifeladat\nPlangár Csilla IK234P\n" }, { "alpha_fraction": 0.4258517026901245, "alphanum_fraction": 0.4628145098686218, "avg_line_length": 17.568628311157227, "blob_id": "ef4ad243580715f18d8d735582ce7601b7d75388", "content_id": "0261739ee02c2d99b64285649dc87acbe689a99d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9067, "license_type": "no_license", "max_line_length": 95, "num_lines": 459, "path": "/Beadandó.py", "repo_name": "plangarcsilla/Hazifeladat", "src_encoding": "UTF-8", "text": "def feladat1(a,b):\r\n a=a+b\r\n b=a-b\r\n a=a-b\r\n print(a,b)\r\n\r\ndef main():\r\n feladat1(12,-7)\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat3(x):\r\n if x>-2 and x<0:\r\n return 2*x\r\n elif x>=0 and x<2:\r\n return x*x\r\n elif x>2:\r\n return 10\r\n else:\r\n print(\"Nem értelmezhető!\")\r\n return\r\n\r\ndef main():\r\n print(feladat3(10))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat4(a, b, c):\r\n return 
min(a, b, c), max(abs(a), abs(b), abs(c))\r\n\r\ndef main():\r\n    feladat4(2, 3, 4)\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat5(a,b,c,d):\r\n    if d>=0:\r\n        print(a,c,b,d)\r\n    else:\r\n        print(a,b,d,c)\r\n\r\ndef main():\r\n    feladat5(2,3,4,5)\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\nimport math as mt\r\ndef feladat6():\r\n    while True:\r\n        a=float(input(\"A háromszög oldalai: \"))\r\n        b=float(input(\"A háromszög oldalai: \"))\r\n        c=float(input(\"A háromszög oldalai: \"))\r\n        if a<=0 or b<=0 or c<=0:\r\n            print(\"Nem megfelelő adatok!\")\r\n        else:\r\n            break\r\n    if a+b>c and a+c>b and b+c>a:\r\n        p=(a+b+c)/2\r\n        T=mt.sqrt(p*(p-a)*(p-b)*(p-c))\r\n        r=T/p\r\n        R=a*b*c/(4*T)\r\n        print(\"R=%2.2f és r=%.2f\" % (R,r))\r\n    else:\r\n        print(\"Nem értelmezhető!\")\r\n\r\ndef main():\r\n    print(feladat6())\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat7():\r\n    a=float(input(\"A téglalap egyik oldala méterben: \"))\r\n    b=float(input(\"A téglalap egyik oldala méterben: \"))\r\n    c=float(input(\"A drót hossza méterben: \"))\r\n    if a<=0 or b<=0 or c<=0:\r\n        print(\"Nem megfelelő adatok!\")\r\n    else:\r\n        K=(a+b)*2\r\n        s=c-K\r\n        print(K,s)\r\n\r\ndef main():\r\n    feladat7()\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat10(a,b):\r\n    db=0\r\n    if a>0 and b>0:\r\n        if a>=b:\r\n            s=a-b\r\n        else:\r\n            s=b-a\r\n        if s%4==0 and s%100!=0:\r\n            db+=1\r\n    print(db)\r\n\r\ndef main():\r\n    print(feladat10(1997,1990))\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat12():\r\n    a=float(input(\"A dolgozat max pontszáma: \"))\r\n    b=float(input(\"A dolgozat eredménye: \"))\r\n    s=b/a*100\r\n    if s>=60:\r\n        print(\"Sikeres dolgozat!\")\r\n    else:\r\n        print(\"Sikertelen dolgozat!\")\r\n\r\ndef main():\r\n    feladat12()\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat13():\r\n    a=int(input(\"Add meg az érdemjegyet!: \"))\r\n    if a==1:\r\n        print(\"Elégtelen\")\r\n    elif a==2:\r\n        print(\"Elégséges\")\r\n    elif a==3:\r\n        print(\"Közepes\")\r\n    elif a==4:\r\n        print(\"Jó\")\r\n    elif a==5:\r\n        print(\"Jeles\")\r\n    else:\r\n        print(\"Nem értelmezhető input!\")\r\n\r\ndef main():\r\n    feladat13()\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat14():\r\n    a=int(input(\"A hónap sorszáma: \"))\r\n    if a==1:\r\n        print(\"Január\")\r\n    elif a==2:\r\n        print(\"Február\")\r\n    elif a==3:\r\n        print(\"Március\")\r\n    elif a==4:\r\n        print(\"Április\")\r\n    elif a==5:\r\n        print(\"Május\")\r\n    elif a==6:\r\n        print(\"Június\")\r\n    elif a==7:\r\n        print(\"Július\")\r\n    elif a==8:\r\n        print(\"Augusztus\")\r\n    elif a==9:\r\n        print(\"Szeptember\")\r\n    elif a==10:\r\n        print(\"Október\")\r\n    elif a==11:\r\n        print(\"November\")\r\n    elif a==12:\r\n        print(\"December\")\r\n    else:\r\n        print(\"Nem értelmezhető input\")\r\n\r\ndef main():\r\n    feladat14()\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat15(a,b,hanyados):\r\n    if a>=b:\r\n        hanyados=hanyados+1\r\n        a=a-b\r\n    else:\r\n        print(\"Nem értelmezhető!\")\r\n    return a\r\n\r\ndef main():\r\n    print(feladat15(600,400,100))\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat16(a,b):\r\n    while True:\r\n        r=a%b\r\n        a=b\r\n        b=r\r\n        if r==0:\r\n            break\r\n    return a\r\n\r\ndef main():\r\n    print(feladat16(360,225))\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat17(n):\r\n    masolat=n\r\n    uj_szam=0\r\n    while n!=0:\r\n        szj=n%10\r\n        uj_szam=uj_szam*10+szj\r\n        n=n//10\r\n    return uj_szam==masolat\r\n\r\ndef main():\r\n    print(feladat17(333))\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\ndef feladat19(n):\r\n    
n=int(input(\"Szám: \"))\r\n prim=True\r\n if n==1:\r\n prim=False\r\n for i in range(2,n):\r\n if n%i==0:\r\n prim=False\r\n break\r\n if prim:\r\n print(\"Prímszám\")\r\n else:\r\n print(\"Nem prímszám\")\r\n\r\ndef main():\r\n feladat19(10)\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat20(n):\r\n a=1\r\n b=1\r\n if n==1:\r\n print(a, end=\" \")\r\n elif n==2:\r\n print(a,b, end=\" \")\r\n else:\r\n c=a+b\r\n print(a,b,c, end=\" \")\r\n k=3\r\n while k<n:\r\n a=b\r\n b=c\r\n c=a+b\r\n print(c, end=\" \")\r\n k+=1\r\n\r\ndef main():\r\n feladat20(10)\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat21(n):\r\n ujszam=0\r\n while n!=0:\r\n maradek=n%10\r\n ujszam=ujszam*10+maradek\r\n n=n/10\r\n return ujszam\r\n\r\ndef main():\r\n print(feladat21(7532))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat22(a,b,c):\r\n c=1\r\n while b>0:\r\n if b%2==1:\r\n c=c*a\r\n b=b-1\r\n else:\r\n a=a*a\r\n b=[b/2]\r\n return c\r\n\r\ndef main():\r\n feladat22(2,3,4)\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat25():\r\n a=input(\"Add meg az ország lakosainak számát ezer főben: \")\r\n b=input(\"Add meg az ország területét négyzetkilométerben:\")\r\n c=a%b*1000\r\n if c<50:\r\n print(\"Ritkán lakott\")\r\n elif c>=50 and c<300:\r\n print(\"Álagos népsűrűségű\")\r\n else:\r\n print(\"Sűrűn lakott\")\r\n\r\ndef main():\r\n feladat25()\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat27():\r\n szam=int(input())\r\n poz=szam>0\r\n pozdb=0\r\n negdb=0\r\n if szam==0:\r\n return [0,0]\r\n if poz:\r\n pozdb=pozdb+1\r\n else:\r\n negdb=negdb+1\r\n while True:\r\n szam=int(input())\r\n if szam==0:\r\n break\r\n if poz:\r\n pozdb=pozdb+1\r\n if szam>0:\r\n break\r\n else:\r\n negdb=negdb+1\r\n if szam<0:\r\n break\r\n poz=szam>0\r\n return [pozdb,negdb]\r\n\r\ndef main():\r\n print(feladat27())\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat28(n):\r\n legnagyobb=0\r\n for i in range(2,n-1):\r\n negyzetsz=i*i\r\n if negyzetsz>=n:\r\n return legnagyobb\r\n else:\r\n legnagyobb=negyzetsz\r\n\r\ndef main():\r\n print(feladat28(10))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat29(n):\r\n if n<0 or n>12:\r\n return\r\n eredmeny=1\r\n for i in range(1,n+1):\r\n eredmeny=eredmeny*i\r\n return eredmeny\r\n\r\ndef main():\r\n print(feladat29(10))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat31(n):\r\n osztok=[]\r\n for oszto in range(1,n):\r\n if n%oszto==0:\r\n osztok.append(oszto)\r\n return osztok\r\n\r\ndef main():\r\n print(feladat31(200))\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\ndef feladat32(n1,n2,k):\r\n szamok=[]\r\n for szam in range(n1,n2):\r\n if szam%k==0:\r\n szamok.append(szam)\r\n return szamok\r\n\r\ndef main():\r\n print(feladat32(2,123,40))\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat33(n):\r\n counter = 0\r\n for x in range(1, n+1):\r\n if n%x == 0:\r\n counter += 1\r\n return counter\r\n\r\ndef legtobbOszto(n):\r\n counter, number = 0, 0\r\n for x in range(1, n+1):\r\n if feladat33(x) > counter:\r\n counter = feladat33(x)\r\n number = x\r\n print(\"A legtöbb osztóval rendelkező szám: {}, {} osztó\".format(number, counter))\r\n\r\ndef main():\r\n print(legtobbOszto(int(input(\"Szám: \"))))\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat34(n):\r\n for x in range(2, n):\r\n if n%x == 0:\r\n return False\r\n return True\r\n\r\ndef felir(n):\r\n elso, masodik = 0, 0\r\n if n <= 6 or n%2 != 0:\r\n print(\"6-nál nagyobb, páros számra van 
szükség!\")\r\n return 0\r\n for x in range(2, n):\r\n if feladat34(x) == True:\r\n elso = x\r\n if feladat34(n-elso) == True:\r\n masodik = n-elso\r\n break\r\n print(\"{} összege a {} és {} prímszámoknak. \".format(n, elso, masodik))\r\n\r\ndef main():\r\n print(felir(28))\r\nif __name__ == '__main__':\r\n main()\r\n\r\ndef feladat38(egeszszam, szamjegy):\r\n str_egesz, str_szamjegy, result = \"{}\".format(egeszszam), \"{}\".format(szamjegy), 0\r\n if egeszszam > 999999999:\r\n print(\"Legfeljebb 9 számjegy megengedett!\")\r\n return 0\r\n for x in range(len(str_egesz)):\r\n if str_egesz[x] == str_szamjegy:\r\n result += 1\r\n print(\"A '{}' számban a '{}' számjegy {}x fordul elő!\".format(egeszszam, szamjegy, result))\r\n\r\ndef main():\r\n print(feladat38(43574843, 4))\r\nif __name__ == '__main__':\r\n main()\r\n" } ]
4
lucas-source/data-science
https://github.com/lucas-source/data-science
0a5f695c6eb556e2930fa62f309ef9c3cfb33aa9
14c5ac03b1b24e251eacb5ff4cb3b09ea0a4147d
d9ef55fa04766116e7fb6999867c05d10a6319a1
refs/heads/main
2023-07-02T10:28:40.188064
2021-08-12T19:33:10
2021-08-12T19:33:10
338,053,151
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6649283170700073, "alphanum_fraction": 0.6720991134643555, "avg_line_length": 29.68000030517578, "blob_id": "e4fea0a5bc06a25ec34dec6880212fa5d9bf49a3", "content_id": "2f07666e2c231c1709a98afbc76ac280d8533b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1534, "license_type": "no_license", "max_line_length": 85, "num_lines": 50, "path": "/Covid19/pycharm_covid/InterfaceCovide.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from abc import ABC, abstractmethod\nimport inspect\n\n\nclass InterfacCv(ABC):\n\n @abstractmethod\n def to_present(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def structure(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def missing_values(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def data_analysis(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n def tadoido(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n\nclass InterfacPs(ABC):\n\n @abstractmethod\n def continent_cases(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def country_cases(self, ax2) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def vaccinations_cases(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n\nclass InterfaceBr(ABC):\n\n @abstractmethod\n def structure_br(self) -> None:\n raise Exception(f\"must implement the method: {inspect.stack()[0].function}\")\n\n @abstractmethod\n def graphic_br(self) -> None:\n raise Exception(f\"must implement the methodo: {inspect.stack()[0].function}\")\n" }, { "alpha_fraction": 0.5730082988739014, "alphanum_fraction": 0.589595377445221, "avg_line_length": 36.18691635131836, "blob_id": "e3e69d1a4bd043bbf10667eb62bb873188c7d506", "content_id": "de417f43dcfeb9a8d9af415bef180ba792bf3ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3981, "license_type": "no_license", "max_line_length": 119, "num_lines": 107, "path": "/Covid19/pycharm_covid/countries.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom InterfaceCovid import InterfacPs, InterfaceBr\nfrom Constructor import CovidConstructor\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n\nclass InfoCoutries(CovidConstructor, InterfacPs):\n\n sns.set()\n\n def __init__(self) -> None:\n super().__init__()\n return\n\n def continent_cases(self) -> None:\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))\n\n check = dict(self.df.groupby(self.df['location'])['total_deaths'].max().sort_values(ascending=False)[1:5])\n new_data = pd.DataFrame(check, index=[0])\n new_data.plot(cmap=plt.get_cmap('cividis'), kind=\"bar\", ax=ax1)\n plt.xticks(rotation=50, ha='right')\n ax1.set_title('$Continents$ $with$ $more$ $cases$')\n ax1.set_xlabel('$places$ $X$')\n ax1.set_ylabel('$quantities$ $Y$')\n plt.tight_layout()\n self.country_cases(ax2)\n return\n\n def country_cases(self, ax2) -> None:\n country_single = dict(self.df.groupby(self.df['location'])['total_cases'].\n 
max().sort_values(ascending=False)[5:6])\n biggest_countries = dict(self.df.groupby(self.df['location'])['total_cases'].\n max().sort_values(ascending=False)[7:11])\n\n self.gear.update(country_single)\n self.gear.update(biggest_countries)\n new_frame = pd.DataFrame(self.gear, index=[0])\n\n new_frame.plot(cmap=plt.get_cmap('bone'), kind=\"bar\", ax=ax2)\n ax2.set_title('$Countries$ $with$ $more$ $cases$')\n ax2.set_xlabel('$places X$')\n ax2.set_ylabel('$quantities Y$')\n plt.show()\n return\n\n def vaccinations_cases(self) -> None:\n vaccinations_single = self.df.groupby(self.df['location'])['total_vaccinations'].max().sort_values(\n ascending=False)[2:3]\n vacina_countries = self.df.groupby(self.df['location'])['total_vaccinations'].max().sort_values(\n ascending=False)[6:8]\n self.gear.update(vaccinations_single)\n self.gear.update(vacina_countries)\n\n peg = []\n for i in self.gear:\n peg.append(i)\n\n top1 = self.df.loc[self.df.location == peg[0], \"total_vaccinations\"]\n top2 = self.df.loc[self.df.location == peg[1], \"total_vaccinations\"]\n top3 = self.df.loc[self.df.location == peg[2], \"total_vaccinations\"]\n\n figure, ax = plt.subplots(figsize=(10, 6))\n\n sns.lineplot(data=self.df, x=self.df.date, y=top1, label=peg[0], color='darkred')\n sns.lineplot(data=self.df, x=self.df.date, y=top2, label=peg[1], color='Gold')\n sns.lineplot(data=self.df, x=self.df.date, y=top3, label=peg[2], color='mediumblue')\n ax.set_title('$top$ $3$ $most$ $vaccinated$ $countries$')\n ax.set_xlabel('$Date X$')\n ax.set_ylabel('$quantities Y$')\n plt.show()\n return\n\n\nclass InfoBrasil(CovidConstructor, InterfaceBr):\n\n def __init__(self) -> None:\n super().__init__()\n self.br = self.df.loc[self.df.location == \"Brazil\"].copy()\n return\n\n def br_head(self) -> None:\n print(self.br.head())\n\n def structure_br(self) -> None:\n print(f\"Início: {self.df['date'].min()}\\n\"\n f\"Fim: {self.df['date'].max()}\\n\")\n\n first_case = self.br.loc[self.br['new_cases'] != 0.0].head(1)\n print(f\"Primeiro caso de covid no Brasil {first_case.date.loc[13811]}\")\n\n print(f\"Até a data desse estudo o Brasil apresenta {self.br['total_cases'].max():,.0f} pessoa infectado covid,\"\n f\" e um total de {self.br['total_deaths'].max():,.0f} mortes pelo covid!\")\n return\n\n def graphic_br(self) -> None:\n x = np.arange(533)\n y = self.br.total_vaccinations\n\n fig, ax = plt.subplots(figsize=(10, 4))\n\n sns.regplot(x, y, color=\"g\", ax=ax, x_bins=70)\n ax.set_title('$vaccinated$ $in$ $Brazil$')\n ax.set_ylabel('$quantities Y$')\n\n plt.show()\n" }, { "alpha_fraction": 0.7149171233177185, "alphanum_fraction": 0.7392265200614929, "avg_line_length": 38.34782791137695, "blob_id": "2baeafaad9a4718bd6616aa7e7b62579e2674068", "content_id": "520127f005cb88a797805f87491fcb553b9b88ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 909, "license_type": "no_license", "max_line_length": 253, "num_lines": 23, "path": "/README.md", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "[![author](https://img.shields.io/badge/author-lucas-red.svg)](https://www.linkedin.com/in/lucas-rocha-1904a3172/) [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/lucas-source)\n\n<p align=\"center\">\n <img src=\"at.png\" >\n</p>\n \n# Lucas Rocha Vieira\n<sub>Analista de sistema ERP</sub>\n\nAnalista de sistema erp protheus. 
Atualmente estou desenvolvendo habilidades na linguagem Python com ênfase em Data Science.\n\n**Links:**\n* [LinkedIn](https://www.linkedin.com/in/lucas-rocha-1904a3172/)\n\n---\n\n**Projetos:**\n* [Covid-19](https://github.com/lucas-source/data-science/blob/main/Covid19/Covid19.ipynb)\n* [Análise da Violência no Rio de Janeiro](https://github.com/lucas-source/data-science/blob/main/RJ.ipynb)\n* [Análise de dados Airbnb - Sydney](https://github.com/lucas-source/data-science/blob/main/Analisando_os_Dados_do_Airbnb.ipynb)\n\n\n---\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6240601539611816, "avg_line_length": 32.25, "blob_id": "448f886585576f6fb3ac9d798d72a6f6d20cccfb", "content_id": "26fdb0c2bb076316f56eecc39e14276c3fcbdaf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 124, "num_lines": 12, "path": "/Covid19/pycharm_covid/Constructor.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\nclass CovidConstructor:\n\n def __init__(self) -> None:\n self.df = pd.read_csv(\"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv\")\n self.df['date'] = pd.to_datetime(self.df['date'])\n self.gear = {}\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n return\n" }, { "alpha_fraction": 0.6061946749687195, "alphanum_fraction": 0.6128318309783936, "avg_line_length": 33.769229888916016, "blob_id": "3274801ac5a0067b7a0525cb6d6cc5b8b08a365e", "content_id": "bb146d1e64cb565b4cea510c7e5589d12b7f7ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 114, "num_lines": 13, "path": "/Violencia RJ/Codigos por arquivos/Constructor.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass RioDeJaneiro:\n\n def __init__(self) -> None:\n self.data_path = \"https://raw.githubusercontent.com/carlosfab/dsnp2/master/datasets/violencia_rio.csv\"\n self.df = pd.read_csv(self.data_path)\n self.fig = plt.figure(figsize=(9, 5))\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n return\n" }, { "alpha_fraction": 0.6516487002372742, "alphanum_fraction": 0.682990550994873, "avg_line_length": 34.61627960205078, "blob_id": "84582601e40b7036495a81021cb98a9f823d02e9", "content_id": "bb1b640a61c6fde4d962ac907df68f29dbe8419b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3076, "license_type": "no_license", "max_line_length": 108, "num_lines": 86, "path": "/Airnbnb Australia/Pycharm_airbnb.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "\"\"\"\n:autor = Lucas Rocha Vieira\n\"\"\"\nimport pandas as pd\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# csv sydney-austrália\ndf = pd.read_csv(\"http://data.insideairbnb.com/australia/nsw/sydney/2021-04-10/visualisations/listings.csv\")\n\n\n# Tratativa para mostrar todas as colunas\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\nprint(df.head())\n\nprint(\"Linhas:\\t {}\\n\".format(df.shape[0]) + # Entrada\n \"Colunas:\\t {}\\n\".format(df.shape[1])) # 
Variavel\nprint(\"-----------------------------------------\\n\")\ndisplay(df.dtypes)\nprint(\"-----------------------------------------\\n\")\n\n# verificando a porcentagem de valores nulos em cada coluna\ndisplay((df.isnull().sum() / df.shape[0]).sort_values(ascending=False))\ndf.hist(bins=15, figsize=(20, 20));\nplt.show()\n\n# verificando os outlires nas colunas\nprint(df[['price', 'minimum_nights', 'number_of_reviews', 'reviews_per_month',\n 'calculated_host_listings_count', 'availability_365']].describe())\n\n# minimum_nights\ndf.minimum_nights.plot(kind='box', vert=False, figsize=(15, 3))\nplt.show()\nplt.show()\n\n# ver quantidade de valores acima de 30 dias para minimum_nights\nprint(\"minimum_nights: valores acima de 30:\")\nprint(\"{} entradas\".format(len(df[df.minimum_nights > 30])))\nprint(\"{:.4f}%\".format((len(df[df.minimum_nights > 30]) / df.shape[0])*100))\n\n# mostrar a quantidade de cada tipo de imóvel disponível\n# price\ndf.price.plot(kind='box', vert=False, figsize=(15, 3),)\nplt.show()\n\n# ver quantidade de valores acima de 1500 para price\nprint(\"\\nprice: valores acima de 1500\")\nprint(\"{} entradas\".format(len(df[df.price > 1500])))\nprint(\"{:.4f}%\".format((len(df[df.price > 1500]) / df.shape[0])*100))\n\ndf.price.plot(kind='box', vert=False, xlim=(0, 1300), figsize=(15, 3));\n\n# remover os *outliers* em um novo DataFrame\ndf_clean = df.copy()\ndf_clean.drop(df_clean[df_clean.price > 1500].index, axis=0, inplace=True)\ndf_clean.drop(df_clean[df_clean.minimum_nights > 30].index, axis=0, inplace=True)\n\n# remover `neighbourhood_group`, pois está vazio\ndf_clean.drop('neighbourhood_group', axis=1, inplace=True)\n\n# plotar o histograma para as variáveis numéricas\ndf_clean.hist(bins=15, figsize=(15, 10));\nplt.show()\n\n# criar uma matriz de correlação\ncorr = df_clean[['price', 'minimum_nights', 'number_of_reviews', 'reviews_per_month',\n 'calculated_host_listings_count', 'availability_365']].corr()\ndisplay(corr)\n# hetmap\nsns.heatmap(corr, cmap='RdBu', fmt='.2f', square=True, linecolor='white', annot=True)\nplt.show()\n\n# mostrar a quantidade de cada tipo de imóvel disponível\ndf_clean.room_type.value_counts()\n\n# mostrar a porcentagem de cada tipo de imóvel disponível\ndf_clean.room_type.value_counts() / df_clean.shape[0]\nvar = df_clean.groupby(['neighbourhood']).price.mean().sort_values(ascending=False)[:10]\n\n# plotar os imóveis pela latitude-longitude\ndf_clean.plot(kind=\"scatter\", x='longitude', y='latitude', alpha=0.4, c=df_clean['price'], s=8,\n cmap=plt.get_cmap('jet'), figsize=(12, 8));\n\nplt.show()\n" }, { "alpha_fraction": 0.6807453632354736, "alphanum_fraction": 0.6807453632354736, "avg_line_length": 25.83333396911621, "blob_id": "0e35a19b07ee4eaf86e74213e2eb1b67210f25c6", "content_id": "6cf726719a9235ddd6f19046d960ebbf04117fd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/Violencia RJ/Codigos por arquivos/Interface.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from abc import ABC, abstractmethod\n\n\nclass InterfaceShape(ABC):\n\n @abstractmethod\n def data_shape(self) -> None:\n raise Exception(\"Should impleent comer method\")\n\n def data_header(self) -> None:\n raise Exception(\"Should impleent comer method\")\n\n def missing_values(self) -> None:\n raise Exception(\"Should impleent comer method\")\n\n def describe(self) -> None:\n raise 
Exception(\"Should impleent comer method\")\n\n\nclass InterfacePlot(ABC):\n\n @abstractmethod\n def graphic_vehicle_theft(self) -> None:\n raise Exception(\"Should impleent comer method\")\n\n def graphic_general_records(self) -> None:\n raise Exception(\"Shoul impleent comer method\")\n\n def graphic_manslaughter(self) -> None:\n raise Exception(\"Shoul impleent comer method\")\n" }, { "alpha_fraction": 0.5722965002059937, "alphanum_fraction": 0.5771567225456238, "avg_line_length": 27.379310607910156, "blob_id": "c38c64215d67a41f3071f91898782081c1dc52dd", "content_id": "3d8282798a6fa043ca9d4c2c61af901c04d53ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 87, "num_lines": 29, "path": "/Covid19/pycharm_covid/Structure.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from Constructor import CovidConstructor\nfrom InterfaceCovid import InterfacCv\n\n\nclass CovidStructure(CovidConstructor, InterfacCv):\n\n def __init__(self) -> None:\n self.consult = None\n super().__init__()\n\n def to_present(self, num: int = 5) -> None:\n print(self.df.head(num))\n return\n\n def structure(self) -> None:\n print(f\"Lines: {self.df.shape[0]}\\n\"\n f\"Columns: {self.df.shape[1]}\\n\")\n print(\"Tipos das variáveis:\")\n print(self.df.dtypes)\n return\n\n def missing_values(self) -> None:\n print((self.df.isnull().sum() / self.df.shape[0]).sort_values(ascending=False))\n return\n\n def data_analysis(self) -> None:\n print(f\"Início: {self.df['date'].min()}\\n\"\n f\"Fim: {self.df['date'].max()}\\n\")\n return\n" }, { "alpha_fraction": 0.5971428751945496, "alphanum_fraction": 0.6107142567634583, "avg_line_length": 32.33333206176758, "blob_id": "a83e403c08dd70bdfe914b66fbd40ccfc6557e74", "content_id": "36b310c72cea7308c9c1ed73e26982eda0088876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1401, "license_type": "no_license", "max_line_length": 91, "num_lines": 42, "path": "/Violencia RJ/Codigos por arquivos/Plot.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from Constructor import RioDeJaneiro\nimport matplotlib.pyplot as plt\nfrom Interface import InterfacePlot\n\n\nclass PlotRio(RioDeJaneiro, InterfacePlot):\n\n def __init__(self) -> None:\n super().__init__()\n return\n\n def graphic_vehicle_theft(self) -> None:\n ax = plt.subplot(131)\n theft = self.df.mean().sort_values(ascending=False)[4:10]\n theft.plot(cmap=plt.get_cmap('Reds'), edgecolor='r', kind=\"bar\", ax=ax)\n plt.xticks(rotation=50, ha='right')\n ax.set(title=\"Crimes mais cometidos\")\n self.graphic_general_records()\n return\n\n def graphic_general_records(self) -> None:\n ax = plt.subplot(132)\n theft = self.df.mean().sort_values(ascending=False)[0:4]\n theft.plot(cmap=plt.get_cmap('twilight'), edgecolor='gray', kind=\"bar\", ax=ax)\n plt.xticks(rotation=50, ha='right')\n ax.set(title=\"Registros\")\n plt.show()\n return\n\n def graphic_trend(self) -> None:\n ax = plt.subplot()\n ax.plot(self.df.sequestro.index, self.df.sequestro, linewidth=1.0, color='red')\n ax.set(title=\"Roubo Coletivo\")\n plt.show()\n return\n\n def graphic_manslaughter(self) -> None:\n ax = plt.subplot()\n ax.plot(self.df.hom_culposo.index, self.df.hom_culposo, linewidth=1.0, color='red')\n ax.set(title=\"Homicídio culposo\")\n plt.show()\n return\n" }, { "alpha_fraction": 0.7542997598648071, "alphanum_fraction": 
0.7542997598648071, "avg_line_length": 24.4375, "blob_id": "548062739aa1c4a99e17b0273254fa6e9f750c3f", "content_id": "be33178314eb424ec82d6049d2bef6ac6f0c5a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 54, "num_lines": 16, "path": "/Violencia RJ/Codigos por arquivos/Run.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from Constructor import RioDeJaneiro\nfrom Interface import InterfaceShape, InterfacePlot\nfrom plot import PlotRio\nfrom Struct import ShapeRio\n\"\"\"\nPara executar alguma função só tirar o comentario '#' \n\"\"\"\n# ESTRUTURA\n# ShapeRio().data_shape()\n# ShapeRio().data_header()\n# ShapeRio().missing_values()\n# ShapeRio().describe()\n\n# PLOTAGEM\n# PlotRio().graphic_vehicle_theft()\n# PlotRio().graphic_manslaughter()\n" }, { "alpha_fraction": 0.5595855116844177, "alphanum_fraction": 0.5647668242454529, "avg_line_length": 23.90322494506836, "blob_id": "eabc2ec003c56eca3a5988cc9ac8035a172a6689", "content_id": "f8b382ced783f33703797de20158bca9d9a1bd1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 87, "num_lines": 31, "path": "/Violencia RJ/Codigos por arquivos/Struct.py", "repo_name": "lucas-source/data-science", "src_encoding": "UTF-8", "text": "from Constructor import RioDeJaneiro\nfrom Interface import InterfaceShape\n\n\nclass ShapeRio(RioDeJaneiro, InterfaceShape):\n\n def __init__(self) -> None:\n super().__init__()\n return\n\n def data_shape(self) -> None:\n \"\"\"\n -shape\n -columns\n -type variables\n \"\"\"\n print(f\"linhas: {self.df.shape[0]}\\n\"\n f\"colunas: {self.df.shape[1]}\")\n print(self.df.columns)\n print(self.df.dtypes)\n return\n\n def data_header(self, num: int = 5) -> None:\n print(self.df.head(num))\n return\n\n def missing_values(self) -> None:\n print((self.df.isnull().sum() / self.df.shape[0]).sort_values(ascending=False))\n\n def describe(self) -> None:\n print(self.df.describe())\n" } ]
11
anzhongyin/PdoProject
https://github.com/anzhongyin/PdoProject
e0589a3cbe72800bd420b64a974ea24543585678
a257960bd25ac6d0ae8f52e3a28d15530c062885
63762bf4242d69896af287bef6039ed1e50989d7
refs/heads/master
2020-12-02T11:08:57.924063
2017-07-08T09:26:55
2017-07-08T09:26:55
96,604,292
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.611501157283783, "alphanum_fraction": 0.6128385066986084, "avg_line_length": 34.60714340209961, "blob_id": "3360e9e5e5f18fb0dd764bc18960660292382b63", "content_id": "ec2198a4cd8c59a8b025f3dfcfd83b2a8d03c4e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3189, "license_type": "no_license", "max_line_length": 81, "num_lines": 84, "path": "/PdoApp/views.py", "repo_name": "anzhongyin/PdoProject", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom PdoApp import models\n# Create your views here.\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.http import HttpResponse\ndef LOGIN(request):\n return render(request,'background.html')\ndef index(request):\n return render(request,'index.html')\ndef zhuce(request):\n return render(request,'PersonInfo.html')\ndef xiugai(request):\n return render(request,'modify.html')\ndef regist(request):\n admin = request.GET['a']\n password = request.GET['b']\n repassword = request.GET['c']\n if not admin:\n return HttpResponse(u\"用户名不能为空\")\n list = models.Admincheck.objects.filter()\n for i in list:\n d = str(i.admin)\n e = str(i.password)\n if password != repassword:\n return HttpResponse(u\"两次密码不一致,请返回上一页面重新注册\")\n if admin == d:\n return HttpResponse(u\"用户名已存在\")\n p =models.Admincheck(admin=admin)\n p.password = password\n p.save()\n return render(request, 'background.html')\ndef home(request):\n personlist = models.Information.objects.filter()\n return render(request,'home.html', {'personlist': personlist})\ndef information(request):\n personlist = models.Information.objects.filter()\n for i in range(len(personlist)):\n if count ==str(personlist[i].map):\n int(i);\n return render(request,'userInfo.html', {'personlist': personlist[i]})\n\ndef nav(request):\n return render(request,'nav.html')\ndef add(request):\n admin = request.GET['a']\n password = request.GET['b']\n admin = str(admin)\n global count\n count = str(admin)\n password = str(password)\n list = models.Admincheck.objects.filter()\n for i in list:\n d = str(i.admin)\n e = str(i.password)\n if admin == d:\n if password == e:\n #return HttpResponse(u\"信息分析团队欢迎你\")\n personlist = models.Information.objects.filter()\n #response = HttpResponseRedirect('')\n # return render(request, 'nav.html')\n # response.set_cookie('admin', admin, 3600)\n #return response\n return render(request, 'nav.html')\n else:\n return HttpResponse(u\"密码错误 :(\")\n else:\n continue\n return HttpResponse(u\"用户名不存在 :(\")\n\n\ndef modify(request):\n oldpassword = request.GET['a']\n newpassword = request.GET['b']\n repassword = request.GET['c']\n if newpassword != repassword:\n return HttpResponse(u\"两次密码不一致,请返回上一页面重新修改\")\n personlist = models.Admincheck.objects.filter()\n for i in range(len(personlist)):\n if count == str(personlist[i].admin):\n break;\n if oldpassword !=personlist[i].password:\n return HttpResponse(u\"旧密码输入有误,请返回上一页面重新修改\")\n models.Admincheck.objects.filter(admin=count).update(password=newpassword)\n return HttpResponse(u\"修改成功,请重新登录\")\n" }, { "alpha_fraction": 0.6526217460632324, "alphanum_fraction": 0.6591760516166687, "avg_line_length": 34.599998474121094, "blob_id": "d106751783b459cb7f059b79673d83f7f06868c9", "content_id": "724f1df275ddb117b471a1ca9c34dbbeb82e601e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 77, "num_lines": 30, "path": "/PdoProject/urls.py", 
"repo_name": "anzhongyin/PdoProject", "src_encoding": "UTF-8", "text": "\"\"\"PdoProject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom PdoApp import views\nurlpatterns = [\n url(r'^home/', views.home),\n url(r'^admin/', admin.site.urls),\n url(r'^LOGIN/', views.LOGIN),\n url(r'^add/', views.add),\n url(r'^nav/', views.nav),\n url(r'^zhuce/', views.zhuce),\n url(r'^regist/', views.regist),\n url(r'^modify/', views.modify),\n url(r'^information/', views.information),\n url(r'^xiugai/', views.xiugai),\n url(r'^index/', views.index),\n]\n" }, { "alpha_fraction": 0.6613418459892273, "alphanum_fraction": 0.690095841884613, "avg_line_length": 27.454545974731445, "blob_id": "8a1694d8874cd3fa8c382131cebc7ae88b678780", "content_id": "86b8b111179a81969cf5d94d82fe063838e426f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 56, "num_lines": 22, "path": "/PdoApp/models.py", "repo_name": "anzhongyin/PdoProject", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Information(models.Model):\n name = models.CharField(max_length=20)\n age = models.CharField(max_length=3)\n sex = models.CharField(max_length=2)\n grade = models.CharField(max_length=10)\n major = models.CharField(max_length=30)\n personinformation = models.CharField(max_length=100)\n map = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Admincheck(models.Model):\n admin = models.CharField(max_length=10)\n password = models.CharField(max_length=10)\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.4330708682537079, "alphanum_fraction": 0.4690663814544678, "avg_line_length": 23.72222137451172, "blob_id": "f9ea07a1f1d88b142e77116e5d62cf889696d474", "content_id": "594f70806f26b125d4d29abb9873089754e720e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 957, "license_type": "no_license", "max_line_length": 118, "num_lines": 36, "path": "/templates/home.html", "repo_name": "anzhongyin/PdoProject", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\" xmlns:>\n<div style=\"margin-left: auto;margin-right: auto;\">\n<head>\n <body background=\"http://www.quazero.com/uploads/allimg/140411/1-140411151215-50.jpg\" width=\"70%\" height=\"100 %\">\n</body>\n <meta charset=\"utf-8\">\n <title>信息分析团队</title>\n <h2 align=\"center\">信息分析团队欢迎你</h2>\n</head>\n<table border=\"0\" cellspacing=\"40\" align=\"renter\">\n\n<caption>信息分析团队成员表</caption>\n<pre>\n <tr>\n <th>姓名</th>\n <th>年龄</th>\n <th>性别</th>\n <th>年级</th>\n <th>专业</th>\n </tr>\n {% for foo in personlist %}\n <tr align=>\n <td>{{foo.name}}</td>\n <td>{{foo.age}}</td>\n <td>{{foo.sex}}</td>\n <td>{{foo.grade}}</td>\n <td>{{foo.major}}</td>\n </tr>\n\n {% endfor %}\n</pre>\n</table>\n</div>\n</body>\n</html>" }, { "alpha_fraction": 0.5224999785423279, 
"alphanum_fraction": 0.5383333563804626, "avg_line_length": 34.29411697387695, "blob_id": "6c622cd0e6dc02e86e2dfd96dcd508c28000a2c5", "content_id": "cefcd3bf1ec7070e7e8baf86c0f61af453b5bd7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1200, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/PdoApp/migrations/0001_initial.py", "repo_name": "anzhongyin/PdoProject", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Admincheck',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('admin', models.CharField(max_length=10)),\n ('password', models.CharField(max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='Information',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=20)),\n ('age', models.CharField(max_length=3)),\n ('sex', models.CharField(max_length=2)),\n ('grade', models.CharField(max_length=10)),\n ('major', models.CharField(max_length=30)),\n ('personinformation', models.CharField(max_length=100)),\n ('map', models.CharField(max_length=100)),\n ],\n ),\n ]\n" } ]
5
Murray-LIANG/python-manilaclient
https://github.com/Murray-LIANG/python-manilaclient
1ee8e5a353155c9676d609b1ad4e105e972ff2d6
e3652b9c1c36c825d07dce741802930ad0ec1a12
793768a63b9aff1c40d877433612a89335d00250
refs/heads/master
2023-01-04T10:05:46.418382
2020-01-06T13:06:04
2020-03-09T09:18:46
250,205,201
0
0
NOASSERTION
2020-03-26T08:46:10
2020-03-23T15:22:13
2020-03-26T08:36:16
null
[ { "alpha_fraction": 0.6281453967094421, "alphanum_fraction": 0.6356011033058167, "avg_line_length": 35.37288284301758, "blob_id": "83a88a5fb01e8016f19bfaea45298fe8706f4961", "content_id": "190774bdc3683c072972e1c0528e646a273c7de2", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2146, "license_type": "permissive", "max_line_length": 78, "num_lines": 59, "path": "/manilaclient/tests/functional/test_availability_zones.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2016 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\nfrom oslo_utils import uuidutils\n\nfrom manilaclient.tests.functional import base\n\n\[email protected]\nclass ManilaClientTestAvailabilityZonesReadOnly(base.BaseTestCase):\n\n @ddt.data(\"2.6\", \"2.7\", \"2.22\")\n def test_availability_zone_list(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n\n azs = self.user_client.list_availability_zones(\n microversion=microversion)\n\n for az in azs:\n self.assertEqual(4, len(az))\n for key in ('Id', 'Name', 'Created_At', 'Updated_At'):\n self.assertIn(key, az)\n self.assertTrue(uuidutils.is_uuid_like(az['Id']))\n self.assertIsNotNone(az['Name'])\n self.assertIsNotNone(az['Created_At'])\n\n @ddt.data(\n ('name', ['Name']),\n ('name,id', ['Name', 'Id']),\n ('name,created_at', ['Name', 'Created_At']),\n ('name,id,created_at', ['Name', 'Id', 'Created_At']),\n )\n @ddt.unpack\n def test_availability_zone_list_with_columns(self, columns_arg, expected):\n azs = self.user_client.list_availability_zones(columns=columns_arg)\n\n for az in azs:\n self.assertEqual(len(expected), len(az))\n for key in expected:\n self.assertIn(key, az)\n if 'Id' in expected:\n self.assertTrue(uuidutils.is_uuid_like(az['Id']))\n if 'Name' in expected:\n self.assertIsNotNone(az['Name'])\n if 'Created_At' in expected:\n self.assertIsNotNone(az['Created_At'])\n" }, { "alpha_fraction": 0.5918753147125244, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 41.34000015258789, "blob_id": "09211065c3e64ab0d116401198678634f26d7728", "content_id": "c923520bfd06c98cc88a354071fd398a81177ef0", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2117, "license_type": "permissive", "max_line_length": 192, "num_lines": 50, "path": "/doc/source/user/api.rst", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "The :program:`manilaclient` Python API\n======================================\n\n.. module:: manilaclient\n :synopsis: A client for the OpenStack Manila API.\n\n.. currentmodule:: manilaclient\n\nUsage\n-----\n\nIn order to use the Python API directly, you must first obtain an auth\ntoken and identify which endpoint you wish to speak to. 
Once you have\ndone so, you can use the API like so::\n\n    >>> from manilaclient import client\n    >>> manila = client.Client('1', $OS_USER_NAME, $OS_PASSWORD, $OS_TENANT_NAME, $OS_AUTH_URL)\n    >>> manila.shares.list()\n    []\n    >>> share = manila.shares.create(share_proto=\"nfs\", size=1, share_network_id=\"some_share_network_id\")\n    >>> share.id\n    ce06d0a8-5c1b-4e2c-81d2-39eca6bbfb70\n    >>> manila.shares.list()\n    [<Share: ce06d0a8-5c1b-4e2c-81d2-39eca6bbfb70>]\n    >>> share.delete()\n\nIn addition to creating and deleting shares, the manilaclient can manage\nshare-types, access controls, and more! Using CephFS with Ganesha for NFS\nsupport as an example (assumes this continues from the above initialization)::\n\n    >>> share_type = client.share_types.create(\n    >>>     name=\"cephfsnfstype\", spec_driver_handles_share_servers=False,\n    >>>     extra_specs={\n    >>>         'vendor_name': 'Ceph',\n    >>>         'storage_protocol': 'NFS',\n    >>>         'snapshot_support': False,\n    >>>     })\n    >>> share_type\n    <ShareType: cephfsnfstype>\n    >>> share = client.shares.create(\n    >>>     share_type='cephfsnfstype', name='cephnfsshare1',\n    >>>     share_proto=\"nfs\", size=1)\n    >>> share.allow(access_type='ip', access=\"192.168.0.0/24\", access_level='rw')\n    {'id': '29bc4b66-d55d-424d-8107-aee96d1c562b', 'share_id': '0ac95dd2-afba-4ba3-8934-721b29492f04', 'access_level': 'rw', 'access_to': '192.168.0.0/24', 'access_type': 'ip', 'state': 'new'}\n    >>> share.export_locations\n    ['10.5.0.22:/volumes/_nogroup/cf0451b6-0a95-4982-a801-2e212e9c9b96']\n\nIn the above example, Manila will be set up with an NFS share type, backed\nby CephFS. A share is then created, and then access controls are added giving\nthe 192.168.0.0/24 subnet read/write access to the share.\n" }, { "alpha_fraction": 0.5909016132354736, "alphanum_fraction": 0.5924590229988098, "avg_line_length": 40.35593032836914, "blob_id": "8ef061c10ae653a2bf77e3b4f70ab8221dca4a9d", "content_id": "68fcb777d8c9a7569d92e47e1fb85570c5e9de31", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12224, "license_type": "permissive", "max_line_length": 79, "num_lines": 295, "path": "/manilaclient/tests/functional/test_share_networks.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ast\nimport ddt\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import exceptions as tempest_lib_exc\n\nfrom manilaclient.tests.functional import base\nfrom manilaclient.tests.functional import utils\n\n\[email protected]\nclass ShareNetworksReadWriteTest(base.BaseTestCase):\n\n def setUp(self):\n super(ShareNetworksReadWriteTest, self).setUp()\n self.name = data_utils.rand_name('autotest')\n self.description = 'fake_description'\n self.neutron_net_id = 'fake_neutron_net_id'\n self.neutron_subnet_id = 'fake_neutron_subnet_id'\n\n self.sn = self.create_share_network(\n name=self.name,\n description=self.description,\n neutron_net_id=self.neutron_net_id,\n neutron_subnet_id=self.neutron_subnet_id,\n )\n\n @ddt.data(\n {'name': data_utils.rand_name('autotest_share_network_name')},\n {'description': 'fake_description'},\n {'neutron_net_id': 'fake_neutron_net_id',\n 'neutron_subnet_id': 'fake_neutron_subnet_id'},\n )\n def test_create_delete_share_network(self, net_data):\n share_subnet_support = utils.share_network_subnets_are_supported()\n share_subnet_fields = (\n ['neutron_net_id', 'neutron_subnet_id', 'availability_zone']\n if share_subnet_support else [])\n sn = self.create_share_network(cleanup_in_class=False, **net_data)\n default_subnet = (utils.get_default_subnet(self.user_client, sn['id'])\n if share_subnet_support\n else None)\n\n expected_data = {\n 'name': 'None',\n 'description': 'None',\n 'neutron_net_id': 'None',\n 'neutron_subnet_id': 'None',\n }\n expected_data.update(net_data)\n share_network_expected_data = [\n (k, v) for k, v in expected_data.items()\n if k not in share_subnet_fields]\n share_subnet_expected_data = [\n (k, v) for k, v in expected_data.items()\n if k in share_subnet_fields]\n\n for k, v in share_network_expected_data:\n self.assertEqual(v, sn[k])\n for k, v in share_subnet_expected_data:\n self.assertEqual(v, default_subnet[k])\n\n self.admin_client.delete_share_network(sn['id'])\n self.admin_client.wait_for_share_network_deletion(sn['id'])\n\n @utils.skip_if_microversion_not_supported('2.51')\n def test_create_delete_share_network_with_az(self):\n share_subnet_fields = (\n ['neutron_net_id', 'neutron_subnet_id', 'availability_zone'])\n az = self.user_client.list_availability_zones()[0]\n net_data = {\n 'neutron_net_id': 'fake_neutron_net_id',\n 'neutron_subnet_id': 'fake_neutron_subnet_id',\n 'availability_zone': az['Name']\n }\n sn = self.create_share_network(cleanup_in_class=False, **net_data)\n default_subnet = utils.get_subnet_by_availability_zone_name(\n self.user_client, sn['id'], az['Name'])\n\n expected_data = {\n 'name': 'None',\n 'description': 'None',\n 'neutron_net_id': 'None',\n 'neutron_subnet_id': 'None',\n 'availability_zone': 'None',\n }\n expected_data.update(net_data)\n share_network_expected_data = [\n (k, v) for k, v in expected_data.items()\n if k not in share_subnet_fields]\n share_subnet_expected_data = [\n (k, v) for k, v in expected_data.items()\n if k in share_subnet_fields]\n\n for k, v in share_network_expected_data:\n self.assertEqual(v, sn[k])\n for k, v in share_subnet_expected_data:\n self.assertEqual(v, default_subnet[k])\n\n self.admin_client.delete_share_network(sn['id'])\n self.admin_client.wait_for_share_network_deletion(sn['id'])\n\n def test_get_share_network_with_neutron_data(self):\n get = self.admin_client.get_share_network(self.sn['id'])\n\n self.assertEqual(self.name, 
get['name'])\n self.assertEqual(self.description, get['description'])\n if not utils.share_network_subnets_are_supported():\n self.assertEqual(self.neutron_net_id, get['neutron_net_id'])\n self.assertEqual(self.neutron_subnet_id, get['neutron_subnet_id'])\n\n def _get_expected_update_data(self, net_data, net_creation_data):\n # NOTE(dviroel): When subnets are supported, the outputs are converted\n # from string to literal structures in order to process the content of\n # 'share_network_subnets' field.\n default_return_value = (\n None if utils.share_network_subnets_are_supported() else 'None')\n\n expected_nn_id = (\n default_return_value\n if net_data.get('neutron_net_id')\n else net_creation_data.get('neutron_net_id', default_return_value))\n expected_nsn_id = (\n default_return_value\n if net_data.get('neutron_subnet_id')\n else net_creation_data.get('neutron_subnet_id',\n default_return_value))\n return expected_nn_id, expected_nsn_id\n\n @ddt.data(\n ({'name': data_utils.rand_name('autotest_share_network_name')}, {}),\n ({'description': 'fake_description'}, {}),\n ({'neutron_net_id': 'fake_neutron_net_id',\n 'neutron_subnet_id': 'fake_neutron_subnet_id'}, {}),\n ({'name': '\"\"'}, {}),\n ({'description': '\"\"'}, {}),\n ({'neutron_net_id': '\"\"'},\n {'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsn_id'}),\n ({'neutron_subnet_id': '\"\"'},\n {'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsn_id'})\n )\n @ddt.unpack\n def test_create_update_share_network(self, net_data, net_creation_data):\n sn = self.create_share_network(\n cleanup_in_class=False, **net_creation_data)\n\n update = self.admin_client.update_share_network(sn['id'], **net_data)\n\n expected_nn_id, expected_nsn_id = self._get_expected_update_data(\n net_data, net_creation_data)\n\n expected_data = {\n 'name': 'None',\n 'description': 'None',\n 'neutron_net_id': expected_nn_id,\n 'neutron_subnet_id': expected_nsn_id,\n }\n subnet_keys = []\n if utils.share_network_subnets_are_supported():\n subnet_keys = ['neutron_net_id', 'neutron_subnet_id']\n subnet = ast.literal_eval(update['share_network_subnets'])\n\n update_values = dict([(k, v) for k, v in net_data.items()\n if v != '\"\"'])\n expected_data.update(update_values)\n\n for k, v in expected_data.items():\n if k in subnet_keys:\n self.assertEqual(v, subnet[0][k])\n else:\n self.assertEqual(v, update[k])\n\n self.admin_client.delete_share_network(sn['id'])\n self.admin_client.wait_for_share_network_deletion(sn['id'])\n\n @ddt.data(True, False)\n def test_list_share_networks(self, all_tenants):\n share_networks = self.admin_client.list_share_networks(all_tenants)\n\n self.assertTrue(\n any(self.sn['id'] == sn['id'] for sn in share_networks))\n for sn in share_networks:\n self.assertEqual(2, len(sn))\n self.assertIn('id', sn)\n self.assertIn('name', sn)\n\n def test_list_share_networks_select_column(self):\n share_networks = self.admin_client.list_share_networks(columns=\"id\")\n self.assertTrue(any(s['Id'] is not None for s in share_networks))\n self.assertTrue(all('Name' not in s for s in share_networks))\n self.assertTrue(all('name' not in s for s in share_networks))\n\n def _list_share_networks_with_filters(self, filters):\n assert_subnet_fields = utils.share_network_subnets_are_supported()\n share_subnet_fields = (['neutron_subnet_id', 'neutron_net_id']\n if assert_subnet_fields\n else [])\n share_network_filters = [(k, v) for k, v in filters.items()\n if k not in share_subnet_fields]\n share_network_subnet_filters = [(k, v) for k, v in 
filters.items()\n if k in share_subnet_fields]\n share_networks = self.admin_client.list_share_networks(filters=filters)\n\n self.assertGreater(len(share_networks), 0)\n self.assertTrue(\n any(self.sn['id'] == sn['id'] for sn in share_networks))\n for sn in share_networks:\n try:\n share_network = self.admin_client.get_share_network(sn['id'])\n default_subnet = (\n utils.get_default_subnet(self.user_client, sn['id'])\n if assert_subnet_fields\n else None)\n except tempest_lib_exc.NotFound:\n # NOTE(vponomaryov): Case when some share network was deleted\n # between our 'list' and 'get' requests. Skip such case.\n continue\n for k, v in share_network_filters:\n self.assertIn(k, share_network)\n self.assertEqual(v, share_network[k])\n for k, v in share_network_subnet_filters:\n self.assertIn(k, default_subnet)\n self.assertEqual(v, default_subnet[k])\n\n def test_list_share_networks_filter_by_project_id(self):\n project_id = self.admin_client.get_project_id(\n self.admin_client.tenant_name)\n filters = {'project_id': project_id}\n self._list_share_networks_with_filters(filters)\n\n def test_list_share_networks_filter_by_name(self):\n filters = {'name': self.name}\n self._list_share_networks_with_filters(filters)\n\n def test_list_share_networks_filter_by_description(self):\n filters = {'description': self.description}\n self._list_share_networks_with_filters(filters)\n\n def test_list_share_networks_filter_by_neutron_net_id(self):\n filters = {'neutron_net_id': self.neutron_net_id}\n self._list_share_networks_with_filters(filters)\n\n def test_list_share_networks_filter_by_neutron_subnet_id(self):\n filters = {'neutron_subnet_id': self.neutron_subnet_id}\n self._list_share_networks_with_filters(filters)\n\n @ddt.data('name', 'description')\n def test_list_share_networks_filter_by_inexact(self, option):\n self.create_share_network(\n name=data_utils.rand_name('autotest_inexact'),\n description='fake_description_inexact',\n neutron_net_id='fake_neutron_net_id',\n neutron_subnet_id='fake_neutron_subnet_id',\n )\n\n filters = {option + '~': 'inexact'}\n share_networks = self.admin_client.list_share_networks(\n filters=filters)\n\n self.assertGreater(len(share_networks), 0)\n\n def test_list_share_networks_by_inexact_unicode_option(self):\n self.create_share_network(\n name=u'网络名称',\n description=u'网络描述',\n neutron_net_id='fake_neutron_net_id',\n neutron_subnet_id='fake_neutron_subnet_id',\n )\n\n filters = {'name~': u'名称'}\n share_networks = self.admin_client.list_share_networks(\n filters=filters)\n\n self.assertGreater(len(share_networks), 0)\n\n filters = {'description~': u'描述'}\n share_networks = self.admin_client.list_share_networks(\n filters=filters)\n\n self.assertGreater(len(share_networks), 0)\n" }, { "alpha_fraction": 0.6856600046157837, "alphanum_fraction": 0.692307710647583, "avg_line_length": 35.31034469604492, "blob_id": "a000b7c281439dec3be129c63842937414e149de", "content_id": "a3ee2d9c5f703b2fc5155eed436ec87c10cfa282", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "permissive", "max_line_length": 78, "num_lines": 29, "path": "/manilaclient/tests/unit/test_utils.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport six\nimport testtools\n\nfrom manilaclient import utils\n\n\nclass TestCommonUtils(testtools.TestCase):\n\n def test_unicode_key_value_to_string(self):\n src = {u'key': u'\\u70fd\\u7231\\u5a77'}\n expected = {'key': '\\xe7\\x83\\xbd\\xe7\\x88\\xb1\\xe5\\xa9\\xb7'}\n if six.PY2:\n self.assertEqual(expected, utils.unicode_key_value_to_string(src))\n else:\n # u'xxxx' in PY3 is str, we will not get extra 'u' from cli\n # output in PY3\n self.assertEqual(src, utils.unicode_key_value_to_string(src))\n" }, { "alpha_fraction": 0.6373864412307739, "alphanum_fraction": 0.6399827003479004, "avg_line_length": 37.840335845947266, "blob_id": "e91634bf50aef745182cdfd6eb05fbb5a1e1e60b", "content_id": "49c790fdd624d1bba29dc17bd687a1ad19522cdb", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4622, "license_type": "permissive", "max_line_length": 78, "num_lines": 119, "path": "/manilaclient/tests/functional/test_share_network_subnets.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2019 NetApp\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\nfrom manilaclient.tests.functional import base\nfrom manilaclient.tests.functional import utils\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import exceptions\n\n\[email protected]\[email protected]_if_microversion_not_supported('2.51')\nclass ShareNetworkSubnetsReadWriteTest(base.BaseTestCase):\n\n def setUp(self):\n super(ShareNetworkSubnetsReadWriteTest, self).setUp()\n self.name = data_utils.rand_name('autotest')\n self.description = 'fake_description'\n self.neutron_net_id = 'fake_neutron_net_id'\n self.neutron_subnet_id = 'fake_neutron_subnet_id'\n\n self.sn = self.create_share_network(\n name=self.name,\n description=self.description,\n neutron_net_id=self.neutron_net_id,\n neutron_subnet_id=self.neutron_subnet_id,\n )\n\n def test_get_share_network_subnet(self):\n default_subnet = utils.get_default_subnet(self.user_client,\n self.sn['id'])\n\n subnet = self.user_client.get_share_network_subnet(\n self.sn['id'], default_subnet['id'])\n\n self.assertEqual(self.neutron_net_id, subnet['neutron_net_id'])\n self.assertEqual(self.neutron_subnet_id, subnet['neutron_subnet_id'])\n\n def test_get_invalid_share_network_subnet(self):\n self.assertRaises(\n exceptions.CommandFailed,\n self.user_client.get_share_network_subnet,\n self.sn['id'], 'invalid_subnet_id')\n\n def _get_availability_zone(self):\n availability_zones = self.user_client.list_availability_zones()\n return availability_zones[0]['Name']\n\n def test_add_share_network_subnet_to_share_network(self):\n neutron_net_id = 'new_neutron_net_id'\n neutron_subnet_id = 'new_neutron_subnet_id'\n availability_zone = self._get_availability_zone()\n\n subnet = self.add_share_network_subnet(\n self.sn['id'],\n neutron_net_id, neutron_subnet_id,\n availability_zone,\n cleanup_in_class=False)\n\n self.assertEqual(neutron_net_id, subnet['neutron_net_id'])\n self.assertEqual(neutron_subnet_id, subnet['neutron_subnet_id'])\n self.assertEqual(availability_zone, subnet['availability_zone'])\n\n @ddt.data(\n {'neutron_net_id': None, 'neutron_subnet_id': 'fake_subnet_id'},\n {'neutron_net_id': 'fake_net_id', 'neutron_subnet_id': None},\n {'availability_zone': 'invalid_availability_zone'},\n )\n def test_add_invalid_share_network_subnet_to_share_network(self, params):\n self.assertRaises(\n exceptions.CommandFailed,\n self.add_share_network_subnet,\n self.sn['id'],\n **params)\n\n def test_add_share_network_subnet_to_invalid_share_network(self):\n self.assertRaises(\n exceptions.CommandFailed,\n self.add_share_network_subnet,\n 'invalid_share_network',\n self.neutron_net_id,\n self.neutron_subnet_id)\n\n def test_add_delete_share_network_subnet_from_share_network(self):\n neutron_net_id = 'new_neutron_net_id'\n neutron_subnet_id = 'new_neutron_subnet_id'\n availability_zone = self._get_availability_zone()\n\n subnet = self.add_share_network_subnet(\n self.sn['id'],\n neutron_net_id, neutron_subnet_id,\n availability_zone,\n cleanup_in_class=False)\n self.user_client.delete_share_network_subnet(\n share_network_subnet=subnet['id'],\n share_network=self.sn['id'])\n\n self.user_client.wait_for_share_network_subnet_deletion(\n share_network_subnet=subnet['id'],\n share_network=self.sn['id'])\n\n def test_delete_invalid_share_network_subnet(self):\n self.assertRaises(\n exceptions.NotFound,\n self.user_client.delete_share_network_subnet,\n share_network_subnet='invalid_subnet_id',\n share_network=self.sn['id'])\n" }, 
{ "alpha_fraction": 0.38213688135147095, "alphanum_fraction": 0.3987627327442169, "avg_line_length": 33.48444366455078, "blob_id": "39ef957e3a3022864160ea1c2b58f61e86c1fdd8", "content_id": "c8d79432368cf3839b0a679deaaf27bc6bf2ab26", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7759, "license_type": "permissive", "max_line_length": 79, "num_lines": 225, "path": "/manilaclient/tests/unit/test_functional_utils.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\n\nfrom manilaclient.tests.functional import utils as func_utils\nfrom manilaclient.tests.unit import utils\n\n\[email protected]\nclass ShellTest(utils.TestCase):\n\n OUTPUT_LINES_SIMPLE = \"\"\"\n+----+------+---------+\n| ID | Name | Status |\n+----+------+---------+\n| 11 | foo | BUILD |\n| 21 | bar | ERROR |\n+----+------+---------+\n\"\"\"\n OUTPUT_LINES_ONE_MULTI_ROW = \"\"\"\n+----+------+---------+\n| ID | Name | Status |\n+----+------+---------+\n| 11 | foo | BUILD |\n| 21 | bar | ERROR |\n| | | ERROR2 |\n| 31 | bee | None |\n+----+------+---------+\n\"\"\"\n\n OUTPUT_LINES_COMPLICATED_MULTI_ROW = \"\"\"\n+----+------+---------+\n| ID | Name | Status |\n+----+------+---------+\n| 11 | foo | BUILD |\n| 21 | bar | ERROR |\n| | | ERROR2 |\n| | | ERROR3 |\n| 31 | bee | None |\n| | bee2 | |\n| | bee3 | |\n| 41 | rand | None |\n| | rend | None2 |\n| | | |\n+----+------+---------+\n\"\"\"\n\n OUTPUT_LINES_COMPLICATED_MULTI_ROW_WITH_SHIFTED_ID = \"\"\"\n+----+----+------+---------+\n| ** | ID | Name | Status |\n+----+----+------+---------+\n| ** | 11 | foo | BUILD |\n| | 21 | bar | ERROR |\n| | | | ERROR2 |\n| | | | ERROR3 |\n| | | | |\n| ** | 31 | bee | None |\n| | | bee2 | |\n| | | | |\n+----+----+------+---------+\n\"\"\"\n\n OUTPUT_LINES_NESTED_TABLE = \"\"\"\n+----+----+------+--------------+\n| ** | ID | Name | Status |\n+----+----+------+--------------+\n| ** | 11 | foo | +----+----+ |\n| | | | | aa | bb | |\n| | | | +----+----+ |\n| | | | +----+----+ |\n| | 21 | bar | ERROR |\n| | | | ERROR2 |\n| | | | ERROR3 |\n+----+----+------+--------------+\n\"\"\"\n OUTPUT_LINES_NESTED_TABLE_MULTI_LINE = \"\"\"\n+----+----+------+--------------+\n| ** | ID | Name | Status |\n+----+----+------+--------------+\n| ** | 11 | foo | +----+----+ |\n| | | | | id | bb | |\n| | | | +----+----+ |\n| | | | | 01 | a1 | |\n| | | | | | a2 | |\n| | | | +----+----+ |\n| | 21 | bar | ERROR |\n| | | | ERROR2 |\n| | | | ERROR3 |\n+----+----+------+--------------+\n\"\"\"\n OUTPUT_LINES_DETAILS = \"\"\"\n+----------+--------+\n| Property | Value |\n+----------+--------+\n| foo | BUILD |\n| bar | ERROR |\n| | ERROR2 |\n| | ERROR3 |\n| bee | None |\n+----------+--------+\n\"\"\"\n\n @ddt.data({'input': OUTPUT_LINES_SIMPLE,\n 'valid_values': [\n ['11', 'foo', 'BUILD'],\n ['21', 'bar', 'ERROR']\n ]},\n 
{'input': OUTPUT_LINES_ONE_MULTI_ROW,\n 'valid_values': [\n ['11', 'foo', 'BUILD'],\n ['21', 'bar', ['ERROR', 'ERROR2']],\n ['31', 'bee', 'None'],\n ]},\n {'input': OUTPUT_LINES_COMPLICATED_MULTI_ROW,\n 'valid_values': [\n ['11', 'foo', 'BUILD'],\n ['21', 'bar', ['ERROR', 'ERROR2', 'ERROR3']],\n ['31', ['bee', 'bee2', 'bee3'], 'None'],\n ['41', ['rand', 'rend'], ['None', 'None2']],\n ['', '', '']\n ]})\n @ddt.unpack\n def test_multi_line_row_table(self, input, valid_values):\n\n actual_result = func_utils.multi_line_row_table(input)\n\n self.assertEqual(['ID', 'Name', 'Status'], actual_result['headers'])\n self.assertEqual(valid_values, actual_result['values'])\n\n def test_multi_line_row_table_shifted_id_column(self):\n input = self.OUTPUT_LINES_COMPLICATED_MULTI_ROW_WITH_SHIFTED_ID\n valid_values = [\n ['**', '11', 'foo', 'BUILD'],\n ['', '21', 'bar', ['ERROR', 'ERROR2', 'ERROR3']],\n ['', '', '', ''],\n ['**', '31', ['bee', 'bee2'], 'None'],\n ['', '', '', '']\n ]\n\n actual_result = func_utils.multi_line_row_table(\n input, group_by_column_index=1)\n\n self.assertEqual(['**', 'ID', 'Name', 'Status'],\n actual_result['headers'])\n self.assertEqual(valid_values, actual_result['values'])\n\n @ddt.data({'input': OUTPUT_LINES_NESTED_TABLE,\n 'valid_nested': {\n 'headers': ['aa', 'bb'],\n 'values': []\n }},\n {'input': OUTPUT_LINES_NESTED_TABLE_MULTI_LINE,\n 'valid_nested': {\n 'headers': ['id', 'bb'],\n 'values': [['01', ['a1', 'a2']]]\n }},)\n @ddt.unpack\n def test_nested_tables(self, input, valid_nested):\n\n actual_result = func_utils.multi_line_row_table(\n input, group_by_column_index=1)\n\n self.assertEqual(['**', 'ID', 'Name', 'Status'],\n actual_result['headers'])\n\n self.assertEqual(2, len(actual_result['values']))\n self.assertEqual(valid_nested, actual_result['values'][0][3])\n\n @ddt.data({'input': OUTPUT_LINES_DETAILS,\n 'valid_values': [\n ['foo', 'BUILD'],\n ['bar', ['ERROR', 'ERROR2', 'ERROR3']],\n ['bee', 'None'],\n ]})\n @ddt.unpack\n def test_details(self, input, valid_values):\n actual_result = func_utils.multi_line_row_table(input)\n\n self.assertEqual(['Property', 'Value'], actual_result['headers'])\n self.assertEqual(valid_values, actual_result['values'])\n\n @ddt.data({'input_data': OUTPUT_LINES_DETAILS,\n 'output_data': [\n {'Property': 'foo', 'Value': 'BUILD'},\n {'Property': 'bar', 'Value': ['ERROR', 'ERROR2', 'ERROR3']},\n {'Property': 'bee', 'Value': 'None'}]},\n {'input_data': OUTPUT_LINES_SIMPLE,\n 'output_data': [\n {'ID': '11', 'Name': 'foo', 'Status': 'BUILD'},\n {'ID': '21', 'Name': 'bar', 'Status': 'ERROR'},\n ]},\n {'input_data': OUTPUT_LINES_ONE_MULTI_ROW,\n 'output_data': [\n {'ID': '11', 'Name': 'foo', 'Status': 'BUILD'},\n {'ID': '21', 'Name': 'bar', 'Status': ['ERROR', 'ERROR2']},\n {'ID': '31', 'Name': 'bee', 'Status': 'None'},\n ]},\n {'input_data': OUTPUT_LINES_COMPLICATED_MULTI_ROW,\n 'output_data': [\n {'ID': '11', 'Name': 'foo', 'Status': 'BUILD'},\n {'ID': '21', 'Name': 'bar',\n 'Status': ['ERROR', 'ERROR2', 'ERROR3']},\n {'ID': '31', 'Name': ['bee', 'bee2', 'bee3'],\n 'Status': 'None'},\n {'ID': '41', 'Name': ['rand', 'rend'],\n 'Status': ['None', 'None2']},\n {'ID': '', 'Name': '', 'Status': ''},\n ]})\n @ddt.unpack\n def test_listing(self, input_data, output_data):\n actual_result = func_utils.listing(input_data)\n self.assertEqual(output_data, actual_result)\n" }, { "alpha_fraction": 0.5803804993629456, "alphanum_fraction": 0.5878862142562866, "avg_line_length": 35.72435760498047, "blob_id": "54a40680f1d9619385ef9cf011a0c24af3f28eec", 
"content_id": "672174fe951a413c603b18c1f11ec7ba14a962bd", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5729, "license_type": "permissive", "max_line_length": 78, "num_lines": 156, "path": "/manilaclient/v2/share_servers.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2014 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom manilaclient import api_versions\nfrom manilaclient import base\nfrom manilaclient.common.apiclient import base as common_base\n\nRESOURCES_NAME = 'share_servers'\nRESOURCES_PATH = '/share-servers'\nRESOURCE_PATH = RESOURCES_PATH + '/%s'\nRESOURCE_NAME = 'share_server'\nACTION_PATH = RESOURCE_PATH + '/action'\n\n\nclass ShareServer(common_base.Resource):\n\n def __repr__(self):\n return \"<ShareServer: %s>\" % self.id\n\n def __getattr__(self, attr):\n if attr == 'share_network':\n attr = 'share_network_name'\n return super(ShareServer, self).__getattr__(attr)\n\n def delete(self):\n \"\"\"Delete this share server.\"\"\"\n self.manager.delete(self)\n\n def unmanage(self, force=False):\n \"\"\"Unmanage this share server.\"\"\"\n self.manager.unmanage(self, force)\n\n def reset_state(self, state):\n \"\"\"Update the share server with the provided state.\"\"\"\n self.manager.reset_state(self, state)\n\n\nclass ShareServerManager(base.ManagerWithFind):\n \"\"\"Manage :class:`ShareServer` resources.\"\"\"\n resource_class = ShareServer\n\n def get(self, server):\n \"\"\"Get a share server.\n\n :param server: ID of the :class:`ShareServer` to get.\n :rtype: :class:`ShareServer`\n \"\"\"\n server_id = common_base.getid(server)\n server = self._get(\"%s/%s\" % (RESOURCES_PATH, server_id),\n RESOURCE_NAME)\n # Split big dict 'backend_details' to separated strings\n # as next:\n # +---------------------+------------------------------------+\n # | Property | Value |\n # +---------------------+------------------------------------+\n # | details:instance_id |35203a78-c733-4b1f-b82c-faded312e537|\n # +---------------------+------------------------------------+\n for k, v in server._info[\"backend_details\"].items():\n server._info[\"details:%s\" % k] = v\n return server\n\n def details(self, server):\n \"\"\"Get a share server details.\n\n :param server: ID of the :class:`ShareServer` to get details from.\n :rtype: list of :class:`ShareServerBackendDetails\n \"\"\"\n server_id = common_base.getid(server)\n return self._get(\"%s/%s/details\" % (RESOURCES_PATH, server_id),\n \"details\")\n\n def delete(self, server):\n \"\"\"Delete share server.\n\n :param server: ID of the :class:`ShareServer` to delete.\n \"\"\"\n server_id = common_base.getid(server)\n self._delete(RESOURCE_PATH % server_id)\n\n def list(self, search_opts=None):\n \"\"\"Get a list of share servers.\n\n :rtype: list of :class:`ShareServer`\n \"\"\"\n query_string = self._build_query_string(search_opts)\n return 
self._list(RESOURCES_PATH + query_string, RESOURCES_NAME)\n\n @api_versions.wraps(\"2.49\", \"2.50\")\n def manage(self, host, share_network_id, identifier, driver_options=None):\n\n driver_options = driver_options or {}\n body = {\n 'host': host,\n 'share_network_id': share_network_id,\n 'identifier': identifier,\n 'driver_options': driver_options,\n }\n\n resource_path = RESOURCE_PATH % 'manage'\n return self._create(resource_path, {'share_server': body},\n 'share_server')\n\n @api_versions.wraps(\"2.51\") # noqa\n def manage(self, host, share_network_id, identifier,\n share_network_subnet_id=None, driver_options=None):\n\n driver_options = driver_options or {}\n body = {\n 'host': host,\n 'share_network_id': share_network_id,\n 'identifier': identifier,\n 'share_network_subnet_id': share_network_subnet_id,\n 'driver_options': driver_options,\n }\n\n resource_path = RESOURCE_PATH % 'manage'\n return self._create(resource_path, {'share_server': body},\n 'share_server')\n\n @api_versions.wraps(\"2.49\")\n def unmanage(self, share_server, force=False):\n return self._action(\"unmanage\", share_server, {'force': force})\n\n @api_versions.wraps(\"2.49\")\n def reset_state(self, share_server, state):\n \"\"\"Update the provided share server with the provided state.\n\n :param share_server: either share_server object or text with its ID.\n :param state: text with new state to set for share.\n \"\"\"\n return self._action(\"reset_status\", share_server, {\"status\": state})\n\n def _action(self, action, share_server, info=None):\n \"\"\"Perform a share server 'action'.\n\n :param action: text with action name.\n :param share_server: either share_server object or text with its ID.\n :param info: dict with data for specified 'action'.\n :param kwargs: dict with data to be provided for action hooks.\n \"\"\"\n body = {action: info}\n self.run_hooks('modify_body_for_action', body)\n url = ACTION_PATH % common_base.getid(share_server)\n return self.api.client.post(url, body=body)\n" }, { "alpha_fraction": 0.6526315808296204, "alphanum_fraction": 0.6606170535087585, "avg_line_length": 38.35714340209961, "blob_id": "bde5d3f8cf8a29ff702321e4412305dfe40beca8", "content_id": "e7a8380c11b59fb791c5c0125c0a7eb59831507b", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2755, "license_type": "permissive", "max_line_length": 78, "num_lines": 70, "path": "/manilaclient/tests/functional/test_messages.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\n\nfrom manilaclient.tests.functional import base\n\n\[email protected]\nclass MessagesReadOnlyTest(base.BaseTestCase):\n\n @ddt.data(\n (\"admin\", \"2.37\"),\n (\"user\", \"2.37\"),\n )\n @ddt.unpack\n def test_message_list(self, role, microversion):\n self.skip_if_microversion_not_supported(microversion)\n self.clients[role].manila(\"message-list\", microversion=microversion)\n\n\[email protected]\nclass MessagesReadWriteTest(base.BaseTestCase):\n\n def setUp(self):\n super(MessagesReadWriteTest, self).setUp()\n self.message = self.create_message()\n\n def test_list_messages(self):\n self.skip_if_microversion_not_supported('2.37')\n messages = self.admin_client.list_messages()\n self.assertTrue(any(m['ID'] is not None for m in messages))\n self.assertTrue(any(m['User Message'] is not None for m in messages))\n self.assertTrue(any(m['Resource ID'] is not None for m in messages))\n self.assertTrue(any(m['Action ID'] is not None for m in messages))\n self.assertTrue(any(m['Detail ID'] is not None for m in messages))\n self.assertTrue(any(m['Resource Type'] is not None for m in messages))\n\n @ddt.data(\n 'id', 'action_id', 'resource_id', 'detail_id',\n 'resource_type', 'created_at', 'action_id,detail_id,resource_id',\n )\n def test_list_messages_select_column(self, columns):\n self.skip_if_microversion_not_supported('2.37')\n self.admin_client.list_messages(columns=columns)\n\n def test_get_message(self):\n self.skip_if_microversion_not_supported('2.37')\n message = self.admin_client.get_message(self.message['ID'])\n expected_keys = (\n 'id', 'action_id', 'resource_id', 'detail_id',\n 'resource_type', 'created_at',\n )\n for key in expected_keys:\n self.assertIn(key, message)\n\n def test_delete_message(self):\n self.skip_if_microversion_not_supported('2.37')\n message = self.create_message(cleanup_in_class=False)\n self.admin_client.delete_message(message['ID'])\n self.admin_client.wait_for_message_deletion(message['ID'])\n" }, { "alpha_fraction": 0.7717238068580627, "alphanum_fraction": 0.7728980779647827, "avg_line_length": 37.36936950683594, "blob_id": "3c08bc320c2373b984dc9132fb6650ad6d3abc29", "content_id": "588ee18acefbd468bfa2671869b36ab0cfe3e2e6", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4258, "license_type": "permissive", "max_line_length": 83, "num_lines": 111, "path": "/contrib/ci/post_test_hook.sh", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "#!/bin/bash -xe\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# This script is executed inside post_test_hook function in devstack gate.\n\nexport MANILACLIENT_DIR=\"$BASE/new/python-manilaclient\"\nexport MANILACLIENT_CONF=\"$MANILACLIENT_DIR/etc/manilaclient/manilaclient.conf\"\n\n# Go to the manilaclient dir\ncd $MANILACLIENT_DIR\n\n# Give permissions\nsudo chown -R $USER:stack .\n\n# Create manilaclient config file\ntouch $MANILACLIENT_CONF\n\n# Import functions from devstack\nsource $BASE/new/devstack/functions\n\nenv | grep OS_\n\n# Set options to config client.\nsource $BASE/new/devstack/openrc demo demo\nenv | grep OS_\nexport OS_TENANT_NAME=${OS_PROJECT_NAME:-$OS_TENANT_NAME}\niniset $MANILACLIENT_CONF DEFAULT username $OS_USERNAME\niniset $MANILACLIENT_CONF DEFAULT tenant_name $OS_TENANT_NAME\niniset $MANILACLIENT_CONF DEFAULT password $OS_PASSWORD\niniset $MANILACLIENT_CONF DEFAULT auth_url $OS_AUTH_URL\niniset $MANILACLIENT_CONF DEFAULT project_domain_name $OS_PROJECT_DOMAIN_NAME\niniset $MANILACLIENT_CONF DEFAULT user_domain_name $OS_USER_DOMAIN_NAME\niniset $MANILACLIENT_CONF DEFAULT project_domain_id $OS_PROJECT_DOMAIN_ID\niniset $MANILACLIENT_CONF DEFAULT user_domain_id $OS_USER_DOMAIN_ID\n\nsource $BASE/new/devstack/openrc admin demo\nenv | grep OS_\nexport OS_TENANT_NAME=${OS_PROJECT_NAME:-$OS_TENANT_NAME}\niniset $MANILACLIENT_CONF DEFAULT admin_username $OS_USERNAME\niniset $MANILACLIENT_CONF DEFAULT admin_tenant_name $OS_TENANT_NAME\niniset $MANILACLIENT_CONF DEFAULT admin_password $OS_PASSWORD\niniset $MANILACLIENT_CONF DEFAULT admin_auth_url $OS_AUTH_URL\niniset $MANILACLIENT_CONF DEFAULT admin_project_domain_name $OS_PROJECT_DOMAIN_NAME\niniset $MANILACLIENT_CONF DEFAULT admin_user_domain_name $OS_USER_DOMAIN_NAME\niniset $MANILACLIENT_CONF DEFAULT admin_project_domain_id $OS_PROJECT_DOMAIN_ID\niniset $MANILACLIENT_CONF DEFAULT admin_user_domain_id $OS_USER_DOMAIN_ID\n\n# Suppress errors in cleanup of resources\nSUPPRESS_ERRORS=${SUPPRESS_ERRORS_IN_CLEANUP:-True}\niniset $MANILACLIENT_CONF DEFAULT suppress_errors_in_cleanup $SUPPRESS_ERRORS\n\n# Set access type usage specific to dummy driver that we are using in CI\niniset $MANILACLIENT_CONF DEFAULT access_types_mapping \"nfs:ip,cifs:user\"\n\n# Dummy driver is capable of running share migration tests\niniset $MANILACLIENT_CONF DEFAULT run_migration_tests \"True\"\n\n# Dummy driver is capable of running share manage tests\niniset $MANILACLIENT_CONF DEFAULT run_manage_tests \"True\"\n\n# Running mountable snapshot tests in dummy driver\niniset $MANILACLIENT_CONF DEFAULT run_mount_snapshot_tests \"True\"\n\n# Create share network and use it for functional tests if required\nUSE_SHARE_NETWORK=$(trueorfalse True USE_SHARE_NETWORK)\nif [[ ${USE_SHARE_NETWORK} = True ]]; then\n SHARE_NETWORK_NAME=${SHARE_NETWORK_NAME:-ci}\n\n DEFAULT_NEUTRON_NET=$(openstack network show private -c id -f value)\n DEFAULT_NEUTRON_SUBNET=$(openstack subnet show private-subnet -c id -f value)\n NEUTRON_NET=${NEUTRON_NET:-$DEFAULT_NEUTRON_NET}\n NEUTRON_SUBNET=${NEUTRON_SUBNET:-$DEFAULT_NEUTRON_SUBNET}\n\n manila share-network-create \\\n --name $SHARE_NETWORK_NAME \\\n --neutron-net $NEUTRON_NET \\\n --neutron-subnet $NEUTRON_SUBNET\n\n iniset $MANILACLIENT_CONF DEFAULT share_network $SHARE_NETWORK_NAME\n iniset $MANILACLIENT_CONF DEFAULT admin_share_network $SHARE_NETWORK_NAME\nfi\n\n# Set share type if required\nif [[ \"$SHARE_TYPE\" ]]; then\n iniset $MANILACLIENT_CONF DEFAULT share_type 
$SHARE_TYPE\nfi\n\n# let us control if we die or not\nset +o errexit\n\nCONCURRENCY=${CONCURRENCY:-8}\n\n# Run functional tests\nsudo -H -u $USER tox -e functional -v -- --concurrency=$CONCURRENCY\nEXIT_CODE=$?\n\n# Copy artifacts into ZUUL's workspace\nsudo -H -u $USER cp -r $MANILACLIENT_DIR $WORKSPACE\n\nreturn $EXIT_CODE" }, { "alpha_fraction": 0.6181651949882507, "alphanum_fraction": 0.6206817626953125, "avg_line_length": 41.85293960571289, "blob_id": "dbfdcaeafaa9874e22d148e2bd6617a7e17215d8", "content_id": "3f35364e3ec13abd83c6c7d20d7038b4a476ec77", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4371, "license_type": "permissive", "max_line_length": 78, "num_lines": 102, "path": "/manilaclient/tests/functional/test_share_replica_export_locations.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\nfrom oslo_utils import uuidutils\nimport testtools\n\nfrom manilaclient import config\nfrom manilaclient.tests.functional import base\nfrom manilaclient.tests.functional import utils\n\nCONF = config.CONF\n\n\[email protected]\[email protected](CONF.run_replication_tests,\n \"Replication tests are disabled.\")\[email protected]_if_microversion_not_supported('2.47')\nclass ShareReplicaExportLocationsTest(base.BaseTestCase):\n\n def _create_share_and_replica(self):\n replication_type = CONF.replication_type\n share_type = self.create_share_type(\n driver_handles_share_servers=False,\n extra_specs={'replication_type': replication_type})\n share = self.create_share(share_type=share_type['ID'],\n client=self.get_user_client())\n share_replica = self.create_share_replica(share['id'])\n return share, share_replica\n\n @ddt.data('admin', 'user')\n def test_list_share_export_locations(self, role):\n share, share_replica = self._create_share_and_replica()\n client = self.admin_client if role == 'admin' else self.user_client\n export_locations = client.list_share_replica_export_locations(\n share_replica['id'])\n\n self.assertGreater(len(export_locations), 0)\n expected_keys = ['ID', 'Path', 'Preferred', 'Replica State',\n 'Availability Zone']\n\n for el in export_locations:\n for key in expected_keys:\n self.assertIn(key, el)\n self.assertTrue(uuidutils.is_uuid_like(el['ID']))\n self.assertIn(el['Preferred'], ('True', 'False'))\n\n @ddt.data('admin', 'user')\n def test_list_share_export_locations_with_columns(self, role):\n share, share_replica = self._create_share_and_replica()\n client = self.admin_client if role == 'admin' else self.user_client\n export_locations = client.list_share_replica_export_locations(\n share_replica['id'], columns='id,path')\n\n self.assertGreater(len(export_locations), 0)\n expected_keys = ('Id', 'Path')\n unexpected_keys = ('Updated At', 'Created At')\n for el in export_locations:\n for key in expected_keys:\n self.assertIn(key, el)\n for key in 
unexpected_keys:\n self.assertNotIn(key, el)\n self.assertTrue(uuidutils.is_uuid_like(el['Id']))\n\n @ddt.data('admin', 'user')\n def test_get_share_replica_export_location(self, role):\n share, share_replica = self._create_share_and_replica()\n client = self.admin_client if role == 'admin' else self.user_client\n export_locations = client.list_share_replica_export_locations(\n share_replica['id'])\n\n el = client.get_share_replica_export_location(\n share_replica['id'], export_locations[0]['ID'])\n\n expected_keys = ['path', 'updated_at', 'created_at', 'id',\n 'preferred', 'replica_state', 'availability_zone']\n if role == 'admin':\n expected_keys.extend(['is_admin_only', 'share_instance_id'])\n for key in expected_keys:\n self.assertIn(key, el)\n if role == 'admin':\n self.assertTrue(uuidutils.is_uuid_like(el['share_instance_id']))\n self.assertIn(el['is_admin_only'], ('True', 'False'))\n self.assertTrue(uuidutils.is_uuid_like(el['id']))\n self.assertIn(el['preferred'], ('True', 'False'))\n for list_k, get_k in (\n ('ID', 'id'), ('Path', 'path'), ('Preferred', 'preferred'),\n ('Replica State', 'replica_state'),\n ('Availability Zone', 'availability_zone')):\n self.assertEqual(\n export_locations[0][list_k], el[get_k])\n" }, { "alpha_fraction": 0.6019301414489746, "alphanum_fraction": 0.6148724555969238, "avg_line_length": 40.3838996887207, "blob_id": "1e2537037c84513bc832d408ca3bf2b7297b9529", "content_id": "1d3bd38796dd961bd2d7aae97249722bcf3d232a", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13367, "license_type": "permissive", "max_line_length": 79, "num_lines": 323, "path": "/manilaclient/tests/functional/test_share_access.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ast\n\nimport ddt\nfrom tempest.lib import exceptions as tempest_lib_exc\n\nfrom manilaclient import api_versions\nfrom manilaclient import config\nfrom manilaclient.tests.functional import base\n\nCONF = config.CONF\n\n\[email protected]\nclass ShareAccessReadWriteBase(base.BaseTestCase):\n protocol = None\n access_level = None\n\n def setUp(self):\n super(ShareAccessReadWriteBase, self).setUp()\n if self.protocol not in CONF.enable_protocols:\n message = \"%s tests are disabled.\" % self.protocol\n raise self.skipException(message)\n if self.access_level not in CONF.access_levels_mapping.get(\n self.protocol, '').split(' '):\n raise self.skipException(\"%(level)s tests for %(protocol)s share \"\n \"access are disabled.\" % {\n 'level': self.access_level,\n 'protocol': self.protocol\n })\n self.access_types = CONF.access_types_mapping.get(\n self.protocol, '').split(' ')\n if not self.access_types:\n raise self.skipException(\"No access levels were provided for %s \"\n \"share access tests.\" % self.protocol)\n\n self.share = self.create_share(share_protocol=self.protocol,\n public=True)\n self.share_id = self.share['id']\n\n # NOTE(vponomaryov): increase following int range when significant\n # amount of new tests is added.\n int_range = range(20, 50)\n self.access_to = {\n # NOTE(vponomaryov): list of unique values is required for ability\n # to create lots of access rules for one share using different\n # API microversions.\n 'ip': ['99.88.77.%d' % i for i in int_range],\n # NOTE(vponomaryov): following users are fakes and access rules\n # that use it are expected to fail, but they are used only for\n # API testing.\n 'user': ['foo_user_%d' % i for i in int_range],\n 'cert': ['tenant_%d.example.com' % i for i in int_range],\n 'ipv6': ['2001:db8::%d' % i for i in int_range],\n }\n\n def _test_create_list_access_rule_for_share(\n self, microversion, metadata=None):\n access_type = self.access_types[0]\n\n access = self.user_client.access_allow(\n self.share['id'], access_type, self.access_to[access_type].pop(),\n self.access_level, metadata=metadata, microversion=microversion)\n\n return access\n\n @ddt.data(*set([\n \"1.0\", \"2.0\", \"2.6\", \"2.7\", \"2.21\", \"2.33\", \"2.44\", \"2.45\",\n api_versions.MAX_VERSION]))\n def test_create_list_access_rule_for_share(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n access = self._test_create_list_access_rule_for_share(\n microversion=microversion)\n access_list = self.user_client.list_access(\n self.share['id'],\n microversion=microversion\n )\n self.assertTrue(any(\n [item for item in access_list if access['id'] == item['id']]))\n self.assertTrue(any(a['access_type'] is not None for a in access_list))\n self.assertTrue(any(a['access_to'] is not None for a in access_list))\n self.assertTrue(any(a['access_level'] is not None\n for a in access_list))\n if (api_versions.APIVersion(microversion) >=\n api_versions.APIVersion(\"2.33\")):\n self.assertTrue(\n all(all(key in access for key in (\n 'access_key', 'created_at', 'updated_at'))\n for access in access_list))\n elif (api_versions.APIVersion(microversion) >=\n api_versions.APIVersion(\"2.21\")):\n self.assertTrue(all('access_key' in a for a in access_list))\n else:\n self.assertTrue(all('access_key' not in a for a in access_list))\n\n @ddt.data(\"1.0\", \"2.0\", \"2.6\", \"2.7\")\n def test_create_list_access_rule_for_share_select_column(\n 
self,\n microversion):\n self.skip_if_microversion_not_supported(microversion)\n self._test_create_list_access_rule_for_share(\n microversion=microversion)\n access_list = self.user_client.list_access(\n self.share['id'],\n columns=\"access_type,access_to\",\n microversion=microversion\n )\n self.assertTrue(any(a['Access_Type'] is not None for a in access_list))\n self.assertTrue(any(a['Access_To'] is not None for a in access_list))\n self.assertTrue(all('Access_Level' not in a for a in access_list))\n self.assertTrue(all('access_level' not in a for a in access_list))\n\n def _create_delete_access_rule(self, share_id, access_type, access_to,\n microversion=None):\n self.skip_if_microversion_not_supported(microversion)\n if access_type not in self.access_types:\n raise self.skipException(\n \"'%(access_type)s' access rules are disabled for protocol \"\n \"'%(protocol)s'.\" % {\"access_type\": access_type,\n \"protocol\": self.protocol})\n\n access = self.user_client.access_allow(\n share_id, access_type, access_to, self.access_level,\n microversion=microversion)\n\n self.assertEqual(share_id, access.get('share_id'))\n self.assertEqual(access_type, access.get('access_type'))\n self.assertEqual(access_to.replace('\\\\\\\\', '\\\\'),\n access.get('access_to'))\n self.assertEqual(self.access_level, access.get('access_level'))\n if (api_versions.APIVersion(microversion) >=\n api_versions.APIVersion(\"2.33\")):\n self.assertIn('access_key', access)\n self.assertIn('created_at', access)\n self.assertIn('updated_at', access)\n elif (api_versions.APIVersion(microversion) >=\n api_versions.APIVersion(\"2.21\")):\n self.assertIn('access_key', access)\n else:\n self.assertNotIn('access_key', access)\n\n self.user_client.wait_for_access_rule_status(share_id, access['id'])\n self.user_client.access_deny(share_id, access['id'])\n self.user_client.wait_for_access_rule_deletion(share_id, access['id'])\n\n self.assertRaises(tempest_lib_exc.NotFound,\n self.user_client.get_access, share_id, access['id'])\n\n @ddt.data(*set([\"2.45\", api_versions.MAX_VERSION]))\n def test_create_list_access_rule_with_metadata(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n\n md1 = {\"key1\": \"value1\", \"key2\": \"value2\"}\n md2 = {\"key3\": \"value3\", \"key4\": \"value4\"}\n self._test_create_list_access_rule_for_share(\n metadata=md1, microversion=microversion)\n access = self._test_create_list_access_rule_for_share(\n metadata=md2, microversion=microversion)\n access_list = self.user_client.list_access(\n self.share['id'], metadata={\"key4\": \"value4\"},\n microversion=microversion)\n self.assertEqual(1, len(access_list))\n # Verify access rule metadata\n get_access = self.user_client.access_show(\n access_list[0]['id'], microversion=microversion)\n metadata = ast.literal_eval(get_access['metadata'])\n self.assertEqual(2, len(metadata))\n self.assertIn('key3', metadata)\n self.assertIn('key4', metadata)\n self.assertEqual(md2['key3'], metadata['key3'])\n self.assertEqual(md2['key4'], metadata['key4'])\n self.assertEqual(access['id'], access_list[0]['id'])\n\n self.user_client.access_deny(access['share_id'], access['id'])\n self.user_client.wait_for_access_rule_deletion(access['share_id'],\n access['id'])\n\n @ddt.data(*set([\"2.45\", api_versions.MAX_VERSION]))\n def test_create_update_show_access_rule_with_metadata(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n\n md1 = {\"key1\": \"value1\", \"key2\": \"value2\"}\n md2 = {\"key3\": \"value3\", \"key2\": 
\"value4\"}\n # create a access rule with metadata\n access = self._test_create_list_access_rule_for_share(\n metadata=md1, microversion=microversion)\n # get the access rule\n get_access = self.user_client.access_show(\n access['id'], microversion=microversion)\n # verify access rule\n self.assertEqual(access['id'], get_access['id'])\n self.assertEqual(md1, ast.literal_eval(get_access['metadata']))\n\n # update access rule metadata\n self.user_client.access_set_metadata(\n access['id'], metadata=md2, microversion=microversion)\n get_access = self.user_client.access_show(\n access['id'], microversion=microversion)\n\n # verify access rule after update access rule metadata\n self.assertEqual(\n {\"key1\": \"value1\", \"key2\": \"value4\", \"key3\": \"value3\"},\n ast.literal_eval(get_access['metadata']))\n self.assertEqual(access['id'], get_access['id'])\n\n @ddt.data(*set([\"2.45\", api_versions.MAX_VERSION]))\n def test_delete_access_rule_metadata(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n\n md = {\"key1\": \"value1\", \"key2\": \"value2\"}\n # create a access rule with metadata\n access = self._test_create_list_access_rule_for_share(\n metadata=md, microversion=microversion)\n # get the access rule\n get_access = self.user_client.access_show(\n access['id'], microversion=microversion)\n\n # verify access rule\n self.assertEqual(access['id'], get_access['id'])\n self.assertEqual(md, ast.literal_eval(get_access['metadata']))\n\n # delete access rule metadata\n self.user_client.access_unset_metadata(\n access['id'], keys=[\"key1\", \"key2\"], microversion=microversion)\n get_access = self.user_client.access_show(\n access['id'], microversion=microversion)\n\n # verify access rule after delete access rule metadata\n self.assertEqual({}, ast.literal_eval(get_access['metadata']))\n self.assertEqual(access['id'], get_access['id'])\n\n @ddt.data(\"1.0\", \"2.0\", \"2.6\", \"2.7\", \"2.21\", \"2.33\")\n def test_create_delete_ip_access_rule(self, microversion):\n self._create_delete_access_rule(\n self.share_id, 'ip', self.access_to['ip'].pop(), microversion)\n\n @ddt.data(\"1.0\", \"2.0\", \"2.6\", \"2.7\", \"2.21\", \"2.33\")\n def test_create_delete_user_access_rule(self, microversion):\n self._create_delete_access_rule(\n self.share_id, 'user', CONF.username_for_user_rules, microversion)\n\n @ddt.data(\"1.0\", \"2.0\", \"2.6\", \"2.7\", \"2.21\", \"2.33\")\n def test_create_delete_cert_access_rule(self, microversion):\n self._create_delete_access_rule(\n self.share_id, 'cert', self.access_to['cert'].pop(), microversion)\n\n @ddt.data(\"2.38\", api_versions.MAX_VERSION)\n def test_create_delete_ipv6_access_rule(self, microversion):\n self._create_delete_access_rule(\n self.share_id, 'ip', self.access_to['ipv6'].pop(), microversion)\n\n\nclass NFSShareRWAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'nfs'\n access_level = 'rw'\n\n\nclass NFSShareROAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'nfs'\n access_level = 'ro'\n\n\nclass CIFSShareRWAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'cifs'\n access_level = 'rw'\n\n\nclass CIFSShareROAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'cifs'\n access_level = 'ro'\n\n\nclass GlusterFSShareRWAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'glusterfs'\n access_level = 'rw'\n\n\nclass GlusterFSShareROAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'glusterfs'\n access_level = 'ro'\n\n\nclass 
HDFSShareRWAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'hdfs'\n access_level = 'rw'\n\n\nclass HDFSShareROAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'hdfs'\n access_level = 'ro'\n\n\nclass MAPRFSShareRWAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'maprfs'\n access_level = 'rw'\n\n\nclass MAPRFSShareROAccessReadWriteTest(ShareAccessReadWriteBase):\n protocol = 'maprfs'\n access_level = 'ro'\n\n\ndef load_tests(loader, tests, _):\n result = []\n for test_case in tests:\n if type(test_case._tests[0]) is ShareAccessReadWriteBase:\n continue\n result.append(test_case)\n return loader.suiteClass(result)\n" }, { "alpha_fraction": 0.6438515186309814, "alphanum_fraction": 0.6461716890335083, "avg_line_length": 36.4782600402832, "blob_id": "c4dea8468819118b160a8fdfc4638262515a5565", "content_id": "e8b8ff0c11b59fb791c5c0125c0a7eb59831507b", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3448, "license_type": "permissive", "max_line_length": 78, "num_lines": 92, "path": "/manilaclient/v2/share_network_subnets.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2019 NetApp\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom manilaclient import base\nfrom manilaclient.common.apiclient import base as common_base\n\nRESOURCES_PATH = '/share-networks/%(share_network_id)s/subnets'\nRESOURCE_PATH = RESOURCES_PATH + '/%(share_network_subnet_id)s'\nRESOURCE_NAME = 'share_network_subnet'\n\n\nclass ShareNetworkSubnet(common_base.Resource):\n \"\"\"Network subnet info for Manila share networks.\"\"\"\n def __repr__(self):\n return \"<ShareNetworkSubnet: %s>\" % self.id\n\n def __getitem__(self, key):\n return self._info[key]\n\n def delete(self):\n \"\"\"Delete this share network subnet.\"\"\"\n self.manager.delete(self)\n\n\nclass ShareNetworkSubnetManager(base.ManagerWithFind):\n \"\"\"Manage :class:`ShareNetworkSubnet` resources.\"\"\"\n\n resource_class = ShareNetworkSubnet\n\n def create(self, neutron_net_id=None, neutron_subnet_id=None,\n availability_zone=None, share_network_id=None):\n \"\"\"Create share network subnet.\n\n :param neutron_net_id: ID of Neutron network\n :param neutron_subnet_id: ID of Neutron subnet\n :param availability_zone: Name of the target availability zone\n :rtype: :class:`ShareNetworkSubnet`\n \"\"\"\n values = {}\n if neutron_net_id:\n values['neutron_net_id'] = neutron_net_id\n if neutron_subnet_id:\n values['neutron_subnet_id'] = neutron_subnet_id\n if availability_zone:\n values['availability_zone'] = availability_zone\n\n body = {'share-network-subnet': values}\n url = '/share-networks/%(share_network_id)s/subnets' % {\n 'share_network_id': share_network_id\n }\n\n return self._create(url, body, RESOURCE_NAME)\n\n def get(self, share_network, share_network_subnet):\n \"\"\"Get a share network subnet.\n\n :param share_network: share network that owns the subnet.\n :param share_network_subnet: share network subnet to get.\n 
:rtype: :class:`ShareNetworkSubnet`\n \"\"\"\n share_network_id = common_base.getid(share_network)\n share_network_subnet_id = common_base.getid(share_network_subnet)\n url = ('/share-networks/%(share_network_id)s/subnets'\n '/%(share_network_subnet)s') % {\n 'share_network_id': share_network_id,\n 'share_network_subnet': share_network_subnet_id\n }\n return self._get(url, \"share_network_subnet\")\n\n def delete(self, share_network, share_network_subnet):\n \"\"\"Delete a share network subnet.\n\n :param share_network: share network that owns the subnet.\n :param share_network_subnet: share network subnet to be deleted.\n \"\"\"\n url = ('/share-networks/%(share_network_id)s/subnets'\n '/%(share_network_subnet)s') % {\n 'share_network_id': common_base.getid(share_network),\n 'share_network_subnet': common_base.getid(share_network_subnet)\n }\n self._delete(url)\n" }, { "alpha_fraction": 0.7464967966079712, "alphanum_fraction": 0.7477707266807556, "avg_line_length": 29.230770111083984, "blob_id": "ac723bdec7393c8e28ebac48c71505bf3bcd9202", "content_id": "25e912c2e84dfac0ce6330ef4c7c97e63c014b6e", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 785, "license_type": "permissive", "max_line_length": 132, "num_lines": 26, "path": "/doc/source/contributor/index.rst", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "Contributing\n============\n\nCode is hosted at `opendev.org`_. Submit bugs to the\npython-manilaclient project on `Launchpad`_. Submit code to the\nopenstack/python-manilaclient project using `Gerrit`_.\n\n.. _opendev.org: https://opendev.org/openstack/python-manilaclient\n.. _Launchpad: https://launchpad.net/python-manilaclient\n.. _Gerrit: https://docs.openstack.org/infra/manual/developers.html#development-workflow\n\nTesting\n-------\n\nManilaclient has two types of tests - 'unit' and 'functional'.\n\nThe preferred way to run tests is using ``tox``.\n\nSee `Consistent Testing Interface`_ for more details.\n\n.. toctree::\n :maxdepth: 3\n\n functional-tests\n\n.. _Consistent Testing Interface: https://opendev.org/openstack/governance/src/branch/master/reference/project-testing-interface.rst" }, { "alpha_fraction": 0.7743431329727173, "alphanum_fraction": 0.7836167216300964, "avg_line_length": 39.4375, "blob_id": "98264b62cff28aeb1b8e2533981ca6967d0f937f", "content_id": "e115f3c8520808a7a75f977675cd597fb9045261", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1294, "license_type": "permissive", "max_line_length": 75, "num_lines": 32, "path": "/contrib/ci/pre_test_hook.sh", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "#!/bin/bash -xe\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# This script is executed inside pre_test_hook function in devstack gate.\n\n# Run manila's pre_test_hook.sh script first\nsource $BASE/new/manila/contrib/ci/pre_test_hook.sh True dummy multibackend\n\nsudo -H pip install virtualenv\nvirtualenv /tmp/devstack-tools\n/tmp/devstack-tools/bin/pip install -U devstack-tools==0.4.0\n\nlocalconf=$BASE/new/devstack/local.conf\nDSCONF=/tmp/devstack-tools/bin/dsconf\n\n# Set big quota for share networks to avoid limit exceedances\n$DSCONF setlc $localconf MANILA_OPTGROUP_DEFAULT_quota_share_networks 50\n$DSCONF setlc $localconf MANILA_CONFIGURE_DEFAULT_TYPES True\n# Enable and use only v3 of Identity API\n$DSCONF setlc $localconf IDENTITY_API_VERSION 3\n$DSCONF setlc $localconf ENABLE_IDENTITY_V2 False\n" }, { "alpha_fraction": 0.6103895902633667, "alphanum_fraction": 0.6124188303947449, "avg_line_length": 34.71014404296875, "blob_id": "48363c6ab89e84fff7c2450abac0860654406824", "content_id": "78f25e5d6278193aecff1a183ecfae59c3ec968e", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2464, "license_type": "permissive", "max_line_length": 78, "num_lines": 69, "path": "/manilaclient/tests/functional/osc/test_shares.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom manilaclient.tests.functional.osc import base\n\n\nclass SharesCLITest(base.OSCClientTestBase):\n\n def test_openstack_share_create(self):\n share_name = 'test_create_share'\n share = self.create_share(name=share_name)\n\n self.assertEqual(share['share_proto'], 'NFS')\n self.assertEqual(share['size'], '1')\n self.assertEqual(share['name'], share_name)\n\n shares_list = self.listing_result('share', 'list')\n self.assertIn(share['id'], [item['ID'] for item in shares_list])\n\n def test_openstack_share_list(self):\n share = self.create_share()\n shares_list = self.listing_result('share', 'list')\n self.assertTableStruct(shares_list, [\n 'ID',\n 'Name',\n 'Size',\n 'Share Proto',\n 'Status',\n 'Is Public',\n 'Share Type Name',\n 'Host',\n 'Availability Zone'\n ])\n self.assertIn(share['id'], [item['ID'] for item in shares_list])\n\n def test_openstack_share_show(self):\n share = self.create_share()\n\n result = self.dict_result('share', 'show %s' % share['id'])\n self.assertEqual(share['id'], result['id'])\n\n listing_result = self.listing_result('share', 'show %s' % share['id'])\n self.assertTableStruct(listing_result, [\n 'Field',\n 'Value'\n ])\n\n def test_openstack_share_delete(self):\n share = self.create_share(add_cleanup=False)\n shares_list = self.listing_result('share', 'list')\n\n self.assertIn(share['id'], [item['ID'] for item in shares_list])\n\n self.openstack('share delete %s' % share['id'])\n self.check_object_deleted('share', share['id'])\n shares_list_after_delete = self.listing_result('share', 'list')\n\n self.assertNotIn(\n share['id'], [item['ID'] for item in shares_list_after_delete])\n" }, { "alpha_fraction": 0.6386289000511169, "alphanum_fraction": 0.6407791376113892, "avg_line_length": 36.11737060546875, "blob_id": "cdec27be4585991b1f697106a666231e75f326fa", "content_id": "341548dc8bdcaf3a1be38a42bf2861ebe39788d3", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7906, "license_type": "permissive", "max_line_length": 78, "num_lines": 213, "path": "/manilaclient/tests/functional/test_share_servers.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\nimport testtools\n\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import exceptions\n\nfrom manilaclient.common import constants\nfrom manilaclient import config\nfrom manilaclient.tests.functional import base\nfrom manilaclient.tests.functional import utils\n\nCONF = config.CONF\n\n\[email protected]\nclass ShareServersReadOnlyTest(base.BaseTestCase):\n\n def setUp(self):\n super(ShareServersReadOnlyTest, self).setUp()\n self.client = self.get_admin_client()\n\n def test_share_server_list(self):\n self.client.list_share_servers()\n\n def test_share_server_list_with_host_param(self):\n self.client.list_share_servers(filters={'host': 'fake_host'})\n\n def test_share_server_list_with_status_param(self):\n self.client.list_share_servers(filters={'status': 'fake_status'})\n\n def test_share_server_list_with_share_network_param(self):\n self.client.list_share_servers(filters={'share_network': 'fake_sn'})\n\n def test_share_server_list_with_project_id_param(self):\n self.client.list_share_servers(\n filters={'project_id': 'fake_project_id'})\n\n @ddt.data(\n 'host', 'status', 'project_id', 'share_network',\n 'host,status,project_id,share_network',\n )\n def test_share_server_list_with_specified_columns(self, columns):\n self.client.list_share_servers(columns=columns)\n\n def test_share_server_list_by_user(self):\n self.assertRaises(\n exceptions.CommandFailed, self.user_client.list_share_servers)\n\n\[email protected]\nclass ShareServersReadWriteBase(base.BaseTestCase):\n\n protocol = None\n\n def setUp(self):\n super(ShareServersReadWriteBase, self).setUp()\n if not CONF.run_share_servers_tests:\n message = \"share-servers tests are disabled.\"\n raise self.skipException(message)\n if self.protocol not in CONF.enable_protocols:\n message = \"%s tests are disabled.\" % self.protocol\n raise self.skipException(message)\n\n self.client = self.get_admin_client()\n if not self.client.share_network:\n message = \"Can run only with DHSS=True mode\"\n raise self.skipException(message)\n\n def _create_share_and_share_network(self):\n name = data_utils.rand_name('autotest_share_name')\n description = data_utils.rand_name('autotest_share_description')\n\n common_share_network = self.client.get_share_network(\n self.client.share_network)\n share_net_info = (\n utils.get_default_subnet(self.user_client,\n common_share_network['id'])\n if utils.share_network_subnets_are_supported()\n else common_share_network)\n neutron_net_id = (\n share_net_info['neutron_net_id']\n if 'none' not in share_net_info['neutron_net_id'].lower()\n else None)\n neutron_subnet_id = (\n share_net_info['neutron_subnet_id']\n if 'none' not in share_net_info['neutron_subnet_id'].lower()\n else None)\n share_network = self.client.create_share_network(\n neutron_net_id=neutron_net_id,\n neutron_subnet_id=neutron_subnet_id,\n )\n\n self.share = self.create_share(\n share_protocol=self.protocol,\n size=1,\n name=name,\n description=description,\n share_network=share_network['id'],\n client=self.client,\n wait_for_creation=True\n )\n self.share = self.client.get_share(self.share['id'])\n return self.share, share_network\n\n def _delete_share_and_share_server(self, share_id, share_server_id):\n # Delete share\n self.client.delete_share(share_id)\n self.client.wait_for_share_deletion(share_id)\n\n # Delete share server\n self.client.delete_share_server(share_server_id)\n 
self.client.wait_for_share_server_deletion(share_server_id)\n\n def test_get_and_delete_share_server(self):\n self.share, share_network = self._create_share_and_share_network()\n share_server_id = self.client.get_share(\n self.share['id'])['share_server_id']\n\n # Get share server\n server = self.client.get_share_server(share_server_id)\n expected_keys = (\n 'id', 'host', 'status', 'created_at', 'updated_at',\n 'share_network_id', 'share_network_name', 'project_id',\n )\n\n if utils.is_microversion_supported('2.49'):\n expected_keys += ('identifier', 'is_auto_deletable')\n\n for key in expected_keys:\n self.assertIn(key, server)\n\n self._delete_share_and_share_server(self.share['id'], share_server_id)\n self.client.delete_share_network(share_network['id'])\n\n @testtools.skipUnless(\n CONF.run_manage_tests, 'Share Manage/Unmanage tests are disabled.')\n @utils.skip_if_microversion_not_supported('2.49')\n def test_manage_and_unmanage_share_server(self):\n share, share_network = self._create_share_and_share_network()\n share_server_id = self.client.get_share(\n self.share['id'])['share_server_id']\n server = self.client.get_share_server(share_server_id)\n server_host = server['host']\n export_location = self.client.list_share_export_locations(\n self.share['id'])[0]['Path']\n share_host = share['host']\n identifier = server['identifier']\n\n self.assertEqual('True', server['is_auto_deletable'])\n\n # Unmanages share\n self.client.unmanage_share(share['id'])\n self.client.wait_for_share_deletion(share['id'])\n\n server = self.client.get_share_server(share_server_id)\n self.assertEqual('False', server['is_auto_deletable'])\n\n # Unmanages share server\n self.client.unmanage_server(share_server_id)\n self.client.wait_for_share_server_deletion(share_server_id)\n\n # Manage share server\n managed_share_server_id = self.client.share_server_manage(\n server_host, share_network['id'], identifier)\n self.client.wait_for_resource_status(\n managed_share_server_id, constants.STATUS_ACTIVE,\n resource_type='share_server')\n\n managed_server = self.client.get_share_server(managed_share_server_id)\n self.assertEqual('False', managed_server['is_auto_deletable'])\n\n # Manage share\n managed_share_id = self.client.manage_share(\n share_host, self.protocol, export_location,\n managed_share_server_id)\n self.client.wait_for_resource_status(managed_share_id,\n constants.STATUS_AVAILABLE)\n\n self._delete_share_and_share_server(managed_share_id,\n managed_share_server_id)\n self.client.delete_share_network(share_network['id'])\n\n\nclass ShareServersReadWriteNFSTest(ShareServersReadWriteBase):\n protocol = 'nfs'\n\n\nclass ShareServersReadWriteCIFSTest(ShareServersReadWriteBase):\n protocol = 'cifs'\n\n\ndef load_tests(loader, tests, _):\n result = []\n for test_case in tests:\n if type(test_case._tests[0]) is ShareServersReadWriteBase:\n continue\n result.append(test_case)\n return loader.suiteClass(result)\n" }, { "alpha_fraction": 0.6871023774147034, "alphanum_fraction": 0.6963562965393066, "avg_line_length": 37.42222213745117, "blob_id": "e50b344f31a03a341eb328288c6197fbe2e317d1", "content_id": "1d02765a3d6ec207daf1f7091e41fe5dfa0804e5", "detected_licenses": [ "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1729, "license_type": "permissive", "max_line_length": 78, "num_lines": 45, "path": "/manilaclient/tests/functional/test_services.py", "repo_name": "Murray-LIANG/python-manilaclient", "src_encoding": "UTF-8", "text": "# 
Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\n\nfrom manilaclient.tests.functional import base\n\n\[email protected]\nclass ManilaClientTestServicesReadOnly(base.BaseTestCase):\n\n @ddt.data(\"1.0\", \"2.0\", \"2.6\", \"2.7\")\n def test_services_list(self, microversion):\n self.skip_if_microversion_not_supported(microversion)\n self.admin_client.manila('service-list', microversion=microversion)\n\n def test_list_with_debug_flag(self):\n self.clients['admin'].manila('service-list', flags='--debug')\n\n def test_shares_list_filter_by_host(self):\n self.clients['admin'].manila('service-list', params='--host host')\n\n def test_shares_list_filter_by_binary(self):\n self.clients['admin'].manila('service-list', params='--binary binary')\n\n def test_shares_list_filter_by_zone(self):\n self.clients['admin'].manila('service-list', params='--zone zone')\n\n def test_shares_list_filter_by_status(self):\n self.clients['admin'].manila('service-list', params='--status status')\n\n def test_shares_list_filter_by_state(self):\n self.clients['admin'].manila('service-list', params='--state state')\n" } ]
17
amandasaskia97/django
https://github.com/amandasaskia97/django
425f33e78e0495059e1c4b20c62304b563d9a409
726f3b49062c235300a75de2f5dc55d9ed09ba32
3f417a907aec68fc2dc2b7d21e226ce90b4cb26b
refs/heads/main
2023-07-12T12:01:53.209464
2021-08-11T07:19:50
2021-08-11T07:19:50
390,741,216
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5876376032829285, "alphanum_fraction": 0.5982218384742737, "avg_line_length": 29.882352828979492, "blob_id": "d18bd3254f4a6e4abdcd4fd020474b229ad769dc", "content_id": "bae996d01cfe86c6c580f3b4f5e1838825c93d6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4724, "license_type": "no_license", "max_line_length": 72, "num_lines": 153, "path": "/identify/views.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render,redirect\nfrom .forms import UploadFileForm\nfrom django.core.files.storage import FileSystemStorage\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import load_img\nimport tensorflow as tf\nimport numpy as np\nfrom django.http import HttpResponse\nimport cv2\nimport os\n\n\n# Create your views here.\ndef index(request):\n if request.method == 'POST':\n myfiles = request.FILES['myfiles']\n fs = FileSystemStorage()\n filename = fs.save(myfiles.name, myfiles)\n uploaded_file_url = (fs.url(filename))[1:]\n else:\n uploaded_file_url = '';\n return render(request,'identify.html',\n {\n 'judul':'Classification',\n 'uploaded_file_url':uploaded_file_url,\n }\n )\n\ndef tampilkan(request):\n return render(request,'identify.html',\n {\n 'judul':'Form',\n 'uploaded_file_url':''\n }\n )\n myfile = request.FILES['myfiles']\n fs = FileSystemStorage()\n filename = fs.save(myfiles.name, myfiles)\n uploaded_file_url = fs.url(filename)\n return render(request, 'identify.html',{\n 'uploaded_file_url':uploaded_file_url,\n 'judul':'berhasil'\n })\n\ndef prosesImg(request):\n filelink = request.POST.dict().get(\"myfiles\")\n\n tf.compat.v1.disable_eager_execution()\n img_height, img_width=28,28\n model_graph = tf.Graph()\n with model_graph.as_default():\n tf_session=tf.compat.v1.Session()\n with tf_session.as_default():\n models=load_model('./model/trainbaru.model')\n\n testimage='.'+filelink\n\n img = image.load_img(testimage, target_size=(img_height, img_width))\n x = image.img_to_array(img)\n x = x.reshape(1, img_height, img_width, 3)\n predictedLabel = ''\n with model_graph.as_default():\n with tf_session.as_default():\n predi = models.predict(x)\n\n if np.argmax(predi) == 0:\n predictedLabel = \"\"\"Classification : huruf A <br/>\"\"\"\n\n elif np.argmax(predi) == 1:\n predictedLabel = \"\"\"Classification : huruf B <br/>\"\"\"\n\n elif np.argmax(predi) == 2:\n predictedLabel = \"\"\"Classification : huruf C <br/>\"\"\"\n\n elif np.argmax(predi) == 3:\n predictedLabel = \"\"\"Classification : huruf D <br/>\"\"\"\n\n elif np.argmax(predi) == 4:\n predictedLabel = \"\"\"Classification : huruf E <br/>\"\"\"\n\n elif np.argmax(predi) == 5:\n predictedLabel = \"\"\"Classification : huruf F <br/>\"\"\"\n\n elif np.argmax(predi) == 6:\n predictedLabel = \"\"\"Classification : huruf G <br/>\"\"\"\n\n elif np.argmax(predi) == 7:\n predictedLabel = \"\"\"Classification : huruf H <br/>\"\"\"\n\n elif np.argmax(predi) == 8:\n predictedLabel = \"\"\"Classification : huruf I <br/>\"\"\"\n\n elif np.argmax(predi) == 9:\n predictedLabel = \"\"\"Classification : huruf J <br/>\"\"\"\n\n elif np.argmax(predi) == 10:\n predictedLabel = \"\"\"Classification : huruf K <br/>\"\"\"\n\n elif np.argmax(predi) == 11:\n predictedLabel = \"\"\"Classification : huruf L <br/>\"\"\"\n\n elif np.argmax(predi) == 12:\n predictedLabel = \"\"\"Classification : huruf M <br/>\"\"\"\n\n elif 
np.argmax(predi) == 13:\n predictedLabel = \"\"\"Classification : huruf N <br/>\"\"\"\n\n elif np.argmax(predi) == 14:\n predictedLabel = \"\"\"Classification : huruf O <br/>\"\"\"\n\n elif np.argmax(predi) == 15:\n predictedLabel = \"\"\"Classification : huruf P <br/>\"\"\"\n\n elif np.argmax(predi) == 16:\n predictedLabel = \"\"\"Classification : huruf Q <br/>\"\"\"\n\n elif np.argmax(predi) == 17:\n predictedLabel = \"\"\"Classification : huruf R <br/>\"\"\"\n #predictedLabel = \"\"\"Classification : huruf C <br/>\"\"\"\n\n elif np.argmax(predi) == 18:\n predictedLabel = \"\"\"Classification : huruf S <br/>\"\"\"\n\n elif np.argmax(predi) == 19:\n predictedLabel = \"\"\"Classification : huruf T <br/>\"\"\"\n\n elif np.argmax(predi) == 20:\n predictedLabel = \"\"\"Classification : huruf U <br/>\"\"\"\n\n elif np.argmax(predi) == 21:\n predictedLabel = \"\"\"Classification : huruf V <br/>\"\"\"\n\n elif np.argmax(predi) == 22:\n predictedLabel = \"\"\"Classification : huruf W <br/>\"\"\"\n\n elif np.argmax(predi) == 23:\n predictedLabel = \"\"\"Classification : huruf X <br/>\"\"\"\n\n elif np.argmax(predi) == 24:\n predictedLabel = \"\"\"Classification : huruf Y <br/>\"\"\"\n\n else:\n predictedLabel = \"\"\"Classification : huruf Z <br/>\"\"\"\n\n return render(request, 'hasil.html', {\n 'judul': 'Result',\n 'subjudul': 'RESULT',\n 'predictedLabel': predictedLabel,\n 'predi': predi,\n 'uploaded_file_url': filelink }\n )" }, { "alpha_fraction": 0.5633770823478699, "alphanum_fraction": 0.580805778503418, "avg_line_length": 32.477272033691406, "blob_id": "79830d9e6459e41987b5899414bb7ce9f20c47ca", "content_id": "896dca4982c12c1940ee5b05e065905ea07d3aba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4418, "license_type": "no_license", "max_line_length": 150, "num_lines": 132, "path": "/TA/views.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\n#def index (request):\n #return render(request,'TA/index.html')\n\n#def recent (request):\n #return HttpResponse('<h1>ini recent post</h1>')\n\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.shortcuts import redirect\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nimport tensorflow as tf\nfrom tensorflow import Graph\nfrom matplotlib import pyplot as plt\n#from skimage import io\nimport string\nimport random\n#from .imageProses import ImageTransform\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'classification/index.html', {\n 'uploaded_file_url': '',\n 'judul': 'Classification'\n })\n\n\ndef review(request):\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n return render(request, 'classification/index.html', {\n 'uploaded_file_url': uploaded_file_url,\n 'judul': 'Classification'\n })\n\n\ndef prosesImg(request):\n filelink = request.POST.dict().get(\"myfile\")\n\n tf.compat.v1.disable_eager_execution()\n img_height, img_width = 224, 224\n model_graph = tf.Graph()\n with model_graph.as_default():\n tf_session = tf.compat.v1.Session()\n with tf_session.as_default():\n models = load_model('./model/BrailleNet.h5')\n\n testimage = '.' 
+ filelink\n\n img = image.load_img(testimage, target_size=(img_height, img_width))\n x = image.img_to_array(img)\n x = x / 255\n x = x.reshape(1, img_height, img_width, 3)\n with model_graph.as_default():\n with tf_session.as_default():\n predi = models.predict(x)\n\n if np.argmax(predi) == 0:\n predictedLabel = \"\"\"Classification : Muda <br/>\n Prediction : Jika curah hujan bagus, 2,5-3 bulan akan menjadi buah setengah matang dan 3,5-4,5 bulan lagi akan matang\"\"\"\n\n elif np.argmax(predi) == 1:\n predictedLabel = \"\"\"Classification : Setengah Matang <br/> \n Prediction : Jika curah hujan bagus, 1-2 bulan lagi akan matang <br/>\n Kandungan Protein : 9.57 % <br/>\n Kandungan Gula : 1.428 %<br/>\n Kandungan Lemak : 8.2 %<br/>\n Kandungan Kafein : 0.62 %<br/>\n pH : 5.3<br/>\"\"\"\n\n elif np.argmax(predi) == 2:\n predictedLabel = \"\"\"Classification : Matang <br/>\n Prediction : Kualitas baik untuk dipanen dan bagus untuk dijadikan benih <br/>\n Kandungan Protein : 9.61 % <br/>\n Kandungan Gula : 1.652 %<br/>\n Kandungan Lemak : 7.8 %<br/>\n Kandungan Kafein : 0.65 %<br/>\n pH : 5.5<br/>\"\"\"\n\n else:\n predictedLabel = \"\"\"Classification : Tua <br/>\n Prediction : Sudah melewati masa panen <br/>\n Kandungan Protein : 9.48 % <br/>\n Kandungan Gula : 2.074 %<br/>\n Kandungan Lemak : 8.5 %<br/>\n Kandungan Kafein : 0.7 %<br/>\n pH : 5.7<br/>\"\"\"\n\n # myImage = ImageTransform(testimage)\n # myImage.resize(1500,'area').edgeDetect().cropImage().rotate(90).write('output.jpg')\n\n # histogram\n\n\n myimgname = id_generator()\n plt.savefig(\"./album/\" + myimgname + \"_r.png\")\n plt.close()\n\n\n plt.savefig(\"./album/\" + myimgname + \"_g.png\")\n plt.close()\n\n\n\n plt.savefig(\"./album/\" + myimgname + \"_b.png\")\n plt.close()\n\n return render(request, 'classification/hasil.html', {\n 'judul': 'Result',\n 'subjudul': 'RESULT',\n 'histogram_img_r': \"/album/\" + myimgname + \"_r.png\",\n 'histogram_img_g': \"/album/\" + myimgname + \"_g.png\",\n 'histogram_img_b': \"/album/\" + myimgname + \"_b.png\",\n 'banner': '/static/classification/image/oke.jpg',\n 'predictedLabel': predictedLabel,\n 'predi': predi,\n 'uploaded_file_url': filelink,\n })\n\n\ndef id_generator(size=8, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 13.5, "blob_id": "48e4a2c3c514c5d1ddbb775fcf9475fd65b384c5", "content_id": "fb7aa967e57947eb3a135bfedcd67439d4987065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "\"# Djangopro\" \n\"# Djangopro\" \n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 18.5, "blob_id": "a1c953af02b5020fb982f063cb408840c89c598e", "content_id": "0c00b49bb4ae0b010ac167d49ad545275f386e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/TA/admin.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\nclass TAConfig(AppConfig):\n name = 'TA'\n" }, { "alpha_fraction": 0.6448979377746582, "alphanum_fraction": 0.6448979377746582, "avg_line_length": 
23.600000381469727, "blob_id": "dc81dc5679078397777e974f8e8d85db111bfe5d", "content_id": "5604a0539de578c69c78a6ab151659fe8d09c487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/TA/urls.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n #url('recent/', views.recent),\n url('',views.index, name='image'),\n url('review', views.review, name='review'),\n url('proses', views.prosesImg, name='proses'),\n]" }, { "alpha_fraction": 0.662162184715271, "alphanum_fraction": 0.7104247212409973, "avg_line_length": 38.846153259277344, "blob_id": "143781e8089bf9eb159c5f31b6a8dbce81cf0a24", "content_id": "36d1f116898608d3139a770d19d219d384001e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 518, "license_type": "no_license", "max_line_length": 106, "num_lines": 13, "path": "/templates/about.html", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n\n{% block header %}\n<div class=\"col-lg-12\" style=\"box-shadow: 0 1px rgba(0,0,0,0.5); border\nradius: 12px; background-color: #d8c338; margin-top: 1px; padding: 30px; align-items: center\">\n\t<h1 style=\"margin-top: 0px;\">ABOUT</h1>\n\t<p style=\"margin-top: 20px; font\n\tsize: 25px;\">\n\t\tAplikasi web ini berguna untuk mentranslate Huruf Braille ke Huruf Alfabet.\n\t\tDiharapkan aplikasi ini dapat membantu orang-orang yang ingin mempelajari Huruf Braille dari dasar. </p>\n</div>\n{% endblock header %}\n" }, { "alpha_fraction": 0.7231638431549072, "alphanum_fraction": 0.7401130199432373, "avg_line_length": 28.66666603088379, "blob_id": "116f078c4c3a8b00b6ed4406693e30be2b2ac597", "content_id": "f9cb7262ddc24a6328fb853ee8d9442e22767e0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/identify/models.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass image(models.Model):\n name = models.CharField(max_length=200)\n images = models.ImageField(upload_to='image')" }, { "alpha_fraction": 0.6249460577964783, "alphanum_fraction": 0.6745792031288147, "avg_line_length": 65.22856903076172, "blob_id": "12600ffe2cae6d1eb07e6481cc7e5e214d2c68b3", "content_id": "1e7d263364ef87ccdee11a5bb164987e7260fbb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2317, "license_type": "no_license", "max_line_length": 669, "num_lines": 35, "path": "/templates/info.html", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %} \n{% block header %}\n<div class=\"col-lg-12\" style=\"box-shadow: 0 3px rgba(0,0,0,0.5); border\nradius: 12px; background-color: #0b483b; margin\ntop: 70px; padding: 30px;\">\n\t<h1 style=\"margin-top:20px; text\n\talign: justify;\">Informasi Terkait Tentang Kopi</h1>\n\t\t<h3 style=\"text\n\talign: justify;\">Berikut Beberapa Referensi Terkait Tentang Kopi :</h3>\n\t\t\t<div class=\"col-md-8\" >\n\t\t\t\t<ol>\n\t\t\t\t\t<li style=\"text-align: justify;\" >\n\t\t\t\t\t\t<a href=\"https://books.google.co.id/books?hl=id&lr=&id=WUneAg 
AAQBAJ&oi=fnd&pg=PR1&dq=kopi+arabika+gayo&ots=SWKl2DGDmn&sig=IlzaAPbmUS6U7LWouTuFiP8SHQ&redir_esc=y#v=onepage&q&f=false\"\n\t\t\t\t\t\t style=\"color: aliceblue;\">PANDUAN BUDIDAYA DAN PENGOLAHAN KOPI ARABIKA GAYO\n\t\t\t\t\t\t</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li style=\"text-align: justify;\">\n\t\t\t\t\t\t<a href=\"http://download.garuda.ristekdikti.go.id/article.php ?article=686164&val=9411&title=PERKEMBANGAN%20PENGOLAHAN%20KOPI%20ARABIKA%20G AYO%20MULAI%20DARI%20PANEN%20HINGGA%20PASCA%20PANEN%20DI%20KAMPUNG%20SIMPANG% 20TERITIT%20TAHUN%2020102017\" style=\"color: aliceblue;\">PERKEMBANGAN PENGOLAHAN KOPI ARABIKA GAYO MUL AI DARI PANEN HINGGA PASCA PANEN\n \n91 \n </a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li style=\"text-align: justify;\">\n\t\t\t\t\t\t<a href=\"http://caswellscoffee.com/cara-memanen-buahkopi/\" style=\"color: aliceblue;\">CARA MEMANEN BUAH KOPI</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li style=\"text-align: justify;\">\n\t\t\t\t\t\t<a href=\"http://cybex.pertanian.go.id/mobile/artikel/90787/P ANEN-DAN-PASCAPANENKOPI/\" style=\"color: aliceblue;\">PANEN DAN PASCAPANEN KOPI</a>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li style=\"text-align: justify;\">\n\t\t\t\t\t\t<a href=\"http://balittri.litbang.pertanian.go.id/index.php/pu blikasi/category/100-sirinov-vol-5-no-1-2017?download=476%3Asirinov-vol-5-no12017 \"style=\"color: aliceblue;\">PENGARUH TINGKAT KEMATANGAN DAN PENYIMPANAN T ERHADAP VIABILITAS BENIH KOPI ARABIKA</a> </li> <li style=\"text-align: justify;\"> <a href=\"https://kopitem.com/tentangkopi/\" style=\"color: aliceblue;\">TENTANG KOPI</a> </li> <li style=\"text-align: justify;\"> <a href=\"http://www.iontec-europe.com/2018/04/19/the-masteryof-coffee-processing-for-uniqueingredients/\" style=\"color: aliceblue;\">TINGKAT KEMATANGAN KOPI</a>\n\t\t\t\t</li>\n\t\t\t\t</ol>\n\t\t\t</div>\n</div>\n{% endblock header %}" }, { "alpha_fraction": 0.7302504777908325, "alphanum_fraction": 0.7302504777908325, "avg_line_length": 26.3157901763916, "blob_id": "918375d04fe6dd884ca53022e7d94f13c39caabc", "content_id": "e9c93195829016ca73343155c1ccdf6222907462", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 51, "num_lines": 19, "path": "/mywebsites/urls.py", "repo_name": "amandasaskia97/django", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\nfrom about import views as aboutViews\nfrom identify import views as idenViews\nfrom info import views as infoViews\n\n\nurlpatterns = [\n url('admin/', admin.site.urls),\n url('about', aboutViews.index),\n url('identify', idenViews.index),\n url('hasil', idenViews.prosesImg,name='hasil'),\n url('info', infoViews.index),\n url('', views.index),\n\n]\n" } ]
9
Pramit-Jha/Airline-Reservation-System
https://github.com/Pramit-Jha/Airline-Reservation-System
c9ef7ac0017053adf031aec0311bb575cc0e1490
a0627105566501a0713bc175249fd3d5241c5172
8cc2f4324dbfca231e3c49f2fff52c9de519159e
refs/heads/master
2020-08-21T19:58:59.819009
2019-10-19T16:21:46
2019-10-19T16:21:46
216,235,303
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.804347813129425, "alphanum_fraction": 0.804347813129425, "avg_line_length": 67.5, "blob_id": "093b85bee85b91094f20a818cefe0a723a86ae32", "content_id": "5c686fe99f89657783a98b6d28897ed5a5d0b027", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 138, "license_type": "no_license", "max_line_length": 107, "num_lines": 2, "path": "/README.md", "repo_name": "Pramit-Jha/Airline-Reservation-System", "src_encoding": "UTF-8", "text": "# Airline-Reservation-System\nIt is a python project used to create UI using tkinter library from one fixed location to other locations . \n" }, { "alpha_fraction": 0.4531049430370331, "alphanum_fraction": 0.4874020516872406, "avg_line_length": 40.22591018676758, "blob_id": "e944ec7afed9d2df12615fd0e6384234d9b42d9b", "content_id": "ff3ac72b9e7fc42f68e1a1d365e08d4a013187ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52191, "license_type": "no_license", "max_line_length": 172, "num_lines": 1235, "path": "/ARS.py", "repo_name": "Pramit-Jha/Airline-Reservation-System", "src_encoding": "UTF-8", "text": "\r\nfrom tkinter import *\r\nfrom tkinter import Tk, StringVar, ttk\r\nfrom tkinter import messagebox\r\nimport random \r\nimport time\r\nimport datetime\r\n\r\nroot=Tk()\r\nroot.geometry (\"1350x750+0+0\")\r\nroot.title(\"Airline Reservation System \")\r\nroot.configure(background='powder blue')\r\n\r\nTop = Frame(root,width=1350,height=150,bd=14, relief='raise')\r\nTop.pack(side=TOP)\r\n\r\nf1=Frame(root,width=900,height=650,bd=8, relief='raise')\r\nf1.pack(side=LEFT)\r\nf2=Frame(root,width=440,height=650,bd=8, relief='raise')\r\nf2.pack(side=RIGHT)\r\n\r\nframetopRight = Frame(f2,width=440,height=650,bd=12, relief='raise')\r\nframetopRight.pack(side=TOP)\r\nframeBottomRight = Frame(f2,width=440,height=50,bd=16, relief='raise')\r\nframeBottomRight.pack(side=BOTTOM)\r\n\r\n\r\nf1a=Frame(f1,width=900,height=330,bd=8, relief='raise')\r\nf1a.pack(side=TOP)\r\nf2a=Frame(f1,width=900,height=320,bd=6, relief='raise')\r\nf2a.pack(side=BOTTOM) \r\n\r\n\r\ntopLeft1=Frame(f1a,width=300,height=200,bd=16, relief='raise')\r\ntopLeft1.pack(side=LEFT)\r\ntopLeft2=Frame(f1a,width=300,height=200,bd=16, relief='raise')\r\ntopLeft2.pack(side=RIGHT)\r\ntopLeft3=Frame(f1a,width=300,height=200,bd=16, relief='raise')\r\ntopLeft3.pack(side=RIGHT)\r\n\r\n#------------------------------------------------------------------\r\n\r\nbottomLeft1=Frame(f2a,width=450,height=450,bd=14, relief='raise')\r\nbottomLeft1.pack(side=LEFT)\r\n\r\nbottomLeft2=Frame(f2a,width=450,height=450,bd=14, relief='raise')\r\nbottomLeft2.pack(side=RIGHT)\r\n\r\n#------------------------------------------------------------------\r\n\r\nTop.configure(background='powder blue')\r\nf1.configure(background='powder blue')\r\nf2.configure(background='powder blue')\r\nlblTitle=Label(Top,font=('arial',40,'bold'),text=\"Airline Ticketing System\",bd=10,width =41, justify='center')\r\nlblTitle.grid(row=0,column=0)\r\n\r\nDate1 = StringVar()\r\ntime1 = StringVar()\r\nTicketclass = StringVar()\r\nTicketPrice = StringVar()\r\nChild_Ticket = StringVar()\r\nAdult_Ticket = StringVar()\r\nFrom_Destination = StringVar()\r\nTo_Destination = StringVar()\r\nFee_Price = StringVar()\r\nRoute = StringVar()\r\nReceipt_Ref = StringVar()\r\n\r\n 
\r\nTicketclass.set(\"\")\r\nTicketPrice.set(\"\")\r\nChild_Ticket.set(\"\")\r\nAdult_Ticket.set(\"\")\r\nFrom_Destination.set(\"\")\r\nTo_Destination.set(\"\")\r\nFee_Price.set(\"\")\r\nRoute.set(\"\")\r\nReceipt_Ref.set(\"\")\r\n\r\n#--------------------------------------------------------------------------------------------------------------------------\r\n\r\nlblReceipt=Label(frametopRight,font=('arial',22,'bold','italic'),text=\"Airbus Ticket Summary\"\r\n ,width = 20,height=2, justify='center')\r\nlblReceipt.grid(row=0,column=0)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSp = Label(frametopRight, font=('arial',14, 'bold'),width = 31,height=1,relief='sunken',\r\n justify='center')\r\nlblSp.grid(row=1, column=0, columnspan=4)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblClass1=Label(frameBottomRight,font=('arial',13,'bold',),text=\"Class\"\r\n ,width = 8,relief='sunken', justify='left')\r\nlblClass1.grid(row=0,column=0)\r\n\r\nlblClass2=Label(frameBottomRight,font=('arial',13,'bold'),\r\n width = 8,relief='sunken',textvariable=Ticketclass, justify='center')\r\nlblClass2.grid(row=1,column=0)\r\n\r\nlblTicket1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Ticket\"\r\n ,width = 8, relief='sunken',justify='center')\r\nlblTicket1.grid(row=0,column=1)\r\n\r\nlblticket2=Label(frameBottomRight,font=('arial',13,'bold'),\r\n width = 8,relief='sunken',textvariable=TicketPrice, justify='center')\r\nlblticket2.grid(row=1,column=1)\r\n\r\nlblAdult1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Adult\"\r\n , width = 8,relief='sunken', justify='center')\r\nlblAdult1.grid(row=0,column=2)\r\n\r\nlblAdult2=Label(frameBottomRight,font=('arial',13,'bold'),\r\n width = 8,relief='sunken',textvariable=Adult_Ticket, justify='center')\r\nlblAdult2.grid(row=1,column=2)\r\n\r\nlblChild1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Child\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblChild1.grid(row=0,column=3)\r\n\r\nlblChild2=Label(frameBottomRight,font=('arial',13,'bold'),\r\n width = 8,relief='sunken',textvariable=Child_Ticket, justify='center')\r\nlblChild2.grid(row=1,column=3)\r\n#----------------------------------------------------------------------------------\r\n\r\nlblsp=Label(frameBottomRight,font=('arial',13,'bold'),width = 34,height=2,relief='sunken', justify='center')\r\nlblsp.grid(row=2,column=0,columnspan=4)\r\n\r\n#-------------------------------------------------------------------------------------\r\nlblFrom1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"From\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblFrom1.grid(row=3,column=1)\r\n\r\nlblFrom2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=From_Destination, justify='center')\r\nlblFrom2.grid(row=3,column=2)\r\n#------------------------------------------------------------------------\r\nlblTo1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"To\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblTo1.grid(row=4,column=1)\r\n\r\nlblTo2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=To_Destination, justify='center')\r\nlblTo2.grid(row=4,column=2)\r\n\r\nlblPrice1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Price\"\r\n ,width = 8,relief='sunken', 
justify='center')\r\nlblPrice1.grid(row=5,column=1)\r\n\r\nlblPrice2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=Fee_Price, justify='center')\r\nlblPrice2.grid(row=5,column=2)\r\n#----------------------------------------------------------------------------------\r\n\r\nlblsp=Label(frameBottomRight,font=('arial',13,'bold'),width = 34,height=2,relief='sunken', justify='center')\r\nlblsp.grid(row=6,column=0,columnspan=4)\r\n\r\n#-------------------------------------------------------------------------------------\r\nlblRefNo1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"RefNo\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblRefNo1.grid(row=7,column=0)\r\n\r\nlblRefNo2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=Receipt_Ref, justify='center')\r\nlblRefNo2.grid(row=8,column=0)\r\n\r\nlblTime1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Time\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblTime1.grid(row=7,column=1)\r\n\r\nlblTime2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken',textvariable=time1, justify='center')\r\nlblTime2.grid(row=8,column=1)\r\n\r\nlblDate1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Date\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblDate1.grid(row=7,column=2)\r\n\r\nlblDate2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=Date1, justify='center')\r\nlblDate2.grid(row=8,column=2)\r\n\r\nlblRoute1=Label(frameBottomRight,font=('arial',13,'bold'),text=\"Route\"\r\n ,width = 8,relief='sunken', justify='center')\r\nlblRoute1.grid(row=7,column=3)\r\n\r\nlblRoute2=Label(frameBottomRight,font=('arial',13,'bold')\r\n ,width = 8,relief='sunken', textvariable=Route, justify='center')\r\nlblRoute2.grid(row=8,column=3)\r\n#------------------------------------Functions--------------------------------------\r\n\r\ndef btnClick(numbers):\r\n global operator\r\n operator = operator + str(numbers)\r\n text_Input.set(operator)\r\n\r\ndef btnClearDisplay():\r\n global operator\r\n operator = \"\"\r\n text_Input.set(\"\")\r\n\r\ndef btnEqualsInput():\r\n global operator\r\n sumup = str(eval(operator))\r\n text_Input.set(sumup)\r\n operator=\"\"\r\n\r\ndef iExit():\r\n qExit = messagebox.askyesno(\"Quit System\",\"Do you want to quit?\")\r\n if qExit > 0 : \r\n root.destroy()\r\n return\r\n\r\ndef Travel_Cost() :\r\n\r\n if (var9.get() == \"Chennai\" and var2.get() == 1 and var4.get() == 1 and var10.get() == 1):\r\n Tcost = float(4500)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.05))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.05)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Chennai\" and var2.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(3800)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", 
str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #-----------------------------------------------------------------------------------\r\n elif (var9.get() == \"Delhi\" and var2.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(4000)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.05))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.05)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Delhi\" and var2.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(3500)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n #-----------------------------------------------------------------------------------------------\r\n\r\n elif (var9.get() == \"Kolkata\" and var2.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(5000)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.05))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.05)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Kolkata\" and var2.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(4200)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n 
To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #----------------------------------------------------------------------------------------------\r\n elif (var9.get() == \"Banglore\" and var2.get() == 1 and var4.get() == 1 and var10.get() == 1):\r\n Tcost = float(3700)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.05))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.05)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Banglore\" and var2.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(3000)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n#------------------------------------------Business--------------------------------------------------------\r\n\r\n\r\n elif (var9.get() == \"Chennai\" and var3.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(7500)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.12))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.12)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Chennai\" and var3.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(7000)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n 
#-----------------------------------------------------------------------------------\r\n elif (var9.get() == \"Delhi\" and var3.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(7000)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.12))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.12)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Delhi\" and var3.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(6600)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n #-----------------------------------------------------------------------------------------------\r\n\r\n elif (var9.get() == \"Kolkata\" and var3.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(8400)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((Tcost * Single)*0.12))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.12)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Kolkata\" and var3.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(7600)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #----------------------------------------------------------------------------------------------\r\n elif (var9.get() == \"Banglore\" and var3.get() == 1 and var4.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(6800)\r\n Single = float(var12.get())\r\n Adult_Tax = \"Rs\" , 
str('%.2f'%((Tcost * Single)*0.12))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(Tcost * Single))\r\n TotalCost = \"Rs\", str('%.2f'%((Tcost * Single) + ((Tcost * Single)*0.12)))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Banglore\" and var3.get() and var5.get() == 1 and var10.get() == 1 ):\r\n Tcost = float(6000)\r\n Single = float(var12.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(Tcost * Single))\r\n TotalCost =\"Rs\", str('%.2f'%((Tcost * Single)+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n ######################################################### RETURN ################################################################################################-\r\n\r\n elif (var9.get() == \"Chennai\" and var2.get() == 1 and var4.get() == 1 and var11.get() == 1):\r\n Tcost = float(4500)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.05)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single)) + ((2*(Tcost * Single)*0.05))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Chennai\" and var2.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(3800)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n #-----------------------------------------------------------------------------------\r\n elif (var9.get() == \"Delhi\" and var2.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(4000)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.05)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)) )\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single)) + ((2*(Tcost * 
Single)*0.05))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Delhi\" and var2.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(3500)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n #-----------------------------------------------------------------------------------------------\r\n\r\n elif (var9.get() == \"Kolkata\" and var2.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(5000)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.05)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single)) + ((2*(Tcost * Single)*0.05))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Kolkata\" and var2.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(4200)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #----------------------------------------------------------------------------------------------\r\n elif (var9.get() == \"Banglore\" and var2.get() == 1 and var4.get() == 1 and var11.get() == 1):\r\n Tcost = float(3700)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.05)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single)) + ((2*(Tcost * Single)*0.05))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n 
From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Banglore\" and var2.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(3000)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Economy\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n#------------------------------------------Business--------------------------------------------------------\r\n\r\n\r\n elif (var9.get() == \"Chennai\" and var3.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(7500)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.12)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single) + ((Tcost * Single)*0.12))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Chennai\" and var3.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(7000)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Chennai\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #-----------------------------------------------------------------------------------\r\n elif (var9.get() == \"Delhi\" and var3.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(7000)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.12)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single) + ((Tcost * Single)*0.12))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n 
Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Delhi\" and var3.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(6600)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Delhi\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n #-----------------------------------------------------------------------------------------------\r\n\r\n elif (var9.get() == \"Kolkata\" and var3.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(8400)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.12)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single) + ((Tcost * Single)*0.12))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Kolkata\" and var3.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(7600)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Kolkata\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n #----------------------------------------------------------------------------------------------\r\n elif (var9.get() == \"Banglore\" and var3.get() == 1 and var4.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(6800)\r\n Single = float(var6.get())\r\n Adult_Tax = \"Rs\" , str('%.2f'%((2*(Tcost * Single)*0.12)))\r\n Adult_Fees = \"Rs\" , str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost = \"Rs\", str('%.2f'%((2*(Tcost * Single) + ((Tcost * Single)*0.12))))\r\n Tax.set(Adult_Tax)\r\n SubTotal.set(Adult_Fees )\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Adult_Fees )\r\n Child_Ticket.set(\"No\")\r\n Adult_Ticket.set(\"Yes\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n X = random.randint(109, 5876)\r\n randomRef = str(X)\r\n Receipt_Ref.set(\"TFL\" + randomRef)\r\n\r\n elif (var9.get() == \"Banglore\" and var3.get() and var5.get() == 1 and var11.get() == 1 ):\r\n Tcost = float(6000)\r\n Single = float(var6.get())\r\n Child_Tax =\"Rs\", 
str('%.2f'%(Tcost * 0))\r\n Child_Fees =\"Rs\", str('%.2f'%(2*(Tcost * Single)))\r\n TotalCost =\"Rs\", str('%.2f'%((2*(Tcost * Single))+ (Tcost * 0)))\r\n Tax.set(Child_Tax)\r\n SubTotal.set(Child_Fees)\r\n Ticketclass.set(\"Business\")\r\n TicketPrice.set(Child_Fees)\r\n Child_Ticket.set(\"Yes\")\r\n Adult_Ticket.set(\"No\")\r\n From_Destination.set(\"Mumbai\")\r\n To_Destination.set(\"Banglore\")\r\n Fee_Price.set(TotalCost)\r\n Total.set(TotalCost)\r\n Route.set(\"Direct\")\r\n\r\n x = random.randint(109, 5876)\r\n randomRef = str(x)\r\n Receipt_Ref.set(\"TFL\"+ randomRef)\r\n\r\n\r\n##################################################################################################################################################\r\n\r\n\r\ndef chkbutton_value():\r\n if (var10.get() == 1):\r\n var12.set(\"\")\r\n EntSingle.configure(state= NORMAL) \r\n elif var10.get() == 0:\r\n EntSingle.configure(state= DISABLED)\r\n var12.set(\"0\")\r\n if (var11.get() == 1):\r\n var6.set(\"\")\r\n EntReturn.configure(state= NORMAL)\r\n elif var11.get() == 0:\r\n EntReturn.configure(state= DISABLED)\r\n var6.set(\"0\")\r\n \r\n \r\n \r\ndef Reset():\r\n Tax.set(\"0\")\r\n var1.set(\"0\")\r\n var2.set(\"0\")\r\n var3.set(\"0\")\r\n var4.set(\"0\")\r\n var5.set(\"0\")\r\n var6.set(\"0\")\r\n var7.set(\"0\")\r\n var8.set(\"0\")\r\n var9.set(\"0\")\r\n var10.set(\"0\")\r\n var11.set(\"0\")\r\n var12.set(\"0\")\r\n Total.set(\"0\")\r\n SubTotal.set(\"0\")\r\n Total.set(\"0\")\r\n Ticketclass.set(\"\")\r\n TicketPrice.set(\"\")\r\n Child_Ticket.set(\"\")\r\n Adult_Ticket.set(\"\")\r\n From_Destination.set(\"\")\r\n To_Destination.set(\"\")\r\n Fee_Price.set(\"\") \r\n Route.set(\"\") \r\n Receipt_Ref.set(\"\")\r\n#-------------------------------------variables--------------------------------\r\n\r\n\r\nvar1 = IntVar()\r\nvar2 = IntVar()\r\nvar3 = IntVar()\r\nvar4 = IntVar()\r\nvar5 = IntVar()\r\nvar6 = IntVar()\r\nvar7 = StringVar()\r\nvar8 = StringVar()\r\nvar9 = StringVar()\r\nvar10 = IntVar()\r\nvar11 = IntVar()\r\nvar12 = IntVar()\r\nvar1.set(\"0\")\r\nvar2.set(\"0\")\r\nvar3.set(\"0\")\r\nvar4.set(\"0\")\r\nvar5.set(\"0\")\r\nvar6.set(\"0\") #6,10,11,12\r\nvar7.set(\"0\")\r\nvar8.set(\"0\")\r\nvar9.set(\"0\")\r\nvar10.set(\"0\")\r\nvar11.set(\"0\")\r\nvar12.set(\"0\")\r\n\r\n\r\nTax = StringVar()\r\nTotal = StringVar()\r\nSubTotal = StringVar()\r\ntext_Input = StringVar()\r\noperator = \"\"\r\n\r\n#---------------------------------Date And Time--------------------------------------------------\r\n\r\nDate1.set(time.strftime(\"%d/%m/%Y\"))#Date\r\ntime1.set(time.strftime('%H:%M:%S'))#Time\r\n\r\n#-------------------------------------Create Widget topLeft1 --------------------------------\r\n\r\nlblClass=Label(topLeft1, font=('arial',21, 'bold'), text='Class', bd=8)\r\nlblClass.grid(row=0,column=0, sticky=W)\r\n\r\n\r\nlblSp = Label(topLeft1, font=('arial',14, 'bold'),width = 17,height=1,relief='raise',\r\n justify='center')\r\nlblSp.grid(row=1, column=0, columnspan=4)\r\n\r\n\r\nchkEconomy = Checkbutton(topLeft1,font=('arial',19, 'bold'), text='Economy', variable=var2 , onvalue=1, offvalue=0)\r\nchkEconomy.grid(row=2, column=0, sticky=W)\r\nchkBusinessClass = Checkbutton(topLeft1,font=('arial',19, 'bold'), text='Business', variable=var3 , onvalue=1, offvalue=0)\r\nchkBusinessClass.grid(row=3, column=0, sticky=W)\r\n\r\nlblSp = Label(topLeft1, font=('arial',14, 'bold'),width = 17,height=1,relief='raise',\r\n justify='center')\r\nlblSp.grid(row=4, column=0, 
columnspan=4)\r\n#-------------------------------------Create Widget topLeft3--------------------------------\r\n\r\nlblSelect=Label(topLeft3, font=('arial',21, 'bold'), text='Select A Destination', bd=8)\r\nlblSelect.grid(row=0,column=0, sticky=W, columnspan=2) \r\nlblDestination=Label(topLeft3, font=('arial',19, 'bold'), text='Destination', bd=4)\r\nlblDestination.grid(row=1,column=0, sticky=W)\r\ncboDestination =ttk.Combobox(topLeft3, textvariable = var9, state='readonly', font=('arial',19, 'bold'), width=8)\r\ncboDestination['value']=('', 'Chennai', 'Delhi', 'Kolkata', 'Banglore')\r\ncboDestination.current(0)\r\ncboDestination.grid(row=1,column=1)\r\n\r\nchkAdult = Checkbutton(topLeft3,font=('arial',19, 'bold'), text='Adult', variable=var4, onvalue=1, offvalue=0)\r\nchkAdult.grid(row=2, column=0, sticky=W)\r\nchkChild = Checkbutton(topLeft3,font=('arial',19, 'bold'), text='Child', variable=var5 , onvalue=1, offvalue=0)\r\nchkChild.grid(row=3, column=0, sticky=W)\r\n\r\n#-----------------------------------------Ticket-----------------------------------------------------\r\n\r\nlblClass=Label(topLeft2, font=('arial',21, 'bold'), text='Ticket Type', bd=8)\r\nlblClass.grid(row=0,column=0, sticky=W)\r\n\r\nlblSp = Label(topLeft2, font=('arial',14, 'bold'),width = 24,height=1,relief='raise',\r\n justify='center')\r\nlblSp.grid(row=1, column=0, columnspan=4)\r\n\r\nchkSingle = Checkbutton(topLeft2,font=('arial',19, 'bold'), text='Single', variable=var10 ,\r\n onvalue=1, offvalue=0,command=chkbutton_value)\r\nchkSingle.grid(row=2, column=0, sticky=W)\r\nEntSingle = Entry(topLeft2, font=('arial',19, 'bold'), textvariable = var12 , bd=2 ,width=8,state=DISABLED)\r\nEntSingle.grid(row=2,column=1, sticky=W)\r\nchkReturn = Checkbutton(topLeft2,font=('arial',19, 'bold'), text='Return', variable=var11 ,\r\n onvalue=1, offvalue=0,command=chkbutton_value)\r\nchkReturn.grid(row=3, column=0, sticky=W)\r\nEntReturn = Entry(topLeft2, font=('arial',19, 'bold'), textvariable = var6 , bd=2 ,width=8,state=DISABLED)\r\nEntReturn.grid(row=3,column=1, sticky=W)\r\n\r\nlblSp = Label(topLeft2, font=('arial',14, 'bold'),width = 24,height=1,relief='raise',\r\n justify='center')\r\nlblSp.grid(row=4, column=0, columnspan=4)\r\n\r\n#--------------------------------Calculator--------------------------------------------------\r\n\r\ntext_Input=StringVar()\r\ntxtDisplay = Entry(bottomLeft2,font=('arial', 20, 'bold'), textvariable=text_Input , bd=8, \r\n justify='right')\r\ntxtDisplay.grid(columnspan=4)\r\n\r\nbtn7=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"7\", command=lambda: btnClick(7),width=4).grid(row=2,column=0)\r\n\r\nbtn8=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"8\", command=lambda: btnClick(8),width=4).grid(row=2,column=1)\r\n\r\nbtn9=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"9\", command=lambda: btnClick(9),width=4).grid(row=2,column=2)\r\n\r\nAddition=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"+\", command=lambda: btnClick(\"+\"),width=4).grid(row=2,column=3)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n\r\nbtn4=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"4\", command=lambda: btnClick(4),width=4).grid(row=3,column=0)\r\n\r\nbtn5=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 
'bold'),\r\n text=\"5\", command=lambda: btnClick(5),width=4).grid(row=3,column=1)\r\n\r\nbtn6=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"6\", command=lambda: btnClick(6),width=4).grid(row=3,column=2)\r\n\r\nSubtraction=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"-\", command=lambda: btnClick(\"-\"),width=4).grid(row=3,column=3)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\nbtn1=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"1\", command=lambda: btnClick(1),width=4).grid(row=4,column=0)\r\n\r\nbtn2=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"2\", command=lambda: btnClick(2),width=4).grid(row=4,column=1)\r\n\r\nbtn3=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"3\", command=lambda: btnClick(3),width=4).grid(row=4,column=2)\r\n\r\nMultiply=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"*\", command=lambda: btnClick(\"*\"),width=4).grid(row=4,column=3)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n\r\nbtn0=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"0\", command=lambda: btnClick(0),width=4).grid(row=5,column=0)\r\n\r\nbtnClear=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"C\",width=4,command=btnClearDisplay).grid(row=5,column=1)\r\n\r\nbtnEquals=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"=\",width=4,command=btnEqualsInput).grid(row=5,column=2)\r\n\r\nDvision=Button(bottomLeft2,padx=8,pady=8,bd=8, fg=\"black\", font=('arial',12, 'bold'),\r\n text=\"/\", command=lambda: btnClick(\"/\"),width=4).grid(row=5,column=3)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#------------------------------------------Tax,Subtotal And Total------------------------------------------------------------\r\n\r\n\r\nlblStateTax=Label(bottomLeft1, font=('arial',22, 'bold'), text='Tax', bd=16, anchor=\"w\")\r\nlblStateTax.grid(row=3,column=2)\r\n\r\ntxtStateTax = Entry(bottomLeft1, font=('arial',22, 'bold'), textvariable=Tax, bd=10 ,\r\n insertwidth=5, bg=\"#ffffff\", justify='right')\r\ntxtStateTax.grid(row=3,column=3)\r\n\r\nlblSubTotal=Label(bottomLeft1, font=('arial',22, 'bold'), text='Sub Total', bd=16, anchor=\"w\")\r\nlblSubTotal.grid(row=4,column=2)\r\n\r\ntxtSubTotal = Entry(bottomLeft1, font=('arial',22, 'bold'), textvariable=SubTotal, bd=10 ,\r\n insertwidth=5, bg=\"#ffffff\", justify='right')\r\ntxtSubTotal.grid(row=4,column=3)\r\n\r\nlblTotalCost=Label(bottomLeft1, font=('arial',22, 'bold'), text='Total Cost', bd=16, anchor='w')\r\nlblTotalCost.grid(row=5,column=2)\r\n\r\ntxtTotalCost = Entry(bottomLeft1, font=('arial',22, 'bold'), textvariable=Total, bd=10 ,\r\n insertwidth=5, bg=\"#ffffff\", justify='right')\r\ntxtTotalCost.grid(row=5,column=3)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSp = Label(frameBottomRight, font=('arial',14, 'bold'),width = 31,height=2,relief='sunken',\r\n justify='center')\r\nlblSp.grid(row=2, column=0, 
columnspan=4)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSp = Label(frameBottomRight, font=('arial',14, 'bold'),width = 31,height=2,relief='sunken',\r\n justify='center')\r\nlblSp.grid(row=6, column=0, columnspan=4)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSpace = Label(bottomLeft1, font=('arial',1, 'bold'), text=\" \\n \", bd=0, anchor=\"w\",relief='sunken')\r\nlblSpace.grid(row=5,column=2)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSpace = Label(bottomLeft2, font=('arial',1, 'bold'), text=\" \\n \", bd=0, anchor=\"w\")\r\nlblSpace.grid(row=2,columnspan=2)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nlblSp = Label(frameBottomRight, font=('arial',14, 'bold'),width = 31,height=2,relief='sunken',\r\n justify='center')\r\nlblSp.grid(row=9, column=0, columnspan=4)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\nlblSp = Label(frameBottomRight, font=('arial',14, 'bold'),width = 31,height=2,relief='sunken',\r\n justify='center')\r\nlblSp.grid(row=11, column=0, columnspan=4)\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\n#----------------------------------------------Button-------------------------------------------------\r\n\r\nlblReceipt = Label(frameBottomRight, font=('arial', 12,'bold'), bd=2, anchor='w')\r\nlblReceipt.grid(row = 10 , column=0,columnspan=4)\r\n\r\nbtnTotal = Button(frameBottomRight, text='Total', padx=2, pady=2, bd=2, fg=\"black\",\r\n font=('arial', 12, 'bold'), width= 6, height=1, command = Travel_Cost ).grid(row=10,column=0)\r\n \r\nbtnClear = Button(frameBottomRight, text='Clear', padx=2, pady=2, bd=2, fg=\"black\",\r\n font=('arial', 12, 'bold'), width= 6, height=1, command = Reset ).grid(row=10,column=1)\r\n\r\nbtnReset = Button(frameBottomRight, text='Reset', padx=2, pady=2, bd=2, fg=\"black\",\r\n font=('arial', 12, 'bold'), width= 6, height=1, command = Reset ).grid(row=10,column=2)\r\n\r\nbtnExit = Button(frameBottomRight, text='Exit', padx=2, pady=2, bd=2, fg=\"black\",\r\n font=('arial', 12, 'bold'), width= 6, height=1, command = iExit ).grid(row=10,column=3)\r\n\r\n#----------------------------------------------------------------------------------------------\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" } ]
2
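The fare logic in the ticket-booking file above repeats one rule per destination, class and passenger type: subtotal = 2 * base fare * ticket count, with 5% tax for Economy adults, 12% for Business adults, and none for children. Note that the Business adult branches display tax on the doubled fare but add only (Tcost * Single) * 0.12 into the total, half the amount shown. A table-driven sketch collapses the branches and applies one consistent rule; FARES, TAX_RATE and compute_fare are illustrative names, not part of the source, and only a few fare rows from the branches are carried over (the source's own "Banglore" spelling is kept).

import random

# Hypothetical fare table distilled from the elif branches above;
# base fares are in Rs per ticket, keyed by (destination, class, passenger).
FARES = {
    ('Kolkata', 'Economy', 'Adult'): 5000.0,
    ('Kolkata', 'Economy', 'Child'): 4200.0,
    ('Banglore', 'Economy', 'Adult'): 3700.0,
    ('Chennai', 'Business', 'Adult'): 7500.0,
    ('Delhi', 'Business', 'Child'): 6600.0,
    # ... remaining combinations follow the same pattern
}

# Adults pay 5% tax in Economy and 12% in Business; child fares carry no tax.
TAX_RATE = {('Economy', 'Adult'): 0.05, ('Business', 'Adult'): 0.12,
            ('Economy', 'Child'): 0.00, ('Business', 'Child'): 0.00}


def compute_fare(destination, travel_class, passenger, tickets):
    """Return (subtotal, tax, total, receipt_ref) for a return booking."""
    base = FARES[(destination, travel_class, passenger)]
    subtotal = 2 * base * tickets        # the branches double the base fare
    tax = subtotal * TAX_RATE[(travel_class, passenger)]
    receipt_ref = 'TFL' + str(random.randint(109, 5876))
    return subtotal, tax, subtotal + tax, receipt_ref


print(compute_fare('Kolkata', 'Economy', 'Adult', 1))  # (10000.0, 500.0, 10500.0, 'TFL...')

Each new destination or class then becomes one dictionary row instead of another twenty-line elif branch.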
Kamranbarlas/E-Comerece-Crawler
https://github.com/Kamranbarlas/E-Comerece-Crawler
3e9f0f2f13f60ca9f5aef5dcbf87f7747af1b1fc
96a140dc3ce8bcec13ed3d5d3f5edecac792a67d
aae43658f99ca2ded2bc8bf1b7c66a4c22738c53
refs/heads/master
2022-11-20T02:03:13.181079
2020-07-22T13:54:27
2020-07-22T13:54:27
281,441,117
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 26, "blob_id": "a7f3508711b25b2492ccde3bf6ec8cc4f267bde4", "content_id": "b57ed3d63bcada15f7917895580c85d04a72adff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/README.md", "repo_name": "Kamranbarlas/E-Comerece-Crawler", "src_encoding": "UTF-8", "text": "# E-Commerce-Crawler\nFor Crawling E-Commerce Websites \n" }, { "alpha_fraction": 0.5808823704719543, "alphanum_fraction": 0.5808823704719543, "avg_line_length": 35.266666412353516, "blob_id": "83a9c6e5de5c123d830d59c022a84dc1179f3d25", "content_id": "b0437ebce7ce356b8a7843c931808bf07ce28406", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 122, "num_lines": 15, "path": "/Amazon/Amazon/spiders/Best_Selling_Amazon_devices.py", "repo_name": "Kamranbarlas/E-Comerece-Crawler", "src_encoding": "UTF-8", "text": "import scrapy\n\n\nclass BestSellingAmazonDevicesSpider(scrapy.Spider):\n name = 'Best_Selling_Amazon_devices'\n allowed_domains = ['www.amazon.com']\n start_urls = ['http://www.amazon.com/Best-Sellers/zgbs/amazon-devices/']\n\n def parse(self, response):\n for products in response.xpath(\"//ol[@id='zg-ordered-list']/li[@class='zg-item-immersion']\"):\n yield{\n \"Title\":products.xpath(\".//span[@class='a-list-item']/div/span/a/div/text()\").get().strip('\\n ')\n\n \n }\n" } ]
2
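The parse() method in the spider above pulls each best-seller title with a relative XPath under the ranked-list items. Those selectors can be exercised without crawling Amazon by feeding saved HTML to parsel, the selector library Scrapy is built on; the markup below is a made-up stand-in for the page, not real Amazon output.

from parsel import Selector

# Made-up stand-in for the best-sellers markup the spider targets.
html = """
<ol id="zg-ordered-list">
  <li class="zg-item-immersion">
    <span class="a-list-item"><div><span><a href="#"><div>
      Echo Dot (3rd Gen)
    </div></a></span></div></span>
  </li>
</ol>
"""

sel = Selector(text=html)
for item in sel.xpath("//ol[@id='zg-ordered-list']/li[@class='zg-item-immersion']"):
    # Same relative path the spider's parse() uses for the title text.
    title = item.xpath(".//span[@class='a-list-item']/div/span/a/div/text()").get()
    print(title.strip())  # Echo Dot (3rd Gen)

Inside the project itself, the spider would be run from the directory containing scrapy.cfg with scrapy crawl Best_Selling_Amazon_devices -o titles.json, assuming the generated project settings are intact.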
rijuSen/Leankit_Hack_Public
https://github.com/rijuSen/Leankit_Hack_Public
af5f1600926124dd832b19e048a9a86887d5d600
6e177b2e3996925150e0611a99b9f0e4ce98004d
392f076228f01fb68671925c07f1bfafa43f29f0
refs/heads/master
2021-05-24T15:05:38.269364
2020-04-07T02:23:43
2020-04-07T02:23:43
253,619,224
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7723785042762756, "alphanum_fraction": 0.7851662635803223, "avg_line_length": 59.153846740722656, "blob_id": "fcda0ef75554fc02040a762289fd1a5f980f4eac", "content_id": "4ffdfa0d5e0ae1a33760e145c1c4e053d046e12a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 782, "license_type": "no_license", "max_line_length": 225, "num_lines": 13, "path": "/README.md", "repo_name": "rijuSen/Leankit_Hack_Public", "src_encoding": "UTF-8", "text": "# Leankit_Hack_Public\nSimple project to create timesheet from LeanKit task cards.\nUse LeanKit API with HTTPS Basic auth to fetch Leankit task cards. Use the task cards to create an entry into a timesheet file. Entries made when the card is moved to DONE. Has to be manually trigered to populate new entries.\nInstructions to run the script:\n1. Download the script\n2. Run main from the script\n3. Input your name\n4. Input your board number\n5. Input your DONE Lane number (toggle to API on Leankit to view Lane Number)\n6. Input your Leankit registered mail id and password \n7. Check for status code 0 \n8. If a CSV file exists, the scripts updated only new entries. Otherwise the script creates a new CSV file and appends entries.\n8. Find in the script folder a csv named - cardList.csv\n" }, { "alpha_fraction": 0.6221736073493958, "alphanum_fraction": 0.6258205771446228, "avg_line_length": 36.06756591796875, "blob_id": "b2976228721ffa053f2c19b69e94ddf53d96677f", "content_id": "2c0d9cc594552d3d8e9f00c46a08483168f71201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2742, "license_type": "no_license", "max_line_length": 121, "num_lines": 74, "path": "/updateTimeSheetPublic.py", "repo_name": "rijuSen/Leankit_Hack_Public", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport csv\nimport datetime\nimport os.path\n\n\ndef updateTimeSheet(url, name, username, password):\n payload = {}\n response = requests.get(url=url, auth=(username,password))\n #response = requests.request(\"GET\", url, headers=headers, data = payload)\n response_str = response.text.encode('utf8')\n json_object = json.loads(response_str)\n card_list = json_object['cards']\n for card in card_list:\n if not isinstance(card['connectedCardStats'], dict):\n for user in card['assignedUsers']:\n if user['fullName'] == name:\n #print(card)\n cardCSVWriter(card)\n\ndef cardCSVWriter(card):\n if not os.path.isfile('cardList.csv'):\n with open('cardList.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n # print(card.keys())\n writer.writerow([\"ID\", \"Date\", \"Lean_Task_Name\", \"Actual_Time\", \"Running_Time\"])\n\n if checkEntry(card['id']) == False:\n with open('cardList.csv', 'a', newline='') as file:\n writer = csv.writer(file)\n actualStart_date_time_str = card['actualStart']\n actualStart_date_time_obj = datetime.datetime.strptime(actualStart_date_time_str, '%Y-%m-%dT%H:%M:%SZ')\n movedOn_date_time_str = card['movedOn']\n movedOn_date_time_obj = datetime.datetime.strptime(movedOn_date_time_str, '%Y-%m-%dT%H:%M:%S.%fZ')\n time_diff = movedOn_date_time_obj - actualStart_date_time_obj\n seconds = time_diff.seconds\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n diff_in_time_in_hr_min = f'{h:d}:{m:02d}'\n # print(diff_in_time_in_hr_min)\n writer.writerow([card['id'], actualStart_date_time_obj.date(), card['title'], '2 hrs', diff_in_time_in_hr_min])\n\ndef checkEntry(id):\n # print(id)\n with open('cardList.csv', newline='') as 
csvfile:\n readerDict = csv.DictReader(csvfile)\n # print(readerDict)\n for row in readerDict:\n # print(row)\n if id == row['ID']:\n return True\n else:\n # print('False')\n return False\n\nif __name__=='__main__':\n name = input(\"Enter Name: \").upper()\n if not name:\n name = 'NAME'\n boardNumber = input(\"Leankit Board Number: \")\n # if not boardNumber:\n # boardNumber = 'LEANKIT BOARD NUMBER'\n laneNumber = input(\"Leankit Lane Number: \")\n # if not laneNumber:\n # laneNumber = 'LEANKIT LANE NUMBER'\n username = input(\"Enter LeanKit registered mail id: \")\n if not username:\n username = 'user.monash.edu'\n password = input(\"Enter your password: \")\n # if not password:\n # password = PASSWORD\n url = 'https://monashie.leankit.com/io/user/me/card?board={0}&lanes={1}'.format(boardNumber, laneNumber)\n updateTimeSheet(url, name, username, password)" } ]
2
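cardCSVWriter above derives the Running_Time column by parsing two differently formatted LeanKit timestamps and splitting their difference into hours and minutes. A self-contained rerun of just that arithmetic, with sample timestamp values standing in for real card data:

import datetime

# Sample values in the two timestamp formats handled by cardCSVWriter.
started = datetime.datetime.strptime('2020-04-06T09:15:00Z', '%Y-%m-%dT%H:%M:%SZ')
moved = datetime.datetime.strptime('2020-04-06T11:47:30.500Z', '%Y-%m-%dT%H:%M:%S.%fZ')

seconds = (moved - started).seconds  # same-day difference, as the script assumes
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print(f'{h:d}:{m:02d}')  # 2:32

One caveat: timedelta.seconds drops whole days, so a card started the previous day would under-report; (moved - started).total_seconds() would count the full elapsed time.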
lgtejas/dsad
https://github.com/lgtejas/dsad
adf9afcec9fd0b85f10d342e7322e0859df95694
d8c70b62d89e16c163767fc370bfd3e52b18ca06
ae32d0c1bd10ec487a6e6a194758632c2154b1b0
refs/heads/main
2023-06-04T06:57:43.806480
2021-06-24T20:03:29
2021-06-24T20:03:29
380,035,545
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5384976267814636, "alphanum_fraction": 0.5497652292251587, "avg_line_length": 30.791044235229492, "blob_id": "0b277e6ed5791c3c8f643081a7bf7601a7bce24c", "content_id": "b235d53e084b26b2a4cc0fa4b6382b75af211899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2130, "license_type": "no_license", "max_line_length": 82, "num_lines": 67, "path": "/assignment.py", "repo_name": "lgtejas/dsad", "src_encoding": "UTF-8", "text": "import sys\n\nfrom base import Node # Node is a datastructure which is child class of TreeNode\nfrom inputs import read_inputs\nfrom outputs import write_outputs\n\n\ndef solve_ps3(input):\n if not isinstance(input, list):\n raise ValueError('Invalid input , please input a valid array as input')\n elif len(input) <= 1:\n raise ValueError('Invalid input , please input a an array with size > 1')\n min_sum = sum(input) # n\n stack = []\n pos = 0\n i = 0\n while len(input) > 1:\n i = input.index(min(input)) # n * n\n left = input[i - 1] if i > 0 else float(\"inf\")\n right = input[i + 1] if i < len(input) - 1 else float(\"inf\")\n p = input.pop(i)\n if pos == 0:\n stack.append(p)\n t = min(left, right) * p\n min_sum += t\n stack.append(min(left, right))\n stack.append(t)\n pos = pos + 1\n return min_sum, _build_tree(stack)\n\n\ndef _build_tree(stack): # n * n\n i = 0\n root_node = None\n current_node = None\n while (len(stack) > 0):\n if i == 0:\n root_node = Node(stack.pop())\n current_node = root_node\n else:\n right = stack.pop()\n left = stack.pop()\n current_node.right = Node(right)\n current_node.left = Node(left)\n current_node = current_node.left\n i = i + 1\n return root_node\n\n\nif __name__ == \"__main__\":\n inputs = read_inputs()\n if inputs is None:\n sys.exit(1)\n elif len(inputs) == 0:\n print('No inputs provided , please enter input values in inputPS3.txt')\n sys.exit(1)\n results = []\n result_format =\"Minimum sum = {0}, inorder traversal = {1}\"\n for index, input in enumerate(inputs):\n print(f'--------------- input [{index + 1}] -----------')\n print(f'Input = {input}')\n min_sum , root = solve_ps3(input)\n print(f'Minimum sum = {min_sum}')\n print(f'--------- inorder traversal ---------')\n print(root.inorder_traversal(root))\n results.append(result_format.format(min_sum,root.inorder_traversal(root)))\n write_outputs(results)\n" }, { "alpha_fraction": 0.5408163070678711, "alphanum_fraction": 0.545918345451355, "avg_line_length": 26.714284896850586, "blob_id": "296a47b8dd42bfb4cbab7f5a1ce1591c0d367088", "content_id": "72c5246b3797b54b73701fbb9d7739d740ae8641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/outputs.py", "repo_name": "lgtejas/dsad", "src_encoding": "UTF-8", "text": "import os\n\ndef write_outputs(results, path='./outputPS3.txt'):\n with open(path, 'w') as f:\n for result in results:\n f.write(result)\n f.write(os.linesep)\n\n\n" }, { "alpha_fraction": 0.5653543472290039, "alphanum_fraction": 0.5685039162635803, "avg_line_length": 36.235294342041016, "blob_id": "247726d5cd7ad1ba93d81e7b8ff973912ca5bc95", "content_id": "41fe3a06024284c6af1913f17e7acd0e3cd34a29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 139, "num_lines": 17, "path": "/inputs.py", "repo_name": "lgtejas/dsad", "src_encoding": "UTF-8", "text": 
"from os.path import isfile\n\ndef read_inputs(path=\"./inputPS3.txt\"):\n inputs =[]\n if not isfile(path):\n print(f'Input file {path} not found')\n return\n with open(path, 'r') as f:\n lines = f.readlines()\n for line_number, line in enumerate(lines):\n split_numbers = line.split(' ')\n try:\n split_numbers = [int(n) for n in split_numbers]\n inputs.append((split_numbers))\n except :\n print(f'Error reading line number {line_number + 1} with value - {split_numbers}, may contain invalid/non integer values')\n return inputs\n\n\n" }, { "alpha_fraction": 0.5358255505561829, "alphanum_fraction": 0.5358255505561829, "avg_line_length": 22.77777862548828, "blob_id": "682b1da483c6e64edc6ea8e714b1f03bef6c17fa", "content_id": "b58f6cfc3ef794681d0e44d7266466a9d65a0be1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "no_license", "max_line_length": 58, "num_lines": 27, "path": "/base.py", "repo_name": "lgtejas/dsad", "src_encoding": "UTF-8", "text": "# Tree Node Data structure\nclass TreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\nclass Node(TreeNode):\n\n def __init__(self, data):\n super().__init__(data)\n\n def print_tree(self):\n if self.left:\n self.left.print_tree()\n print(self.data),\n if self.right:\n self.right.print_tree()\n\n def inorder_traversal(self, root):\n res = []\n if root:\n res = self.inorder_traversal(root.left)\n res.append(root.data)\n res = res + self.inorder_traversal(root.right)\n return res\n" } ]
4
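A minimal end-to-end run of the solver above, assuming assignment.py, base.py, inputs.py and outputs.py sit together on the import path as they do in this repo. The greedy loop repeatedly pops the current minimum and charges min * smaller neighbour, and _build_tree unwinds the recorded stack into a left-leaning tree; note that solve_ps3 consumes its input list in place.

from assignment import solve_ps3

min_sum, root = solve_ps3([1, 2, 3, 4])
print(min_sum)                       # 30: sum(1..4) = 10, plus merges 1*2 + 2*3 + 3*4 = 20
print(root.inorder_traversal(root))  # [1, 2, 2, 6, 3, 12, 4]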
Jamesbreon/django_meiduo_mall
https://github.com/Jamesbreon/django_meiduo_mall
eae5e2324161469ea55f202f57931c730e552f7b
0c88b0dfab22c37d0c3fdf15619360188c8564c6
3451d49336c732cda36b473957ff85408b84f48b
refs/heads/master
2020-06-05T05:45:53.658027
2019-06-17T11:21:39
2019-06-17T11:21:39
192,334,157
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6459854245185852, "alphanum_fraction": 0.6459854245185852, "avg_line_length": 20.153846740722656, "blob_id": "20c0ba4528af29f1accad21be1aecf303a6e39b6", "content_id": "acee73e58e8befa8a0dbbc976446b008f208c359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/apps/carts/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^carts/$', views.CartsView.as_view()),\n\n # 购物车全选\n url(r'^carts/selection/$', views.SelectedAllView.as_view()),\n # 简易购物车\n url(r'^carts/simple/$', views.SimpleCartsView.as_view()),\n\n]" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 20.086956024169922, "blob_id": "4eeee4788c158a441c113592d618813837c83b0d", "content_id": "b2a821ef75001b47402ac053d6fdd9f22c94200c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/apps/oanuth/utils.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData\nfrom django.conf import settings\n\n\ndef get_acess_token(openid):\n\n serializer = Serializer(settings.SECRET_KEY, 300)\n\n token = serializer.dumps(openid)\n\n acess_token = token.decode()\n\n return acess_token\n\n\ndef check_openid(token):\n\n serializer = Serializer(settings.SECRET_KEY, 300)\n try:\n openid = serializer.loads(token)\n except BadData:\n return None\n return openid\n\n" }, { "alpha_fraction": 0.5807682275772095, "alphanum_fraction": 0.5868735909461975, "avg_line_length": 36.08490753173828, "blob_id": "3d6a46f45b1f53c649c7c325beca380f5502c3e3", "content_id": "e4d08516fd00e6dc523e94d2e1319a018c3bc9a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4479, "license_type": "no_license", "max_line_length": 119, "num_lines": 106, "path": "/apps/payment/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django import http\nfrom alipay import AliPay\nimport os\nfrom django.conf import settings\n\nfrom meiduo_mall.utils.base_view import Base_view\nfrom orders.models import OrderInfo\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom .models import Payment\n\n\n# Create your views here.\nclass AlipayPaymentView(Base_view):\n\n def get(self, request, order_id):\n user = request.user\n # 校验两个条件\n try:\n order = OrderInfo.objects.get(order_id=order_id, status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'], user=user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('非法请求')\n\n # 支付宝 dev中设置\n # ALIPAY_APPID = '2016091900551154'\n # ALIPAY_DEBUG = True # 表示是沙箱环境还是真实支付环境\n # ALIPAY_URL = 'https://openapi.alipaydev.com/gateway.do'\n # ALIPAY_RETURN_URL = 'http://www.meiduo.site:8000/payment/status/'\n\n # 创建支付对象\n alipay = AliPay(\n appid=settings.ALIPAY_APPID,\n app_notify_url=None, # 默认回调url\n app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)) + '/key/app_private_key.pem'),\n # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n alipay_public_key_path=os.path.join(\n os.path.dirname(os.path.abspath(__file__)) + 
'/key/alipay_public_key.pem'),\n sign_type=\"RSA2\", # RSA 或者 RSA2\n debug=settings.ALIPAY_DEBUG # 默认False 修改为开发模式True\n )\n #\n order_string = alipay.api_alipay_trade_page_pay(\n out_trade_no=order_id,\n total_amount=str(order.total_amount),\n subject='哈哈商城%s' % order_id,\n return_url=settings.ALIPAY_RETURN_URL,\n )\n\n # 响应登录支付宝连接\n # 真实环境电脑网站支付网关:https://openapi.alipay.com/gateway.do? + order_string\n # 沙箱环境电脑网站支付网关:https://openapi.alipaydev.com/gateway.do? + order_string\n alipay_url = settings.ALIPAY_URL + '?' + order_string\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '支付成功', 'alipay_url': alipay_url})\n\n\nclass PaymentSuccessView(Base_view):\n\n def get(self, request):\n # 获取查询字典\n query_dict = request.GET\n # 将查询QueryDict转成字典格式\n data_dict = query_dict.dict()\n # 将字典中的sign找出\n signature = data_dict.pop(\"sign\")\n\n # 创建alipay对象\n alipay = AliPay(\n appid=settings.ALIPAY_APPID,\n app_notify_url=None, # 默认回调url\n app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)) + '/key/app_private_key.pem'),\n # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n alipay_public_key_path=os.path.join(\n os.path.dirname(os.path.abspath(__file__)) + '/key/alipay_public_key.pem'),\n sign_type=\"RSA2\", # RSA 或者 RSA2\n debug=settings.ALIPAY_DEBUG # 默认False 修改为开发模式True\n )\n # 将签名与data进行校验 返回True 或false 如果失败则说明支付失败\n success = alipay.verify(data_dict, signature)\n # 如果返回true说名支付成功,修改订单状态,将交易信息与订单进行绑定\n if success:\n # 交易订单号\n order_id = data_dict.get('out_trade_no')\n # 支付宝交易流水号\n trade_id = data_dict.get('trade_no')\n\n try:\n Payment.objects.get(order_id=order_id, trade_id=trade_id)\n except Payment.DoesNotExist:\n # 将支付信息保存到payment中\n Payment.objects.create(\n order_id=order_id,\n trade_id=trade_id,\n )\n # 修改支付信息\n OrderInfo.objects.filter(order_id=order_id,\n status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']).update(\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n\n context = {\n 'trade_id': trade_id\n }\n\n return render(request, 'pay_success.html', context)\n\n else:\n return http.HttpResponseForbidden('非法请求')\n" }, { "alpha_fraction": 0.7471264600753784, "alphanum_fraction": 0.7471264600753784, "avg_line_length": 16.399999618530273, "blob_id": "29bf282cd7f42d0e2bb6f6a76146507e4f0287db", "content_id": "733e983bf6cef0b59b90c11b070f29e58156c098", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/apps/oanuth/apps.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass OanuthConfig(AppConfig):\n name = 'oanuth'\n" }, { "alpha_fraction": 0.6958763003349304, "alphanum_fraction": 0.6958763003349304, "avg_line_length": 26.85714340209961, "blob_id": "fbe2edd90bda8401b3c1c97111fc3717f4e5e6d3", "content_id": "96092478c7acd74d565e587cc0b151dbbac8c155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 61, "num_lines": 7, "path": "/apps/oanuth/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\nurlpatterns = [\n url(r'^qq/authorization/$', views.QQLoginView.as_view()),\n url(r'^oauth_callback$', views.GetOpenIdView.as_view()),\n\n]" }, { "alpha_fraction": 0.5584129095077515, "alphanum_fraction": 0.5682096481323242, "avg_line_length": 32.88381576538086, "blob_id": "b94b1665ecb9ed55cf74ad19af6efadcda5d71a9", "content_id": "4c5c95a94bdc470d53cb3ad67f7f9afb8a0b9097", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9094, "license_type": "no_license", "max_line_length": 103, "num_lines": 241, "path": "/apps/goods/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.core.paginator import Paginator, EmptyPage\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\nfrom django import http\n\nfrom contents.utils import get_categories\nfrom .utils import get_breadcrumb\nfrom .models import GoodsCategory, SKU, GoodsVisitCount\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom meiduo_mall.utils.base_view import Base_view\nfrom orders.models import OrderInfo, OrderGoods\n\n\nclass ListView(View):\n\n def get(self, request, category_id, page_num):\n try:\n category = GoodsCategory.objects.get(id=category_id)\n except GoodsCategory.DoesNotExist:\n return http.HttpResponseForbidden('获取失败')\n sort = request.GET.get('sort')\n if sort == 'price':\n sort = 'price'\n elif sort == 'hot':\n sort = '-sales'\n else:\n sort = 'create_date'\n # 商品sku\n sku_qs = category.sku_set.filter(is_launched=True).order_by(sort)\n\n # 分页: 创建分页器\n paginator = Paginator(sku_qs, 5)\n try:\n page_skus = paginator.page(page_num)\n except EmptyPage:\n # 如果page_num不正确,默认给用户404\n return http.HttpResponseNotFound('empty page')\n\n # 获取列表页总页数\n total_page = paginator.num_pages\n\n context = {\n 'categories': get_categories(), # 频道分类\n 'breadcrumb': get_breadcrumb(category), # 面包屑导航\n 'sort': sort, # 排序字段\n 'category': category, # 第三级分类\n 'page_skus': page_skus, # 分页后数据\n 'total_page': total_page, # 总页数\n 'page_num': page_num, # 当前页码\n }\n return render(request, 'list.html', context)\n\n\nclass HotView(View):\n\n def get(self, request, category_id):\n # category_id 为三级商品分类\n # 根据分类获取sku\n try:\n category = GoodsCategory.objects.get(id=category_id)\n except GoodsCategory.DoesNotExist:\n return http.HttpResponseForbidden('Goods Category Does Not Exist')\n\n hot_skus_qs = SKU.objects.filter(category=category, is_launched=True).order_by('-sales')[:2]\n\n hot_skus = []\n for sku in hot_skus_qs:\n hot_skus.append({\n 'id': sku.id,\n 'name': sku.name,\n 'caption': sku.caption,\n 'price': sku.price,\n 'default_image_url': sku.default_image.url,\n })\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'hot_skus': hot_skus})\n\n\nclass SKUDetailView(View):\n\n def get(self, request, sku_id):\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return render(request, '404.html')\n\n category = sku.category\n spu = sku.spu\n\n # 通过skuid去查询ordergoods中的评论\n # order_goods = OrderGoods.objects.filter(sku_id=sku_id)\n # for order_good in order_goods:\n # order = order_good.order\n # comment = order_good.comment\n # username = order_good.is_anonymous\n # if comment:\n # sku.comment = comment\n # if username is False:\n # sku.username = '匿名用户'\n # else:\n # sku.username = order.user.username\n\n # # 根据sku_id查用户,将名字渲染到评论\n # order_qs = OrderInfo.objects.filter(sku_id=sku_id)\n # for order in order_qs:\n # sku.name = order.user.username\n\n\n 
\"\"\"1.准备当前商品的规格选项列表 [8, 11]\"\"\"\n # 获取出当前正显示的sku商品的规格选项id列表\n current_sku_spec_qs = sku.specs.order_by('spec_id')\n current_sku_option_ids = [] # [8, 11]\n for current_sku_spec in current_sku_spec_qs:\n current_sku_option_ids.append(current_sku_spec.option_id)\n\n \"\"\"2.构造规格选择仓库\n {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}\n \"\"\"\n # 构造规格选择仓库\n temp_sku_qs = spu.sku_set.all() # 获取当前spu下的所有sku\n # 选项仓库大字典\n spec_sku_map = {} # {(8, 11): 3, (8, 12): 4, (9, 11): 5, (9, 12): 6, (10, 11): 7, (10, 12): 8}\n for temp_sku in temp_sku_qs:\n # 查询每一个sku的规格数据\n temp_spec_qs = temp_sku.specs.order_by('spec_id')\n temp_sku_option_ids = [] # 用来包装每个sku的选项值\n for temp_spec in temp_spec_qs:\n temp_sku_option_ids.append(temp_spec.option_id)\n spec_sku_map[tuple(temp_sku_option_ids)] = temp_sku.id\n\n \"\"\"3.组合 并找到sku_id 绑定\"\"\"\n spu_spec_qs = spu.specs.order_by('id') # 获取当前spu中的所有规格\n\n for index, spec in enumerate(spu_spec_qs): # 遍历当前所有的规格\n spec_option_qs = spec.options.all() # 获取当前规格中的所有选项\n temp_option_ids = current_sku_option_ids[:] # 复制一个新的当前显示商品的规格选项列表\n for option in spec_option_qs: # 遍历当前规格下的所有选项\n temp_option_ids[index] = option.id # [8, 12]\n option.sku_id = spec_sku_map.get(tuple(temp_option_ids)) # 给每个选项对象绑定下他sku_id属性\n\n spec.spec_options = spec_option_qs # 把规格下的所有选项绑定到规格对象的spec_options属性上\n context = {\n 'categories': get_categories(),\n 'breadcrumb': get_breadcrumb(category),\n 'spu': spu,\n 'sku': sku,\n 'category': category,\n 'spec_qs': spu_spec_qs, # 当前商品的所有规格数据\n }\n return render(request, 'detail.html', context)\n\n\nclass DetailVisitView(View):\n \"\"\"\n 统计分类商品访问量是统计一天内该类别的商品被访问的次数\n \"\"\"\n\n def post(self, request, category_id):\n\n # 校验category_id\n try:\n category = GoodsCategory.objects.get(id=category_id)\n except GoodsCategory.DoesNotExist:\n return http.HttpResponseForbidden('参数无效')\n # 处理逻辑\n # 1 将category在表中仅从查找\n today_date = timezone.now() # 获取当天时间\n try:\n # 如果表中有记录则count +=1\n good_count = GoodsVisitCount.objects.get(date=today_date, category=category)\n except GoodsVisitCount.DoesNotExist:\n # 2 如果不能在表中查到,则直接在表中创建该条记录\n good_count = GoodsVisitCount(category=category)\n good_count.count += 1\n good_count.save()\n # 返回响应\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n\n\n# 用户中心订单详情\nclass UserOrderInfoView(Base_view):\n def get(self, request, page_num):\n\n user = request.user\n # 从数据库中获取订单\n orders_qs = OrderInfo.objects.filter(user=user).order_by('-create_date')\n\n # 分页: 创建分页器\n paginator = Paginator(orders_qs, 2)\n\n for order in orders_qs:\n order_skus = OrderGoods.objects.filter(order_id=order.order_id)\n\n for order_sku in order_skus:\n # 订单小记\n order_sku.amount = order_sku.count * order_sku.price\n # 动态增加图片\n # order_sku.default_image = order_sku.sku.default_image.url\n # 动态给order增加属性\n order.order_skus = order_skus\n order.status_name = OrderInfo.ORDER_STATUS[order.status]\n order.pay_method_name = OrderInfo.PAY_METHOD[order.pay_method]\n\n try:\n page_orders = paginator.page(page_num)\n except EmptyPage:\n # 如果page_num不正确,默认给用户404\n return http.HttpResponseNotFound('empty page')\n\n # 总页数\n total_page = paginator.num_pages\n context = {\n 'page_orders': page_orders,\n 'total_page': total_page,\n 'page_num': page_num,\n }\n\n return render(request, 'user_center_order.html', context)\n\n\n# 获取商品评价详情\nclass GetCommnetView(View):\n\n def get(self, request, sku_id):\n\n orders_goods = OrderGoods.objects.filter(sku_id=sku_id)\n if not orders_goods:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 
'errmsg': '此商品暂无评论'})\n comment_list = []\n # 拿到订单\n for orders_good in orders_goods:\n # 一一对应拿到comment和socore\n order = orders_good.order\n comment_list.append({\n 'score': orders_good.score,\n 'comment': orders_good.comment,\n 'username': order.user.username\n })\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'comment_list': comment_list})\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 17.272727966308594, "blob_id": "e7ca537e4604c023b6e4ea3a57fd0ae1af285f6f", "content_id": "85f0d0881bc3eb08f97a9c3904269b3326f4eb10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 67, "num_lines": 11, "path": "/utils/fastdfs/fastdfs_test.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from fdfs_client.client import Fdfs_client\n\n\n# 创建FastDFS实例对象\nclient = Fdfs_client('./client.conf')\n\n# 调用FastrDFS客户端上传文件\n\nret = client.upload_by_filename('/Users/liushiyang/Desktop/01.png')\n\nprint(ret)" }, { "alpha_fraction": 0.4956204891204834, "alphanum_fraction": 0.4996511936187744, "avg_line_length": 32.95000076293945, "blob_id": "cd19b248be67f3f6472e83b258e57607a56e0779", "content_id": "8bc11f1c1519cb92337f9fcbbc05e8c6c431237b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13987, "license_type": "no_license", "max_line_length": 101, "num_lines": 380, "path": "/apps/carts/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "import json\nimport base64\nimport pickle\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django import http\nfrom django_redis import get_redis_connection\n\n\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom goods.models import SKU\n\n\nclass CartsView(View):\n\n def post(self, request):\n\n json_dict = json.loads(request.body.decode())\n sku_id = json_dict.get('sku_id')\n count = json_dict.get('count')\n selected = json_dict.get('selected', True)\n user = request.user\n\n # 校验前端传来的数据\n if all([sku_id, count]) is False:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '缺少必要参数'})\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '商品不存在'})\n\n if isinstance(count, int) is False:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '参数格式错误'})\n\n if selected:\n if not isinstance(selected, bool):\n return http.HttpResponseForbidden('参数有误')\n # 逻辑处理\n # 1 登录用户存储到redis中\n if user.is_authenticated:\n \"\"\"\n hash:{sku_id:count}\n set:{sku_id, sku_id}\n \"\"\"\n # 连接数据库\n conn_redis = get_redis_connection('carts')\n # 将购物车保存到redis中\n # conn_redis.hincrby('user_%s' % user.id, sku_id, count)\n #\n # # 将selected为True的sku_id 保存到set集合\n # if selected:\n # conn_redis.sadd('selected_%s' % user.id, sku_id)\n pl = conn_redis.pipeline()\n pl.hincrby('user_%s' % user.id, sku_id, count)\n if selected:\n pl.sadd('selected_%s' % user.id, sku_id)\n pl.execute()\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '添加购物车成功'})\n\n else:\n # 2 未登录用户的存储cookie中\n \"\"\"\n 数据格式:\n {\n sku_id:{'sku_count':1,select’:True}\n }\n \"\"\"\n # 将获取到的sku_id 在存储的cookie中查询,如果存在则sku_count += count\n # cookie中的key和value都是加密后的字符串\n carts_str = request.COOKIES.get('carts')\n # 如果操作过购物车\n if carts_str:\n bytes_carts = carts_str.encode()\n bytes_carts = 
base64.b64decode(bytes_carts)\n carts_dict = pickle.loads(bytes_carts)\n else:\n # 如果没有操作过购物车\n carts_dict = {}\n\n if sku_id in carts_dict:\n origin_count = carts_dict[sku_id]['count']\n count += origin_count\n\n # 如果不存在则将sku_id保存到到cookie中\n carts_dict[sku_id] = {\n 'count': count,\n 'selected': selected\n }\n\n carts_str = base64.b64encode(pickle.dumps(carts_dict)).decode()\n\n # 响应对象\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n response.set_cookie('carts', carts_str)\n\n return response\n\n def get(self, request):\n user = request.user\n if user.is_authenticated:\n\n \"\"\"\n hash:{sku_id:count}\n set:{sku_id, sku_id}\n \"\"\"\n # 连接redis数据库\n conn_redis = get_redis_connection('carts')\n # 从hash中取出sku_id 和count {sku_id:count}\n redis_cart = conn_redis.hgetall('user_%s' % user.id)\n redis_sku = conn_redis.smembers('selected_%s' % user.id)\n # 将格式统一,把从redis格式cookie中的数据格式\n carts_dict = {}\n for sku_id, count in redis_cart.items():\n carts_dict[int(sku_id)] = {\n 'count': int(count),\n 'selected': sku_id in redis_sku,\n }\n\n else:\n \"\"\"\n {\n sku_id:{count:1, selected:True}\n }\n \"\"\"\n # 从cookie中取\n carts_str = request.COOKIES.get('carts')\n # 如果有\n if carts_str:\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n else:\n carts_dict = {}\n return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '购物车为空'})\n\n # 从carts_dict中获取所有的key,并在SKU表中查找获取当前的SKU查询集\n sku_qs = SKU.objects.filter(id__in=carts_dict.keys())\n\n cart_skus = []\n for sku in sku_qs:\n count = carts_dict[sku.id]['count']\n selected = carts_dict[sku.id]['selected']\n cart_skus.append({\n 'id': sku.id,\n 'name': sku.name,\n 'price': str(sku.price), # 由于前端需要JSON解析所以需要将Decimal转成str\n 'count': count,\n 'selected': str(selected),\n 'amount': str(sku.price * count),\n 'default_image_url': sku.default_image.url\n })\n\n context = {\n 'cart_skus': cart_skus\n }\n\n return render(request, 'cart.html', context)\n\n def put(self, request):\n \"\"\"修改购物车\"\"\"\n json_dict = json.loads(request.body.decode())\n sku_id = json_dict.get('sku_id')\n count = json_dict.get('count')\n selected = json_dict.get('selected')\n\n # 校验参数\n if all([sku_id, count]) is False:\n return http.JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '缺少必传参数'})\n\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '商品不存在'})\n\n if isinstance(count, int) is False:\n return http.JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '参数格式不正确'})\n\n if isinstance(selected, bool) is False:\n return http.JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '参数格式不正确'})\n\n # 用户是否登录\n user = request.user\n if user.is_authenticated:\n\n # 登录用户修改redis中数量\n conn_redis = get_redis_connection('carts')\n pl = conn_redis.pipeline()\n # 修改hash中的count {sku_id:count}\n # 修改set中的sku_id\n pl.hset('user_%s' % user.id, sku_id, count)\n if selected:\n pl.sadd('selected_%s' % user.id, sku_id)\n else:\n pl.srem('selected_%s' % user.id, sku_id)\n pl.execute()\n cart_skus = {\n 'id': sku.id,\n 'name': sku.name,\n 'price': sku.price, # 由于前端需要JSON解析所以需要将Decimal转成str\n 'count': count,\n 'selected': selected,\n 'amount': sku.price * count,\n 'default_image_url': sku.default_image.url\n }\n\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'cart_sku': cart_skus})\n return response\n else:\n # 未登录修改cookie中count值\n # 1 获取cookie中的值\n carts_str = request.COOKIES.get('carts')\n # 2 将cart_str 转换成 cart_dict\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n # 
3 修改count值,根据前端传来的count值重新赋值\n if carts_dict:\n\n carts_str = base64.b64encode(pickle.dumps(carts_dict)).decode()\n else:\n carts_dict = {}\n\n carts_dict[sku_id] = {\n 'count': count,\n 'selected': selected,\n }\n # 4 返回响应\n # 修改后需要重新渲染\n cart_skus = {\n 'id': sku.id,\n 'name': sku.name,\n 'price': sku.price, # 由于前端需要JSON解析所以需要将Decimal转成str\n 'count': count,\n 'selected': selected,\n 'amount': sku.price * count,\n 'default_image_url': sku.default_image.url\n }\n\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'cart_sku': cart_skus})\n response.set_cookie('carts', carts_str)\n return response\n\n def delete(self, request):\n\n # 获取前端传来的数据\n json_dict = json.loads(request.body.decode())\n sku_id = json_dict.get('sku_id')\n user = request.user\n\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '没有该商品'})\n if user.is_authenticated:\n # 连接redis数据库\n conn_redis = get_redis_connection('carts')\n pl = conn_redis.pipeline()\n # 删除hash中相应的key:value {sku_id:count}\n pl.hdel('user_%s' % user.id, sku_id)\n # 删除集合中的 sku_id\n pl.srem('selected_%s' % user.id, sku_id)\n pl.execute()\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '删除成功'})\n\n else:\n # 未登录页面 删除传来的sku_id 相应的cookie\n carts_str = request.COOKIES.get('carts')\n if carts_str:\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n else:\n carts_dict = {}\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': '删除成功'})\n if sku_id in carts_dict:\n del carts_dict[sku_id]\n # 删除sku_id 对应的cookie后 cookie中可能有值有可能没有值\n if carts_dict:\n # 返回响应\n carts_str = base64.b64encode(pickle.dumps(carts_dict)).decode()\n response.set_cookie('carts', carts_str)\n # 没有值则删除cookie\n else:\n response.delete_cookie('carts')\n return response\n\n\nclass SelectedAllView(View):\n\n def put(self, request):\n json_dict = json.loads(request.body.decode())\n selected = json_dict.get('selected')\n\n if selected:\n if isinstance(selected, bool) is False:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '参数错误'})\n\n user = request.user\n if user.is_authenticated:\n # 连接数据库\n conn_redis = get_redis_connection('carts')\n # selected 将hash中sku_id 全都放到set中或将set中全部删除\n # {sku:count}\n cart = conn_redis.hgetall('user_%s' % user.id)\n sku_id_list = cart.keys()\n if selected:\n # 全选\n conn_redis.sadd('selected_%s' % user.id, *sku_id_list)\n else:\n # 取消全选\n conn_redis.srem('selected_%s' % user.id, *sku_id_list)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '全选购物车成功'})\n\n else:\n \"\"\"\n {\n sku_id:{count:1, selected:True}\n }\n \"\"\"\n # 用户未登录\n # 将 selected 设置为前端传来的 selected\n carts_str = request.COOKIES.get('carts')\n if carts_str:\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n for sku_id in carts_dict:\n carts_dict[sku_id]['selected'] = selected\n carts_str = base64.b64encode(pickle.dumps(carts_dict)).decode()\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n response.set_cookie('carts', carts_str)\n\n return response\n\n\nclass SimpleCartsView(View):\n\n def get(self, request):\n user = request.user\n if user.is_authenticated:\n\n \"\"\"\n hash:{sku_id:count}\n set:{sku_id, sku_id}\n \"\"\"\n # 连接redis数据库\n conn_redis = get_redis_connection('carts')\n # 从hash中取出sku_id 和count {sku_id:count}\n redis_cart = conn_redis.hgetall('user_%s' % user.id)\n redis_sku = conn_redis.smembers('selected_%s' % user.id)\n # 将格式统一,把从redis格式cookie中的数据格式\n carts_dict = {}\n for sku_id, count in 
redis_cart.items():\n carts_dict[int(sku_id)] = {\n 'count': int(count),\n 'selected': sku_id in redis_sku,\n }\n\n else:\n \"\"\"\n {\n sku_id:{count:1, selected:True}\n }\n \"\"\"\n # 从cookie中取\n carts_str = request.COOKIES.get('carts')\n # 如果有\n if carts_str:\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n else:\n carts_dict = {}\n return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '购物车为空'})\n\n # 从carts_dict中获取所有的key,并在SKU表中查找获取当前的SKU查询集\n sku_qs = SKU.objects.filter(id__in=carts_dict.keys())\n\n cart_skus = []\n for sku in sku_qs:\n count = carts_dict[sku.id]['count']\n cart_skus.append({\n 'id': sku.id,\n 'name': sku.name,\n 'count': count,\n 'default_image_url': sku.default_image.url\n })\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'cart_skus': cart_skus})\n" }, { "alpha_fraction": 0.6542810797691345, "alphanum_fraction": 0.6542810797691345, "avg_line_length": 23.760000228881836, "blob_id": "132ecf475f37f5004d4f4d41ec7924e90c68b083", "content_id": "759a21327b034ab39f6ee9b90b02f6400db38309", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 88, "num_lines": 25, "path": "/apps/contents/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import View\n\nfrom .models import ContentCategory\nfrom .utils import get_categories\n\n\n# Create your views here.\n# 注册完成后的重定向到首页,这是广告内容视图函数\nclass IndexView(View):\n\n def get(self, request):\n\n contents = {}\n\n content_category_qs = ContentCategory.objects.all()\n for con in content_category_qs:\n contents[con.key] = con.content_set.filter(status=True).order_by('sequence')\n\n context = {\n 'categories': get_categories(),\n 'contents': contents\n }\n\n return render(request, 'index.html', context)\n" }, { "alpha_fraction": 0.5925324559211731, "alphanum_fraction": 0.5925324559211731, "avg_line_length": 25.826086044311523, "blob_id": "02e5e79e5ea08d2a74e6c1ccbbd5cf0ca13ac588", "content_id": "62ccca88a58359e795a456ba7b43b6ca865110e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "no_license", "max_line_length": 85, "num_lines": 23, "path": "/apps/goods/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n # 商品列表url\n url(r'^list/(?P<category_id>\\d+)/(?P<page_num>\\d+)/$', views.ListView.as_view()),\n # 热销排行\n url(r'^hot/(?P<category_id>\\d+)/$', views.HotView.as_view()),\n\n # 统计访问量\n url(r'^detail/visit/(?P<category_id>\\d+)/$', views.DetailVisitView.as_view()),\n\n # 商品详情页面\n url(r'^detail/(?P<sku_id>\\d+)/$', views.SKUDetailView.as_view()),\n\n # 获取商品评价详情\n url(r'^comments/(?P<sku_id>\\d+)/$', views.GetCommnetView.as_view()),\n\n # 用户全部订单展示\n url(r'^orders/info/(?P<page_num>\\d+)/$', views.UserOrderInfoView.as_view()),\n\n]" }, { "alpha_fraction": 0.6730769276618958, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 25, "blob_id": "e56f00b7ae2140f18c8243c1737815a18c346149", "content_id": "2fc4406d99698bdd18c9a180569cce1ad908c66d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/apps/verifications/constants.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "# 魔法数 用于替换代码中的常量\nREDIS_TIME_EXPIRE = 300 # 验证码过期时间\n" }, { "alpha_fraction": 0.6079514026641846, "alphanum_fraction": 0.6178906559944153, "avg_line_length": 29.711864471435547, "blob_id": "fc21154cb5e38df3c3b750c01b2843bcae9daa90", "content_id": "5d4fe1bf7a25725f7d0f5c3953eb8a3d717de3d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2021, "license_type": "no_license", "max_line_length": 100, "num_lines": 59, "path": "/apps/users/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^register/$', views.RegisterView.as_view(), name='register'),\n # 校验用户名是否重复\n url(r'^usernames/(?P<username>[a-zA-Z0-9_-]{5,20})/count/$', views.UsernameCountView.as_view()),\n # 校验电话是否重复\n url(r'^mobile/(?P<mobile>1[3-9]\\d{9})/count/$', views.MobileCountView.as_view()),\n # 登录页面\n url(r'^login/$', views.LoginView.as_view(), name='login'),\n # 登出页面\n url(r'^logout/$', views.LogoutView.as_view()),\n\n # 用户中心\n url(r'^info/$', views.UserInfoView.as_view(), name='info'),\n\n # email模块\n url(r'^emails/$', views.VerifyEmailView.as_view()),\n\n # email 激活验证\n url(r'^emails/verification/$', views.Check_Verify_Email.as_view()),\n\n # 收货地址\n url(r'^addresses/$', views.AdressesView.as_view(), name='address'),\n\n # 查找地区市\n url(r'^areas/$', views.AreaView.as_view()),\n\n # 增加收获地址\n url(r'^addresses/create/$', views.CreateAddressView.as_view()),\n\n # 修改地址\n url(r'^addresses/(?P<address_id>\\d+)/$', views.UpdateAddressView.as_view()),\n\n # 设置默认你地址\n url(r'^addresses/(?P<address_id>\\d+)/default/$', views.SetDefaultAddressView.as_view()),\n\n # 设置title\n url(r'^addresses/(?P<address_id>\\d+)/title/$', views.SetTitleView.as_view()),\n\n # 修改密码展示页面\n url(r'^password/$', views.ChangePasswordView.as_view()),\n\n # 展示最近浏览记录\n url(r'^browse_histories/$', views.UserBrowseHistory.as_view()),\n\n # 找回密码\n url(r'^find_password/$', views.FindePasswordView.as_view()),\n # 找回密码第一步\n url(r'^accounts/(?P<account>[a-zA-Z0-9_-]{5,20})/sms/token/$', views.StepOneView.as_view()),\n\n # 找回密码验证短信验证码\n url(r'^accounts/(?P<username>1[3-9]\\d{9})/password/token/$', views.CheckSMSCode.as_view()),\n\n # 设置密码\n url(r'^users/(?P<userid>\\d+)/password/$', views.SetPassword.as_view())\n\n]" }, { "alpha_fraction": 0.612817108631134, "alphanum_fraction": 0.6241655349731445, "avg_line_length": 
31.182796478271484, "blob_id": "a8405c556ca89dba4aca0a804ba9417a75d7a4f2", "content_id": "4d08885346e0f172a2de2fa6b9846b04fec5ee36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3710, "license_type": "no_license", "max_line_length": 93, "num_lines": 93, "path": "/apps/verifications/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.views import View\nfrom django_redis import get_redis_connection\nfrom django import http\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom random import randint\nfrom meiduo_mall.libs.captcha.captcha import captcha\nfrom celery_tasks.sms.tasks import send_sms_code\nfrom verifications.constants import REDIS_TIME_EXPIRE\n\n\n# Create your views here.\nclass ImageCodeView(View):\n\n def get(self, request, uuid):\n # 1 Generate a random image captcha (name, text, image)\n name, text, image = captcha.generate_captcha()\n # 2 Save the captcha text in redis for later verification\n redis_conn = get_redis_connection('verify_cache')\n redis_conn.setex('image_%s' % uuid, REDIS_TIME_EXPIRE, text)\n\n # 3 Return the image for the frontend to render\n return http.HttpResponse(image, content_type='image/png')\n\n\nclass SMSCodeView(View):\n\n def get(self, request, mobile):\n # 1 Fetch the parameters\n image_code_client = request.GET.get('image_code')\n uuid = request.GET.get('uuid')\n\n redis_conn = get_redis_connection('verify_cache')\n\n # 2 Validate\n\n # Throttle: reject the request if this number asked for a code recently\n mobile_flag = redis_conn.get('flag_%s' % mobile)\n\n if mobile_flag:\n return http.JsonResponse({'code': RETCODE.THROTTLINGERR, 'errmsg': '访问过于频繁'})\n\n if all([image_code_client, uuid]) is False:\n return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR, 'errmsg': '缺少必传参数'})\n\n # Compare the submitted image code with the one saved in redis\n\n image_code_server = redis_conn.get('image_%s' % uuid)\n # Delete the code immediately so it cannot be verified twice\n redis_conn.delete('image_%s' % uuid)\n\n # A missing value means the captcha has expired\n if image_code_server is None:\n return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '验证码过期'})\n\n # redis returns bytes, decode first\n image_code_server = image_code_server.decode()\n\n # The captcha is case-insensitive, compare in lower case\n if image_code_server.lower() != image_code_client.lower():\n return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '图形验证码错误'})\n\n # 3 Generate a random six-digit SMS code\n sms_code = '%06d' % randint(0, 999999)\n\n # debug print, for testing only\n print(sms_code)\n\n # 3.1/3.2 done through a pipeline: save sms_code for later verification\n # and flag the number so repeated requests get throttled\n pl = redis_conn.pipeline()\n pl.setex('sms_%s' % mobile, REDIS_TIME_EXPIRE, sms_code)\n pl.setex('flag_%s' % mobile, 60, 1)\n # Execute the pipeline, otherwise nothing is committed\n pl.execute()\n\n # 4 Send the SMS through the Ronglian Cloud (yuntongxun) service.\n # Calling it synchronously would block the request and slow down the\n # frontend countdown, so the call is delegated to celery:\n # CCP().send_template_sms(mobile, [sms_code, REDIS_TIME_EXPIRE // 60], 1)\n\n # Send the SMS asynchronously via celery\n send_sms_code.delay(mobile, sms_code)\n # 5 Respond\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '成功'})\n" }, { "alpha_fraction": 0.7582417726516724, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": "9b4def962c70078436b19dfcad33611fb25d582a", "content_id": "1cf341d1fbaf38539e76fa7c3397f2e7473be03e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": 
"/apps/weboauth/apps.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass WeboauthConfig(AppConfig):\n name = 'weboauth'\n" }, { "alpha_fraction": 0.6720901131629944, "alphanum_fraction": 0.6745932698249817, "avg_line_length": 27.535715103149414, "blob_id": "0457f2b47f7622122154cf60864cf561311fd184", "content_id": "0ab7106fd5bda158d04faebc2c88a12216cf80cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 84, "num_lines": 28, "path": "/apps/contents/crons.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport time, os\nfrom django.conf import settings\n\nfrom contents.models import ContentCategory\nfrom .utils import get_categories\n\n\ndef generate_static_index_html():\n print('%s: generate_static_index_html' % time.ctime())\n\n contents = {}\n content_category_qs = ContentCategory.objects.all()\n for con in content_category_qs:\n contents[con.key] = con.content_set.filter(status=True).order_by('sequence')\n\n context = {\n 'categories': get_categories(),\n 'contents': contents\n }\n\n response = render(None, 'index.html', context)\n text_html = response.content.decode()\n # 静态文件路径\n path = os.path.join(settings.STATICFILES_DIRS[0], 'index.html')\n\n with open(path, 'w', encoding='utf-8') as f:\n f.write(text_html)\n" }, { "alpha_fraction": 0.5649350881576538, "alphanum_fraction": 0.5674325823783875, "avg_line_length": 34.53845977783203, "blob_id": "b8d680ebc871da312de2d350d7f8800443d473e9", "content_id": "0acd637fd2822de237521a7447fbbc8210aa8f50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6472, "license_type": "no_license", "max_line_length": 96, "num_lines": 169, "path": "/apps/weboauth/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.contrib.auth import login\nfrom django.shortcuts import render, redirect\nfrom django import http\nfrom django_redis import get_redis_connection\nfrom django.conf import settings\nfrom django.views import View\nimport json\n\nfrom users.models import User\nfrom .models import OAuthSinaUser\nfrom .utils import get_acess_token, check_openid\nimport re\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom carts.utils import merge_carts_cookie_2_redis\nfrom meiduo_mall.utils.sinaweibopy3 import APIClient\n\n\n# SinaLoginTool\nclass SinaLoginView(View):\n\n def get(self, request):\n # next参数的目的是 当登录后会自动跳转到登录前的页面\n\n next = request.GET.get('next')\n if next is None:\n return http.HttpResponseForbidden('非法访问')\n\n # 获取新浪_url\n web_code = APIClient(app_key=settings.SINA_CLIENT_ID,\n app_secret=settings.SINA_CLIENT_SECRET,\n redirect_uri=settings.SINA_REDIRECT_URI,\n )\n\n login_url = web_code.get_authorize_url()\n # 前端url var url = this.host + '/qq/authorization/?next=' + next;\n # 将返回的qq_url 以json格式返回给前端\n return http.JsonResponse({'code': RETCODE.OK, 'login_url': login_url})\n\n\nclass GetUidView(View):\n\n def get(self, request):\n\n code = request.GET.get('code')\n\n token_code = APIClient(\n app_key=settings.SINA_CLIENT_ID,\n app_secret=settings.SINA_CLIENT_SECRET,\n redirect_uri=settings.SINA_REDIRECT_URI)\n try:\n # 获取acess_token\n result = token_code.request_access_token(code)\n access_token = result.access_token\n # 给access_token需要加密\n token = get_acess_token(access_token)\n 
except Exception as ret:\n return ret\n try:\n oauth_user = OAuthSinaUser.objects.get(uid=access_token)\n except OAuthSinaUser.DoesNotExist:\n\n # 将token保存session中以备校验\n key_token = 'token_%s' % code\n request.session[key_token] = token\n return render(request, 'sina_callback.html')\n else:\n user = oauth_user.user\n login(request, user)\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK',\n 'user_id': user.id,\n 'username': user.username,\n 'token': token, })\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n # 合并购物车\n merge_carts_cookie_2_redis(request, response)\n return response\n\n\nclass SinaUserView(View):\n\n def get(self, request):\n\n code = request.GET.get('code')\n user = request.user\n key_token = 'token_%s' % code\n token = request.session.get(key_token)\n\n access_token = check_openid(token)\n if not token:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '参数错误'})\n # 获取之后删除\n # del request.session[key_token]\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK',\n 'user_id': user.id,\n 'username': user.username,\n 'token': token,\n 'access_token': access_token,\n })\n\n def post(self, request):\n\n query_dict = json.loads(request.body.decode())\n mobile = query_dict.get('mobile')\n password = query_dict.get('password')\n sms_code = query_dict.get('sms_code')\n access_token = query_dict.get('access_token')\n\n # 通过acess_token 加密获取token\n token = get_acess_token(access_token)\n\n if all([mobile, password, sms_code, access_token]) is False:\n return http.HttpResponseForbidden('缺少必传参数')\n\n if not re.match(r'^1[3-9]\\d{9}$', mobile):\n return http.JsonResponse({'code': RETCODE.MOBILEERR, 'errmsg': '请输入正确的手机号码'})\n if not re.match(r'^[0-9A-Za-z]{8,20}$', password):\n return http.JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '手机号码或密码不正确'})\n\n # 检验短信验证码\n # 从数据库中取验证码\n redis_conn = get_redis_connection('verify_cache')\n sms_cod_server = redis_conn.get('sms_%s' % mobile)\n\n # 为了防止一个短信验证码可以多次验证,取到后应立即删除\n redis_conn.delete('sms_%s' % mobile)\n\n if sms_cod_server is None:\n return http.JsonResponse({'code': RETCODE.SMSCODERR, 'errmsg': '验证码过期'})\n\n # redis取出的为byte类型 进行解码\n sms_cod_server = sms_cod_server.decode()\n\n if sms_code != sms_cod_server:\n return http.JsonResponse({'code': RETCODE.SMSCODERR, 'errmsg': '短信验证码有误'})\n\n # 校验acess_token\n # access_token = check_openid(token)\n if not access_token:\n return render(request, 'oauth_callback.html', {'openid_errmsg': '无效的acess_token'})\n\n # 注册用户\n # 根据mobile查找\n # 1 如果存在则进行绑定\n try:\n user = User.objects.get(mobile=mobile)\n except User.DoesNotExist:\n # 2 如果没有则创建用户,再绑定\n user = User.objects.create_user(username=mobile, mobile=mobile, password=password)\n else:\n # 如果用用户则需要检查密码\n if not user.check_password(password):\n return render(request, 'oauth_callback.html', {'account_errmsg': '用户名或密码错误'})\n\n # 从获取uid\n OAuthSinaUser.objects.create(user=user, uid=access_token)\n\n login(request, user)\n # next = request.GET.get('state')\n response = http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK',\n 'user_id': user.id,\n 'username': user.username,\n 'token': token, })\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n\n # 合并购物车\n merge_carts_cookie_2_redis(request, response)\n\n return response\n" }, { "alpha_fraction": 0.5994071364402771, "alphanum_fraction": 0.6097819209098816, "avg_line_length": 32.71428680419922, "blob_id": "4205db57f28a105d18ffbd907b49ac35decf5944", "content_id": 
"82e2ca2a1a695d05e1a585de5c96cf494d85ea7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5151, "license_type": "no_license", "max_line_length": 96, "num_lines": 140, "path": "/apps/oanuth/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.contrib.auth import login\nfrom django.shortcuts import render, redirect\nfrom QQLoginTool.QQtool import OAuthQQ\nfrom django import http\nfrom django_redis import get_redis_connection\n\nfrom django.conf import settings\nfrom django.views import View\n\nfrom users.models import User\nfrom .models import OAuthQQUser\nfrom .utils import get_acess_token, check_openid\nimport re\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom carts.utils import merge_carts_cookie_2_redis\n\n\"\"\"\nQQ_CLIENT_ID = '101518219'\nQQ_CLIENT_SECRET = '418d84ebdc7241efb79536886ae95224'\nQQ_REDIRECT_URI = 'http://www.meiduo.site:8000/oauth_callback'\n\"\"\"\n\n\n# QQLoginTool\nclass QQLoginView(View):\n\n def get(self, request):\n # next参数的目的是 当登录后会自动跳转到登录前的页面\n\n next = request.GET.get('next')\n if next is None:\n return http.HttpResponseForbidden('非法访问')\n\n # 获取qq_url\n qq_code = OAuthQQ(client_id=settings.QQ_CLIENT_ID,\n client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI,\n state=next)\n\n login_url = qq_code.get_qq_url()\n # 前端url var url = this.host + '/qq/authorization/?next=' + next;\n # 将返回的qq_url 以json格式返回给前端\n return http.JsonResponse({'code': RETCODE.OK, 'login_url': login_url})\n\n\nclass GetOpenIdView(View):\n\n def get(self, request):\n\n code = request.GET.get('code')\n\n token_code = OAuthQQ(\n client_id=settings.QQ_CLIENT_ID,\n client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI)\n try:\n acess_token = token_code.get_access_token(code)\n openid = token_code.get_open_id(acess_token)\n print(openid)\n except Exception as ret:\n return ret\n try:\n oauth_user = OAuthQQUser.objects.get(openid=openid)\n except OAuthQQUser.DoesNotExist:\n # openid 需要加密\n token = get_acess_token(openid)\n # 将加密的唯一表示openid 响应给前端\n return render(request, 'oauth_callback.html', {'openid': token})\n else:\n user = oauth_user.user\n login(request, user)\n next = request.GET.get('state')\n response = redirect(next or '/')\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n\n # 合并购物车\n merge_carts_cookie_2_redis(request, response)\n return response\n\n def post(self, request):\n\n query_dict = request.POST\n mobile = query_dict.get('mobile')\n password = query_dict.get('password')\n sms_code = query_dict.get('sms_code')\n openid = query_dict.get('openid')\n\n if all([mobile, password, sms_code, openid]) is False:\n return http.HttpResponseForbidden('缺少必传参数')\n\n if not re.match(r'^1[3-9]\\d{9}$', mobile):\n return http.JsonResponse({'code': RETCODE.MOBILEERR, 'errmsg': '请输入正确的手机号码'})\n if not re.match(r'^[0-9A-Za-z]{8,20}$', password):\n return http.JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '手机号码或密码不正确'})\n\n # 检验短信验证码\n # 从数据库中取验证码\n redis_conn = get_redis_connection('verify_cache')\n sms_cod_server = redis_conn.get('sms_%s' % mobile)\n\n # 为了防止一个短信验证码可以多次验证,取到后应立即删除\n redis_conn.delete('sms_%s' % mobile)\n\n if sms_cod_server is None:\n return http.JsonResponse({'code': RETCODE.SMSCODERR, 'errmsg': '验证码过期'})\n\n # redis取出的为byte类型 进行解码\n sms_cod_server = sms_cod_server.decode()\n\n if sms_code != sms_cod_server:\n return http.JsonResponse({'code': RETCODE.SMSCODERR, 
'errmsg': '短信验证码有误'})\n\n # 校验openid\n openid = check_openid(openid)\n if not openid:\n return render(request, 'oauth_callback.html', {'openid_errmsg': '无效的openid'})\n\n # 注册用户\n # 根据mobile查找\n # 1 如果存在则进行绑定\n try:\n user = User.objects.get(mobile=mobile)\n except User.DoesNotExist:\n # 2 如果没有则创建用户,再绑定\n user = User.objects.create_user(username=mobile, mobile=mobile, password=password)\n else:\n # 如果用用户则需要检查密码\n if not user.check_password(password):\n return render(request, 'oauth_callback.html', {'account_errmsg': '用户名或密码错误'})\n\n OAuthQQUser.objects.create(user=user, openid=openid)\n\n login(request, user)\n next = request.GET.get('state')\n response = redirect(next or '/')\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n\n # 合并购物车\n merge_carts_cookie_2_redis(request, response)\n return response\n\n\n\n" }, { "alpha_fraction": 0.6214128136634827, "alphanum_fraction": 0.6291390657424927, "avg_line_length": 29.233333587646484, "blob_id": "0858dbba0ee7874bdccf28316b58ea63c8e1ba39", "content_id": "7b2e0f344d1ddf8e96a257c1b014a6e99209e239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 73, "num_lines": 30, "path": "/apps/carts/utils.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "import base64, pickle\nfrom django_redis import get_redis_connection\n\n\n# 定义函数:是否需要传参数\ndef merge_carts_cookie_2_redis(request, response):\n \"\"\"将cookie中的数据保存到redis中\"\"\"\n # 取出cookie中的数据\n carts_str = request.COOKIES.get('carts')\n user = request.user\n # 如果cookie中有数据则进行合并,如果没有则提前响应\n if carts_str:\n carts_dict = pickle.loads(base64.b64decode(carts_str.encode()))\n else:\n return\n # 连接redis数据库\n conn_redis = get_redis_connection('carts')\n pl = conn_redis.pipeline()\n # 将cookie中的数据保存到redis中\n # 将cookie中的数据取出\n for sku_id in carts_dict:\n # 将sku_id保存到hash中\n pl.hset('user_%s' % user.id, sku_id, carts_dict[sku_id]['count'])\n if carts_dict[sku_id]['selected']:\n pl.sadd('selected_%s' % user.id, sku_id)\n else:\n pl.srem('selected_%s' % user.id, sku_id)\n # 删除cookie\n pl.execute()\n response.delete_cookie('carts')" }, { "alpha_fraction": 0.6556016802787781, "alphanum_fraction": 0.6556016802787781, "avg_line_length": 23.200000762939453, "blob_id": "42f4ae3494ac643443d9edc83f2b5bb81f5fa03d", "content_id": "ce4142f657790b61c63dcd8c36946eef1840f95c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 76, "num_lines": 10, "path": "/apps/payment/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n # 跳转到支付页面\n url(r'^payment/(?P<order_id>\\d+)/$', views.AlipayPaymentView.as_view()),\n # 支付成功\n url(r'^payment/status/$', views.PaymentSuccessView.as_view()),\n]" }, { "alpha_fraction": 0.8156028389930725, "alphanum_fraction": 0.8156028389930725, "avg_line_length": 22.66666603088379, "blob_id": "dccd0a6c5b4eb80d3a42272b46c5d4bacedd4d8c", "content_id": "f4121e36d6d249f1f2a4f33c989407a581926f04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 57, "num_lines": 6, "path": "/utils/base_view.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views import View\n\n\nclass Base_view(LoginRequiredMixin, View):\n pass" }, { "alpha_fraction": 0.5607580542564392, "alphanum_fraction": 0.5674601197242737, "avg_line_length": 30.34782600402832, "blob_id": "2ff904711b89ec0a69874ba12b892e269376f59b", "content_id": "f320cef093c384cc8e09535458122b58886495bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23947, "license_type": "no_license", "max_line_length": 109, "num_lines": 690, "path": "/apps/users/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.http import HttpResponseForbidden, JsonResponse, HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views.generic import View\nfrom django.contrib.auth import login, logout\nfrom django_redis import get_redis_connection\nfrom django.conf import settings\nfrom django.core.cache import cache\nimport re\nimport logging\nimport json\n\nfrom users.models import User, Address\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom .utils import MulitUserAuthciate, gen_verify_url, verify_activate_email, username_or_mobile\nfrom celery_tasks.send_emails.tasks import send_verify_mail\nfrom meiduo_mall.utils.base_view import Base_view\nfrom areas.models import Area\nfrom goods.models import SKU\nfrom carts.utils import merge_carts_cookie_2_redis\nfrom orders.models import OrderInfo, OrderGoods\n\nlogger = logging.getLogger('django')\n\n\nclass RegisterView(View):\n\n def get(self, request):\n\n return render(request, 'register.html')\n\n def post(self, request):\n\n query_dict = request.POST\n username = query_dict.get('username')\n password = query_dict.get('password')\n password2 = query_dict.get('password2')\n mobile = query_dict.get('mobile')\n sms_code = query_dict.get('sms_code')\n allow = query_dict.get('allow')\n # 校验传来的参数\n if all([username, password, password2, mobile, sms_code, allow]) is False:\n return HttpResponseForbidden('缺少必须的参数')\n\n if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):\n return HttpResponseForbidden('不正确')\n\n if not re.match(r'^[0-9A-Za-z]{8,20}$', password):\n return HttpResponseForbidden('不正确')\n\n if password != password2:\n return HttpResponseForbidden('不正确')\n\n if not re.match(r'^1[3-9]\\d{9}$', mobile):\n return HttpResponseForbidden('不正确')\n\n if allow != 'on':\n return HttpResponseForbidden({'code': RETCODE.ALLOWERR, 'errmsg': '用户协议未勾选'})\n\n # 检验短信验证码\n # 从数据库中取验证码\n redis_conn = get_redis_connection('verify_cache')\n sms_cod_server = redis_conn.get('sms_%s' % mobile)\n\n # 
为了防止一个短信验证码可以多次验证,取到后应立即删除\n redis_conn.delete('sms_%s' % mobile)\n\n if sms_cod_server is None:\n return HttpResponseForbidden({'code': RETCODE.SMSCODERR, 'errmsg': '验证码过期'})\n\n # redis取出的为byte类型 进行解码\n sms_cod_server = sms_cod_server.decode()\n\n if sms_code != sms_cod_server:\n return HttpResponseForbidden({'code': RETCODE.SMSCODERR, 'errmsg': '短信验证码有误'})\n\n user = User.objects.create_user(username=username, password=password, mobile=mobile)\n\n # 保持登录状态\n login(request, user)\n response = redirect(reverse('contents:index'))\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n return response\n\n\n# 校验注册是否重名\nclass UsernameCountView(View):\n\n def get(self, request, username):\n count = User.objects.filter(username=username).count()\n print(count)\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'count': count})\n\n\n# 校验电话是否重复\nclass MobileCountView(View):\n\n def get(self, request, mobile):\n count = User.objects.filter(mobile=mobile).count()\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'count': count})\n\n\n# 用户登录以及多账户登录(usermane or mobile)\nclass LoginView(View):\n\n def get(self, request):\n\n return render(request, 'login.html')\n\n def post(self, request):\n\n query_dict = request.POST\n account = query_dict.get('username')\n password = query_dict.get('pwd')\n remembered = query_dict.get('remembered')\n\n # 采用django自带的用户认证进行用户登录验证\n # authenticate 方法如果验证通过则返回user用户对象,如果没有通过验证则没有返回值,没有返回值默认为None\n\n authen_obj = MulitUserAuthciate()\n user = authen_obj.authenticate(request, username=account, password=password)\n if user is None:\n # 如果 返回值为None,说明用户或密码错误\n return render(request, 'login.html', {'account_errmsg': '用户名或密码错误'})\n\n # 验证通过保持登录状态\n login(request, user)\n # 如果用户不勾选 记住用户则清除cookie中的sessionid即可,即 设置session的过期时间为0\n if remembered != 'on':\n request.session.set_expiry(0)\n\n # 登录成功后显示用户名,在返回响应页面时设置带上cookie\n\n # next参数 用户来源参数,如果有next参数则登录后会进入该页面之前的页面\n next = request.GET.get('next')\n if next:\n\n response = redirect(next)\n else:\n response = redirect(reverse('contents:index'))\n\n response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)\n\n # 合并购物车\n merge_carts_cookie_2_redis(request, response)\n return response\n\n\n# 用户登出\nclass LogoutView(View):\n\n def get(self, request):\n # django中自带的登出\n logout(request)\n\n # 退出登录,重定向到登录也\n response = redirect(reverse('users:login'))\n\n # 退出登录删除cookie\n response.delete_cookie('username')\n\n return response\n\n\n# 用户中心验证第二种方式\nclass UserInfoView(LoginRequiredMixin, View):\n def get(self, request):\n return render(request, 'user_center_info.html')\n\n\n# 需要校验登录索引用LoginRequiredMixin 进行用户登录验证\nclass VerifyEmailView(LoginRequiredMixin, View):\n\n # 前端发送的为put请求所以用请求体非表单的方式提起参数\n def put(self, request):\n email = json.loads(request.body.decode())\n to_email = email.get('email')\n if not re.match(r'^[a-z0-9][\\w\\.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', to_email):\n return HttpResponseForbidden('email填写有误')\n # print(email)\n # 将email 保存到email字典中\n # user = User.objects()\n # user.email = email\n # user.save()\n # 安全考虑用 乐观锁\n user = request.user\n # 在修改之前,在根据条件查找一次\n User.objects.filter(username=user.username, email='').update(email=to_email)\n\n verify_url = gen_verify_url(user)\n\n send_verify_mail.delay(to_email, verify_url)\n data = {\n 'code': RETCODE.OK,\n 'errmsg': '邮件发送成功'\n }\n\n return JsonResponse(data)\n\n\n# 验证信息并改变email_activate的激活状态\nclass Check_Verify_Email(View):\n\n def get(self, request):\n\n token = request.GET.get('token')\n # 
校验token值中的email和userid\n if not token:\n return HttpResponseForbidden('缺少token参数')\n\n user = verify_activate_email(token)\n if not user:\n return HttpResponseForbidden('无效token值')\n\n # 验证成功后修改邮箱激活状态\n user.email_active = True\n user.save()\n\n # 返回邮箱验证结果\n return redirect('/info/')\n\n\n# 收获地址页面\nclass AdressesView(Base_view):\n\n # 展示收货地址\n def get(self, request):\n user = request.user\n address_set = Address.objects.filter(user=user, is_deleted=False)\n\n address_dict_list = []\n for address in address_set:\n address_dict = {\n \"id\": address.id,\n \"title\": address.title,\n \"receiver\": address.receiver,\n \"province\": address.province.name,\n \"city\": address.city.name,\n \"district\": address.district.name,\n \"place\": address.place,\n \"mobile\": address.mobile,\n \"tel\": address.tel,\n \"email\": address.email\n }\n address_dict_list.append(address_dict)\n\n context = {\n 'default_address_id': user.default_address_id,\n 'addresses': address_dict_list,\n }\n\n return render(request, 'user_center_site.html', context)\n\n\n# 增加收获地址\nclass AreaView(Base_view):\n\n def get(self, request):\n\n # TODO 用缓存进行优化 cache\n\n area_id = request.GET.get('area_id')\n\n # 如果area_id 为None则说明查询的为省\n if area_id is None:\n\n province_list = cache.get('province_list')\n if not province_list:\n try:\n province_queryset = Area.objects.filter(parent__isnull=True)\n province_list = []\n for province_model in province_queryset:\n province_list.append({'id': province_model.id, 'name': province_model.name})\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'province_list': province_list})\n\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '省份数据错误'})\n # 响应省份数据\n # 将省份信息保存到cache中\n cache.set('province_list', province_list, 3600)\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'province_list': province_list})\n else:\n sub_data = cache.get('sub_area')\n if sub_data is None:\n try:\n # 查找所对应的城市\n city = Area.objects.get(id=area_id)\n except Area.DoesNotExist:\n return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '城市查找错误'})\n\n # city.subs.all 等价于 city.city_set.all() 由于是自关联 则必须要重命名 否则会报错\n # 城市所对应的所有的区\n direct_qs = city.subs.all()\n sub_list = []\n for direct in direct_qs:\n sub_list.append({'id': direct.id, 'name': direct.name})\n\n sub_data = {\n 'id': city.id,\n 'name': city.name,\n 'subs': sub_list\n }\n cache.set('sub_area', sub_data, 3600)\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'sub_data': sub_data})\n\n\nclass CreateAddressView(Base_view):\n\n def post(self, request):\n\n count = request.user.addresses.count()\n if count >= 20:\n return HttpResponseForbidden('地址数量大于20')\n\n form_json = json.loads(request.body.decode())\n receiver = form_json.get('receiver')\n province_id = form_json.get('province_id')\n city_id = form_json.get('city_id')\n district_id = form_json.get('district_id')\n place = form_json.get('place')\n mobile = form_json.get('mobile')\n tel = form_json.get('tel')\n email = form_json.get('email')\n\n # 校验前端传来的数据\n if all([receiver, province_id, city_id, district_id, place, mobile]) is False:\n return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '缺少必传参数'})\n\n # 校验电话、 固定电话、 email正确格式\n if not re.match(r'^1[345789]\\d{9}$', mobile):\n return HttpResponseForbidden('手机号码格式不正确')\n if tel:\n if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):\n return HttpResponseForbidden('固定电话格式不正确')\n if email:\n if not 
re.match(r'^[a-z0-9][\\w\\.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', email):\n return HttpResponseForbidden('邮箱格式输入不正确')\n # 逻辑处理\n try:\n address = Address.objects.create(\n user=request.user,\n title=receiver,\n receiver=receiver,\n province_id=province_id,\n city_id=city_id,\n district_id=district_id,\n place=place,\n mobile=mobile,\n tel=tel,\n email=email\n )\n\n # 查询默认地址,如果没有就将现有的做为默认地址\n if not request.user.default_address:\n request.user.default_address = address\n request.user.save()\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '新增地址失败'})\n\n address_json = {\n \"id\": address.id,\n \"title\": address.title,\n \"receiver\": address.receiver,\n \"province\": address.province.name,\n \"city\": address.city.name,\n \"district\": address.district.name,\n \"place\": address.place,\n \"mobile\": address.mobile,\n \"tel\": address.tel,\n \"email\": address.email\n }\n # 返回响应\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'address': address_json})\n\n\nclass UpdateAddressView(Base_view):\n\n def put(self, request, address_id):\n\n json_dict = json.loads(request.body.decode())\n receiver = json_dict.get('receiver')\n province_id = json_dict.get('province_id')\n city_id = json_dict.get('city_id')\n district_id = json_dict.get('district_id')\n place = json_dict.get('place')\n mobile = json_dict.get('mobile')\n tel = json_dict.get('tel')\n email = json_dict.get('email')\n\n # 校验参数\n if not all([receiver, province_id, city_id, district_id, place, mobile]):\n return HttpResponseForbidden('缺少必传参数')\n if not re.match(r'^1[3-9]\\d{9}$', mobile):\n return HttpResponseForbidden('参数mobile有误')\n if tel:\n if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):\n return HttpResponseForbidden('参数tel有误')\n if email:\n if not re.match(r'^[a-z0-9][\\w\\.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', email):\n return HttpResponseForbidden('参数email有误')\n\n try:\n Address.objects.filter(id=address_id).update(\n title=receiver,\n receiver=receiver,\n province_id=province_id,\n city_id=city_id,\n district_id=district_id,\n place=place,\n mobile=mobile,\n tel=tel,\n email=email\n )\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '更新地址失败'})\n\n # 构造响应数据\n\n address = Address.objects.get(id=address_id)\n address_dict = {\n \"id\": address.id,\n \"title\": address.title,\n \"receiver\": address.receiver,\n \"province\": address.province.name,\n \"city\": address.city.name,\n \"district\": address.district.name,\n \"place\": address.place,\n \"mobile\": address.mobile,\n \"tel\": address.tel,\n \"email\": address.email\n }\n\n # 响应更新地址结果\n return JsonResponse({'code': RETCODE.OK, 'errmsg': '更新地址成功', 'address': address_dict})\n\n # 删除地址\n def delete(self, request, address_id):\n # 逻辑删除,将is_delete 参数改为True\n try:\n # 查询要删除的地址\n address = Address.objects.get(id=address_id)\n\n # 将地址逻辑删除设置为True\n address.is_deleted = True\n address.save()\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '删除地址失败'})\n\n # 响应删除地址结果\n return JsonResponse({'code': RETCODE.OK, 'errmsg': '删除地址成功'})\n\n\nclass SetDefaultAddressView(Base_view):\n\n def put(self, request, address_id):\n\n try:\n address = Address.objects.get(id=address_id)\n\n request.user.default_address = address\n request.user.save()\n\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '设置默认值失败'})\n\n return JsonResponse({'code': RETCODE.OK, 
'errmsg': '设置成功'})\n\n\n# 设置标题\nclass SetTitleView(Base_view):\n\n def put(self, request, address_id):\n\n # 1 接受前端传来的参数\n json_dict = json.loads(request.body.decode())\n title = json_dict.get('title')\n\n try:\n # 查询地址\n address = Address.objects.get(id=address_id)\n\n # 设置新的地址标题\n address.title = title\n address.save()\n\n except Exception as e:\n logger.error(e)\n return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '修改失败'})\n # 响应给前端\n return JsonResponse({'code': RETCODE.OK, 'errmsg': '修改成功'})\n\n\nclass ChangePasswordView(Base_view):\n\n def get(self, request):\n\n return render(request, 'user_center_pass.html')\n\n def post(self, request):\n\n # 接受前端传来的数据\n old_password = request.POST.get('old_pwd')\n new_password = request.POST.get('new_pwd')\n new_password2 = request.POST.get('new_cpwd')\n\n # 校验数据\n if all([old_password, new_password, new_password2]) is False:\n return JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '缺少必传参数'})\n\n # 校验密码格式\n if not re.match(r'^[0-9A-Za-z]{8,20}$', old_password):\n return HttpResponseForbidden('密码格式不正确')\n\n if not re.match(r'^[0-9A-Za-z]{8,20}$', new_password):\n return HttpResponseForbidden('密码格式不正确')\n\n # 查询旧密码是否正确\n user = request.user\n if user.check_password(old_password) is False:\n return JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '密码不正确'})\n\n if new_password != new_password2:\n return JsonResponse({'code': RETCODE.PWDERR, 'errmsg': '两次密码输入不一致'})\n\n user.set_password(new_password)\n user.save()\n\n # 返回响应\n # 1 退出登录,重定向登录页\n logout(request)\n response = redirect(reverse('users:login'))\n # 2 删除cookie信息\n response.delete_cookie('username')\n return response\n\n\nclass UserBrowseHistory(Base_view):\n \"\"\"保存用户浏览记录\"\"\"\n\n def post(self, request):\n\n json_dict = json.loads(request.body.decode())\n sku_id = json_dict.get('sku_id')\n\n # 校验前端传来的数据\n if sku_id is None:\n return HttpResponseForbidden('缺少必传参数')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return HttpResponseForbidden('参数有误')\n\n # 将sku_id 保存到redis中\n # 以列表的方式存储到redis中: key:[sku_id, sku_id, sku_id]\n # 1 连接数据库, 配置 记录用户浏览历史的redis\n conn_redis = get_redis_connection('history')\n # 1.1 创建唯一的key\n key = 'user_%s' % request.user.id\n # 2 储存之前去重 lrem(key, count, value)\n # conn_redis.lrem(key, 0, sku_id)\n # 3 lpush将最新的放到列表最前端\n # conn_redis.lpush(key, sku_id)\n # 4 截取,展示浏览记录时最多展示5个\n # conn_redis.ltrim(key, 0, 4)\n\n # 多次访问数据库 运用管道技术进行优化\n pl = conn_redis.pipeline()\n pl.lrem(key, 0, sku_id)\n pl.lpush(key, sku_id)\n pl.ltrim(key, 0, 4)\n pl.execute()\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n\n def get(self, request):\n\n # 1 从redis中获取保存的sku_id\n user = request.user\n conn_redis = get_redis_connection('history')\n key = 'user_%s' % user.id\n skus_qs = conn_redis.lrange(key, 0, -1)\n\n skus = []\n # 遍历获取sku_id\n for sku_id in skus_qs:\n sku = SKU.objects.get(id=sku_id)\n skus.append({\n 'id': sku.id,\n 'name': sku.name,\n 'price': sku.price,\n 'default_image_url': sku.default_image.url\n })\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': \"OK\", 'skus': skus})\n\n\n# 找回密码\nclass FindePasswordView(View):\n\n def get(self, request):\n return render(request, 'find_password.html')\n\n\n# 找回密码第一步\nclass StepOneView(View):\n def get(self, request, account):\n uuid = request.GET.get('image_code_id')\n image_code = request.GET.get('image_code')\n user = username_or_mobile(account)\n if not user:\n return JsonResponse({'code': RETCODE.USERERR, 'errmsg': '用户名或电话不存在'})\n\n # 获取电话\n mobile = user.mobile\n\n return 
JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK',\n 'mobile': mobile,\n 'access_token': uuid,\n 'image_code': image_code})\n\n\nclass CheckSMSCode(View):\n def get(self, request, username):\n sms_code = request.GET.get('sms_code')\n access_token = request.GET.get('access_token')\n\n # Check the SMS code stored in redis\n redis_conn = get_redis_connection('verify_cache')\n sms_cod_server = redis_conn.get('sms_%s' % username)\n\n # Delete the code immediately so it cannot be verified twice\n redis_conn.delete('sms_%s' % username)\n\n if sms_cod_server is None:\n return JsonResponse({'code': RETCODE.SMSCODERR, 'errmsg': '验证码过期'})\n\n # redis returns bytes, decode first\n sms_cod_server = sms_cod_server.decode()\n\n if sms_code != sms_cod_server:\n return JsonResponse({'code': RETCODE.SMSCODERR, 'errmsg': '短信验证码有误'})\n\n user = User.objects.get(mobile=username)\n\n return JsonResponse({'code': RETCODE.OK,\n 'errmsg': 'OK', 'user_id': user.id,\n 'access_token': access_token})\n\n\n# Password recovery step three: set the new password\nclass SetPassword(View):\n\n def post(self, request, userid):\n qs_dict = json.loads(request.body.decode())\n password = qs_dict.get('password')\n password2 = qs_dict.get('password2')\n access_token = qs_dict.get('access_token')\n\n if all([password, password2, access_token]) is False:\n return JsonResponse({'message': '缺少参数'})\n\n if not re.match(r'^[0-9A-Za-z]{8,20}$', password):\n return JsonResponse({'message': '密码格式不正确'})\n\n if not re.match(r'^[0-9A-Za-z]{8,20}$', password2):\n return JsonResponse({'message': '密码格式不正确'})\n\n if password != password2:\n return JsonResponse({'message': '密码不一致'})\n\n # Update the user's password in the database\n try:\n user = User.objects.get(id=userid)\n except User.DoesNotExist:\n return JsonResponse({'message': '用户不存在'})\n\n user.set_password(password)\n user.save()\n\n return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n" }, { "alpha_fraction": 0.6991525292396545, "alphanum_fraction": 0.7118644118309021, "avg_line_length": 20.565217971801758, "blob_id": "ed8814d3e5420622a185e830c7957e5bfb310418", "content_id": "8db060a974ad0e9653678a30611d6e57a44d9897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/apps/weboauth/utils.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData\nfrom django.conf import settings\n\n\ndef get_acess_token(uid):\n\n    serializer = Serializer(settings.SECRET_KEY, 300)\n\n    token = serializer.dumps(uid)\n\n    acess_token = token.decode()\n\n    return acess_token\n\n\ndef check_openid(token):\n\n    serializer = Serializer(settings.SECRET_KEY, 300)\n    try:\n        uid = serializer.loads(token)\n    except BadData:\n        return None\n    return uid" }, { "alpha_fraction": 0.6743295192718506, "alphanum_fraction": 0.6743295192718506, "avg_line_length": 28.11111068725586, "blob_id": "300cbf354bfac55e3390035f83008f2e6d9d4867", "content_id": "775cb683ead91dd1e5a855edc30cb119835d8f89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/apps/weboauth/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "# Weibo login\nfrom django.conf.urls import url\nfrom . 
import views\nurlpatterns = [\n url(r'^weibo/login/$', views.SinaLoginView.as_view()),\n url(r'^sina_callback.html$', views.GetUidView.as_view()),\n url(r'^oauth/sina/user/$', views.SinaUserView.as_view()),\n\n]" }, { "alpha_fraction": 0.6282528042793274, "alphanum_fraction": 0.6282528042793274, "avg_line_length": 41.47368240356445, "blob_id": "951e98c99b569a6ae263eb0027dc521f69b5af8c", "content_id": "d44fe40d708dc11f15a247ebde90f90b2a5f397b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 79, "num_lines": 19, "path": "/urls.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^search/', include('haystack.urls')),\n url(r'^', include('users.urls', namespace='users')), # 用户子应用\n url(r'^', include('contents.urls', namespace='contents')), # 主页\n url(r'^', include('verifications.urls', namespace='verifications')), # 验证码\n url(r'^', include('oanuth.urls', namespace='oanuth')), # qq登录模块\n url(r'^', include('weboauth.urls', namespace='weboauth')), # 微博登录模块\n\n url(r'^', include('goods.urls', namespace='goods')), # 商品模块\n url(r'^', include('carts.urls', namespace='carts')), # 购物车模块\n url(r'^', include('orders.urls', namespace='orders')), # 结算清单模块\n url(r'^', include('payment.urls', namespace='payment')), # 支付模块\n\n\n]\n" }, { "alpha_fraction": 0.5413461327552795, "alphanum_fraction": 0.5567307472229004, "avg_line_length": 27.108108520507812, "blob_id": "b0a0db52fcf6fa0cfcd8b00fb727bfd34d45cfb2", "content_id": "71c0df3930c40aabc3c3c868587950fc9836a61b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 77, "num_lines": 37, "path": "/apps/contents/utils.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from goods.models import GoodsChannel\n\n\ndef get_categories():\n \"\"\"\n {\n 'gruop_id':{\n 'channel': [cat1(手机), cat1(相机), cat1(数码)]\n 'sub_cats':[cat2(手机通讯), cat2(运营商)]\n }\n }\n\n \"\"\"\n # 商品分类\n # 查询出所有商品频道数据并且按照组号和列号进行排序\n categories = {}\n group_channels_qs = GoodsChannel.objects.order_by('group_id', 'sequence')\n for channel in group_channels_qs:\n # 获取到商品id\n group_id = channel.group_id\n if group_id not in categories:\n categories[group_id] = {'channels': [], 'sub_cats': []}\n # 获取一级分类,然后放入到channel类表中\n cat1 = channel.category\n categories[group_id]['channels'].append(cat1)\n cat1.url = channel.url\n\n # 根据parent_id查询到所有的二级分类\n\n sub_cats_qs = cat1.subs.all()\n for cat2 in sub_cats_qs:\n # 得到第三级查询集\n cat3_qs = cat2.subs.all()\n # 把二级下面的所有三级绑定给cat2对象的cat_subs属性\n cat2.sub_cats = cat3_qs\n categories[group_id]['sub_cats'].append(cat2)\n return categories\n" }, { "alpha_fraction": 0.5084875226020813, "alphanum_fraction": 0.5106809139251709, "avg_line_length": 34.91095733642578, "blob_id": "9454b4a2d31b0cec75037fea6e3383a87bbb174e", "content_id": "3c507b16f1bdfec43c8852914f62dfd47045f235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11446, "license_type": "no_license", "max_line_length": 112, "num_lines": 292, "path": "/apps/orders/views.py", "repo_name": "Jamesbreon/django_meiduo_mall", "src_encoding": "UTF-8", "text": "from django.db import transaction\nfrom django.shortcuts import render\nfrom django.utils 
import timezone\nfrom django_redis import get_redis_connection\nfrom decimal import Decimal\nimport json\nfrom django import http\n\nfrom meiduo_mall.utils.base_view import Base_view\nfrom users.models import Address\nfrom goods.models import SKU\nfrom meiduo_mall.utils.response_code import RETCODE\nfrom .models import OrderGoods, OrderInfo\n\n\nclass OrderSettlementView(Base_view):\n\n def get(self, request):\n # Show the current user's shipping addresses\n user = request.user\n\n address = Address.objects.filter(user=user, is_deleted=False)\n if address.exists():\n addresses = address\n else:\n addresses = None\n # Goods list: read the selected items and counts from redis\n conn_redis = get_redis_connection('carts')\n # Fetch every sku_id/count pair, e.g. {b'1': b'3'}\n sku_count = conn_redis.hgetall('user_%s' % user.id)\n # Fetch the ids of the selected items\n sku_ids = conn_redis.smembers('selected_%s' % user.id)\n carts = {}\n for sku_id_bytes in sku_ids:\n carts[int(sku_id_bytes)] = int(sku_count[sku_id_bytes])\n # Query the selected SKUs\n skus = SKU.objects.filter(id__in=sku_ids)\n\n total_count = 0\n total_amount = Decimal('0.00')\n if skus:\n for sku in skus:\n sku.count = carts[sku.id]\n sku.amount = sku.price * sku.count\n # Accumulate the total count and total amount\n total_count += sku.count\n total_amount += sku.amount\n\n freight = Decimal('10.00')\n\n payment_amount = total_amount + freight\n context = {\n 'addresses': addresses,\n 'skus': skus,\n 'total_count': total_count,\n 'total_amount': total_amount,\n 'freight': freight,\n 'payment_amount': payment_amount\n }\n return render(request, 'place_order.html', context)\n\n\nclass OrdersCommitView(Base_view):\n\n def post(self, request):\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n user = request.user\n\n if all([address_id, pay_method]) is False:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '缺少必传参数'})\n try:\n address = Address.objects.get(id=address_id, is_deleted=False)\n except Address.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '收货地址填写有误'})\n\n # Validate the payment method\n if pay_method not in (OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']):\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '支付方式有误'})\n\n # Order status: cash on delivery -> UNSEND, online payment -> UNPAID\n if pay_method == OrderInfo.PAY_METHODS_ENUM['CASH']:\n status = OrderInfo.ORDER_STATUS_ENUM['UNSEND']\n else:\n status = OrderInfo.ORDER_STATUS_ENUM['UNPAID']\n\n total_amount = Decimal('0')\n freight = Decimal('10.00')\n total_count = 0\n\n # The order id must be unique: order time plus zero-padded user id\n order_id = timezone.now().strftime('%Y%m%d%H%M%S') + '%09d' % user.id\n\n # Start the transaction manually\n with transaction.atomic():\n # Create a savepoint\n save_point = transaction.savepoint()\n try:\n order = OrderInfo.objects.create(\n order_id=order_id,\n total_count=total_count,\n total_amount=total_amount,\n address=address,\n user=user,\n freight=freight,\n pay_method=pay_method,\n status=status\n )\n\n # Update stock and sales for every purchased SKU.\n # Read the selected items and their counts from redis\n conn_redis = get_redis_connection('carts')\n # Fetch every sku_id/count pair, e.g. {b'1': b'3'}\n sku_count = conn_redis.hgetall('user_%s' % user.id)\n # Fetch the ids of the selected items\n sku_ids = conn_redis.smembers('selected_%s' % user.id)\n carts = {}\n for sku_id_bytes in sku_ids:\n carts[int(sku_id_bytes)] = int(sku_count[sku_id_bytes])\n\n for sku_id in carts:\n while True:\n # Query one SKU at a time; reusing a queryset would cache stale data\n sku = SKU.objects.get(id=sku_id)\n # Quantity being bought\n buy_count = carts[sku_id]\n # Remember the original stock\n origin_stock = sku.stock\n # Remember the original sales count\n 
origin_sales = sku.sales\n # Check whether the stock is sufficient\n if origin_stock < buy_count:\n # Stock insufficient: roll back and respond\n transaction.savepoint_rollback(save_point)\n return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})\n\n # Compute the new stock and sales\n new_stock = origin_stock - buy_count\n new_sales = origin_sales + buy_count\n # Optimistic lock: instead of a blind sku.save(), re-check the\n # original stock in the WHERE clause while updating\n result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock,\n sales=new_sales)\n # If the update lost the race, retry until the order succeeds\n if result == 0:\n continue\n\n # Update the SPU sales as well\n spu = sku.spu\n spu.sales += buy_count\n spu.save()\n\n # Save this SKU as an order line\n OrderGoods.objects.create(\n count=buy_count,\n price=sku.price,\n order=order,\n sku=sku,\n )\n # Accumulate the total count\n order.total_count += buy_count\n # Accumulate the total amount\n order.total_amount += (sku.price * buy_count)\n\n break\n\n # Add the freight\n order.total_amount += order.freight\n order.save()\n except Exception:\n # Roll back everything on any error\n transaction.savepoint_rollback(save_point)\n return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '下单失败'})\n else:\n # Commit the savepoint\n transaction.savepoint_commit(save_point)\n\n # Remove the ordered items from the redis cart\n pl = conn_redis.pipeline()\n pl.hdel('user_%s' % user.id, *sku_ids)\n pl.srem('selected_%s' % user.id, *sku_ids)\n pl.execute()\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '提交订单成功', 'order_id': order.order_id})\n\n\nclass OrdersSuccessView(Base_view):\n\n def get(self, request):\n order_id = request.GET.get('order_id')\n pay_method = request.GET.get('pay_method')\n payment_amount = request.GET.get('payment_amount')\n\n try:\n order = OrderInfo.objects.get(order_id=order_id, pay_method=pay_method, total_amount=payment_amount)\n except OrderInfo.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '订单查询失败'})\n\n context = {\n 'order_id': order.order_id,\n 'payment_amount': order.total_amount,\n 'pay_method': order.pay_method\n }\n return render(request, 'order_success.html', context)\n\n\n# Goods comments\nclass GoodsCommentView(Base_view):\n\n def get(self, request):\n\n order_id = request.GET.get('order_id')\n user = request.user\n\n # Look up the order\n try:\n order = OrderInfo.objects.get(order_id=order_id, user=user)\n except OrderInfo.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '订单有误'})\n\n # Fetch the SKUs of this order that have not been commented yet\n order_goods = order.skus.filter(order=order, is_commented=False)\n skus = []\n for order_good in order_goods:\n sku = order_good.sku\n sku.url = sku.default_image.url\n skus.append({\n 'sku_id': sku.id,\n 'order_id': order_id,\n 'name': sku.name,\n 'caption': sku.caption,\n 'price': str(sku.price),\n 'default_image_url': sku.default_image.url\n })\n\n return render(request, 'goods_judge.html', {'skus': skus})\n\n def post(self, request):\n\n qs_dict = json.loads(request.body.decode())\n order_id = qs_dict.get('order_id')\n sku_id = qs_dict.get('sku_id')\n comment = qs_dict.get('comment')\n score = qs_dict.get('score')\n is_anonymous = qs_dict.get('is_anonymous')\n user = request.user\n\n # Look up the order being commented\n try:\n order = OrderInfo.objects.get(order_id=order_id, user=user)\n except OrderInfo.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '评论失败'})\n # Every SKU in the order needs its own comment\n total_count = order.total_count\n\n # Save the comment to the database\n try:\n order_goods = OrderGoods.objects.get(order=order, 
sku_id=sku_id)\n except OrderGoods.DoesNotExist:\n return http.JsonResponse({'code': RETCODE.PARAMERR, 'errmsg': '评论失败'})\n order_goods.comment = comment\n order_goods.score = score\n order_goods.is_anonymous = is_anonymous\n order_goods.is_commented = True\n order_goods.save()\n # Mark the order FINISHED only once every SKU has been commented\n commented_count = order.skus.filter(is_commented=True).count()\n if commented_count == total_count:\n order.status = OrderInfo.ORDER_STATUS_ENUM['FINISHED']\n order.save()\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})\n" } ]
26
emrecik/dotvim
https://github.com/emrecik/dotvim
a12f948a494a3f3b8e36742f36b2ec8080c6214a
de8b1b20fdfe765ab62f1a8c6080a919f087d34c
3bf077a9978987aeab0ef0439d90db8f2f1b3839
refs/heads/master
2020-04-05T10:26:42.663714
2012-05-26T12:53:44
2012-05-26T12:53:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.59375, "avg_line_length": 12.428571701049805, "blob_id": "9d2d2ec3d1c8dfa31782091acc9d02071144d6fa", "content_id": "2ec2c7a4d3dd284652d4473f2940ecbc4c9a12fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/t.py", "repo_name": "emrecik/dotvim", "src_encoding": "UTF-8", "text": "import random\n\t\nprint \"test de pep8\"\nfor i in range(0,10):\n print i\nif (i==2):\n print i \n" } ]
1
n1654/kuryr-kubernetes
https://github.com/n1654/kuryr-kubernetes
4864300bbaf0123c4ced3ea3249d1cc48e67e690
7c52546e19449eed0f05f61b40f2c4a00aa55bb8
3377eeb679ac15668a5390e7f021ead143f57e21
refs/heads/master
2020-12-15T13:33:29.097003
2020-01-16T08:30:32
2020-01-16T08:30:32
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6019381284713745, "alphanum_fraction": 0.6185861825942993, "avg_line_length": 40.921875, "blob_id": "9dbddfd6875e95b92783611d942c6c4672fdee4c", "content_id": "4c80a2f418b845974ceaf477e9552135ecc2a349", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8049, "license_type": "permissive", "max_line_length": 79, "num_lines": 192, "path": "/kuryr_kubernetes/tests/unit/controller/handlers/test_ingress_lbaas.py", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "# Copyright (c) 2018 RedHat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport uuid\n\nfrom kuryr_kubernetes.controller.handlers import ingress_lbaas as h_ing_lbaas\nfrom kuryr_kubernetes.objects import lbaas as obj_lbaas\nfrom kuryr_kubernetes.tests.unit.controller.handlers import \\\n test_lbaas as t_lbaas\n\n\nclass TestIngressLoadBalancerHandler(t_lbaas.TestLoadBalancerHandler):\n\n @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'\n '.LoadBalancerHandler._cleanup_leftover_lbaas')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.LBaaSDriver.get_instance')\n def test_init(self, m_get_drv_lbaas, m_cleanup_leftover_lbaas):\n m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas\n\n handler = h_ing_lbaas.IngressLoadBalancerHandler()\n\n self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)\n\n @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')\n def test_on_present_no_ing_ctrlr(self, m_get_lbaas_spec):\n endpoints = mock.sentinel.endpoints\n\n m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)\n m_handler._l7_router = None\n h_ing_lbaas.IngressLoadBalancerHandler.on_present(m_handler, endpoints)\n\n m_get_lbaas_spec.assert_not_called()\n m_handler._should_ignore.assert_not_called()\n\n def test_should_ignore(self):\n endpoints = mock.sentinel.endpoints\n lbaas_spec = mock.sentinel.lbaas_spec\n\n m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)\n m_handler._has_pods.return_value = False\n\n ret = h_ing_lbaas.IngressLoadBalancerHandler._should_ignore(\n m_handler, endpoints, lbaas_spec)\n self.assertEqual(True, ret)\n\n m_handler._has_pods.assert_called_once_with(endpoints)\n\n def test_should_ignore_with_pods(self):\n endpoints = mock.sentinel.endpoints\n lbaas_spec = mock.sentinel.lbaas_spec\n\n m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)\n m_handler._has_pods.return_value = True\n\n ret = h_ing_lbaas.IngressLoadBalancerHandler._should_ignore(\n m_handler, endpoints, lbaas_spec)\n self.assertEqual(False, ret)\n\n m_handler._has_pods.assert_called_once_with(endpoints)\n\n def _generate_route_state(self, vip, targets, project_id, subnet_id):\n name = 'DUMMY_NAME'\n drv = t_lbaas.FakeLBaaSDriver()\n lb = drv.ensure_loadbalancer(\n name, project_id, subnet_id, vip, None, 'ClusterIP')\n pool = drv.ensure_pool_attached_to_lb(lb, 'namespace',\n 'svc_name', 'HTTP')\n\n members = {}\n for ip, 
(listen_port, target_port) in targets.items():\n members.setdefault((ip, listen_port, target_port),\n drv.ensure_member(lb, pool,\n subnet_id, ip,\n target_port, None, None))\n return obj_lbaas.LBaaSRouteState(\n pool=pool,\n members=list(members.values()))\n\n def _sync_route_members_impl(self, m_get_drv_lbaas, m_get_drv_project,\n m_get_drv_subnets, subnet_id, project_id,\n endpoints, state, spec):\n m_drv_lbaas = mock.Mock(wraps=t_lbaas.FakeLBaaSDriver())\n m_drv_project = mock.Mock()\n m_drv_project.get_project.return_value = project_id\n m_drv_subnets = mock.Mock()\n m_drv_subnets.get_subnets.return_value = {\n subnet_id: mock.sentinel.subnet}\n m_get_drv_lbaas.return_value = m_drv_lbaas\n m_get_drv_project.return_value = m_drv_project\n m_get_drv_subnets.return_value = m_drv_subnets\n\n handler = h_ing_lbaas.IngressLoadBalancerHandler()\n\n handler._l7_router = t_lbaas.FakeLBaaSDriver().ensure_loadbalancer(\n name='L7_Router',\n project_id=project_id,\n subnet_id=subnet_id,\n ip='1.2.3.4',\n security_groups_ids=None,\n service_type='ClusterIP')\n\n with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:\n m_get_pod_subnet.return_value = subnet_id\n handler._sync_lbaas_route_members(endpoints, state, spec)\n\n observed_targets = sorted(\n (str(member.ip), (\n member.port,\n member.port))\n for member in state.members)\n return observed_targets\n\n @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'\n '.LoadBalancerHandler._cleanup_leftover_lbaas')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.PodSubnetsDriver.get_instance')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.PodProjectDriver.get_instance')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.LBaaSDriver.get_instance')\n def test__sync_lbaas_route_members(self, m_get_drv_lbaas,\n m_get_drv_project, m_get_drv_subnets,\n m_cleanup_leftover_lbaas):\n project_id = str(uuid.uuid4())\n subnet_id = str(uuid.uuid4())\n current_ip = '1.1.1.1'\n current_targets = {\n '1.1.1.101': (1001, 1001),\n '1.1.1.111': (1001, 1001),\n '1.1.1.201': (2001, 2001)}\n expected_ip = '2.2.2.2'\n expected_targets = {\n '2.2.2.101': (1201, 1201),\n '2.2.2.111': (1201, 1201),\n '2.2.2.201': (2201, 2201)}\n endpoints = self._generate_endpoints(expected_targets)\n state = self._generate_route_state(\n current_ip, current_targets, project_id, subnet_id)\n spec = self._generate_lbaas_spec(expected_ip, expected_targets,\n project_id, subnet_id)\n\n observed_targets = self._sync_route_members_impl(\n m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,\n subnet_id, project_id, endpoints, state, spec)\n\n self.assertEqual(sorted(expected_targets.items()), observed_targets)\n\n def test_on_deleted_no_ingress_controller(self):\n endpoints = mock.sentinel.endpoints\n m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)\n m_handler._l7_router = None\n h_ing_lbaas.IngressLoadBalancerHandler.on_deleted(m_handler, endpoints)\n\n m_handler._get_lbaas_route_state.assert_not_called()\n m_handler._remove_unused_route_members.assert_not_called()\n\n def test_on_deleted(self):\n endpoints = mock.sentinel.endpoints\n project_id = str(uuid.uuid4())\n subnet_id = str(uuid.uuid4())\n\n m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)\n m_handler._l7_router = t_lbaas.FakeLBaaSDriver().ensure_loadbalancer(\n name='L7_Router',\n project_id=project_id,\n subnet_id=subnet_id,\n ip='1.2.3.4',\n security_groups_ids=None,\n service_type='ClusterIP')\n\n 
m_handler._get_lbaas_route_state.return_value = (\n obj_lbaas.LBaaSRouteState())\n m_handler._remove_unused_route_members.return_value = True\n\n h_ing_lbaas.IngressLoadBalancerHandler.on_deleted(m_handler, endpoints)\n\n m_handler._get_lbaas_route_state.assert_called_once()\n m_handler._remove_unused_route_members.assert_called_once()\n" }, { "alpha_fraction": 0.6202420592308044, "alphanum_fraction": 0.6221379637718201, "avg_line_length": 31.34433937072754, "blob_id": "374d24c845ee21e40fb6d9738810d99833737820", "content_id": "a5c977344bff195218bd9be1e726f6b0f0c08451", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6857, "license_type": "permissive", "max_line_length": 78, "num_lines": 212, "path": "/kuryr_kubernetes/cni/handlers.py", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "# Copyright (c) 2016 Mirantis, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nimport six\n\nfrom os_vif import objects as obj_vif\nfrom oslo_log import log as logging\nfrom oslo_serialization import jsonutils\n\nfrom kuryr_kubernetes.cni.binding import base as b_base\nfrom kuryr_kubernetes import constants as k_const\nfrom kuryr_kubernetes.handlers import dispatch as k_dis\nfrom kuryr_kubernetes.handlers import k8s_base\nfrom kuryr_kubernetes import utils\n\nLOG = logging.getLogger(__name__)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass CNIHandlerBase(k8s_base.ResourceEventHandler):\n OBJECT_KIND = k_const.K8S_OBJ_POD\n\n def __init__(self, cni, on_done):\n self._cni = cni\n self._callback = on_done\n self._vifs = {}\n\n def on_present(self, pod):\n vifs = self._get_vifs(pod)\n\n for ifname, vif in vifs.items():\n self.on_vif(pod, vif, ifname)\n\n if self.should_callback(pod, vifs):\n self.callback()\n\n @abc.abstractmethod\n def should_callback(self, pod, vifs):\n \"\"\"Called after all vifs have been processed\n\n Should determine if the CNI is ready to call the callback\n\n :param pod: dict containing Kubernetes Pod object\n :param vifs: dict containing os_vif VIF objects and ifnames\n :returns True/False\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def callback(self):\n \"\"\"Called if should_callback returns True\"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def on_vif(self, pod, vif, ifname):\n raise NotImplementedError()\n\n def _get_vifs(self, pod):\n # TODO(ivc): same as VIFHandler._get_vif\n try:\n annotations = pod['metadata']['annotations']\n state_annotation = annotations[k_const.K8S_ANNOTATION_VIF]\n except KeyError:\n return {}\n state_annotation = jsonutils.loads(state_annotation)\n state = utils.extract_pod_annotation(state_annotation)\n vifs_dict = state.vifs\n LOG.debug(\"Got VIFs from annotation: %r\", vifs_dict)\n return vifs_dict\n\n def _get_inst(self, pod):\n return obj_vif.instance_info.InstanceInfo(\n uuid=pod['metadata']['uid'], name=pod['metadata']['name'])\n\n\nclass AddHandler(CNIHandlerBase):\n\n def 
__init__(self, cni, on_done):\n LOG.debug(\"AddHandler called with CNI env: %r\", cni)\n super(AddHandler, self).__init__(cni, on_done)\n\n def on_vif(self, pod, vif, ifname):\n \"\"\"Called once for every vif of a Pod on every event.\n\n If it is the first time we see this vif, plug it in.\n\n :param pod: dict containing Kubernetes Pod object\n :param vif: os_vif VIF object\n :param ifname: string, name of the interfaces inside container\n \"\"\"\n if ifname not in self._vifs:\n\n self._vifs[ifname] = vif\n _vif = vif.obj_clone()\n _vif.active = True\n\n # set eth0's gateway as default\n is_default_gateway = (ifname == self._cni.CNI_IFNAME)\n b_base.connect(_vif, self._get_inst(pod),\n ifname, self._cni.CNI_NETNS,\n is_default_gateway=is_default_gateway,\n container_id=self._cni.CNI_CONTAINERID)\n\n def should_callback(self, pod, vifs):\n \"\"\"Called after all vifs have been processed\n\n Determines if CNI is ready to call the callback and stop watching for\n more events. For AddHandler the callback should be called if there\n is at least one VIF in the annotation and all the\n VIFs received are marked active\n\n :param pod: dict containing Kubernetes Pod object\n :param vifs: dict containing os_vif VIF objects and ifnames\n :returns True/False\n \"\"\"\n all_vifs_active = vifs and all(vif.active for vif in vifs.values())\n\n if all_vifs_active:\n if self._cni.CNI_IFNAME in self._vifs:\n self.callback_vif = self._vifs[self._cni.CNI_IFNAME]\n else:\n self.callback_vif = next(iter(self._vifs.values()))\n LOG.debug(\"All VIFs are active, exiting. Will return %s\",\n self.callback_vif)\n return True\n else:\n LOG.debug(\"Waiting for all vifs to become active\")\n return False\n\n def callback(self):\n self._callback(self.callback_vif)\n\n\nclass DelHandler(CNIHandlerBase):\n\n def on_vif(self, pod, vif, ifname):\n b_base.disconnect(vif, self._get_inst(pod),\n self._cni.CNI_IFNAME, self._cni.CNI_NETNS,\n container_id=self._cni.CNI_CONTAINERID)\n\n def should_callback(self, pod, vifs):\n \"\"\"Called after all vifs have been processed\n\n Calls callback if there was at least one vif in the Pod\n\n :param pod: dict containing Kubernetes Pod object\n :param vifs: dict containing os_vif VIF objects and ifnames\n :returns True/False\n \"\"\"\n if vifs:\n return True\n return False\n\n def callback(self):\n self._callback(None)\n\n\nclass CallbackHandler(CNIHandlerBase):\n\n def __init__(self, on_vif, on_del=None):\n super(CallbackHandler, self).__init__(None, on_vif)\n self._del_callback = on_del\n self._pod = None\n self._callback_vifs = None\n\n def on_vif(self, pod, vif, ifname):\n pass\n\n def should_callback(self, pod, vifs):\n \"\"\"Called after all vifs have been processed\n\n Calls callback if there was at least one vif in the Pod\n\n :param pod: dict containing Kubernetes Pod object\n :param vifs: dict containing os_vif VIF objects and ifnames\n :returns True/False\n \"\"\"\n self._pod = pod\n self._callback_vifs = vifs\n if vifs:\n return True\n return False\n\n def callback(self):\n self._callback(self._pod, self._callback_vifs)\n\n def on_deleted(self, pod):\n LOG.debug(\"Got pod %s deletion event.\", pod['metadata']['name'])\n if self._del_callback:\n self._del_callback(pod)\n\n\nclass CNIPipeline(k_dis.EventPipeline):\n\n def _wrap_dispatcher(self, dispatcher):\n return dispatcher\n\n def _wrap_consumer(self, consumer):\n return consumer\n" }, { "alpha_fraction": 0.6122809648513794, "alphanum_fraction": 0.6144897937774658, "avg_line_length": 36.31318664550781, "blob_id": 
"06ae339364bfd688ad1dfd069c1a63219055b4e0", "content_id": "b0ee1ec4922bdf992d4776746bbca344ffbde039", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6791, "license_type": "permissive", "max_line_length": 79, "num_lines": 182, "path": "/kuryr_kubernetes/controller/drivers/namespace_security_groups.py", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "# Copyright (c) 2018 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom kuryr.lib._i18n import _\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom kuryr_kubernetes import clients\nfrom kuryr_kubernetes import config\nfrom kuryr_kubernetes import constants\nfrom kuryr_kubernetes.controller.drivers import base\nfrom kuryr_kubernetes.controller.drivers import utils\nfrom kuryr_kubernetes import exceptions\n\nfrom neutronclient.common import exceptions as n_exc\n\nLOG = logging.getLogger(__name__)\n\nnamespace_sg_driver_opts = [\n cfg.StrOpt('sg_allow_from_namespaces',\n help=_(\"Default security group to allow traffic from the \"\n \"namespaces into the default namespace.\")),\n cfg.StrOpt('sg_allow_from_default',\n help=_(\"Default security group to allow traffic from the \"\n \"default namespaces into the other namespaces.\"))\n]\n\ncfg.CONF.register_opts(namespace_sg_driver_opts, \"namespace_sg\")\n\nDEFAULT_NAMESPACE = 'default'\n\n\ndef _get_net_crd(namespace):\n kubernetes = clients.get_kubernetes_client()\n\n try:\n ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,\n namespace))\n except exceptions.K8sClientException:\n LOG.exception(\"Kubernetes Client Exception.\")\n raise exceptions.ResourceNotReady(namespace)\n try:\n annotations = ns['metadata']['annotations']\n net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]\n except KeyError:\n LOG.debug(\"Namespace missing CRD annotations for selecting the \"\n \"corresponding security group. 
Action will be retried.\")\n raise exceptions.ResourceNotReady(namespace)\n try:\n net_crd = kubernetes.get('%s/kuryrnets/%s' % (constants.K8S_API_CRD,\n net_crd_name))\n except exceptions.K8sClientException:\n LOG.exception(\"Kubernetes Client Exception.\")\n raise\n\n return net_crd\n\n\nclass NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):\n \"\"\"Provides security groups for Pod based on a configuration option.\"\"\"\n\n def get_security_groups(self, pod, project_id):\n namespace = pod['metadata']['namespace']\n net_crd = _get_net_crd(namespace)\n\n sg_list = [str(net_crd['spec']['sgId'])]\n\n extra_sgs = self._get_extra_sg(namespace)\n for sg in extra_sgs:\n sg_list.append(str(sg))\n\n sg_list.extend(config.CONF.neutron_defaults.pod_security_groups)\n\n return sg_list[:]\n\n def _get_extra_sg(self, namespace):\n # Differentiates between default namespace and the rest\n if namespace == DEFAULT_NAMESPACE:\n return [cfg.CONF.namespace_sg.sg_allow_from_namespaces]\n else:\n return [cfg.CONF.namespace_sg.sg_allow_from_default]\n\n def create_namespace_sg(self, namespace, project_id, crd_spec):\n neutron = clients.get_neutron_client()\n\n sg_name = \"ns/\" + namespace + \"-sg\"\n # create the associated SG for the namespace\n try:\n # default namespace is different from the rest\n # Default allows traffic from everywhere\n # The rest can be accessed from the default one\n sg = neutron.create_security_group(\n {\n \"security_group\": {\n \"name\": sg_name,\n \"project_id\": project_id\n }\n }).get('security_group')\n utils.tag_neutron_resources('security-groups', [sg['id']])\n neutron.create_security_group_rule(\n {\n \"security_group_rule\": {\n \"direction\": \"ingress\",\n \"remote_ip_prefix\": crd_spec['subnetCIDR'],\n \"security_group_id\": sg['id']\n }\n })\n except n_exc.NeutronClientException:\n LOG.exception(\"Error creating security group for the namespace \"\n \"%s\", namespace)\n raise\n return {'sgId': sg['id']}\n\n def delete_sg(self, sg_id):\n neutron = clients.get_neutron_client()\n try:\n neutron.delete_security_group(sg_id)\n except n_exc.NotFound:\n LOG.debug(\"Security Group not found: %s\", sg_id)\n except n_exc.NeutronClientException:\n LOG.exception(\"Error deleting security group %s.\", sg_id)\n raise\n\n def delete_namespace_sg_rules(self, namespace):\n LOG.debug(\"Security group driver does not create SG rules for \"\n \"namespace.\")\n\n def create_namespace_sg_rules(self, namespace):\n LOG.debug(\"Security group driver does not create SG rules for \"\n \"namespace.\")\n\n def update_namespace_sg_rules(self, namespace):\n LOG.debug(\"Security group driver does not create SG rules for \"\n \"namespace.\")\n\n def create_sg_rules(self, pod):\n LOG.debug(\"Security group driver does not create SG rules for \"\n \"the pods.\")\n\n def delete_sg_rules(self, pod):\n LOG.debug(\"Security group driver does not delete SG rules for \"\n \"the pods.\")\n\n def update_sg_rules(self, pod):\n LOG.debug(\"Security group driver does not update SG rules for \"\n \"the pods.\")\n\n\nclass NamespaceServiceSecurityGroupsDriver(base.ServiceSecurityGroupsDriver):\n \"\"\"Provides security groups for Service based on a configuration option.\"\"\"\n\n def get_security_groups(self, service, project_id):\n namespace = service['metadata']['namespace']\n net_crd = _get_net_crd(namespace)\n\n sg_list = []\n sg_list.append(str(net_crd['spec']['sgId']))\n\n extra_sgs = self._get_extra_sg(namespace)\n for sg in extra_sgs:\n sg_list.append(str(sg))\n\n return sg_list[:]\n\n def 
_get_extra_sg(self, namespace):\n # Differentiates between default namespace and the rest\n if namespace == DEFAULT_NAMESPACE:\n return [cfg.CONF.namespace_sg.sg_allow_from_default]\n else:\n return [cfg.CONF.namespace_sg.sg_allow_from_namespaces]\n" }, { "alpha_fraction": 0.6084905862808228, "alphanum_fraction": 0.6387578845024109, "avg_line_length": 28.929410934448242, "blob_id": "bfbc673f65933ab84a2acd18afc307d8c27dc329", "content_id": "af9c4c9ffca60f0b9b09285514462ea071bee3e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2544, "license_type": "permissive", "max_line_length": 78, "num_lines": 85, "path": "/kuryr_kubernetes/tests/fake.py", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "# Copyright (c) 2017 Red Hat.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\n\nfrom os_vif import objects as osv_objects\nfrom os_vif.objects import vif as osv_vif\nfrom oslo_serialization import jsonutils\n\n\ndef _fake_vif(cls=osv_vif.VIFOpenVSwitch):\n vif = cls(\n id=uuid.uuid4(),\n vif_name='h_interface',\n bridge_name='bridge',\n address='3e:94:b7:31:a0:83',\n port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch(\n interface_id='89eccd45-43e9-43d8-b4cc-4c13db13f782',\n profile_id=str(uuid.uuid4()),\n ),\n )\n vif.network = osv_objects.network.Network(id=uuid.uuid4(), mtu=1)\n subnet = osv_objects.subnet.Subnet(\n uuid=uuid.uuid4(),\n dns=['192.168.0.1'],\n cidr='192.168.0.0/24',\n gateway='192.168.0.1',\n routes=osv_objects.route.RouteList(objects=[]),\n )\n subnet.ips = osv_objects.fixed_ip.FixedIPList(objects=[])\n subnet.ips.objects.append(\n osv_objects.fixed_ip.FixedIP(address='192.168.0.2'))\n vif.network.subnets.objects.append(subnet)\n vif.active = True\n return vif\n\n\ndef _fake_vif_dict(obj=None):\n if obj:\n return obj.obj_to_primitive()\n else:\n return _fake_vif().obj_to_primitive()\n\n\ndef _fake_vif_string(dictionary=None):\n if dictionary:\n return jsonutils.dumps(dictionary)\n else:\n return jsonutils.dumps(_fake_vif_dict())\n\n\ndef _fake_vifs(cls=osv_vif.VIFOpenVSwitch, prefix='eth'):\n return {'eth0': _fake_vif(cls), prefix+'1': _fake_vif(cls)}\n\n\ndef _fake_vifs_dict(obj=None):\n if obj:\n return {\n ifname: vif.obj_to_primitive() for\n ifname, vif in obj.items()\n }\n else:\n return {\n ifname: vif.obj_to_primitive() for\n ifname, vif in _fake_vifs().items()\n }\n\n\ndef _fake_vifs_string(dictionary=None):\n if dictionary:\n return jsonutils.dumps(dictionary)\n else:\n return jsonutils.dumps(_fake_vifs_dict())\n" }, { "alpha_fraction": 0.7083616852760315, "alphanum_fraction": 0.7166893482208252, "avg_line_length": 34.2335319519043, "blob_id": "9a4e2ee6a36b9caf11ae575bb6661953c9e118b5", "content_id": "2ca80b9d7b3aa0ca63de64977a596f54aea6b8bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5884, "license_type": "permissive", "max_line_length": 101, 
"num_lines": 167, "path": "/doc/source/devref/kuryr_kubernetes_ocp_route_design.rst", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "..\n This work is licensed under a Creative Commons Attribution 3.0 Unported\n License.\n\n http://creativecommons.org/licenses/by/3.0/legalcode\n\n Convention for heading levels in Neutron devref:\n ======= Heading 0 (reserved for the title in a document)\n ------- Heading 1\n ~~~~~~~ Heading 2\n +++++++ Heading 3\n ''''''' Heading 4\n (Avoid deeper levels because they do not render well.)\n\n====================================================\nKuryr Kubernetes Openshift Routes integration design\n====================================================\n\nPurpose\n-------\n\nThe purpose of this document is to present how Openshift Routes are supported\nby kuryr-kubernetes.\n\n\nOverview\n--------\n\n`OpenShift Origin`_ is an open source cloud application development and\nhosting platform that automates the provisioning, management and scaling\nof applications.\n\nOpenShift Origin is a distribution of Kubernetes optimized for continuous\napplication development and multi-tenancy deployment. OpenShift adds developer\nand operations-centric tools on top of Kubernetes to enable rapid application\ndevelopment, easy deployment and scaling, and long-term lifecycle maintenance.\n\nThe `OpenShift Route`_ exposes a Service at a host name, like www.example.com,\nso that external clients can reach it by name.\nThe Route is an Openshift resource that defines the rules you want to apply to\nincoming connections.\nThe Openshift Routes concept was `introduced before Ingress`_ was supported by\nkubernetes, the Openshift Route matches the functionality of kubernetes Ingress.\n\n\nProposed Solution\n-----------------\n\nThe solution will rely on L7 router, Service/Endpoints handler and L7 router\ndriver components described at kuryr-kubernetes Ingress integration design,\nwhere a new component - OCP-Route handler, will satisfy requests for Openshift\nRoute resources.\n\n\nController Handlers impact:\n---------------------------\n\nThe controller handlers should be extended to support OCP-Route resource.\n\n\nThe OCP-Route handler\n~~~~~~~~~~~~~~~~~~~~~\n\nThe OCP-Route handler watches the apiserver's for updates to Openshift\nRoute resources.\nThe following scheme describes OCP-Route controller SW architecture:\n\n.. image:: ../../images/kuryr_k8s_ocp_route_ctrl_sw.svg\n :alt: Ingress/OCP-Route controllers SW architecture\n :align: center\n :width: 100%\n\nSimilar to Kubernetes Ingress, each OCP-Route object being translated to a L7\npolicy in L7 router, and the rules on OCP-Route become L7 (URL) mapping rules\nin that L7 policy. The L7 policy is configured to forward the filtered traffic\nto LbaaS Pool. The LbaaS pool represents an Endpoints resource, and it's the\nService/Endpoints handler responsibility to attach all its members to this\npool. Since the Endpoints resource is not aware of changes in OCP-Route objects\npointing to it, the OCP-Route handler should trigger this notification, the\nnotification will be implemented using annotation of the relevant Endpoint\nresource.\n\n\nUse cases examples\n~~~~~~~~~~~~~~~~~~\n\nThis section describes in details the following scenarios:\n\nA. Create OCP-Route, create Service/Endpoints.\nB. Create Service/Endpoints, create OCP-Route, delete OCP-Route.\n\n* Create OCP-Route, create Service/Endpoints:\n\n * OCP-Route is created under namespace 'mynamespace'\n\n * OCP-Route details :\n\n .. 
code-block:: yaml\n\n apiVersion: v1\n kind: Route\n metadata:\n name: test\n spec:\n host: www.example.com\n to:\n kind: Service\n name: s1\n\n * Since it's the first route pointing to this Service, the OCP-Route\n handler will create LbaaS pool (attached to L7 router)- named\n 'mynamespace_s1'.\n\n * The OCP-Route handler will create L7 rule and L7 policy, the L7\n policy direct it's filtered traffic towards 'mynamespace_s1' pool.\n\n * Service/Endpoints is created under namespace 'mynamespace'\n\n * name: s1\n\n * The Service/Endpoints handler will create user loadbalancer\n\n * The Service/Endpoints handler will check for pool named\n 'mynamespace_s1' and add its members to this pool.\n\n* Create Service/Endpoints, create OCP-Route, delete OCP-Route:\n\n * Service/Endpoints is created under namespace 'mynamespace'\n\n * name: s1\n\n * The Service/Endpoints handler will create user loadbalancer\n * Since no pool named 'mynamespace_s1' exist in L7 router,\n Service will exit.\n\n * OCP-Route is created with same details as described in above yaml file.\n\n * Since it's the first route pointing to this Service, the OCP-Route\n handler will create LbaaS pool (attached to L7 router) named\n 'mynamespace_s1'.\n * The OCP-Route handler will create L7 rule and L7 policy, the L7 policy\n configured to direct its filtered traffic towards 'mynamespace_s1' pool.\n\n * The last step from OCP-Route handler will be to notify\n (using annotation) s1 Endpoint.\n\n * As a result to the OCP-Route notification, the Endpoint handler will\n be called.\n The Service/Endpoints handler will update the members information\n attached to 'mynamespace_s1' pool.\n\n * OCP-Route is deleted\n\n * OCP-Route handler will first delete L7 rule and L7 policy.\n\n * In case no other L7 policy is pointing 'mynamespace_s1' pool, the\n OCP-Route handler will delete 'mynamespace_s1' pool's members and the pool\n itself. The last step from Ingress handler will be to notify s1\n Service/Endpoints.\n\n * As a result to the OCP-Route handler notification, the Service/Endpoints\n handler will set its internal state to 'no Ingress is pointing' state.\n\n\n.. _OpenShift Origin: https://www.openshift.org/\n.. _OpenShift Route: https://docs.openshift.com/enterprise/3.0/architecture/core_concepts/routes.html\n.. _introduced before Ingress: https://kubernetes.io/docs/concepts/Services-networking/ingress/\n" }, { "alpha_fraction": 0.40396925806999207, "alphanum_fraction": 0.4595070481300354, "avg_line_length": 37.80745315551758, "blob_id": "815adab1eb922987184a00fcda4bd8a833be14a3", "content_id": "63703645d534f85c236943b71f70a25564e3df86", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6248, "license_type": "permissive", "max_line_length": 112, "num_lines": 161, "path": "/doc/source/installation/ocp_route.rst", "repo_name": "n1654/kuryr-kubernetes", "src_encoding": "UTF-8", "text": "===============================\nEnable OCP-Router functionality\n===============================\n\nTo enable OCP-Router functionality we should set the following:\n\n- Setting L7 Router.\n- Configure Kuryr to support L7 Router and OCP-Route resources.\n\n\nSetting L7 Router\n------------------\n\nThe L7 Router is the ingress point for the external traffic destined for\nservices in the K8S/OCP cluster. The next steps are needed for setting the L7\nRouter:\n\n#. Create LoadBalancer that will run the L7 loadbalancing:\n\n .. 
code-block:: console\n\n $ openstack loadbalancer create --name kuryr-l7-router --vip-subnet-id k8s-service-subnet\n +---------------------+--------------------------------------+\n | Field | Value |\n +---------------------+--------------------------------------+\n | admin_state_up | True |\n | created_at | 2018-06-28T06:34:15 |\n | description | |\n | flavor | |\n | id | 99f580e6-d894-442a-bc5f-4d14b41e10d2 |\n | listeners | |\n | name | kuryr-l7-router |\n | operating_status | OFFLINE |\n | pools | |\n | project_id | 24042703aba141b89217e098e495cea1 |\n | provider | amphora |\n | provisioning_status | PENDING_CREATE |\n | updated_at | None |\n | vip_address | 10.0.0.171 |\n | vip_network_id | 65875d24-5a54-43fb-91a7-087e956deb1a |\n | vip_port_id | 42c6062a-644a-4004-a4a6-5a88bf596196 |\n | vip_qos_policy_id | None |\n | vip_subnet_id | 01f21201-65a3-4bc5-a7a8-868ccf4f0edd |\n +---------------------+--------------------------------------+\n $\n\n#. Create floating IP address that should be accessible from external network:\n\n .. code-block:: console\n\n $ openstack floating ip create --subnet public-subnet public\n +---------------------+--------------------------------------+\n | Field | Value |\n +---------------------+--------------------------------------+\n | created_at | 2018-06-28T06:31:36Z |\n | description | |\n | dns_domain | None |\n | dns_name | None |\n | fixed_ip_address | None |\n | floating_ip_address | 172.24.4.3 |\n | floating_network_id | 3371c2ba-edb5-45f2-a589-d35080177311 |\n | id | c971f6d3-ba63-4318-a9e7-43cbf85437c2 |\n | name | 172.24.4.3 |\n | port_details | None |\n | port_id | None |\n | project_id | 24042703aba141b89217e098e495cea1 |\n | qos_policy_id | None |\n | revision_number | 0 |\n | router_id | None |\n | status | DOWN |\n | subnet_id | 939eeb1f-20b8-4185-a6b1-6477fbe73409 |\n | tags | [] |\n | updated_at | 2018-06-28T06:31:36Z |\n +---------------------+--------------------------------------+\n $\n\n#. Bind the floating IP to LB vip:\n\n .. code-block:: console\n\n [stack@gddggd devstack]$ openstack floating ip set --port 42c6062a-644a-4004-a4a6-5a88bf596196 172.24.4.3\n\n\nConfigure Kuryr to support L7 Router and OCP-Route resources\n------------------------------------------------------------\n\n#. Configure the L7 Router by adding the LB UUID at kuryr.conf:\n\n .. code-block:: ini\n\n [ingress]\n l7_router_uuid = 99f580e6-d894-442a-bc5f-4d14b41e10d2\n\n#. Enable the ocp-route and k8s-endpoint handlers. For that you need to add\n this handlers to the enabled handlers list at kuryr.conf (details on how to\n edit this for containerized deployment can be found at\n :doc:`./devstack/containerized`):\n\n .. code-block:: ini\n\n [kubernetes]\n enabled_handlers=vif,lb,lbaasspec,ocproute,ingresslb\n\nNote: you need to restart the kuryr controller after applying the above\ndetailed steps. For devstack non-containerized deployments:\n\n.. code-block:: console\n\n $ sudo systemctl restart [email protected]\n\nAnd for containerized deployments:\n\n.. code-block:: console\n\n $ kubectl -n kube-system get pod | grep kuryr-controller\n $ kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME\n\nFor directly enabling both L7 router and OCP-Route handlers when deploying\nwith devstack, you just need to add the following at local.conf file:\n\n.. code-block:: bash\n\n KURYR_ENABLE_INGRESS=True\n KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,ocproute,ingresslb\n\n\nTesting OCP-Route functionality\n-------------------------------\n\n#. Create a service:\n\n .. 
code-block:: console\n\n $ oc run --image=celebdor/kuryr-demo kuryr-demo\n $ oc scale dc/kuryr-demo --replicas=2\n $ oc expose dc/kuryr-demo --port 80 --target-port 8080\n\n#. Create a Route object pointing to above service (kuryr-demo):\n\n .. code-block:: console\n\n $ cat >> route.yaml << EOF\n > apiVersion: v1\n > kind: Route\n > metadata:\n > name: testroute\n > spec:\n > host: www.firstroute.com\n > to:\n > kind: Service\n > name: kuryr-demo\n > EOF\n $ oc create -f route.yaml\n\n#. Curl L7 router's FIP using specified hostname:\n\n .. code-block:: console\n\n $ curl --header 'Host: www.firstroute.com' 172.24.4.3\n kuryr-demo-1-gzgj2: HELLO, I AM ALIVE!!!\n $\n" } ]
6
mikelupu/testaton
https://github.com/mikelupu/testaton
c52c950921a28551ec3d2d3f1feae3fe67117ef1
ad38764d35836f473270fafdcf5f06d946eee3f1
c2649987b6edfcb49cd847ff89cb3ea9796128dd
refs/heads/master
2020-05-07T15:52:45.294505
2019-04-23T20:57:06
2019-04-23T20:57:06
180,656,953
0
0
null
2019-04-10T20:15:44
2019-04-22T11:59:00
2019-04-23T20:59:10
Python
[ { "alpha_fraction": 0.6391880512237549, "alphanum_fraction": 0.6628003120422363, "avg_line_length": 40.63793182373047, "blob_id": "9c9ad69877a6222088c00253ce47832482fb4719", "content_id": "bf9029356f0ebdaced81a2abb9ce483228c2192a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2414, "license_type": "no_license", "max_line_length": 126, "num_lines": 58, "path": "/testaton/common_functions.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nfrom collections import namedtuple\n\ndef score(test, truth):\n \"\"\"\n A function that computes statistical difference between test and truth.\n The score function takes two arrays of real numbers (test = data that is being tested) and (truth = the ground truth that\n you're comparing against) and computes a number of statistics that measure the difference between the two data sets.\n It returns a named tuple called Score with a number of attributes of the result. This function just computes the measures.\n Pass the result to print_score to show a nice version of the score.\n \"\"\"\n assert(len(truth) == len(test))\n total_size = len(truth)\n difference = test - truth\n abs_difference = abs(difference)\n\n rmse = math.sqrt( ((truth - test) ** 2).sum() ) / len(test)\n std_dev = np.std(truth - test)\n mean = np.mean(difference)\n median = np.median(difference)\n abs_mean = np.mean(abs(difference))\n\n negative = difference[difference < 0]\n positive = difference[difference > 0]\n exact = difference[difference == 0]\n within_1 = abs_difference[abs_difference < 1]\n within_5 = abs_difference[abs_difference <= 5]\n within_10 = abs_difference[abs_difference <= 10]\n sum_test = sum(test)\n sum_truth = sum(truth)\n\n sum_diff = sum_test - sum_truth\n sum_diff_pc = (sum_diff / sum_truth) * 100\n\n assert(total_size == len(test))\n\n min_diff = min(difference)\n max_diff = max(difference)\n negative_count = len(negative)\n positive_count = len(positive)\n negative_pc = (negative_count / total_size) * 100\n positive_pc = (positive_count / total_size) * 100\n\n exact_count = len(exact)\n within_1_pc = (len(within_1) / total_size) * 100\n within_5_pc = (len(within_5) / total_size) * 100\n within_10_pc = (len(within_10) / total_size) * 100\n over_10_pc = 100 - within_10_pc\n\n assert(negative_count + positive_count + exact_count == len(difference))\n\n Score = namedtuple('Score', 'size rmse mean abs_mean median std_dev min max negative_pc positive_pc \\\n exact_count within_1_pc within_5_pc within_10_pc, over_10_pc sum_diff sum_diff_pc')\n\n return Score(total_size, rmse, mean, abs_mean, median, std_dev, min_diff, max_diff, negative_pc,\n positive_pc, exact_count, within_1_pc, within_5_pc, within_10_pc, over_10_pc,\n sum_diff, sum_diff_pc)" }, { "alpha_fraction": 0.5690410137176514, "alphanum_fraction": 0.5769996047019958, "avg_line_length": 37.67692184448242, "blob_id": "dcf6a4e12c508c55855b2e8f4adab68e1427a25a", "content_id": "37346dad70a4786f825fb970598e89eaf35b29ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2513, "license_type": "no_license", "max_line_length": 157, "num_lines": 65, "path": "/testaton/generate_sql.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "def generate_uniqueness_sql(dataset, test_def):\n \"\"\"Tests for uniqueness of a field, using a count and group by\"\"\"\n q = \"select \" + test_def['field'] + \", count(1) as dupes \" + \"from \" + 
dataset[test_def['dataset']].table_name\n if 'filter' in test_def:\n q += \" where \" + test_def['filter'] \n q += \" group by \" + test_def['field']\n q += \" having count(1) > 1 \"\n q += \" order by dupes desc \"\n q += \" limit 10\"\n return q\n\ndef generate_filter_sql(dataset, test_def):\n \"\"\"Simple filter test\"\"\"\n q = \"select count(1) as result_count from \" + dataset[test_def['dataset']].table_name\n q += \" where \" + test_def['filter'] \n return q\n\ndef generate_fk_sql(dataset, test_def):\n \"\"\"Tests for a foreign key constraint relationship\"\"\"\n q = \"\"\"\n select count(1) as result_count from (\n select {child_field} from {child_table}\n except\n select {parent_field} from {parent_table}\n ) a\"\"\".format(child_field=test_def['child_field'], child_table=dataset[test_def['child_dataset']].table_name, \n parent_field=test_def['parent_field'], parent_table=dataset[test_def['parent_dataset']].table_name)\n return q\n\ndef generate_field_sql(dataset, test_def):\n \"\"\"Pulls out the two required fields to be compared for accuracy\"\"\"\n q = \"select {field1}, {field2} from {table}\".format(field1=test_def['fields'][0],\n field2=test_def['fields'][1], table=dataset[test_def['dataset']].table_name)\n return q\n\n#TODO Setup the tests again\n\n##################################\n############ TESTS ############### \n##################################\n\"\"\"\ndef test_foreign_key_sql():\n fk_test = {\n \"test_name\" : \"customer vs transaction test\",\n \"test_type\" : \"foreign_key\",\n \"parent_table\" : \"customer\", \n \"parent_field\" : \"customer_id\", \n \"child_table\" : \"transaction\", \n \"child_field\" : \"customer_id\"\n } \n q = generate_fk_sql(fk_test)\n assert(q == '\\n select count(1) from (\\n select customer_id from transaction\\n minus\\n select customer_id from customer\\n )')\n\ndef test_unique_sql():\n unique_test = {\n \"test_name\" : \"product_id unique check\",\n \"test_type\" : \"unique\",\n \"table\" : \"cine\", \n \"field\" : \"id_cine\"\n }\n q = generate_uniqueness_sql(unique_test)\n assert(q == 'select id_cine, count(1) from cine group by id_cine having count(1) > 1 order by count(1) desc limit 10')\n\ntest_foreign_key_sql()\ntest_unique_sql()\n\"\"\"" }, { "alpha_fraction": 0.6798623204231262, "alphanum_fraction": 0.6798623204231262, "avg_line_length": 21.346153259277344, "blob_id": "89d8884af0ac21325e058685bbfaefc78747653d", "content_id": "fca2c170b0cb0c1f95ac74332a0276b96f3446d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/testaton/test_executor.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sqlalchemy as sql\nimport time\nimport findspark\nfindspark.init()\n\nfrom pyspark.sql import SparkSession\n\n\ndef run_in_db(testSql, connection_string):\n print(connection_string)\n print(testSql)\n engine = sql.create_engine(connection_string)\n\n result = pd.read_sql_query(testSql, engine)\n return result\n\n\ndef run_in_spark(testSql, config):\n spark = SparkSession \\\n .builder \\\n .master(config['master']) \\\n .appName(config['app-name']) \\\n .getOrCreate()\n result = spark.sql(testSql)\n return result.toPandas()\n" }, { "alpha_fraction": 0.6010540127754211, "alphanum_fraction": 0.6044795513153076, "avg_line_length": 38.94736862182617, "blob_id": "b15db89af21a7a9dffc1772785619deac2640490", "content_id": 
"7a197b1b8ba6fd417201da96969c42d1cea08d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7590, "license_type": "no_license", "max_line_length": 107, "num_lines": 190, "path": "/testaton/tests_processor.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "from hamcrest import has_length, equal_to\nimport time\nimport sqlalchemy as sql\nimport pandas as pd\n\nfrom .generate_sql import generate_uniqueness_sql, generate_fk_sql, generate_filter_sql, generate_field_sql\nfrom .test_executor import run_in_db, run_in_spark\nfrom .common_functions import score\n\nimport findspark\nfindspark.init()\n\nfrom pyspark.sql import functions as sf\nfrom pyspark.sql import SparkSession\n\n\nclass Connection:\n \"\"\"An object to represent a connection to a dataset, e.g. JDBC string, file location, etc\"\"\"\n\n def __init__(self, connection_def):\n self.type = connection_def['type']\n if self.type == \"JDBC connection\":\n self.connection_string = connection_def['connection-string']\n\n\nclass Dataset:\n \"\"\"An object that defines a dataset upon which operations will be conducted\"\"\"\n\n def __init__(self, connection_dict, dataset_definition):\n self.type = dataset_definition['type']\n self.connection = connection_dict[dataset_definition['connection']]\n self.location = dataset_definition['location']\n self.table_name = dataset_definition['table-name']\n self.setup(dataset_definition)\n\n def validate(self):\n \"\"\"Validates whether the dataset exists and can be accessed\"\"\"\n # TODO create code to validate\n pass\n\n def setup(self, dataset_definition):\n \"\"\"Setups up the dataset into a table if it needs to be setup\"\"\"\n if self.type == 'db-query':\n # TODO this might need a refactor\n viewSql = \"create or replace view \" + self.table_name + \" as \" + \\\n dataset_definition['query']\n engine = sql.create_engine(self.connection.connection_string)\n if viewSql.find(';') != -1:\n raise Exception(\n \"Semi-colons in sql statements are not supported\")\n engine.execute(viewSql)\n\n if self.type[0:4] == 'file':\n spark = SparkSession \\\n .builder \\\n .master(\"local\") \\\n .appName(\"TestingApp\") \\\n .getOrCreate()\n if self.type == 'file-parquet':\n df = spark.read.parquet(self.location)\n df.createOrReplaceTempView(self.table_name)\n if self.type == 'file-csv':\n df = spark.read.format(\"csv\").option(\n \"header\", \"true\").load(self.location)\n df.createOrReplaceTempView(self.table_name)\n\n def destroy(self):\n \"\"\"Destroys temporary created datasets that where setup\"\"\"\n # TODO implement\n\n\ndef get_execution_environment(dataset):\n if dataset.type[0:2] == 'db':\n return {'type': 'db', 'connection': dataset.connection}\n if dataset.type[0:4] == 'file':\n return {'type': 'file', 'connection': dataset.location}\n\n\nclass Test:\n \"\"\"Defines a single test to be executed\"\"\"\n\n def __init__(self, dataset_dict, test_definition, spark_config, dtest_obj):\n self.description = test_definition['description']\n self.type = test_definition['test_type']\n self.severity = test_definition['severity']\n self.definition = test_definition\n self.spark_config = spark_config\n self.dtest = dtest_obj\n\n if self.type == 'unique':\n self.dataset = [dataset_dict[test_definition['dataset']]]\n self.execution_env = get_execution_environment(self.dataset[0])\n self.sql = generate_uniqueness_sql(dataset_dict, test_definition)\n\n if self.type == 'foreign_key':\n self.dataset = 
[dataset_dict[test_definition['parent_dataset']],\n dataset_dict[test_definition['child_dataset']]]\n if len(set([get_execution_environment(d)['type'] for d in self.dataset])) > 1:\n print(\n \"Operation across multiple types of datasets not currently supported\")\n exit(-1)\n else:\n self.execution_env = get_execution_environment(self.dataset[0])\n self.sql = generate_fk_sql(dataset_dict, test_definition)\n\n if self.type == 'filter':\n self.dataset = [dataset_dict[test_definition['dataset']]]\n self.execution_env = get_execution_environment(self.dataset[0])\n self.sql = generate_filter_sql(dataset_dict, test_definition)\n\n if self.type == 'field_accuracy':\n self.dataset = [dataset_dict[test_definition['dataset']]]\n self.execution_env = get_execution_environment(self.dataset[0])\n self.sql = generate_field_sql(dataset_dict, test_definition)\n\n self.validate_test()\n\n def validate_test(self):\n \"\"\"Validates that the test does not contain anything stupid\"\"\"\n if self.sql.find(';') != -1:\n raise Exception(\"Semi-colons in sql statements are not supported\")\n\n # Executes a test against a database\n def execute_db(self):\n print(self.sql)\n print(self.execution_env['connection'])\n return run_in_db(self.sql, self.execution_env['connection'].connection_string)\n\n # Execute a test in spark against a file\n def execute_file(self):\n return run_in_spark(self.sql, self.spark_config)\n\n def process_result(self, test_type, result, duration):\n \"\"\"Asserts the result of the test\"\"\"\n if test_type == 'unique':\n # TODO incorporate this with DTest - and inject duration\n # Idea: could these conditions be also defined as part of the test\n if len(result) == 0:\n print(\"Test: \" + self.description + \"; PASSED\")\n else:\n print(\"Test: \" + self.description + \"; FAILED\")\n self.dtest.assert_that(result, has_length(0), self.description)\n\n if test_type == 'foreign_key' or test_type == 'filter':\n if result['result_count'][0] == 0:\n print(\"Test: \" + self.description + \"; PASSED\")\n else:\n print(\"Test: \" + self.description + \"; FAILED\")\n self.dtest.assert_that(result['result_count'][0],\n equal_to(0), self.description)\n\n if test_type == 'field_accuracy':\n # ensure that the variable values are integers\n result.iloc[:, 0] = result.iloc[:, 0].astype('float')\n result.iloc[:, 1] = result.iloc[:, 1].astype('float')\n ans = score(result.iloc[:, 0].values, result.iloc[:, 1].values)\n self.dtest.publish_result(pd.DataFrame(\n data=[ans]), self.description)\n\n def execute(self):\n start_time = time.time()\n if self.execution_env['type'] == 'db':\n result = self.execute_db()\n if self.execution_env['type'] == 'file':\n result = self.execute_file()\n end_time = time.time()\n duration = end_time - start_time\n self.process_result(self.type, result, duration)\n\n\ndef process_connections(connection_definition):\n connections = {}\n for k in connection_definition.keys():\n connections[k] = Connection(connection_definition[k])\n return connections\n\n\ndef process_datasets(connection_dict, dataset_definition):\n datasets = {}\n for d_key in dataset_definition.keys():\n datasets[d_key] = Dataset(connection_dict, dataset_definition[d_key])\n return datasets\n\n\ndef process_tests(dataset_dict, tests_definition, spark_config, dtest_obj):\n tests = {}\n for t_key in tests_definition.keys():\n tests[t_key] = Test(\n dataset_dict, tests_definition[t_key], spark_config, dtest_obj)\n return tests\n" }, { "alpha_fraction": 0.6914893388748169, "alphanum_fraction": 0.7021276354789734, 
"avg_line_length": 12.571428298950195, "blob_id": "ad4fc41865f2a3dbaeb3cb29242b269606e1a8e1", "content_id": "8fa951276db3ff7d611d24933259ae71da933ab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 94, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/scripts/publish.sh", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 -m twine upload dist/*\n\nrm -rf dist\nrm -rf build\nrm -rf testaton.egg-info" }, { "alpha_fraction": 0.7135096192359924, "alphanum_fraction": 0.7229264974594116, "avg_line_length": 40.19403076171875, "blob_id": "f696c80ca8f5bbeb2d4e60b0c4479a9ad5e884e7", "content_id": "18d1a40cc09669b58239d44c3eaeccf4c4e4090a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2773, "license_type": "no_license", "max_line_length": 356, "num_lines": 67, "path": "/README.md", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "The json file `example_config/configuration.json` contains an example configuration of Dtest, Spark, and the data elements and tests that need to be executed. \n\nThere are 2 main types of connections:\n* Database connections\n* File connections (this will be subdivided into local and S3)\n\nThe data definition defines one of 3 things:\n* A database table\n* A file (csv or parquet)\n* A database query\n\nThe tests define the tests that can be executed. Currently there are 2 types of tests implemented:\n* Uniqueness - check for the uniqueness of a field\n* Foreign Key constraint - check for a key not existing \n\n## Installation\n\n`pip install testaton`\n\n## Requirements\n\nLocal installation of spark if `spark-config:master` is set to `local`\n\n## Execution \n\n`testaton configuration-file.json`\n\n## Configuration\n#### Dtest\nSee [Dtest](https://github.com/sjensen85/dtest) documentation.\n`test-suite-metadata` is translated to the `metadata` argument\n`message-broker-config` is translated to the `connectionConfig` argument\n\n#### Spark\nThe configuration values for Spark are the master node and the application name. These translate to the corresponding arguments needed to build a SparkSession. More information can be found in the official [SparkSession documentation](https://spark.apache.org/docs/2.1.0/api/python/pyspark.sql.html?highlight=sparksession#pyspark.sql.SparkSession.Builder).\n\nThe `master` configuration variable sets the Spark master URL to connect to, such as “local” to run locally, “local[4]” to run locally with 4 cores, or “spark://ip-of-master:7077” to run on a Spark standalone cluster.\n\nThe `app-name` configuration variable sets a name for the application, which will be shown in the Spark web UI.\n\n## TODO\n\n- [ ] json configuration validator (syntax)\n- [ ] validation of the existance of files, configurations, etc (semantics)\n- [ ] add code tests\n- [ ] remove username and password from test file\n- [ ] filter : a number is out of range (e.g. mileage < 0)\n- [ ] count of yesterday's record > today + 10%\n- [ ] clean up code\n- [ ] cross environment test execution (e.g. a table in a database and a file in parquet)\n- [ ] create generic sql test\n```\n \"raw-query-test-example\" : {\n \"description\" : \"NOT IMPLEMENTED!! 
example of a raw sql test\", \n \"test_type\" : \"custom_sql\",\n \"table\" : \"cinema-file\",\n \"sql_code\" : \"select count(1) error_cells from cinema where cinema_id < 1000\",\n \"validation\" : \"df['error_cells] < 100\"\n }\n```\n\n## Done\n\n- [x] add timing calculation to the execution of the test\n- [x] count of null fields > amount \n- [x] complete Dtest integration to the suite (sending the message) \n- [x] add a score function test against two variables from two data sets\n\n" }, { "alpha_fraction": 0.6713091731071472, "alphanum_fraction": 0.6713091731071472, "avg_line_length": 30.676469802856445, "blob_id": "47df2f2a69dba5c934e4b7ee6f2592bccb84027e", "content_id": "94d2d5b33dbafa942ed248ab14cf280a7dc4e777", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/testaton/cli.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "import argparse\nfrom dtest import Dtest\nfrom .tests_processor import process_connections, process_datasets, process_tests\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser(description='Test file')\n\n parser.add_argument('configuration_file', action='store', type=str,\n help='The JSON file defining the Dtest, Spark, and tests configurations')\n\n args = parser.parse_args()\n\n import json\n\n with open(args.test_file, 'r') as read_file:\n definition = json.load(read_file)\n\n dt = Dtest(definition['message-broker-config'],\n definition['test-suite-metadata'])\n connection_dict = process_connections(definition['connections'])\n datasets_dict = process_datasets(\n connection_dict, definition['data-definitions'])\n tests_dict = process_tests(\n datasets_dict, definition['tests'], definition['spark-config'], dt)\n\n for t in tests_dict:\n print(tests_dict[t].sql)\n tests_dict[t].execute()\n\n dt.publish()\n print(connection_dict)\n print(datasets_dict)\n print(tests_dict)\n" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.698113203048706, "avg_line_length": 12.25, "blob_id": "8dd839210566ca5c91979af2d94fc3f55db07329", "content_id": "4e1880b61c853d302cc645a2cb6a96cffabba8dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/testaton/__main__.py", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom .cli import main\nmain()\n" }, { "alpha_fraction": 0.9074074029922485, "alphanum_fraction": 0.9074074029922485, "avg_line_length": 10, "blob_id": "b92cfe972447f067895ce3b030324376c536a607", "content_id": "d6d47971c3f0053e97356804917d362cbc7fc3b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/requirements.txt", "repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "pandas\nsqlalchemy\ndtest-framework\npyhamcrest\nfindspark" }, { "alpha_fraction": 0.5747460126876831, "alphanum_fraction": 0.5791001319885254, "avg_line_length": 24.5, "blob_id": "0fb950ef63f38e63d94be7ace0391a6c452d7f7d", "content_id": "eed394b738f5bc150000d293a31669304f3e095c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 71, "num_lines": 26, "path": "/setup.py", 
"repo_name": "mikelupu/testaton", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\r\n\r\nreadme = open('README.md').read().strip()\r\n\r\nsetup(\r\n name='testaton',\r\n version='0.1.2',\r\n license='MIT',\r\n author='Michael Farrugia',\r\n author_email='[email protected]',\r\n url='https://github.com/mikelupu/testaton',\r\n description='A command line tool to allow the testing of datasets',\r\n long_description=readme,\r\n packages=find_packages(),\r\n install_requires=[\r\n # put packages here\r\n 'six',\r\n 'findspark',\r\n 'pandas',\r\n 'sqlalchemy',\r\n 'dtest-framework',\r\n 'pyhamcrest'\r\n ],\r\n test_suite='tests',\r\n entry_points={'console_scripts': ['testaton = testaton.cli:main']}\r\n)\r\n" } ]
10
calumom/realchatserver
https://github.com/calumom/realchatserver
c6a68eb4cd43c40410b3be52ff40f600106b621a
31f9dfbaf8b2d2727c1a00c101a78af4c4641852
bc81bb197e79b0881205c3be08468e870c030915
refs/heads/master
2021-02-15T15:02:41.888358
2020-03-04T13:39:55
2020-03-04T13:39:55
244,909,898
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6540403962135315, "alphanum_fraction": 0.6616161465644836, "avg_line_length": 27.407407760620117, "blob_id": "b913d4009aabbc523004b3603ba78295ff998a3c", "content_id": "4be4eef25c9d5d48c7650cc7cb2303ce857ed379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 79, "num_lines": 27, "path": "/Message.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import json\r\nimport db_controller\r\n\r\n\r\ndef show_recent_messages(client_socket):\r\n\r\n message_to_send = '\\nRecent messages: \\n'\r\n\r\n recent_messages = db_controller.get_last_six_server_message()\r\n \r\n for m_id, name, timestamp, message in recent_messages:\r\n message_to_send += \"{0} ({1}): {2} \\n\".format(name, timestamp, message)\r\n\r\n return message_to_send\r\n\r\n\r\ndef show_direct_messages(client_socket, username):\r\n\r\n message_to_send = ' \\nDMs received while offline: \\n\\n'\r\n\r\n recent_messages = db_controller.get_direct_messages(username)\r\n\r\n for name, timestamp, message, recipient in recent_messages:\r\n message_to_send += \"{0} ({1}): {2} \\n\".format(name, timestamp, message)\r\n\r\n db_controller.delete_direct_messages(username)\r\n return message_to_send" }, { "alpha_fraction": 0.5738340020179749, "alphanum_fraction": 0.5794963240623474, "avg_line_length": 19.10377311706543, "blob_id": "273f7c282985e08c7860cbea01b37c0fc8b072ae", "content_id": "c7bcc91361ebd2490158f17f0c62832726e72caa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6711, "license_type": "no_license", "max_line_length": 78, "num_lines": 318, "path": "/db_controller.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import sqlite3\r\nfrom time import ctime\r\n\r\n\r\ndef create_table(create_table_sql):\r\n conn = sqlite3.connect(r\"users.db\")\r\n c = conn.cursor()\r\n c.execute(create_table_sql)\r\n\r\n\r\ndef create_user(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" INSERT INTO all_users(name, id, password)\r\n VALUES(?, ?, ?) 
\"\"\"\r\n \r\n cursor = conn.cursor()\r\n cursor.execute(sql, user)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_user_to_db(name, user_id, password):\r\n new_user = (name, user_id, password)\r\n create_user(new_user)\r\n\r\n\r\ndef check_user(name):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT name FROM all_users where name = ?\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (name,))\r\n conn.commit()\r\n\r\n try:\r\n result = cursor.fetchone()\r\n except:\r\n result = \"pass\"\r\n\r\n if result is None:\r\n result = \"pass\"\r\n\r\n conn.close()\r\n return result[0]\r\n\r\n\r\ndef get_all_users():\r\n conn = sqlite3.connect(r\"users.db\")\r\n \r\n sql = \"\"\" SELECT name FROM all_users \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n conn.commit()\r\n\r\n result = cursor.fetchall()\r\n conn.close()\r\n\r\n return list(map(lambda x: x[0], result))\r\n\r\n\r\ndef return_last_user_id():\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT id FROM all_users ORDER BY id DESC LIMIT 1\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n\r\n result = cursor.fetchone()[0]\r\n\r\n conn.close()\r\n return result\r\n\r\ndef get_user_id(name):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT id FROM all_users WHERE name = ?\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (name,))\r\n\r\n result = cursor.fetchone()[0]\r\n\r\n conn.close()\r\n return result\r\n\r\ndef return_password(name):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT password FROM all_users WHERE name=?\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (name,))\r\n\r\n password = cursor.fetchone()[0]\r\n\r\n conn.close()\r\n return password\r\n\r\n\r\ndef delete_user(name):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" DELETE FROM all_users WHERE name=? \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (name,))\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef delete_user_from_db(name):\r\n delete_user(name)\r\n\r\n\r\ndef delete_user_from_friends_list(name):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" DELETE FROM friends WHERE name = ? OR friend = ? \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, name)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef delete_user_from_friend_db(name):\r\n user_delete = (name, name)\r\n delete_user_from_friends_list(user_delete)\r\n\r\n\r\ndef add_friend(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\"INSERT INTO friends(name, friend)\r\n VALUES(?, ?) \"\"\"\r\n \r\n cursor = conn.cursor()\r\n cursor.execute(sql, user)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_friend_to_db(name, target):\r\n add = (name, target)\r\n add_friend(add)\r\n\r\n\r\ndef remove_friend(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" DELETE FROM friends WHERE name = ? AND friend = ? \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, user)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef remove_friend_from_db(name, target):\r\n remove = (name, target)\r\n remove_friend(remove)\r\n\r\n\r\ndef check_if_friend(user, friend):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" SELECT name FROM friends WHERE name = ? AND friend = ? 
\"\"\"\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (user, friend))\r\n conn.commit()\r\n\r\n try:\r\n result = cursor.fetchone()\r\n except:\r\n result = \"pass\"\r\n\r\n if result is None:\r\n result = \"pass\"\r\n\r\n conn.close()\r\n return result[0]\r\n\r\n\r\ndef show_all_friends(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" SELECT friend FROM friends WHERE name = ? \"\"\"\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (user,))\r\n conn.commit()\r\n\r\n result = cursor.fetchall()\r\n conn.close()\r\n\r\n return list(map(lambda x: x[0], result))\r\n\r\n\r\ndef get_user_added(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" SELECT name FROM friends WHERE friend = ? \"\"\"\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (user,))\r\n conn.commit()\r\n\r\n result = cursor.fetchall()\r\n conn.close()\r\n\r\n return list(map(lambda x: x[0], result))\r\n\r\n\r\ndef save_server_message(message):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" INSERT INTO server_messages(name, timestamp, message)\r\n VALUES(?, ?, ?) \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, message)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef save_server_message_to_db(username, message):\r\n save_message = (username, ctime()[:16], message)\r\n\r\n save_server_message(save_message)\r\n\r\n\r\ndef get_last_six_server_message():\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT * FROM server_messages ORDER BY id DESC LIMIT 6\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n conn.commit()\r\n\r\n result = cursor.fetchall()\r\n conn.close()\r\n\r\n return result\r\n\r\n\r\ndef save_direct_message(message):\r\n conn = sqlite3.connect(r\"users.db\")\r\n \r\n sql = \"\"\" INSERT INTO direct_messages(name, timestamp, message, recipient)\r\n VALUES(?, ?, ?, ?) \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, message)\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef save_direct_message_to_db(username, message, recipient):\r\n save_message = (username, ctime()[:16], message, recipient)\r\n\r\n save_direct_message(save_message)\r\n\r\n\r\ndef get_direct_messages(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" SELECT * FROM direct_messages WHERE recipient = ? \"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (user,))\r\n conn.commit()\r\n result = cursor.fetchall()\r\n\r\n conn.close()\r\n return result\r\n\r\n\r\ndef delete_direct_messages(user):\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"\"\" DELETE FROM direct_messages WHERE recipient = ? 
\"\"\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql, (user,))\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef get_last_user():\r\n conn = sqlite3.connect(r\"users.db\")\r\n\r\n sql = \"SELECT name FROM all_users ORDER BY id DESC LIMIT 1\"\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n conn.commit()\r\n\r\n result = cursor.fetchone()[0]\r\n conn.close()\r\n\r\n return result\r\n\r\n\r\ndef main():\r\n conn = sqlite3.connect(r\"users.db\")\r\n \r\n sql = \"\"\" \"\"\"\r\n \r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n conn.commit()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.5340590476989746, "alphanum_fraction": 0.6039914488792419, "avg_line_length": 22.95061683654785, "blob_id": "f4da3c9cb20e39b25ac012ef6a5ed3f5d9b2e625", "content_id": "a871bb5e69b77e25368bf484b95e7f3680bdf6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12126, "license_type": "no_license", "max_line_length": 88, "num_lines": 486, "path": "/RobotTest.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import socket\r\nimport time\r\nfrom time import ctime\r\nimport json\r\nimport string\r\nimport random\r\nimport db_controller\r\n\r\n# TODO: be able to run in succession\r\n# @james not every test works right now, will fix the remaining ones soon\r\n\r\n\r\ndef rand_string():\r\n letters = string.ascii_lowercase\r\n return ''.join(random.choice(letters) for i in range(11))\r\n\r\n\r\ndef calum_login(sock1):\r\n login_input = '/login'\r\n user_input = 'calum'\r\n pass_input = 'test'\r\n\r\n sock1.send(login_input.encode('utf-8'))\r\n sock1.recv(1024)\r\n time.sleep(0.1)\r\n sock1.send(user_input.encode('utf-8'))\r\n sock1.recv(1024)\r\n time.sleep(0.1)\r\n sock1.send(pass_input.encode('utf-8'))\r\n time.sleep(0.1)\r\n\r\n return sock1\r\n\r\n\r\ndef joe_login(sock2):\r\n login_input = '/login'\r\n user_input = 'joe'\r\n pass_input = 'test'\r\n\r\n sock2.send(login_input.encode('utf-8'))\r\n sock2.recv(1024)\r\n time.sleep(0.1)\r\n sock2.send(user_input.encode('utf-8'))\r\n sock2.recv(1024)\r\n time.sleep(0.1)\r\n sock2.send(pass_input.encode('utf-8'))\r\n time.sleep(0.1)\r\n\r\n return sock2\r\n\r\n\r\ndef test_valid_login():\r\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock1)\r\n data = sock1.recv(1024).decode('utf-8')\r\n print(data)\r\n sock1.send(\"/exit\".encode('utf-8'))\r\n time.sleep(0.1)\r\n sock1.close()\r\n return True if data.startswith('logged') else False\r\n\r\n\r\ndef test_invalid_login_user_online():\r\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock1)\r\n\r\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock2.connect(('192.168.56.1', 5000))\r\n\r\n login_input = '/login'\r\n user_input = 'calum'\r\n\r\n time.sleep(0.5)\r\n sock2.send(login_input.encode('utf-8'))\r\n time.sleep(0.1)\r\n sock2.recv(1024)\r\n sock2.send(user_input.encode('utf-8'))\r\n data = sock2.recv(1024).decode('utf-8')\r\n print(data)\r\n\r\n sock1.send(\"/exit\".encode('utf-8'))\r\n time.sleep(0.1)\r\n sock1.close()\r\n sock2.close()\r\n\r\n return True if data == 'denied' else False\r\n\r\n\r\ndef test_invalid_login_account_doesnt_exist():\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect(('192.168.56.1', 5000))\r\n\r\n login_input = '/login'\r\n user_input = 'heghegdfg'\r\n\r\n 
sock.send(login_input.encode('utf-8'))\r\n    time.sleep(0.1)\r\n    sock.recv(1024)\r\n    sock.send(user_input.encode('utf-8'))\r\n\r\n    data = sock.recv(1024).decode('utf-8')\r\n    sock.close()\r\n\r\n    return True if data == 'error' else False\r\n\r\n\r\ndef test_logout_and_login():\r\n\r\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock.connect(('192.168.56.1', 5000))\r\n\r\n    exit_input = '/exit'\r\n\r\n    calum_login(sock)\r\n    time.sleep(0.1)\r\n    sock.recv(1024)\r\n\r\n    sock.send(exit_input.encode('utf-8'))\r\n    sock.recv(1024)\r\n    time.sleep(1)\r\n\r\n    calum_login(sock)\r\n    time.sleep(0.1)\r\n\r\n    data = sock.recv(1024).decode('utf-8')\r\n    print(data)\r\n    sock.send(exit_input.encode('utf-8'))\r\n    sock.recv(1024)\r\n    time.sleep(0.1)\r\n\r\n    return True if data.startswith('logged') else False\r\n\r\n\r\ndef test_create_user_success():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    create_input = '/create'\r\n    user_input = rand_string()\r\n    pass_input = 'test'\r\n\r\n    sock1.send(create_input.encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.send(user_input.encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.send(pass_input.encode('utf-8'))\r\n    sock1.send(pass_input.encode('utf-8'))\r\n    time.sleep(0.1)\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n\r\n    time.sleep(1)\r\n\r\n    user_check = db_controller.check_user(user_input)\r\n    print(user_check)\r\n\r\n    return True if user_check == user_input else False\r\n\r\n\r\ndef test_create_user_fail():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    create_input = '/create'\r\n    user_input = 'calum'\r\n\r\n    sock1.send(create_input.encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.send(user_input.encode('utf-8'))\r\n    data = sock1.recv(1024).decode('utf-8')\r\n\r\n    return True if data == 'error' else False\r\n\r\n\r\ndef test_delete_user():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    login_input = '/login'\r\n    user_input = db_controller.get_last_user()\r\n    pass_input = 'test'\r\n    delete = '/delete'\r\n\r\n    sock1.send(login_input.encode('utf-8'))\r\n    time.sleep(0.1)\r\n    # consume the server's 'login' reply so the handshake matches calum_login\r\n    sock1.recv(1024)\r\n    sock1.send(user_input.encode('utf-8'))\r\n    sock1.recv(1024)\r\n    time.sleep(0.1)\r\n    sock1.send(pass_input.encode('utf-8'))\r\n    sock1.recv(1024)\r\n    time.sleep(0.1)\r\n    sock1.send(delete.encode('utf-8'))\r\n    time.sleep(0.1)\r\n    sock1.recv(1024)\r\n\r\n    del_input = 'y'\r\n    sock1.send(del_input.encode('utf-8'))\r\n\r\n    time.sleep(0.5)\r\n\r\n    user_check = db_controller.check_user(user_input)\r\n    print(user_check)\r\n\r\n    return True if user_check == \"p\" else False\r\n\r\n\r\ndef test_send_and_receive_message():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock2.connect(('192.168.56.1', 5000))\r\n\r\n    calum_login(sock1)\r\n    joe_login(sock2)\r\n\r\n    time.sleep(1)\r\n    sock2.recv(1024)\r\n    message = \"hello world\"\r\n    sock1.send(message.encode('utf-8'))\r\n    time.sleep(0.1)\r\n    data = sock2.recv(1024).decode('utf-8')\r\n    print(data)\r\n\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock2.send(\"/exit\".encode('utf-8'))\r\n    sock2.recv(1024)\r\n    sock1.close()\r\n    sock2.close()\r\n\r\n    return True if data == 'calum (' + ctime()[11:16] + \"): hello world \\n\" else False\r\n\r\n\r\ndef test_direct_messages():\r\n    sock1 = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock2.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock1)\r\n joe_login(sock2)\r\n\r\n time.sleep(0.5)\r\n sock2.recv(1024)\r\n time.sleep(1)\r\n\r\n message = \"hello joe\"\r\n dm = '/dm joe'\r\n sock1.send(dm.encode('utf-8'))\r\n time.sleep(0.1)\r\n sock1.send(message.encode('utf-8'))\r\n time.sleep(0.1)\r\n data = sock2.recv(1024).decode('utf-8')\r\n print(data)\r\n\r\n time.sleep(0.1)\r\n\r\n sock1.send(\"/exit\".encode('utf-8'))\r\n sock1.recv(1024)\r\n sock2.send(\"/exit\".encode('utf-8'))\r\n sock2.recv(1024)\r\n sock1.close()\r\n sock2.close()\r\n\r\n return True if data == ('(PM) calum says: ' + message) else False\r\n\r\n\r\ndef test_offline_direct_messages():\r\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock1)\r\n\r\n time.sleep(0.5)\r\n\r\n message = \"hello joe\"\r\n dm = '/dm joe'\r\n sock1.send(dm.encode('utf-8'))\r\n time.sleep(0.1)\r\n sock1.send(message.encode('utf-8'))\r\n time.sleep(0.1)\r\n\r\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock2.connect(('192.168.56.1', 5000))\r\n\r\n joe_login(sock2)\r\n time.sleep(0.5)\r\n data = sock2.recv(1024).decode('utf-8')\r\n result = data.splitlines()[-1]\r\n print(result)\r\n\r\n sock1.send(\"/exit\".encode('utf-8'))\r\n sock1.recv(1024)\r\n sock2.send(\"/exit\".encode('utf-8'))\r\n sock2.recv(1024)\r\n sock1.close()\r\n sock2.close()\r\n\r\n return True if result == 'calum (' + ctime()[:16] + '): ' + message + ' ' else False\r\n\r\n\r\ndef test_recent_messages():\r\n\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock)\r\n\r\n time.sleep(0.5)\r\n\r\n data = sock.recv(1024).decode('utf-8')\r\n recent = []\r\n recent.append('Recent messages: \\n')\r\n\r\n messages = db_controller.get_last_six_server_message()\r\n\r\n for mid, name, timestamp, message in messages:\r\n recent.append(\"{0} ({1}): {2} \\n\".format(name, timestamp, message))\r\n\r\n test = data.splitlines(True)[1:8]\r\n print(test)\r\n print(recent)\r\n\r\n sock.send(\"/exit\".encode('utf-8'))\r\n sock.recv(1024)\r\n sock.close()\r\n\r\n return True if test == recent else False\r\n\r\n\r\ndef test_check_online_users():\r\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n online_input = '/online'\r\n\r\n calum_login(sock1)\r\n time.sleep(0.5)\r\n sock1.recv(1024)\r\n\r\n sock1.send(online_input.encode('utf-8'))\r\n data1 = sock1.recv(1024).decode('utf-8')\r\n\r\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock2.connect(('192.168.56.1', 5000))\r\n\r\n joe_login(sock2)\r\n\r\n time.sleep(0.5)\r\n sock1.recv(1024)\r\n\r\n sock1.send(online_input.encode('utf-8'))\r\n data2 = sock1.recv(1024).decode('utf-8')\r\n\r\n print(data1)\r\n print(data2)\r\n\r\n sock1.send(\"/exit\".encode('utf-8'))\r\n sock1.recv(1024)\r\n sock2.send(\"/exit\".encode('utf-8'))\r\n sock2.recv(1024)\r\n sock1.close()\r\n sock2.close()\r\n\r\n check1 = \"/online calum, \"\r\n check2 = \"/online calum, joe, \"\r\n\r\n return True if data1 == check1 and data2 == check2 else False\r\n\r\n\r\ndef test_friend_online_message():\r\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock1.connect(('192.168.56.1', 5000))\r\n\r\n calum_login(sock1)\r\n time.sleep(0.5)\r\n sock1.recv(1024)\r\n\r\n sock2 = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock2.connect(('192.168.56.1', 5000))\r\n\r\n    joe_login(sock2)\r\n\r\n    data = sock1.recv(1024).decode('utf-8')\r\n    print(data)\r\n\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock2.send(\"/exit\".encode('utf-8'))\r\n    sock2.recv(1024)\r\n    sock1.close()\r\n    sock2.close()\r\n\r\n    return True if data.startswith(\"Your friend joe has joined the server!\") else False\r\n\r\n\r\ndef test_add_friend():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock2.connect(('192.168.56.1', 5000))\r\n\r\n    create_input = '/create'\r\n    user_input = rand_string()\r\n    pass_input = 'test'\r\n\r\n    sock2.send(create_input.encode('utf-8'))\r\n    sock2.recv(1024)\r\n    sock2.send(user_input.encode('utf-8'))\r\n    sock2.recv(1024)\r\n    sock2.send(pass_input.encode('utf-8'))\r\n    sock2.send(pass_input.encode('utf-8'))\r\n    sock2.send(\"/exit\".encode('utf-8'))\r\n\r\n    calum_login(sock1)\r\n    time.sleep(0.1)\r\n    sock1.recv(1024)\r\n\r\n    add_input = '/add ' + user_input\r\n    sock1.send(add_input.encode('utf-8'))\r\n\r\n    time.sleep(0.1)\r\n\r\n    result = db_controller.check_if_friend(\"calum\", user_input)\r\n    print(result)\r\n\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.close()\r\n\r\n    return True if result == \"calum\" else False\r\n\r\n\r\ndef test_remove_friend():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    calum_friends = db_controller.show_all_friends(\"calum\")\r\n\r\n    calum_login(sock1)\r\n    time.sleep(0.1)\r\n    sock1.recv(1024)\r\n\r\n    random_friend = random.choice(calum_friends)\r\n    # re-draw until the chosen friend is not the fixed test user 'joe'\r\n    while random_friend == \"joe\":\r\n        random_friend = random.choice(calum_friends)\r\n\r\n    delete_message = '/remove ' + random_friend\r\n    sock1.send(delete_message.encode('utf-8'))\r\n\r\n    time.sleep(0.5)\r\n\r\n    result = db_controller.check_if_friend(\"calum\", random_friend)\r\n\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.close()\r\n\r\n    return True if result == \"pass\" else False\r\n\r\n\r\ndef test_check_friends():\r\n    sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock1.connect(('192.168.56.1', 5000))\r\n\r\n    friend_input = '/friends'\r\n\r\n    calum_login(sock1)\r\n    time.sleep(0.5)\r\n    sock1.recv(1024)\r\n\r\n    sock1.send(friend_input.encode('utf-8'))\r\n    data = sock1.recv(1024).decode('utf-8')\r\n    print(data)\r\n\r\n    sock1.send(\"/exit\".encode('utf-8'))\r\n    sock1.recv(1024)\r\n    sock1.close()\r\n\r\n    return True if data.startswith(\"/friends\") else False\r\n" }, { "alpha_fraction": 0.5340590476989746, "alphanum_fraction": 0.5499207377433777, "avg_line_length": 26.25373077392578, "blob_id": "e99e4b30204046bd0320c51f03a564f0474165f5", "content_id": "4582b0f51b6a2ebb425dee2efcd6d33cd945cb61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3786, "license_type": "no_license", "max_line_length": 109, "num_lines": 134, "path": "/Client.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import socket\r\nimport threading\r\nimport sys\r\n\r\nhost = '192.168.101.88'\r\nport = 5000\r\n\r\nprint(\"test1\")\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient_socket.connect((host, port))\r\nprint(\"test2\")\r\n\r\n# TODO: prevent user overloading server with information\r\n\r\n\r\ndef login():\r\n\r\n    log = 
input(\"Type /login to log into an account, or /create to create an account: \")\r\n login_input = log.encode('utf-8') # clean up confusing variables with c# client\r\n\r\n client_socket.send(login_input)\r\n response = client_socket.recv(1024).decode('utf-8')\r\n\r\n if response == 'create':\r\n create()\r\n elif response == 'fail':\r\n login()\r\n elif response == 'login':\r\n pass\r\n\r\n user = input(\"Enter your username: \")\r\n user_login = user.encode('utf-8')\r\n client_socket.send(user_login)\r\n\r\n while True:\r\n data = client_socket.recv(1024).decode('utf-8')\r\n if data == 'denied':\r\n print(\"User is already logged in\")\r\n login()\r\n elif data == 'accepted':\r\n password = input(\"Now enter your password: \")\r\n client_socket.send(password.encode('utf-8')) # TODO: hash password before sending in c# client\r\n\r\n data = client_socket.recv(1024).decode('utf-8')\r\n if data == 'logged':\r\n print(\"Logged in.\")\r\n send()\r\n elif data == 'passfail':\r\n print(\"Incorrect password.\")\r\n login()\r\n\r\n elif data == 'error':\r\n print(\"Account does not exist\")\r\n login()\r\n\r\n\r\ndef create():\r\n\r\n while True:\r\n create_input = input(\"Enter the name you would like to use for your account, or type -- to cancel: \")\r\n message = create_input.encode('utf-8')\r\n client_socket.send(message)\r\n\r\n if create_input == '--':\r\n login()\r\n\r\n data = client_socket.recv(1024).decode('utf-8')\r\n if data == 'error':\r\n print(\"Account already exists, try another name.\")\r\n elif data == 'pass':\r\n password = input(\"Now choose a password: \")\r\n client_socket.send(password.encode('utf-8'))\r\n print(\"Account created!\")\r\n login()\r\n\r\n\r\ndef delete():\r\n delete_input = input(\"Are you sure you want to delete your account? y/n: \")\r\n message = delete_input.encode('utf-8')\r\n\r\n client_socket.send(message)\r\n\r\n if delete_input == 'y':\r\n print(\"Account deleted\")\r\n\r\n login()\r\n\r\n # TODO: fix it breaking when logging in after deleting account\r\n\r\n\r\ndef recv():\r\n \"\"\"\r\n Waits for any incoming data from the server, unpacks it to find out who sent it, and prints\r\n out the message to the client with the name of the sender and a timestamp.\r\n \"\"\"\r\n while True:\r\n data = client_socket.recv(1024)\r\n if data is not None:\r\n try:\r\n if data.decode('utf-8') == 'exit':\r\n sys.exit(0)\r\n else:\r\n print(data.decode('utf-8'))\r\n except UnicodeDecodeError:\r\n pass\r\n\r\n\r\ndef send():\r\n \"\"\"\r\n Allows the user to input their message, packs it up for the server and sends the message to the\r\n server. 
Also allows the user to exit or delete their account.\r\n \"\"\"\r\n threading.Thread(target=recv).start()\r\n\r\n message = input(\"-> \")\r\n\r\n client_socket.send(message.encode('utf-8'))\r\n\r\n while message != '/exit' and message != '/delete':\r\n message = input(\"-> \")\r\n\r\n client_socket.send(message.encode('utf-8'))\r\n\r\n if message == '/delete':\r\n delete()\r\n\r\n if message == '/exit':\r\n client_socket.send(''.encode('utf-8'))\r\n print(\"Logged out.\")\r\n login()\r\n\r\n\r\nif __name__ == '__main__':\r\n login()\r\n" }, { "alpha_fraction": 0.539020836353302, "alphanum_fraction": 0.5424139499664307, "avg_line_length": 25.144737243652344, "blob_id": "4649efe527fdd4f3c1ce691a477ab3758eae9d07", "content_id": "effb3b67a6b72a47ccdcccaf4b20e3b56f70169a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 94, "num_lines": 76, "path": "/User.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import db_controller\r\n\r\nBUFFER_SIZE = 8192\r\n\r\n# TODO: change to OO\r\n\r\n\r\nclass UserClient:\r\n\r\n def __init__(self, username, identifier, password, friends):\r\n self.username = username\r\n self.identifier = identifier\r\n self.password = password\r\n self.friends = friends\r\n\r\n \r\n def add_friend(self, target_friend):\r\n\r\n friend_check = db_controller.check_user(target_friend)\r\n\r\n if friend_check == target_friend:\r\n result = db_controller.check_if_friend(self.username, target_friend)\r\n \r\n if result == 'pass':\r\n db_controller.add_friend_to_db(self.username, target_friend)\r\n\r\n message = \"/success\"\r\n\r\n else:\r\n message = \"/alreadyexists\"\r\n \r\n else:\r\n message = \"/nouser\"\r\n\r\n return message\r\n\r\n\r\n def remove_friend(self, target_remove):\r\n\r\n user_check = db_controller.check_user(target_remove)\r\n\r\n if user_check == target_remove:\r\n friend_check = db_controller.check_if_friend(self.username, target_remove)\r\n\r\n if friend_check == self.username:\r\n db_controller.remove_friend_from_db(self.username, target_remove)\r\n\r\n message = \"{0} has been removed from your friend list\\n\".format(target_remove)\r\n \r\n else:\r\n message = \"User is not a friend\" \r\n\r\n else:\r\n message = \"User does not exist\"\r\n\r\n return message\r\n\r\n\r\n def show_online_friends(self, online_users):\r\n\r\n online_friends = []\r\n offline_friends = []\r\n\r\n for friend in self.friends:\r\n if friend in online_users:\r\n online_friends.append(friend)\r\n else:\r\n offline_friends.append(friend)\r\n\r\n separator = ', '\r\n online = separator.join(online_friends)\r\n offline = separator.join(offline_friends)\r\n \r\n message = \"Online: {0} \\n Offline: {1}\".format(online, offline)\r\n\r\n return message\r\n" }, { "alpha_fraction": 0.5816088318824768, "alphanum_fraction": 0.5867823958396912, "avg_line_length": 31.008264541625977, "blob_id": "ab72408f323a1d6d4f7dd26ef6bec61cd149d51c", "content_id": "ba99bf546c077064bc1b2d071adc0ff47341a39b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11984, "license_type": "no_license", "max_line_length": 100, "num_lines": 363, "path": "/server.py", "repo_name": "calumom/realchatserver", "src_encoding": "UTF-8", "text": "import socket\r\nimport threading\r\nimport sys\r\nfrom User import UserClient\r\nfrom datetime import datetime\r\nimport Message\r\nimport time\r\nimport db_controller\r\nfrom 
passlib.hash import pbkdf2_sha256\r\n\r\nclients = set()\r\nclients_lock = threading.Lock()\r\nonline_users = []\r\nuser_sockets = {}\r\nBUFFER_SIZE = 8192\r\nSOCK_LISTEN_SIZE = 10\r\n\r\n\r\ndef create_socket():\r\n    \"\"\"\r\n    First creating the server socket, binding the host and port to it,\r\n    then making it listen for any incoming client connections and when the\r\n    connection comes in, create a thread for it.\r\n    \"\"\"\r\n    host = '192.168.101.72'\r\n    port = 5000\r\n\r\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock.bind((host, port))\r\n    sock.listen(SOCK_LISTEN_SIZE)\r\n    print(\"Server setup\")\r\n\r\n    while True:\r\n        conn, address = sock.accept()\r\n        print(address, \"has connected to the server\")\r\n        threading.Thread(target=begin_authentication, args=(conn, address)).start()\r\n\r\n\r\ndef begin_authentication(client_socket, address):\r\n\r\n    with clients_lock:\r\n        if client_socket not in clients:\r\n            clients.add(client_socket)\r\n\r\n    while True:\r\n\r\n        try:\r\n            data = client_socket.recv(BUFFER_SIZE)\r\n        except OSError:\r\n            print(\"Client disconnected auth check\")\r\n            sys.exit(0)\r\n\r\n        if data is not None:\r\n            data_read = data\r\n            data_decoded = data_read.decode('utf-8')\r\n\r\n            if data_decoded.startswith('/login'):\r\n                wrap_and_send_message(client_socket, \"login\")\r\n                user_login(client_socket, address)\r\n\r\n            elif data_decoded == '/create':\r\n                wrap_and_send_message(client_socket, \"create\")\r\n                create_user(client_socket, address)\r\n\r\n            elif data_decoded == 'closing':\r\n                pass\r\n\r\n            else:\r\n                pass\r\n\r\n\r\ndef new_client(client_socket, address, client_user, online_user_list, online_status):\r\n    \"\"\"\r\n    Add the client to a list of clients then waits to receive data. When the data\r\n    comes in, it is sent to everyone who didn't send the message. Also handles the\r\n    user requesting to do things such as adding friends.\r\n    \"\"\"\r\n    print(\"{0} logged in\".format(client_user.username))\r\n    recent = Message.show_recent_messages(client_socket)\r\n    dm = Message.show_direct_messages(client_socket, client_user.username)\r\n\r\n    wrap_and_send_message(client_socket, recent)\r\n    wrap_and_send_message(client_socket, dm)\r\n\r\n    friend_online_message = \"Your friend {0} has joined the server! \\n\".format(client_user.username)\r\n    friend_broadcast(client_user.username, friend_online_message)\r\n\r\n    while online_status is True:\r\n\r\n        try:\r\n            data = client_socket.recv(BUFFER_SIZE)\r\n            data_decoded = data.decode('utf-8')\r\n        except OSError:\r\n            print(\"Client disconnected receive check\")\r\n            online_status = False\r\n            client_disconnects(client_user, client_socket)\r\n            sys.exit(0)\r\n\r\n        while data_decoded is not None:\r\n            data_decoded = data.decode('utf-8')\r\n\r\n            if data_decoded == '/delete':\r\n                online_users.remove(client_user.username)\r\n                delete_user(client_socket, client_user.username, address)\r\n\r\n            elif data_decoded == '/online':\r\n                show_online_users(client_socket, online_user_list)\r\n\r\n            elif data_decoded == '/users':\r\n                show_all_users(client_socket)\r\n\r\n            elif data_decoded == '/friends':\r\n                show_friends(client_socket, client_user.username)\r\n\r\n            elif data_decoded.startswith(\"/dm\"):\r\n                target = data_decoded[4:]\r\n                user_check = db_controller.check_user(target)\r\n\r\n                if target in online_users:\r\n                    direct_message(client_socket, client_user.username, user_sockets, target)\r\n\r\n                elif target not in online_users and target == user_check:\r\n\r\n                    data = client_socket.recv(BUFFER_SIZE)\r\n                    data = data.decode('utf-8')\r\n\r\n                    db_controller.save_direct_message_to_db(client_user.username, data, target)\r\n\r\n            if data_decoded.startswith(\"/add\"):\r\n                target_friend = data_decoded[5:]\r\n                message = UserClient.add_friend(client_user, target_friend)\r\n                if message == \"/success\":\r\n                    client_user.friends.append(target_friend)\r\n                print(message)\r\n\r\n                wrap_and_send_message(client_socket, message)\r\n\r\n            elif data_decoded.startswith('/remove'):\r\n                target = data_decoded[8:]\r\n                message = UserClient.remove_friend(client_user, target)\r\n                # remove_friend's success message ends with a newline, so compare against that exact string\r\n                if message == \"{0} has been removed from your friend list\\n\".format(target):\r\n                    client_user.friends.remove(target)\r\n\r\n                wrap_and_send_message(client_socket, message)\r\n\r\n            elif data_decoded == '/exit':\r\n                online_status = False\r\n                client_disconnects(client_user, client_socket)\r\n                wrap_and_send_message(client_socket, \"/exit\")\r\n                print(\"{0} has logged out\".format(client_user.username))\r\n\r\n            print(client_user.username, \": \", data)\r\n\r\n            if not data_decoded.startswith(\"/\"):\r\n                db_controller.save_server_message_to_db(client_user.username, data_decoded)\r\n                broadcast_message(client_socket, client_user.username, data_decoded)\r\n\r\n            break\r\n    begin_authentication(client_socket, address)\r\n\r\n\r\ndef wrap_and_send_message(client_socket, message):\r\n    message_to_send = message.encode('utf-8')\r\n    client_socket.send(message_to_send)\r\n\r\n\r\ndef client_disconnects(client_user, client_socket):\r\n    # Lets friends of the disconnecting user know they have disconnected\r\n\r\n    message = \"{0} has disconnected from the server \\n\".format(client_user.username)\r\n    friend_broadcast(client_user.username, message)\r\n\r\n    online_users.remove(client_user.username)\r\n    with clients_lock:\r\n        clients.remove(client_socket)\r\n\r\n    del client_user\r\n\r\n\r\ndef friend_broadcast(username, message):\r\n\r\n    all_friends = db_controller.get_user_added(username)\r\n    friend_keys = []\r\n    for friend in all_friends:\r\n        friend_keys.append(friend)\r\n    print(friend_keys)\r\n\r\n    for name, user_socket in user_sockets.items():\r\n        if name in friend_keys:\r\n            try:\r\n                user_sockets.get(name).send(message.encode('utf-8'))\r\n            except ConnectionResetError:\r\n                print(\"Client disconnected friend broadcast check\")\r\n            except OSError:\r\n                print(\"Client disconnected\")\r\n\r\n\r\ndef broadcast_message(client_socket, username, data):\r\n    with clients_lock:\r\n        for c in clients:\r\n            try:\r\n                now = datetime.now()\r\n                time = now.strftime(\"%H:%M\")\r\n                c.send(\"{0} ({1}): {2} \\n\".format(username, time, data).encode('utf-8'))\r\n            except ConnectionResetError:\r\n                print(\"Client disconnected message broadcast check\")\r\n            except OSError:\r\n                print(\"Client disconnected\")\r\n\r\n\r\ndef user_login(client_socket, address):\r\n\r\n    while True:\r\n        data = client_socket.recv(BUFFER_SIZE).decode('utf-8')\r\n        name = data.lower()\r\n\r\n        name_check = db_controller.check_user(name)\r\n\r\n        if name == name_check and name not in online_users:\r\n            wrap_and_send_message(client_socket, \"accepted\")\r\n\r\n            password = db_controller.return_password(name)\r\n            data = client_socket.recv(BUFFER_SIZE).decode('utf-8')\r\n            print(data)\r\n\r\n            if check_password(password, data) is True:\r\n                online_users.append(name)\r\n                print(online_users)\r\n                online_status = True\r\n\r\n                user_sockets[name] = client_socket\r\n\r\n                wrap_and_send_message(client_socket, \"logged \")\r\n                identifier = db_controller.get_user_id(name)\r\n                friends = db_controller.show_all_friends(name)\r\n                print(friends)\r\n\r\n                client_user = UserClient(name, identifier, password, friends)\r\n\r\n                new_client(client_socket, address, client_user, online_users, online_status)\r\n            else:\r\n                print(\"login failed\")\r\n                wrap_and_send_message(client_socket, \"passfail\")\r\n                begin_authentication(client_socket, address)\r\n\r\n        elif name in online_users:\r\n            print(\"online\")\r\n            wrap_and_send_message(client_socket, \"denied\")\r\n            begin_authentication(client_socket, address)\r\n\r\n        else:\r\n            print(\"error\")\r\n            wrap_and_send_message(client_socket, \"error\")\r\n            begin_authentication(client_socket, address)\r\n\r\n\r\ndef create_user(client_socket, address):\r\n\r\n    while True:\r\n        data = client_socket.recv(BUFFER_SIZE).decode('utf-8')\r\n        name = data.lower()\r\n\r\n        if data == \"closing\":\r\n            print(\"closing\")\r\n            begin_authentication(client_socket, address)\r\n\r\n        name_check = db_controller.check_user(name)\r\n        print(name_check)\r\n\r\n        if name == '/login':\r\n            begin_authentication(client_socket, address)\r\n\r\n        if name_check == \"p\":\r\n            wrap_and_send_message(client_socket, \"pass\")\r\n            break\r\n\r\n        elif name == name_check:\r\n            wrap_and_send_message(client_socket, \"error\")\r\n\r\n    # the chosen name is free: collect and confirm the password outside the retry loop\r\n    password = client_socket.recv(BUFFER_SIZE).decode('utf-8')\r\n    password_two = client_socket.recv(BUFFER_SIZE).decode('utf-8')\r\n\r\n    if password == password_two:\r\n        hashed_password = hash_password(password)\r\n\r\n        max_id = db_controller.return_last_user_id()\r\n        identifier = max_id + 1\r\n\r\n        db_controller.add_user_to_db(name, identifier, hashed_password)\r\n        print(\"accountmade\")\r\n        wrap_and_send_message(client_socket, \"success\")\r\n\r\n    if password != password_two:\r\n        wrap_and_send_message(client_socket, \"fail\")\r\n        create_user(client_socket, address)\r\n\r\n    begin_authentication(client_socket, address)\r\n\r\n\r\ndef delete_user(client_socket, username, address):\r\n\r\n    db_controller.delete_user_from_db(username)\r\n    db_controller.delete_user_from_friend_db(username)\r\n    wrap_and_send_message(client_socket, \"/exit\")\r\n\r\n    begin_authentication(client_socket, address)\r\n\r\n\r\ndef hash_password(password):\r\n    return pbkdf2_sha256.hash(password)\r\n\r\n\r\ndef check_password(hashed_password, input_password):\r\n    return pbkdf2_sha256.verify(input_password, hashed_password)\r\n\r\n\r\ndef direct_message(client_socket, username, sockets, target):\r\n\r\n    data = client_socket.recv(BUFFER_SIZE)\r\n    data_to_send = \"(PM) {0} says: {1}\".format(username, data.decode('utf-8'))\r\n\r\n    try:\r\n        sockets.get(target).send(data_to_send.encode('utf-8'))\r\n    except ConnectionResetError:\r\n        print(\"Client disconnected dm check\")\r\n    except OSError:\r\n        print(\"Client disconnected\")\r\n\r\n\r\ndef show_online_users(client_socket, online_user_list):\r\n\r\n    online_message = '/online '\r\n    for user in online_user_list:\r\n        online_message += user + ', '\r\n\r\n    wrap_and_send_message(client_socket, online_message)\r\n\r\n\r\ndef show_all_users(client_socket):\r\n\r\n    all_users = db_controller.get_all_users()\r\n    user_message = '/users '\r\n\r\n    for user in all_users:\r\n        user_message += user + ', '\r\n\r\n    wrap_and_send_message(client_socket, user_message)\r\n\r\n\r\ndef show_friends(client_socket, username):\r\n\r\n    friend_list = db_controller.show_all_friends(username)\r\n    friend_message = '/friends '\r\n\r\n    for user in friend_list:\r\n        friend_message += user + ', '\r\n\r\n    wrap_and_send_message(client_socket, friend_message)\r\n\r\n\r\nif __name__ == '__main__':\r\n    create_socket()\r\n\r\n" } ]
6
leleobhz/powerline-shell
https://github.com/leleobhz/powerline-shell
1e5c22cee7983705890d302b9024ef2503599bef
d5d365fe057d04107ad2362c8fa681d42095deb6
6973f9d2c79cf89daa9fa779dd60e7747d5cdf04
refs/heads/master
2020-12-29T00:25:29.936623
2016-03-21T19:03:39
2016-03-21T19:03:39
54,412,521
0
0
null
2016-03-21T18:21:58
2016-03-21T18:19:11
2016-01-18T20:57:22
null
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 26.733333587646484, "blob_id": "f36ff6fccc5777aaa681a4aca636d6a6e62d0623", "content_id": "bb23b62cc0f9ec097059c8fd1deaae033bc94cf3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "permissive", "max_line_length": 101, "num_lines": 15, "path": "/segments/netns.py", "repo_name": "leleobhz/powerline-shell", "src_encoding": "UTF-8", "text": "import subprocess\n\ndef add_netns_segment(powerline):\n try:\n env = subprocess.check_output(['ip', 'netns', 'identify'], stderr=subprocess.STDOUT).rstrip()\n except Exception, e: \n env = str(e.output)\n\n if not bool(env.strip()):\n return\n\n env_name = os.path.basename(env)\n bg = Color.VIRTUAL_ENV_BG\n fg = Color.VIRTUAL_ENV_FG\n powerline.append(' NS %s ' % env_name, fg, bg)\n" } ]
1
manmeetb/IGA-DG-Deprecated
https://github.com/manmeetb/IGA-DG-Deprecated
af132a3e60ba5a6fb6d7742d054d3b0690a11d1d
4e5adc95a5720f979cd4bb47ced5d1f4ebf07c45
139bd1428dbebecd48ce1b0fb54455cc0279b490
refs/heads/master
2021-06-21T06:08:54.467603
2017-07-29T19:48:44
2017-07-29T19:48:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 27.16666603088379, "blob_id": "d5834897aa3a13bd57662bb16ac7688c528b53b3", "content_id": "50d4461a0061e02da30293dea1a2c30314e0f909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 172, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/include/initialize_test_case.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#ifndef DG_initialize_test_case_h__INCLUDED\n#define DG_initialize_test_case_h__INCLUDED\n\nvoid initialize_test_case(void);\n\n#endif // DG_initialize_test_case_h__INCLUDED\n\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 15.47826099395752, "blob_id": "837c057d5c2f8c8ca7df89e1cde7f9521cc9b1db", "content_id": "57f5a38999af83684cc5e3a28f98a609bf8fd275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 378, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/include/S_BC.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_S_BC_h__INCLUDED\n#define DG_S_BC_h__INCLUDED\n\n/*\nStruct: S_BC\n\nPurpose:\n\tHolding the boundary condition information for the mesh\n*/\n\nstruct S_BC {\n\n\tint BCType;\n\n\tint *BC_Con, // Matrix with the BC connectivity information (row major form)\n\t\tnBC_Con_row, nBC_Con_col; // number of rows and cols for the matrix\n\n\tstruct S_BC *next;\n\n};\n\n\n#endif // DG_S_BC_h__INCLUDED" }, { "alpha_fraction": 0.5744975805282593, "alphanum_fraction": 0.6063756346702576, "avg_line_length": 16.728395462036133, "blob_id": "18d5bef1ab4c620a4c95cfee6bcdc2867b0e7538", "content_id": "60a9be3414f4b8879c23108d67879f3c1b303237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 72, "num_lines": 81, "path": "/archive/testScripts/compareWHatDPG.py", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\nimport math\n\nDPGFile = \"DPGInput.txt\"\nMatlabFile = \"MatlabInput.txt\"\n\n# For comparing the points\nCONST_FloatComparison_Tolerance = 1E-3\n\n\n\n# Method for reading in the files and loading the point data\ndef processFiles(fileName):\n\n\t# Fill the points into a list with the form:\n\t#\tl = [ [x,y,[wVec]], ... 
]\n\n\tretVec = []\n\twith open(fileName, \"r\") as fp:\n\n\t\twhile(True):\n\t\t\tl1 = fp.readline()\n\t\t\tif l1 == \"\":\n\t\t\t\tbreak\n\t\t\tl2 = fp.readline()\n\n\t\t\t# Get the points:\n\t\t\tl1 = l1.rstrip(\"\\n\").split()\n\t\t\txVal = float(l1[1].rstrip(\",\"))\n\t\t\tyVal = float(l1[3])\n\n\t\t\tl2 = l2.rstrip(\"\\n\").rstrip(\" \")\n\t\t\tl2 = l2.split(\" = \")[1]\n\t\t\tl2 = l2.rstrip(\"]\")\n\t\t\tl2 = l2.lstrip(\"[\")\n\t\t\tl2 = l2.split(\",\")\n\n\t\t\twVec = []\n\t\t\tfor s in l2:\n\t\t\t\twVec.append(float(s))\n\t\t\t\n\t\t\tretVec.append([xVal, yVal, wVec])\n\n\n\treturn retVec\n\n\ndef compareOutputes(l1, l2):\n\t# return a list with the x,y value of the point and the L2 norm of the \n\t# difference\n\n\tretList = []\n\n\tfor a in l1:\n\t\tfor b in l2:\n\t\t\tif (abs(b[0]-a[0])<CONST_FloatComparison_Tolerance and\n\t\t\t\tabs(b[1]-a[1])<CONST_FloatComparison_Tolerance):\n\n\t\t\t\tL2 = 0\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tL2 = L2 + (a[2][i]-b[2][i])**2\n\n\t\t\t\tL2 = math.sqrt(L2)\n\n\t\t\t\tretList.append([a[0], a[1], L2])\n\n\treturn retList\n\n\ndef main():\n\tmatlabPoints = processFiles(MatlabFile)\n\tDPGPoints = processFiles(DPGFile)\n\n\tdiffVec = compareOutputes(matlabPoints, DPGPoints)\n\n\tdiffVec.sort(key=lambda x: x[2])\n\n\tfor l in diffVec:\n\t\tprint l\n\n\nmain()\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 24.33333396911621, "blob_id": "17efa6c28c601144c50644f72b28d1bc5b5ba580", "content_id": "7bd0e8a1da23c189b2a07e13b024a320ad0685d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 156, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/include/setup_operators.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__setup_operators_h__INCLUDED\n#define DG__setup_operators_h__INCLUDED\n\nvoid setup_operators(void);\n\n#endif // DG__setup_operators_h__INCLUDED\n\n\n\n" }, { "alpha_fraction": 0.6361650824546814, "alphanum_fraction": 0.6566935777664185, "avg_line_length": 23.052631378173828, "blob_id": "4b18421ea98d44c6879b934dc07404152b381374", "content_id": "287bafccf5137b3ae58a8f6cac15cb44496f97dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4579, "license_type": "no_license", "max_line_length": 87, "num_lines": 190, "path": "/archive/testScripts/visualizeMeshDPG.py", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\nimport matplotlib.pyplot as plt\n\ninputFile = \"input.txt\"\n\n\n\n# The node points on the computational domain\n\"\"\"\nCONST_PointsMatrix = [[(-1.,1.),(1.,1.)],\\\n\t\t\t\t[(-1.,-1.),(1.,-1.)]]\n\"\"\"\n\nCONST_PointsMatrix = [[(-1.,-1.),(-1.,0.),(-1.,1.)],\\\n\t\t\t\t\t [(0.,-1.),(0.,0.),(0.,1.)],\\\n\t\t\t\t\t [(1.,-1.),(1.,0.),(1.,1.)]]\n\nCONST_Dim = 3\n\n# this function computes the lagrange basis at a node\n# i. It takes in a list of roots and the index value for\n# the root at which the lagrange basis (l_i(x)) is being\n# computed\ndef LagrangeBasis_i(listRoots,index_i,x):\n\t\n\t# Compute the numerator of the basis:\n\n\tproductNumerator = 1.\n\t# go through each of the roots and take the x value and subtract\n\t# from it the value of the root. 
Dont subtract the ith root.\n\tfor i in range(len(listRoots)):\n\t\tif (i != index_i):\n\t\t\tproductNumerator *= (float(x) - listRoots[i])\n\n\n\t# Compute the denominator of the basis\n\tproductDenominator = 1.\n\troot_i = listRoots[index_i]\n\tfor i in range(len(listRoots)):\n\t\tif (i != index_i):\n\t\t\tproductDenominator *= (root_i - listRoots[i])\n\n\n\tsolution = float(productNumerator)/float(productDenominator)\n\treturn solution\n\n\n\n# The method that is used for computing the shape functions. It will\n# then store these functions into a 2D list, where each spot will be the\n# function for that corresponding node.\ndef computeShapeFunctions(ShapeFunctionsMatrix):\n\tfor iNode in range(CONST_Dim):\n\t\tfor jNode in range(CONST_Dim):\n\t\t\t\n\t\t\t# For this point in the nodes matrix, compute the\n\t\t\t# constant xi lagrange polynomial first\n\t\t\t\n\t\t\txiNodes = []\n\t\t\t# change the i while keeping j fixed in computational\n\t\t\t# element\n\t\t\tfor i in range(CONST_Dim):\n\t\t\t\txiNodes.append(CONST_PointsMatrix[i][jNode][0])\n\t\t\t\n\t\t\tetaNodes = []\n\t\t\t# change the j while keeping i fixed in computational\n\t\t\t# element\n\t\t\tfor j in range(CONST_Dim):\n\t\t\t\tetaNodes.append(CONST_PointsMatrix[iNode][j][1])\n\t\t\t\n\t\t\t# Compute the lagrange bases:\n\t\t\tLXi = lambda xi, iNode=iNode: (LagrangeBasis_i(xiNodes, iNode, xi))\n\t\t\tLEta = lambda eta, jNode=jNode: (LagrangeBasis_i(etaNodes, jNode, eta))\n\t\t\t\n\t\t\t# Compute the shape function at the node:\n\t\t\tM = lambda xi,eta, LXi=LXi,LEta = LEta: (LXi(xi)*LEta(eta))\n\t\n\t\t\t# store the shape function at the node location\n\t\t\tShapeFunctionsMatrix[iNode][jNode] = M\n\n\"\"\"\n\nplt.scatter(elemGeoNodesX, elemGeoNodesY, c = 'b')\nplt.scatter(elemSolNodesX, elemSolNodesY, c = 'r')\nplt.grid()\nplt.show(block=True)\n\n\"\"\"\n\ndef main():\n\n\n\twith open(inputFile, 'r') as fp:\n\t\tl = fp.readline()\n\t\tnumElems, numNodes = int(l.split()[0]), int(l.split()[1])\n\n\t\telemGeoNodesX = []\n\t\telemGeoNodesY = []\n\n\t\telemSolNodesX = []\n\t\telemSolNodesY = []\n\n\t\tindividualElemNodes = []\n\t\tfor elem in range(numElems):\n\n\t\t\tindElemNodes = []\n\n\t\t\tfor node in range(numNodes):\n\t\t\t\tl = fp.readline()\n\t\t\t\tx,y = float(l.split()[-2]), float(l.split()[-1])\n\n\t\t\t\tindElemNodes.append((x,y))\n\t\t\t\telemGeoNodesY.append(y)\n\t\t\t\telemGeoNodesX.append(x)\n\n\t\t\tfor node in range(numNodes):\n\t\t\t\tl = fp.readline()\n\t\t\t\tx,y = float(l.split()[-2]), float(l.split()[-1])\n\n\t\t\t\telemSolNodesY.append(y)\n\t\t\t\telemSolNodesX.append(x)\n\n\t\t\tindividualElemNodes.append(indElemNodes)\n\n\t\"\"\"\n\tplt.scatter(elemGeoNodesX, elemGeoNodesY, c = 'b')\n\tplt.scatter(elemSolNodesX, elemSolNodesY, c = 'r')\n\tplt.grid()\n\tplt.show(block=True)\n\t\"\"\"\n\n\t\"\"\"\n\tGL Nodes:\n\tx1 = -0.774597\n\tx2 = 0\n\tx3 = 0.774597\n\t\"\"\"\n\n\t# Check the first element's geometry nodes and get the solution node positions\n\t# First, place the nodes into a 2D array in the correct order\n\n\telem1GeoNodes = []\n\tfor i in range(3):\n\t\trow = []\n\t\tfor j in range(3):\n\t\t\trow.append(None)\n\t\telem1GeoNodes.append(row)\n\n\t# Ordering of the nodes for some reason\n\tjOrder = [0,2,1]\n\tiOrder = [0,2,1]\n\n\tn = 0\n\tfor j in range(3):\n\t\tfor i in range(3):\n\t\t\telem1GeoNodes[iOrder[i]][jOrder[j]] = individualElemNodes[0][n]\n\t\t\tn = n+1\n\t\t\n\n\n\tfor j in range(3):\n\t\tfor i in range(3):\n\t\t\tprint \"i,j = %d, %d -> %f, %f \"%(i,j,elem1GeoNodes[i][j][0], 
elem1GeoNodes[i][j][1])\n\n\n\t# Create the matrix for the Shape functions\n\tShapeFunctionsMatrix = []\n\tfor i in range(CONST_Dim):\n\t\trowArray = []\n\t\tfor j in range(CONST_Dim):\n\t\t\trowArray.append(None)\n\t\tShapeFunctionsMatrix.append(rowArray)\n\t\t\t\n\tcomputeShapeFunctions(ShapeFunctionsMatrix)\n\n\t# Interpolate now the x,y position of the solution nodes\n\txiGLL = -0.774597\n\tetaGLL = -0.0\n\n\txVal = 0\n\tyVal = 0\n\n\tfor i in range(CONST_Dim):\n\t\tfor j in range(CONST_Dim):\n\t\t\txVal = xVal + ShapeFunctionsMatrix[i][j](xiGLL, etaGLL)*elem1GeoNodes[i][j][0]\n\t\t\tyVal = yVal + ShapeFunctionsMatrix[i][j](xiGLL, etaGLL)*elem1GeoNodes[i][j][1]\n\t\t\t\n\tprint \"xGLL, yGLL: %f %f \"%(xVal, yVal)\n\n\nmain()\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6461873650550842, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 17.467741012573242, "blob_id": "7663164ee9e227f857b9d201c5540ac2bb0de81a", "content_id": "22209d21ee3605d222ee637a6bba664d166bf8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2295, "license_type": "no_license", "max_line_length": 39, "num_lines": 124, "path": "/src/memory_free.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"memory_free.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"memory_destructors.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_FACE.h\"\n#include \"S_ELEMENT.h\"\n\nvoid memory_free(void){\n\tint i;\n\n\t// DB Parameters:\n\tfree(DB.NodeType);\n\tfree(DB.MeshFileName);\n\tfree(DB.BasisType);\n\tfree(DB.TestType);\n\n\t// Structures Freed in setup_mesh:\n\t//free(DB.XYZ_G);\n\t//free(DB.XYZ_Ve);\n\t//free(DB.GeoCon);\n\t//free(DB.VeCon);\n\t//free(DB.PCon);\n\n\t// Structs:\n\t// - VOLUME Linked List\n\tstruct S_VOLUME *VOLUME, *VOLUME_NEXT;\n\tVOLUME = DB.VOLUME_HEAD; \n\twhile(1){\n\t\tVOLUME_NEXT = VOLUME->next;\n\n\t\t// Free allocated arrays\n\n\t\t// Geometry: \n\t\tfree(VOLUME->XYZ);\n\t\tfree(VOLUME->XYZ_S);\n\t\tfree(VOLUME->XYZ_P);\n\n\t\t// Metric Terms\n\t\tfree(VOLUME->C_vS);\n\t\tfree(VOLUME->detJV_vS);\n\n\t\t// Solution: \n\t\tfree(VOLUME->What);\n\t\tfree(VOLUME->MInv);\n\t\tfree(VOLUME->RHS_FACE);\n\t\tfree(VOLUME->RHS_VOL);\n\t\tfree(VOLUME->RHS);\n\t\tfree(VOLUME->RES);\n\t\tfree(VOLUME->F_Comm);\n\n\t\t// Other Structs\n\t\tfree(VOLUME->FACE);\n\n\t\t// Last VOLUME case\n\t\tif(VOLUME_NEXT == NULL){\n\t\t\tmemory_destructor_V(VOLUME);\n\t\t\tbreak;\n\t\t}\n\t\t\n\t\tmemory_destructor_V(VOLUME);\n\t\tVOLUME = VOLUME_NEXT;\n\t}\n\n\t// - FACE Linked List\n\tstruct S_FACE *FACE, *FACE_NEXT;\n\tFACE = DB.FACE_HEAD;\n\twhile(1){\n\t\tFACE_NEXT = FACE->next;\n\n\t\t// Free allocated arrays\n\t\tfree(FACE->n_fI);\n\t\tfree(FACE->C_fI);\n\n\t\t// Last FACE case\n\t\tif(FACE_NEXT == NULL){\n\t\t\tmemory_destructor_F(FACE);\n\t\t\tbreak;\n\t\t}\n\t\t\n\t\tmemory_destructor_F(FACE);\n\t\tFACE = FACE_NEXT;\n\t}\n\n\t// - ELEMENT\n\t// Allocated Arrays\n\tfree(DB.ELEMENT->nodes_xi);\n\tfree(DB.ELEMENT->nodes_wi);\n\n\t// Operators\n\n\t// - Chi Operators\n\tfree(DB.ELEMENT->Chi_vS);\n\tfree(DB.ELEMENT->ChiInv_vS);\n\tfree(DB.ELEMENT->Chi_vG);\n\tfree(DB.ELEMENT->Chi_vP);\n\tfree(DB.ELEMENT->Chi_fI);\n\n\t// - Derivative Operators\n\tfree(DB.ELEMENT->GradChi_vS_xi);\n\tfree(DB.ELEMENT->GradChi_vS_eta);\n\tfree(DB.ELEMENT->GradChi_fI_xi);\n\tfree(DB.ELEMENT->GradChi_fI_eta);\n\n\t// - Interpolation 
Operators\n\tfree(DB.ELEMENT->I_vG_vS);\n\tfree(DB.ELEMENT->I_vS_vG);\n\tfree(DB.ELEMENT->I_vG_vP);\n\tfree(DB.ELEMENT->I_vS_fI);\n\n\tfree(DB.ELEMENT->XiEtaZeta_S);\n\tfree(DB.ELEMENT->XiEtaZeta_G);\n\tfree(DB.ELEMENT->XiEtaZeta_P);\n\tfree(DB.ELEMENT->XiEtaZeta_F);\n\n\tfree(DB.ELEMENT->xiVector);\n\tfree(DB.ELEMENT->etaVector);\n\n\tmemory_destructor_E(DB.ELEMENT);\n\t\n}\n\n\n\n" }, { "alpha_fraction": 0.6240260004997253, "alphanum_fraction": 0.633766233921051, "avg_line_length": 22.303030014038086, "blob_id": "1e6f9a4554a49b2065a5353474fd2dab6705fc72", "content_id": "d387d9b12633653423c572518322f88e636cb04d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 95, "num_lines": 66, "path": "/src/initialize_test_case.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"initialize_test_case.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\n#include \"exact_solutions.h\"\n#include \"matrix_functions.h\"\n#include \"S_VOLUME.h\"\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n\n\nvoid initialize_test_case(void){\n\n\t/*\n\tPurpose:\n\t\tSetup the What vector for all volumes with the initial condition\n\t*/\n\n\tstruct S_VOLUME *VOLUME;\n\tint i;\n\tdouble *XYZ_Sx, *XYZ_Sy;\n\tdouble WTemp[4], XYZTemp[2];\n\tdouble *W_Sol, *WHat, *ro, *ro_u, *ro_v, *ro_e;\n\n\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\t// Loop over all the volumes\n\t\tXYZ_Sx = &VOLUME->XYZ_S[0];\n\t\tXYZ_Sy = &VOLUME->XYZ_S[VOLUME->NvnS];\n\n\t\tW_Sol = malloc(VOLUME->NVar*VOLUME->NvnS* sizeof *W_Sol); //free\n\n\t\tro = &W_Sol[0*VOLUME->NvnS];\n\t\tro_u = &W_Sol[1*VOLUME->NvnS];\n\t\tro_v = &W_Sol[2*VOLUME->NvnS];\n\t\tro_e = &W_Sol[3*VOLUME->NvnS];\n\n\t\tfor(i=0; i<VOLUME->NvnS; i++){\n\t\t\tXYZTemp[0] = XYZ_Sx[i];\n\t\t\tXYZTemp[1] = XYZ_Sy[i];\n\n\t\t\t// Set the initial state at the solution node:\n\t\t\tif (strstr(DB.TestType,\"InviscidChannel\")){\n\t\t\t\tuniform_solution_InternalSubsonic(XYZTemp, WTemp);\n\t\t\t} else if (strstr(DB.TestType,\"PeriodicVortex\")){\n\t\t\t\texact_solution_IsentropicVortex(XYZTemp, WTemp);\n\t\t\t} else{\n\t\t\t\tprintf(\"Case Not Implemented \\n\");\n\t\t\t\texit(1);\n\t\t\t}\n\n\t\t\tro[i] = WTemp[0];\n\t\t\tro_u[i] = WTemp[1];\n\t\t\tro_v[i] = WTemp[2];\n\t\t\tro_e[i] = WTemp[3];\n\n\t\t}\n\n\t\t// Use Interpolation operator to find WHat (modal coefficients)\n\t\tmm_CNN(VOLUME->NvnG, VOLUME->NvnS, VOLUME->NVar, DB.ELEMENT->ChiInv_vS, W_Sol, VOLUME->What);\n\n\t\tfree(W_Sol);\n\n\t}\n}\n\n" }, { "alpha_fraction": 0.6523702144622803, "alphanum_fraction": 0.6613995432853699, "avg_line_length": 25, "blob_id": "8c483702f7b0342d797eeaef226e0051a29793e6", "content_id": "71308672404c1e46234baf450ea144f04a9605bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 886, "license_type": "no_license", "max_line_length": 77, "num_lines": 34, "path": "/include/S_FACE.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_S_FACE_h__INCLUDED\n#define DG_S_FACE_h__INCLUDED\n\nstruct S_FACE {\n\t\n\t// Properties:\n\tint P, // The order for the face (P+1 integration points on face)\n\t\tBoundary, // 1 = External Boundary, 0 = Internal Face\n\t\tVeInd[2], // The two vertices nodes that make up this volume\n\t\tfin,\n\t\tfout,\n\t\tBCType;\n\n\t// Geometry:\n\tdouble \t*n_fI; // Normal vectors in column major form at 
integration points.\n\t\t\t\t\t// Is the normal outward vector from the in volume.\n\t\t\t\t\t// nx_1 ny_1\n\t\t\t\t\t// nx_2 ny_2\n\n\t// Metric Terms:\n\tdouble \t*C_fI; // Metric terms at the face integration nodes for this face\n\n\n\t// Structures:\n\tstruct S_VOLUME *VIn, // The pointer to the left volume\n\t\t\t\t\t*VOut; // The pointer to the right volume\n\n\tstruct S_FACE \t*next, // Pointer to the next face in linked list\n\t\t\t\t\t*parent; // Pointer to the previous face in the linked list\n\t\n\t\n};\n\n#endif // DG_S_FACE_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7482993006706238, "alphanum_fraction": 0.7482993006706238, "avg_line_length": 23.16666603088379, "blob_id": "3ea5b33658bca3e95460aac15e51c395765ad8bb", "content_id": "fff21c3e0a0f5da17d74fac5ce77f2193fc9f71c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 147, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/include/setup_geometry.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_setup_geometry_h__INCLUDED\n#define DG_setup_geometry_h__INCLUDED\n\nvoid setup_geometry(void);\n\n#endif // DG_setup_geometry_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 21.375, "blob_id": "74b130cd172d9d98ff4357fd274aa823cda16181", "content_id": "17457f38afd685b4b58ef12c4b15489c1117c6e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 180, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/include/setup_normals.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__setup_normals_h__INCLUDED\n#define DG__setup_normals_h__INCLUDED\n\n#include \"S_FACE.h\"\n\nvoid setup_normals(struct S_FACE *FACE);\n\n#endif //DG__setup_normals_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7027027010917664, "alphanum_fraction": 0.7027027010917664, "avg_line_length": 23, "blob_id": "924fd9862560cda54439a50fbd3a04b8073bbc39", "content_id": "5e8d0b64a5570090bd1ded41309f5169308f7ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 148, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/include/memory_free.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__memory_free_h__INCLUDED\n#define DG__memory_free_h__INCLUDED\n\nextern void memory_free (void);\n\n#endif // DG__memory_free_h__INCLUDED\n\n\n\n" }, { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 22.16666603088379, "blob_id": "548d72a2322f44622ef7e0ad7db2896e1de75528", "content_id": "feb176ca450c21198c02795962478fcae1304f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 140, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/include/finalize_RHS.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_finalize_RHS_h__INCLUDED\n#define DG_finalize_RHS_h__INCLUDED\n\ndouble finalize_RHS(void);\n\n#endif // DG_finalize_RHS_h__INCLUDED\n" }, { "alpha_fraction": 0.5272244215011597, "alphanum_fraction": 0.5471447706222534, "avg_line_length": 20.283018112182617, "blob_id": "1ea005b4aebab4f1d0f04652e426ce4d8545a168", "content_id": "874e80a31150b6f66746c9348555d0e201e3f225", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2259, "license_type": "no_license", "max_line_length": 96, "num_lines": 106, "path": "/src/setup_geometry.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_geometry.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"setup_geom_factors.h\"\n#include \"matrix_functions.h\"\n#include \"setup_normals.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_ELEMENT.h\"\n#include \"S_FACE.h\"\n\n\n\nvoid setup_geometry(void){\n\tprintf(\"Setup Geometry \\n\");\n\n\tstruct S_VOLUME *VOLUME;\n\tstruct S_FACE *FACE;\n\n\tdouble *XYZ_Sx, *XYZ_Sy;\n\tdouble *C_vS_11, *C_vS_12, *C_vS_21, *C_vS_22;\n\tint i;\n\n\t// ----------------------------------------\n\t//\t\tVolume/Plotting Solution Nodes\n\t// ----------------------------------------\n\t// Set up XYZ_S and XYZ_P\n\n\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\n\t\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, VOLUME->d, DB.ELEMENT->Chi_vS, VOLUME->XYZ, VOLUME->XYZ_S);\n\t\tmm_CNN(VOLUME->NvnP, VOLUME->NvnG, VOLUME->d, DB.ELEMENT->Chi_vP, VOLUME->XYZ, VOLUME->XYZ_P);\n\n\t\t// Print solution node location of each volume\n\t\tif(DB.Testing == 1 || DB.Testing == 4){\n\t\t\tXYZ_Sx = &VOLUME->XYZ_S[0];\n\t\t\tXYZ_Sy = &VOLUME->XYZ_S[VOLUME->NvnS];\n\n\t\t\tprintf(\"Volume: %d \\n\" , VOLUME->index);\n\t\t\tfor(i=0; i<VOLUME->NvnS; i++){\n\t\t\t\tprintf(\"Sol (x,y): (%.14f, %.14f) \\n\", XYZ_Sx[i], XYZ_Sy[i]);\n\t\t\t}\n\t\t}\n\n\t\t// Setup Geometric Factors\n\t\tsetup_geom_factors(VOLUME);\n\n\t\tif(DB.Testing == 1){\n\n\t\t\tC_vS_11 = &VOLUME->C_vS[0];\n\t\t\tC_vS_12 = &VOLUME->C_vS[1*VOLUME->NvnS];\n\t\t\tC_vS_21 = &VOLUME->C_vS[2*VOLUME->NvnS];\n\t\t\tC_vS_22 = &VOLUME->C_vS[3*VOLUME->NvnS];\n\n\t\t\tprintf(\"Volume: %d \\n\" , VOLUME->index);\n\t\t\tfor(i=0; i<VOLUME->NvnS; i++){\n\t\t\t\tprintf(\"C : (%f, %f, %f, %f) \\n\", \tC_vS_11[i], C_vS_12[i],\n\t\t\t\t\t\t\t\t\t\t\t\t\tC_vS_21[i], C_vS_22[i]);\n\n\t\t\t\tprintf(\"det J : %f \\n\", VOLUME->detJV_vS[i]);\n\t\t\t}\n\n\t\t}\n\n\t}\n\t\n\t\t// Setup Normals\n\tfor(FACE=DB.FACE_HEAD; FACE; FACE = FACE->next){\n\t\tsetup_normals(FACE);\n\t}\n\n\tif(DB.Testing == 1){\n\t\tprintf(\"Volume Face Normals \\n\");\n\n\t\tint i, j;\n\t\tdouble *nx, *ny;\n\n\t\tstruct S_FACE *FACE;\n\t\tfor(VOLUME=DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\t\tfor(i=0; i<4; i++){\n\t\t\t\tprintf(\"Face Processed : %d \\n\", VOLUME->FIndFound[i]);\n\n\t\t\t\tFACE = VOLUME->FACE[i];\n\n\t\t\t\tnx = &FACE->n_fI[0];\n\t\t\t\tny = &FACE->n_fI[FACE->P+1];\n\n\t\t\t\tprintf(\"face in = %d \\n\", FACE->fin);\n\n\t\t\t\tfor(j=0; j<FACE->P+1; j++){\n\t\t\t\t\tprintf(\"\tnx = %f ny = %f \\n\", nx[j], ny[j]);\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}\n\n\n\n\n\n\n}\n\n\n" }, { "alpha_fraction": 0.7424749135971069, "alphanum_fraction": 0.7424749135971069, "avg_line_length": 32, "blob_id": "86720a9e794b4d1189be9cc10f3c796f04a7565f", "content_id": "323b5e3efa313cf4c3d65f58d1b02767a8c96095", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 299, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/include/memory_constructors.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__memory_constructors_h__INCLUDED\n#define DG__memory_constructors_h__INCLUDED\n\nextern struct S_VOLUME *New_VOLUME (void);\nextern struct S_FACE *New_FACE(void);\nextern struct S_ELEMENT 
*New_ELEMENT(void);\nextern struct S_BC *New_BC(void);\n\n#endif // DG__memory_constructors_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7014925479888916, "alphanum_fraction": 0.7014925479888916, "avg_line_length": 35.09090805053711, "blob_id": "063507d9593fad5373813a5d6c4196bf9bf88303", "content_id": "1a64c876ed4e00f0c12b89038de3b31fb1f0d261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 402, "license_type": "no_license", "max_line_length": 70, "num_lines": 11, "path": "/include/matrix_functions.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__matrix_functions_h__INCLUDED\n#define DG__matrix_functions_h__INCLUDED\n\n\nvoid mm_CNN(int m, int k, int n, double *A, double *B, double *C);\nvoid mm_inv_d(int N, double *matrix);\nvoid mm_inv_d_secondInPlace(int N, double *matrix, double *matrixInv);\ndouble *mm_inv_d_alloc(int N, double *matrix);\nvoid mm_transposeR_d(int m, int n, double *A);\n\n#endif //DG__matrix_functions_h__INCLUDED\n\n\n\n\n" }, { "alpha_fraction": 0.6266666650772095, "alphanum_fraction": 0.6266666650772095, "avg_line_length": 13.609756469726562, "blob_id": "7a89fc7040a16a63753c90a8fc79f1dfc780d40b", "content_id": "d515ffe7cd4c4efc34f6f9604fcdad35999041a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 600, "license_type": "no_license", "max_line_length": 45, "num_lines": 41, "path": "/src/setup_connectivity.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_connectivity.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_DB.h\"\n\n\n/*\n *\tPurpose:\n *\t\tSet up the mesh connectivity matrices. \n *\n *\tComments:\n *\n *\tNotation:\n *\t\tNVe = (N)umber of (Ve)rtices\n *\n *\t\tVeX = (Ve)rtices (X) coordinates\n *\t\tVeY = (Ve)rtices (Y) coordinates\n *\n *\t\tVToVe\t: (V)olume to (Ve)rtex connectivity\n *\t\tVToV\t: (V)olume to (V)olume connectivity\n *\t\tVToF\t: (V)olume to (F)ace connectivity\n *\t\tFToF\t: (F)ace to (F)ace connectivity\n *\n *\n *\tReferences:\n */\n\nvoid setup_connectivity(void){\n\tprintf(\"Setup Connectivity \\n\");\n\n\n\n\n\t// Setup vertices arrays\n\n\n\n\n}\n" }, { "alpha_fraction": 0.624492347240448, "alphanum_fraction": 0.6335520148277283, "avg_line_length": 25.823530197143555, "blob_id": "32902afd4d02b764f735eafbf4c75878f1276843", "content_id": "364f26843d1343302f5be025ada17657ff33a3d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3201, "license_type": "no_license", "max_line_length": 85, "num_lines": 119, "path": "/src/setup_volumes.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_volumes.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"memory_constructors.h\"\n\n\n/*\n *\tPurpose:\n *\t\tHolds the methods for setting up the structs for the Volumes and \n *\t\tFaces. 
\n *\n *\tComments:\n *\n *\tNotation:\n *\n *\tReferences:\n */\n\nvoid setup_volumes(void){\n\tprintf(\"Setup Volumes \\n\");\n\n\t// Variables:\n\tint i, VIndex;\n\tdouble *XVals, *YVals;\n\tstruct S_VOLUME *VOLUME, *PrevVOLUME;\n\n\t// First create the linked list of NV volume structs\n\tVOLUME = New_VOLUME();\n\tDB.VOLUME_HEAD = VOLUME;\n\n\tPrevVOLUME = DB.VOLUME_HEAD;\n\n\t// Create and link the next NV-1 volumes\n\tfor(i=0; i<DB.NV-1; i++){\n\t\tVOLUME = New_VOLUME();\n\t\tVOLUME->parent = PrevVOLUME;\n\t\tPrevVOLUME->next = VOLUME;\n\t\tPrevVOLUME = VOLUME;\n\t}\n\n\t// Fill the mesh information for each volume and initialize any \n\t// arrays\n\tVIndex=0;\n\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\tVOLUME->index = VIndex;\n\n\t\t// Vertices Connectivity Information (ccw)\n\t\t// Fill the indices of the vertices for each volume\n\t\tfor(i=0; i<4; i++){\n\t\t\tVOLUME->VeInd[i] = DB.VeCon[VIndex*4+i];\n\t\t}\n\n\t\t// Properties of each Volume\n\t\tVOLUME->P = DB.P;\n\t\tVOLUME->d = DB.d;\n\t\tVOLUME->NVar = 4;\n\t\tVOLUME->update = 1; // Volume just created so its ops must be updated\n\n\t\tVOLUME->NvnG = (DB.P+1)*(DB.P+1);\n\t\tVOLUME->NvnS = VOLUME->NvnG;\n\t\tVOLUME->NvnP = VOLUME->NvnG;\n\t\tVOLUME->NfnI = 4*(VOLUME->P+1);\n\n\t\t// Geometry Structures:\n\t\tVOLUME->XYZ = malloc(DB.d*VOLUME->NvnG * sizeof *VOLUME->XYZ);\n\t\tVOLUME->XYZ_S = malloc(DB.d*VOLUME->NvnS * sizeof *VOLUME->XYZ_S);\n\t\tVOLUME->XYZ_P = malloc(DB.d*VOLUME->NvnP * sizeof *VOLUME->XYZ_P);\n\n\t\t// - Setup the geometry node points\n\t\tXVals = &VOLUME->XYZ[0];\n\t\tYVals = &VOLUME->XYZ[VOLUME->NvnG];\n\t\tfor(i=0; i<DB.NGConPerV; i++){\n\t\t\tXVals[i] = DB.XYZ_G[DB.GeoCon[VIndex*DB.NGConPerV + i]];\n\t\t\tYVals[i] = DB.XYZ_G[DB.GeoCon[VIndex*DB.NGConPerV + i] + DB.NGP];\n\t\t}\n\n\t\t// Set initial processed face to be 0 for all faces\n\t\tfor(i=0; i<4; i++){\n\t\t\tVOLUME->FIndFound[i] = 0;\n\t\t}\n\n\t\tVOLUME->FACE = malloc(4*sizeof *VOLUME->FACE);\n\n\t\t// Metric Structures:\n\t\tVOLUME->C_vS = malloc(VOLUME->NvnS*4* sizeof *VOLUME->C_vS);\n\t\tVOLUME->detJV_vS = malloc(VOLUME->NvnS * sizeof *VOLUME->detJV_vS);\n\n\t\t// Solution Structures:\n\t\tVOLUME->What = malloc(VOLUME->NVar*VOLUME->NvnS* sizeof *VOLUME->What);\n\t\tVOLUME->RHS = malloc(VOLUME->NvnG*VOLUME->NVar* sizeof *VOLUME->RHS);\n\t\tVOLUME->RES = malloc(VOLUME->NvnG*VOLUME->NVar* sizeof *VOLUME->RES);\n\t\tVOLUME->RHS_VOL = malloc(VOLUME->NvnG*VOLUME->NVar* sizeof *VOLUME->RHS_VOL);\n\t\tVOLUME->RHS_FACE = malloc(VOLUME->NvnG*VOLUME->NVar* sizeof *VOLUME->RHS_FACE);\n\t\tVOLUME->MInv = malloc(VOLUME->NvnG*VOLUME->NvnG* sizeof *VOLUME->MInv);\n\t\tVOLUME->F_Comm = malloc(VOLUME->NfnI*VOLUME->NVar* sizeof *VOLUME->F_Comm);\n\n\t\tVIndex++;\n\t}\n\n\tif (DB.Testing == 3){\n\t\t// Print the Geometry node points for each volume\n\t\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\t\tprintf(\"\\n New Volume \\n\");\n\t\t\tfor(i=0; i<4; i++){\n\t\t\t\tprintf(\"%d \", VOLUME->VeInd[i]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\n\t\t\tfor(i=0; i<(DB.P+1)*(DB.P+1); i++){\n\t\t\t\tprintf(\"(x,y): (%f, %f) \\n\", VOLUME->XYZ[i], VOLUME->XYZ[(DB.P+1)*(DB.P+1) + i]);\n\t\t\t}\n\t\t}\n\t}\n\n}\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7544910311698914, "alphanum_fraction": 0.7544910311698914, "avg_line_length": 26.66666603088379, "blob_id": "136062cfd209a236249b55753b5bc9a04ecdcbf4", "content_id": "b222769f92161ced9da1828c993a5ee4c84920a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C", "length_bytes": 167, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/include/initialization.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__initialization_h__INCLUDED\n#define DG__initialization_h__INCLUDED\n\nvoid initialization(int nargc, char **argv);\n\n#endif // DG__initialization_h__INCLUDED\n" }, { "alpha_fraction": 0.5350733995437622, "alphanum_fraction": 0.5579119324684143, "avg_line_length": 15.94444465637207, "blob_id": "aaeafeace35825cc9a52985529c11b9fe4eb037f", "content_id": "b1f4177adb71c740ff3b9dc998aea42c1bff1bd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1226, "license_type": "no_license", "max_line_length": 66, "num_lines": 72, "path": "/src/euler_flux.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"euler_flux.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n\nvoid euler_flux_2D(double *W, double *F, double *G){\n\t\n\t/*\n\tPurpose:\n\t\tCompute the Euler Flux vector using the state vector W.\n\t\tStore results for both vectors in F and G\n\t*/\n\n\tdouble ro, u, v, e_tot, P;\n\tdouble GAMMA = 1.4;\n\n\tro = W[0];\n\tu = W[1]/ro;\n\tv = W[2]/ro;\n\te_tot = W[3]/ro;\n\n\tP = (GAMMA-1)*ro*(e_tot - 0.5*(u*u+ v*v));\n\n\t// F Vector:\n\tF[0] = ro*u;\n\tF[1] = ro*u*u + P;\n\tF[2] = ro*u*v;\n\tF[3] = ro*u*e_tot + P*u;\n\n\t// G Vector:\n\tG[0] = ro*v;\n\tG[1] = ro*u*v;\n\tG[2] = ro*v*v + P;\n\tG[3] = ro*v*e_tot + P*v;\n\n}\n\n\nvoid euler_flux_2D_matrix(double *W, double *F, double *G, int N){\n\n\t/*\n\tPurpose:\n\t\tCompute the Euler Flux vector using the state vector W at\n\t\tmultiple points. Will return a matrix for F and G (multiple\n\t\trows for the different W vectors given). 
Note all vectors \n\t\tare in column major form\n\t*/\n\n\t// For storing the values in a row of the matrix\n\tdouble W_row[4], F_row[4], G_row[4];\n\n\tint i,j;\n\n\tfor(i=0; i<N; i++){\n\n\t\t// Fill the W_row vector:\n\t\tfor(j=0; j<4; j++){\n\t\t\tW_row[j] = W[j*N + i];\n\t\t}\n\n\t\teuler_flux_2D(W_row, F_row, G_row);\n\n\t\tfor(j=0; j<4; j++){\n\t\t\tF[j*N+i] = F_row[j];\n\t\t\tG[j*N+i] = G_row[j];\n\t\t}\n\n\t}\n\n}\n\n\n\n\n\n" }, { "alpha_fraction": 0.7158470153808594, "alphanum_fraction": 0.7158470153808594, "avg_line_length": 20.875, "blob_id": "ec04e16f54ca188b830bc53f97e1173f061ed4b7", "content_id": "536a0c7b613cf72d6f84b2e4bb348acd62d68b2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 183, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/include/output_solution.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__output_solution_h__INCLUDED\n#define DG__output_solution_h__INCLUDED\n\n\nvoid output_tecplot(int t);\nvoid outputWSol(void);\n\n#endif //DG__output_solution_h__INCLUDED\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7210884094238281, "alphanum_fraction": 0.7210884094238281, "avg_line_length": 17.25, "blob_id": "abede6560f86011775add727e5b0455bdb29596d", "content_id": "463f7ea5ac8452a0d35300297ce4fd237d0c8af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 147, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/include/setup_volumes.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__setup_volumes_h__INCLUDED\n#define DG__setup_volumes_h__INCLUDED\n\n\nvoid setup_volumes(void);\n\n\n#endif // DG__setup_volumes_h__INCLUDED\n" }, { "alpha_fraction": 0.6861313581466675, "alphanum_fraction": 0.6861313581466675, "avg_line_length": 21.16666603088379, "blob_id": "e902c700085021b1550bfe829d0f9167697fac9e", "content_id": "6e05e0a2b8820010af8bf5f399361bc2fd89234d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 137, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/include/setup_mesh.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#ifndef DG__setup_mesh_h__INCLUDED\n#define DG__setup_mesh_h__INCLUDED\n\nvoid setup_mesh (void);\n\n#endif // DG__setup_mesh_h__INCLUDED\n\n\n" }, { "alpha_fraction": 0.7089632749557495, "alphanum_fraction": 0.7102231979370117, "avg_line_length": 29.67403221130371, "blob_id": "afba476dcbb3edd32cc275e0eee32ad090e68009", "content_id": "b315a89eea8444477fbefbf8fd6dd3565e988cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5556, "license_type": "no_license", "max_line_length": 76, "num_lines": 181, "path": "/src/main.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"setup_mesh.h\"\n#include \"initialization.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"memory_free.h\"\n#include \"setup_operators.h\"\n#include \"setup_geometry.h\"\n#include \"matrix_functions.h\"\n#include \"initialize_test_case.h\"\n#include \"output_solution.h\"\n#include \"solver_explicit.h\"\n#include \"compute_errors.h\"\n\nstruct S_DB DB;\n\n/*\nPurpose:\n\tSolve the 2D and 3D Euler Equations using the Discontinuous \n\tGalerkin method. 
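\n\n\tFor reference (an illustrative recap of euler_flux.c above, not a new\n\tscheme): with conservative variables W = (ro, ro*u, ro*v, ro*e_tot) and\n\tpressure P = (GAMMA-1)*ro*(e_tot - 0.5*(u*u + v*v)), the flux vectors\n\tevaluated at each point are\n\n\t\tF = (ro*u, ro*u*u + P, ro*u*v, u*(ro*e_tot + P))\n\t\tG = (ro*v, ro*u*v, ro*v*v + P, v*(ro*e_tot + P))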
\n*/\n\n\n/*\n---------------------------------------------------------------------\n\nPhilip Code:\n- Get test case working on new branch of Philip's code\n\n- test_integration_Euler\n\t(Done)- Only do the convergence order testing.\n\t\t- Note: When the value is one in the ctrl file, it means perform that test. Otherwise\n\t\t\tit will be 0.\n\t- Compare certain aspects between the polynomial and NURBS basis to ensure\n\t\teverything has been implemented correctly.\n\t\t- Figure out what should be compared.\n\n- Implement NURBS mesh to be read by Philip's code. \n\t- Create a gmsh file with the same skeleton. Then, if IGA is \n\t\tan option when being run, overwrite the geometry node points\n\t\tfor each element with the geometry node points in a separate\n\t\tmesh.\n\t\t- This way, connectivity does not have to be set since the gmsh file\n\t\t\tdoes this anyway.\n\t\t- The gmsh reader will still work the same, it will just have the \n\t\t\twrong XYZ vector for each volume which will then be overwritten.\n\n- Implement NURBS basis functions into Philip's code\n\t- First, study how the polynomial basis functions are set up.\n\t- Using this information, set up the chi operators for the code.\n\t- Set the relevant interpolation operators. \n\t\t- For instance, interpolating from geometry to any node\n\t\t\tdoesn't make sense since there are no geometry nodes.\n\t\t- Will need to check which interpolation operators are \n\t\t\tbeing used at each step.\n\n- Check how Bezier is implemented in Philip's code\n\t- Modify setup_ELEMENT_operators for the line (tensor product) \n\t\tand these should transfer over to the quad elements.\n\t- Modify the setup_geom_factors to use the gradient of the \n\t\tbasis at the points and not the interpolation of metric terms\n\t- Modify setup_normals \n\n---------------------------------------------------------------------\n\n---------------------------------------------------------------------\n\nCode: \n\n(Done) - Add proper Makefile to the code\n\t- Follow Philip's format\n\n- Add a ctrl file to be read by the code.\n\t- Should have a reference to the mesh file as well as the time step\n\t\tand what type of time stepping to use.\n\t- Have a data file generated with as its prefix the name of the ctrl file.\n\t- Code will now take as command line argument the ctrl file name \n\t- Output all errors the same way as the CPR code now and in the same format\n\t\tso that the orders test can be used on it.\n\n- Perform a polynomial and B Splines convergence study for the coarsest \n\tmesh in the inviscid channel case.\n\n- Adjust orders to work with finding the error for the square undeformed case\n\twithout having to convect the complete distance.\n\t- Perform the orders study for both the B-Spline and Polynomial basis\n\n- Create a mesh generator for B Splines\n\t- Code in Python\n\t- Create the coarsest mesh and then use Bezier extraction\n\t\tto generate the Bezier elements that have the exact same\n\t\tgeometry.\n\t\t- Use numpy for the Bezier extraction operators\n\t\t- Use matplotlib for plotting the final mesh\n\n- Adjust the orders test to now work by comparing the vortex solution at any \n\ttime based on how much the vortex has convected.\n\t- Check the orders on the undeformed mesh with this case.\n\t- All that is needed is to modify how errors are computed for the\n\t\tvortex case.\n\n- Compute metrics at the face integration and geometry nodes.\n\t- That is, no longer interpolate the face metric terms\n\n- Add negative pressure check\n\n- Adjust the mesh generator for the polynomial to take command line\n\targuments for 
generating the mesh in the case of making\n\tit automatically.\n\n\n- Complete explicit time stepping (RK4) \n\t- Make sure results with the DPG code are identical\n\n- Make NURBS Mesh Generator\n\t- Make it possible to visualize the mesh.\n\t- Make the Gaussian bump case.\n\n---------------------------------------------------------------------\n\n- Check:\n\t(Done) - Slip Wall Boundary Condition Algorithm\n\t(Done) - Total TP Boundary Condition Algorithm\n\t(Done) - Back Pressure Boundary Condition Algorithm\n\n(Done) - Load the mesh manually for the coarsest case from Philip's \n\tDPG code. \n\t(Done) - Create a Python script or modify the current one to be able\n\t\tto output the exact same mesh.\n\t(Done) - Load the new mesh into the code and check its convergence.\n\n- Compare properties of the solution between the DPG and DG code. Compare:\n\t(Done) - Volume metric terms at integration points\n\t- Inverse mass matrix\n\t- RHS_VOLUME contribution\n\t- RHS_FACE contribution\n\n\n- Study Riemann Invariants and BC setting\n- Derive the Euler Equations properly\n- Compute characteristics of the Euler Equations in 2D\n\t- Compute Riemann invariant formulas\n\t\n*/\n\nint main(int nargc, char **argv){\n\n\tprintf(\"Initialization \\n\");\n\tinitialization(nargc, argv);\n\n\tprintf(\"Setup mesh \\n\");\n\tsetup_mesh();\n\n\tprintf(\"Setup operators \\n\");\n\tsetup_operators();\n\n\tprintf(\"Setup geometry \\n\");\n\tsetup_geometry();\n\n\tprintf(\"Initialize test case \\n\");\n\tinitialize_test_case();\n\n\t// Output the initial solution (function should take in\n\t// maybe a string)\n\toutput_tecplot(0);\n\n\tprintf(\"Start Solver \\n\");\n\tsolver_explicit();\n\n\tprintf(\"Compute Errors \\n\");\n\t// compute_errors_global();\n\n\toutput_tecplot(DB.numTimeSteps);\n\n\tmemory_free();\n\n\treturn 0;\n\n}\n\n\n\n" }, { "alpha_fraction": 0.7397590279579163, "alphanum_fraction": 0.7397590279579163, "avg_line_length": 29.714284896850586, "blob_id": "4db4a94093721631b8e88f02f6984f7183f08a48", "content_id": "d3c33cefcba232235a909d807d5283ddaefbe074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 415, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/include/memory_destructors.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__memory_destructors_h__INCLUDED\n#define DG__memory_destructors_h__INCLUDED\n\n#include \"S_VOLUME.h\"\n#include \"S_FACE.h\"\n#include \"S_ELEMENT.h\"\n#include \"S_BC.h\"\n\nvoid memory_destructor_V (struct S_VOLUME *VOLUME);\nvoid memory_destructor_F (struct S_FACE *FACE);\nvoid memory_destructor_E (struct S_ELEMENT *ELEMENT);\nvoid memory_destructor_BC (struct S_BC *BC);\n\n#endif // DG__memory_destructors_h__INCLUDED" }, { "alpha_fraction": 0.7636363506317139, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 19.75, "blob_id": "6347cd3d985b451b29c64291dd0ce7bd1147f43a", "content_id": "855a1a21ad3010d7ca9b892dd95d7d7234ac3413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 165, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/include/setup_connectivity.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__setup_connectivity_h__INCLUDED\n#define DG__setup_connectivity_h__INCLUDED\n\n\nvoid setup_connectivity(void);\n\n\n#endif // DG__setup_connectivity_h__INCLUDED" }, { "alpha_fraction": 0.6977777481079102, "alphanum_fraction": 
0.7066666483879089, "avg_line_length": 27, "blob_id": "b5482dba0d4ef9208d9f2e63b34ffee06a28a994", "content_id": "cd7b85d310a093fdffa5790220c0bb4031fe06f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 225, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/include/EulerFlux.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__EulerFlux_h__INCLUDED\n#define DG__EulerFlux_h__INCLUDED\n\n\nvoid EulerFlux_2D(double *W, double *F, double *G);\nvoid EulerFlux_2D_Matrix(double *W, double *F, double *G, int N);\n\n#endif //DG__EulerFlux_h__INCLUDED\n\n" }, { "alpha_fraction": 0.5651440024375916, "alphanum_fraction": 0.5710497498512268, "avg_line_length": 24.82866096496582, "blob_id": "3b2d385934eacac13134e3ac325b71a889d5755e", "content_id": "984a2095f0e237aee5dbe8dfd632d7b1e473a3d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8297, "license_type": "no_license", "max_line_length": 89, "num_lines": 321, "path": "/src/setup_mesh.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_mesh.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\n#include \"S_DB.h\"\n#include \"Parameters.h\"\n#include \"setup_volumes.h\"\n#include \"setup_faces.h\"\n#include \"S_BC.h\"\n#include \"memory_constructors.h\"\n#include \"memory_destructors.h\"\n\n/*\n *\tPurpose:\n *\t\tSet up mesh related parameters. This module will read the \n *\t\tmesh file with the geometry node and connectivity data.\n *\n *\tComments:\n *\n *\tNotation:\n *\t\tNV = (N)umber of (V)olumes\n *\n *\t\tXYZ_Ve = XYZ coordinates of (Ve)rtices (for whole mesh)\n *\t\tVeCon = (Ve)rtices (Con)nectivity arrays for each volume\n *\t\tNVe = (N)umber of (Ve)rtices\n *\t\n *\t\tXYZ_G = XYZ coordinates of (Geometry) nodes (for whole mesh)\n *\t\tGeoCon = (Geo)metry Nodes (Con)nectivity arrays for each volume\n *\t\tNGConPerV = (N)umber of (G)eometry (Con)nectivity entries (Per)\n *\t\t\t(V)olume\n *\t\tNGP = (N)umber of (G)eometry node (P)oints\n *\n *\t\tNPF = (N)umber of (P)eriodic (F)aces\n *\t\tPCon = (P)eriodic (Con)nectivity arrays\n *\t\n *\n *\tReferences:\n */\n\nstatic void setup_readMeshFile(){\n\n\t/*\n\tMesh File:\n\t- Node coordinates are all listed one after another (geometry node points)\n\t- Connectivity array gives the value of each geometry node point in the element. The \n\t\tpoints are listed with all j = 0, then j = 1, ... where i=0, j=0 is at the \n\t\tbottom left corner of the reference element which the physical element maps to. \n\t- Periodic boundary conditions list what faces (between which vertices) each element is \n\t\tconnected. 
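\n\t\t(Illustrative, with hypothetical numbers: a periodic row such as \"0 3 1 3\" would\n\t\tpair face 1 of element 0 with face 3 of element 3, following the Vol1_index\n\t\tVol2_index Face1_index Face2_index column order that setup_faces assumes when\n\t\tit reads these rows.)\n\t\t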
Using this, we can find what xi or eta line is touching between elements\n\t\tto know which is the right and which is the left element.\n\t*/\n\n\tprintf(\"Process Mesh File \\n\");\n\n\tchar StringRead[200];\n\tint i, j, NGConPerV;\n\tFILE *file_id;\n\n\tif ((file_id = fopen(DB.MeshFileName,\"r\")) == NULL){\n\t\tprintf(\"Mesh file: %s not present.\\n\",DB.MeshFileName);\n\t\texit(1);\n\t}\n\n\t// ----------------------------------------------------------\n\t//\t\t\t\t\tMesh Properties\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\tsscanf(StringRead, \"%d\", &DB.NGP); // Num Grid Points\n\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\tsscanf(StringRead, \"%d\", &DB.NVe); // Num Vertices\n\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\tsscanf(StringRead, \"%d\", &DB.NV); // Num Quads\n\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\tsscanf(StringRead, \"%d\", &DB.P); // Order P\n\t// ----------------------------------------------------------\n\n\t// ----------------------------------------------------------\n\t//\t\t\t\t\t\tVertices\n\t// Read the vertices:\n\tdouble *XYZ_Ve, *X_Ve, *Y_Ve;\n\n\tXYZ_Ve = malloc(DB.NVe*DB.d* sizeof *XYZ_Ve);\n\tX_Ve = &XYZ_Ve[0];\n\tY_Ve = &XYZ_Ve[DB.NVe];\n\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header\n\tfor(i=0; i<DB.NVe; i++){\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\t\tsscanf(StringRead, \"%lf %lf\", &X_Ve[i], &Y_Ve[i]);\n\t}\n\n\t// Read Vertices Connectivity:\n\t// - Matrix stored in column major form (each column for each element)\n\tint *VeCon;\n\tVeCon = malloc(DB.NV*4* sizeof *VeCon);\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header\n\tfor(i=0; i<DB.NV; i++){\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\n\t\t// read in the information into this quad\n\t\tfor(j=0; j<4; j++){\n\t\t\tsscanf(StringRead, \"%d %[^\\t\\n]\", &VeCon[i*4+j], StringRead);\n\t\t}\n\t}\n\t// ----------------------------------------------------------\n\n\t// ----------------------------------------------------------\n\t//\t\t\t\t\tGeometry Nodes\n\t// - For storing all the xyz geometry node points in column major form.\n\tdouble * XYZ_G, *X_G, *Y_G; \n\tXYZ_G = malloc(DB.NGP*DB.d* sizeof *XYZ_G);\n\tX_G = &XYZ_G[0];\n\tY_G = &XYZ_G[DB.NGP];\n\t\n\t// Read all geometry node points\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfor(i=0; i<DB.NGP; i++){\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\t\tsscanf(StringRead, \"%lf %lf\", &X_G[i], &Y_G[i]);\n\t}\n\n\n\t// Setup temporary array to hold quad connectivity information\n\tint *GeoCon;\n\tNGConPerV = DB.P+1;\n\tfor(i=0; i<DB.d-1; i++){\n\t\tNGConPerV = NGConPerV*(DB.P+1);\n\t}\n\n\t// Matrix of size (NGConPerV x NV).\n\t// Stored in column major form.\n\tGeoCon = \n\t\tmalloc(DB.NV*NGConPerV* sizeof *GeoCon);\n\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfor(i=0; i<DB.NV; i++){\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\n\t\t// read in the information into this quad\n\t\tfor(j=0; j<NGConPerV; j++){\n\t\t\tsscanf(StringRead, \"%d %[^\\t\\n]\", &GeoCon[i*NGConPerV+j], StringRead);\n\t\t}\n\t}\n\t// ----------------------------------------------------------\n\n\t// ----------------------------------------------------------\n\t// \t\t\t\t\tBoundary Conditions\n\n\tint 
numBCTypes, index_BCType, numBCRows, numBCCols;\n\tstruct S_BC *BC, *BC_Next;\n\n\t// Find the number of BCs present in the mesh\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Header Line\n\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Num BC Types\n\tsscanf(StringRead, \"%d\", &numBCTypes);\n\n\tBC = New_BC();\n\tDB.BC_HEAD = BC;\n\n\tfor(i=0; i<numBCTypes-1; i++){\n\t\tBC_Next = New_BC();\n\t\tBC->next = BC_Next;\n\t\tBC = BC_Next;\n\t}\n\n\t// Load all the BC information now\n\tBC = DB.BC_HEAD;\n\tfor(index_BCType=0; index_BCType < numBCTypes; index_BCType++){\n\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // BC Type Line\n\t\t\n\t\t// Find the type of BC this is and set that parameter in the BC struct\n\t\tif(strstr(StringRead, \"Periodic\")){\n\t\t\tBC->BCType = BC_PERIODIC;\n\t\t} else if(strstr(StringRead, \"SlipWall\")){\n\t\t\tBC->BCType = BC_SLIPWALL;\n\t\t} else if(strstr(StringRead, \"TotalTemperaturePressure\")){\n\t\t\tBC->BCType = BC_TOTAL_TP;\n\t\t} else if(strstr(StringRead, \"BackPressure\")){\n\t\t\tBC->BCType = BC_BACKPRESSURE;\n\t\t} else{\n\t\t\tprintf(\"Unsupported BC Type \\n\");\n\t\t\texit(1);\n\t\t}\n\n\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead); // Number of BC Rows\n\t\tsscanf(StringRead, \"%d\", &numBCRows);\n\n\t\t// Number of columns depends on the type of BC. Periodic will have 4 \n\t\t// columns for the BC matrix (n bcs x 4 (elem_1_index, elem_1_face, ...))\n\t\t// All other BCs will have only 2\n\t\tif(BC->BCType == BC_PERIODIC){\n\t\t\tnumBCCols = 4;\n\t\t} else{\n\t\t\tnumBCCols = 2;\n\t\t}\n\n\t\tBC->nBC_Con_row = numBCRows;\n\t\tBC->nBC_Con_col = numBCCols;\n\n\t\tBC->BC_Con = malloc(BC->nBC_Con_row*BC->nBC_Con_col* sizeof *BC->BC_Con);\n\n\t\t// Read all the BC lines into the matrix in row major form\n\t\tfor(i=0; i<numBCRows; i++){\n\t\t\tfscanf(file_id, \"%[^\\n]\\n\", StringRead);\n\t\t\tfor(j=0; j<numBCCols; j++){\n\t\t\t\tsscanf(StringRead, \"%d %[^\\t\\n]\", \n\t\t\t\t\t\t\t\t&(BC->BC_Con[i*BC->nBC_Con_col+j]), \n\t\t\t\t\t\t\t\tStringRead);\n\t\t\t}\n\t\t}\n\n\t\t// Load the next data into the next BC struct\n\t\tBC = BC->next;\n\t}\n\n\n\t// ----------------------------------------------------------\n\n\tDB.XYZ_G = XYZ_G;\n\tDB.XYZ_Ve = XYZ_Ve;\n\n\tDB.NGConPerV = NGConPerV;\n\n\tDB.GeoCon = GeoCon;\n\tDB.VeCon = VeCon;\n\n}\n\nvoid setup_mesh(){\n\n\t// Read the mesh file\n\tsetup_readMeshFile();\n\n\t// Setup the volumes\n\tsetup_volumes();\n\n\t// Setup the faces\n\tsetup_faces();\n\n\n\tint i,j;\n\tstruct S_BC *BC, *BC_NEXT;\n\t\n\tif (DB.Testing == 1 || DB.Testing == 2){\n\t\t// Print all the information\n\t\tprintf(\"Vertices \\n\");\n\t\tfor(i=0; i<DB.NVe; i++){\n\t\t\tprintf(\"(x,y): %f %f \\n\", DB.XYZ_Ve[i], DB.XYZ_Ve[DB.NVe + i]);\n\t\t}\n\n\t\tprintf(\"Vertices Connectivity \\n\");\n\t\tfor(i=0; i<DB.NV; i++){\n\t\t\tprintf(\"Connect: \");\n\t\t\tfor(j=0; j<4; j++){\n\t\t\t\tprintf(\"%d \", DB.VeCon[i*4+j]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"Geometry Nodes \\n\");\n\t\tfor(i=0; i<DB.NGP; i++){\n\t\t\tprintf(\"(x,y): %f %f \\n\", DB.XYZ_G[i], DB.XYZ_G[DB.NGP + i]);\n\t\t}\t\n\n\t\tprintf(\"Geometry Nodes Connectivity \\n\");\n\t\tfor(i=0; i<DB.NV; i++){\n\t\t\tprintf(\"Connect: \");\n\t\t\tfor(j=0; j<DB.NGConPerV; j++){\n\t\t\t\tprintf(\"%d \", DB.GeoCon[i*DB.NGConPerV+j]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\t// BCs:\n\t\tfor(BC=DB.BC_HEAD; BC; BC = BC->next){\n\t\t\tprintf(\"BC Type : %d \\n\", BC->BCType);\n\n\t\t\tfor(i=0; i<BC->nBC_Con_row; i++){\n\t\t\t\tfor(j=0; 
j<BC->nBC_Con_col; j++){\n\t\t\t\t\tprintf(\"%d \", BC->BC_Con[i*BC->nBC_Con_col+j]);\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\");\n\t\t\t}\n\t\t}\n\t}\n\n\n\t// Free temporary memory\n\tfree(DB.XYZ_G);\n\tfree(DB.XYZ_Ve);\n\tfree(DB.GeoCon);\n\tfree(DB.VeCon);\n\n\t// - BC Linked List\n\tBC = DB.BC_HEAD;\n\twhile(1){\n\t\tBC_NEXT = BC->next;\n\n\t\t// Free allocated arrays\n\t\tfree(BC->BC_Con);\n\n\t\t// Last BC case\n\t\tif(BC_NEXT == NULL){\n\t\t\tmemory_destructor_BC(BC);\n\t\t\tbreak;\n\t\t}\n\t\t\n\t\tmemory_destructor_BC(BC);\n\t\tBC = BC_NEXT;\n\t}\n\n}\n\n\n\n\n\n" }, { "alpha_fraction": 0.7028985619544983, "alphanum_fraction": 0.7028985619544983, "avg_line_length": 21.66666603088379, "blob_id": "3355716a5229eb487b660e3ed228ec7a8e21e986", "content_id": "760c3e42865f1f0a35653dd30ba9cfca908affe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 138, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/include/setup_faces.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__setup_faces_h__INCLUDED\n#define DG__setup_faces_h__INCLUDED\n\nvoid setup_faces (void);\n\n#endif // DG__setup_mesh_h__INCLUDED\n\n" }, { "alpha_fraction": 0.6125587224960327, "alphanum_fraction": 0.6193934082984924, "avg_line_length": 20.657407760620117, "blob_id": "4b2e35aee196ffab1daf9e62b6281ff3fb1b4c14", "content_id": "8337ac4a422411db67b582f5cf490d41b733e112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2341, "license_type": "no_license", "max_line_length": 81, "num_lines": 108, "path": "/src/update_VOLUMEs.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"update_VOLUMEs.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_VOLUME.h\"\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n#include \"matrix_functions.h\"\n\nvoid compute_inverse_mass(struct S_VOLUME *VOLUME){\n\n\t/*\n\tPurpose:\n\t\tCompute the inverse Mass matrix of the volume.\n\t*/\n\n\tint i, j, k, iBasisSolPt, jBasisSolPt, iSolPt, jSolPt, nSolPt;\n\tdouble *nodes_wi, *wBasis_i, *wBasis_j;\n\tdouble intVal; \n\n\t// Mass and mass inverse matrix stored in column major form.\n\t// ordering of modal coefficients that it multiplies is all\n\t// eta = -1, ...\n\tdouble *M;\n\t\n\tnodes_wi = DB.ELEMENT->nodes_wi;\n\n\twBasis_i = malloc(VOLUME->NvnS* sizeof *wBasis_i); // free\n\twBasis_j = malloc(VOLUME->NvnS* sizeof *wBasis_j); // free\n\n\tM = VOLUME->MInv; // keep (after inverse)\n\n\tk = 0;\n\t// Loop order = Fix column and go down all rows\n\tfor(j=0; j<VOLUME->NvnG; j++){\n\t\tfor(i=0; i<VOLUME->NvnG; i++){\n\n\t\t\t// i,j entry involves w_i and w_j\n\n\t\t\t// Fill the w_i and w_j values at the solution nodes. 
Need\n\t\t\t// To fill the ith and jth column of V matrix\n\t\t\t// w_i: \n\t\t\tfor(iBasisSolPt=0; iBasisSolPt < VOLUME->NvnS; iBasisSolPt++){\n\t\t\t\twBasis_i[iBasisSolPt] = DB.ELEMENT->Chi_vS[i*DB.ELEMENT->NvnS + iBasisSolPt];\n\t\t\t}\n\t\t\t// w_j: \n\t\t\tfor(jBasisSolPt=0; jBasisSolPt < VOLUME->NvnS; jBasisSolPt++){\n\t\t\t\twBasis_j[jBasisSolPt] = DB.ELEMENT->Chi_vS[j*DB.ELEMENT->NvnS + jBasisSolPt];\n\t\t\t}\n\n\n\t\t\t// Compute Integral using Gaussian Quadrature\n\t\t\tintVal = 0;\n\t\t\tnSolPt = 0;\n\t\t\tfor(iSolPt=0; iSolPt<VOLUME->P+1; iSolPt++){\n\t\t\t\tfor(jSolPt=0; jSolPt<VOLUME->P+1; jSolPt++){\n\t\t\t\t\tintVal = intVal + wBasis_i[nSolPt]*wBasis_j[nSolPt]*VOLUME->detJV_vS[nSolPt]\n\t\t\t\t\t\t*nodes_wi[iSolPt]*nodes_wi[jSolPt];\n\t\t\t\t\tnSolPt++;\n\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tM[j*VOLUME->NvnG + i] = intVal;\n\t\t\tk++;\n\t\t}\n\t}\n\n\tmm_inv_d(VOLUME->NvnG, M);\n\n\tfree(wBasis_i);\n\tfree(wBasis_j);\n\n}\n\n\nvoid update_VOLUME_Ops(void){\n\t/*\n\tPurpose:\n\t\tUpdate the operators needed for each volume\n\t*/\n\n\tstruct S_VOLUME *VOLUME;\n\n\tint i,j;\n\n\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next) {\n\t\tif (VOLUME->update) {\n\t\t\tVOLUME->update = 0;\n\t\t\tcompute_inverse_mass(VOLUME);\n\n\t\t\tif (DB.Testing == 1){\n\n\t\t\t\tprintf(\"Inverse Mass Matrix : \\n\");\n\n\t\t\t\tfor(i=0; i<VOLUME->NvnG; i++){\n\t\t\t\t\tfor(j=0; j<VOLUME->NvnG; j++){\n\t\t\t\t\t\tprintf(\" %f \", VOLUME->MInv[j*VOLUME->NvnG + i]);\n\t\t\t\t\t}\n\t\t\t\t\tprintf(\"\\n\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.6238410472869873, "alphanum_fraction": 0.629801332950592, "avg_line_length": 27.742856979370117, "blob_id": "483b1d1a88a60620b1c1896ce0fa3b1263a4d794", "content_id": "1bbaa1a7879072379729c4e34d916b2d7ae626bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3020, "license_type": "no_license", "max_line_length": 93, "num_lines": 105, "path": "/src/explicit_VOLUME_info.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"explicit_VOLUME_info.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_ELEMENT.h\"\n#include \"matrix_functions.h\"\n#include \"euler_flux.h\"\n\n\nvoid explicit_VOLUME_info(void){\n\n\tstruct S_VOLUME *VOLUME;\n\n\tint iSolPt, jSolPt, nSolPt;\n\tint iBasis, jBasis, nBasis;\n\n\tdouble intVal;\n\tdouble *W_Sol; // Solution at each solution node as a matrix\n\tdouble *nodes_wi;\n\n\t// The flux vectors (each component). Will be a matrix in column \n\t// major form with F or G for each of the NVar equations at a solution \n\t// point (row in matrix) in the different columns.\n\tdouble *Flux_F, *Flux_G, Flux_F_Eq, Flux_G_Eq; \n\n\tdouble *RHS_VOL, *C_vS_y_eta, *min_C_vS_y_xi, *min_C_vS_x_eta, *C_vS_x_xi;\n\tdouble *gradChi_xi, *gradChi_eta;\n\n\tint iVar;\n\n\tW_Sol = malloc(DB.VOLUME_HEAD->NvnS*DB.VOLUME_HEAD->NVar* sizeof *W_Sol); // free\n\tFlux_F = malloc(DB.VOLUME_HEAD->NvnS*DB.VOLUME_HEAD->NVar* sizeof *Flux_F); // free\n\tFlux_G = malloc(DB.VOLUME_HEAD->NvnS*DB.VOLUME_HEAD->NVar* sizeof *Flux_G); // free\n\n\tnodes_wi = DB.ELEMENT->nodes_wi;\n\n\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\n\t\tRHS_VOL = VOLUME->RHS_VOL;\n\n\t\t// Get W_Sol (approximate solution at solution nodes)\n\t\t// Ordering is always all bottom row (j=1) of nodes in the \n\t\t// volume and in positive j direction. 
This is a matrix (NVar cols)\n\n\t\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, VOLUME->NVar, \n\t\t\tDB.ELEMENT->Chi_vS, VOLUME->What, W_Sol);\n\n\t\t// Compute Flux vector at each solution node using solution\n\t\teuler_flux_2D_matrix(W_Sol, Flux_F, Flux_G, VOLUME->NvnS);\n\n\t\t// metric terms\n\t\tC_vS_y_eta = &VOLUME->C_vS[0]; \n\t\tmin_C_vS_y_xi = &VOLUME->C_vS[VOLUME->NvnS]; \n\t\tmin_C_vS_x_eta = &VOLUME->C_vS[2*VOLUME->NvnS]; \n\t\tC_vS_x_xi = &VOLUME->C_vS[3*VOLUME->NvnS]; \n\n\t\t// Build the volume contribution vector using\n\t\t// Gaussian Quadrature\n\t\tfor(iVar=0; iVar < VOLUME->NVar; iVar++){\n\t\t\tnBasis = 0;\n\t\t\tfor(jBasis=0; jBasis<VOLUME->P+1; jBasis++){\n\t\t\t\tfor(iBasis=0; iBasis<VOLUME->P+1; iBasis++){\n\n\t\t\t\t\t// Gradient of the i,j th basis function at all the solution \n\t\t\t\t\t// nodes\n\t\t\t\t\tgradChi_xi = &DB.ELEMENT->GradChi_vS_xi[nBasis*VOLUME->NvnS];\n\t\t\t\t\tgradChi_eta = &DB.ELEMENT->GradChi_vS_eta[nBasis*VOLUME->NvnS];\n\n\t\t\t\t\tintVal = 0;\n\t\t\t\t\tnSolPt = 0;\n\t\t\t\t\tfor(jSolPt=0; jSolPt<VOLUME->P+1; jSolPt++){\n\t\t\t\t\t\tfor(iSolPt=0; iSolPt<VOLUME->P+1; iSolPt++){\n\t\t\t\t\t\t\t// Loop over solution points all j=1 (first row) ...\n\n\t\t\t\t\t\t\t// Find flux at this solution node\n\t\t\t\t\t\t\tFlux_F_Eq = Flux_F[iVar*VOLUME->NvnS + nSolPt];\n\t\t\t\t\t\t\tFlux_G_Eq = Flux_G[iVar*VOLUME->NvnS + nSolPt];\n\n\t\t\t\t\t\t\tintVal = intVal\t+ \n\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t(C_vS_y_eta[nSolPt]*Flux_F_Eq + min_C_vS_x_eta[nSolPt]*Flux_G_Eq)*gradChi_xi[nSolPt]\n\t\t\t\t\t\t\t+ \t(min_C_vS_y_xi[nSolPt]*Flux_F_Eq + C_vS_x_xi[nSolPt]*Flux_G_Eq)*gradChi_eta[nSolPt]\n\t\t\t\t\t\t\t\t)*nodes_wi[iSolPt]*nodes_wi[jSolPt];\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tnSolPt++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tRHS_VOL[iVar*VOLUME->NvnG + nBasis] = intVal;\n\n\t\t\t\t\tnBasis++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tfree(W_Sol);\n\tfree(Flux_F);\n\tfree(Flux_G);\n\n}\n\n" }, { "alpha_fraction": 0.5515029430389404, "alphanum_fraction": 0.5712488889694214, "avg_line_length": 24.559486389160156, "blob_id": "f4ed3515cfc3ed244f74fc2ffaf24e17bf9e5b71", "content_id": "d6eafdc9f0b3c7452983314dd07aa10e61b229d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7951, "license_type": "no_license", "max_line_length": 90, "num_lines": 311, "path": "/src/setup_faces.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_faces.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"memory_constructors.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_FACE.h\"\n#include \"S_BC.h\"\n#include \"Parameters.h\"\n\n/*\n *\tPurpose:\n *\t\tSet up the faces between the volumes on the mesh. 
This \n *\t\twill therefore create the S_FACE structures which \n *\t\twill hold references to the volumes that compose each given \n *\t\tface.\n *\n *\tComments:\n *\n *\tNotation:\n *\t\n *\n *\tReferences:\n */\n\nvoid setup_faces(void){\n\n\tprintf(\"Setup Faces \\n\");\n\n\tstruct S_VOLUME *VOLUME, *VIn, *VOut;\n\tstruct S_FACE *FACE, *PrevFACE; \n\tstruct S_BC *BC, *BCLoop;\n\n\tint ni0, ni1, ni2, ni3, fi_index, // Nodes for each face information\n\t\tno0, no1, no2, no3, fo_index,\n\t\tfi[4][2], fo[4][2],\t\n\t\ti, j, check, \n\t\tperLIndex, perRIndex, // (Per)iodic (L)eft/(R)ight Vol Index\n\t\tperLF, perRF, //(Per)iodic (L)eft/(R)ight (F)ace\n\t\tnPer1, nPer2; // node vertices for periodic faces\n\n\tint ExternalBCIndex, ExternalBCF;\n\n\t/*\n\tVolume - Volume inner face connections:\n\n\tLoop through all the inner volumes. For each volume,\n\tloop through all other volumes. If another volume is found with \n\ttwo nodes in common, then we have found a face. There will be a \n\tconvention for the face ordering on each element based on the node\n\tordering:\n\n\t\t f2\n\t\tn3\t n2\n\t\t*----*\n\t f3\t| | f1\n\t\t*----*\n\t\tn0\t n1\n\t\t f0\n\t\n\tThat is, given the node ordering, if the two nodes found to form a face\n\tfor instance are the first two nodes in the list of node vertices for the\n\telement, this will correspond to face 0 for that element. For example, an\n\tedge joining n1 and n2 is face 1 (f1), while an edge joining n3 and n0 is\n\tface 3 (f3). The face index\n\tis important because the mapping of the node vertices to the mapped element\n\ton the computational domain will always be the same (n0 => xi = -1, eta = -1\n\tand n2 => xi = 1, eta = 1). \n\n\tHold all S_FACE structs in a linked list.\n\n\tNOTE: All volumes will have a counter-clockwise node ordering in the mesh.\n\t\tSo, the integration node ordering of the inner and outer volumes will need to be flipped (one of them will)\n\t\tduring computations.\n\t*/\n\n\t// Search through the BC structs to find if there are any periodic BCs\n\tBC = NULL;\n\tfor(BCLoop = DB.BC_HEAD; BCLoop; BCLoop = BCLoop->next){\n\t\tif(BCLoop->BCType == BC_PERIODIC){\n\t\t\tBC = BCLoop;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tDB.FACE_HEAD = NULL;\n\n\tfor(VIn = DB.VOLUME_HEAD; VIn; VIn = VIn->next){\n\t\t\n\t\t// Indices of the vertex nodes for VIn\n\t\tni0 = VIn->VeInd[0];\n\t\tni1 = VIn->VeInd[1];\n\t\tni2 = VIn->VeInd[2];\n\t\tni3 = VIn->VeInd[3];\n\n\t\t// Faces for VIn (using the index convention provided above)\n\t\tfi[0][0] = ni0; fi[0][1] = ni1;\n \tfi[1][0] = ni1; fi[1][1] = ni2;\n \tfi[2][0] = ni2; fi[2][1] = ni3;\n \tfi[3][0] = ni3; fi[3][1] = ni0;\n\n\t\tfor(VOut = DB.VOLUME_HEAD; VOut; VOut = VOut->next){\n\t\t\t\n\t\t\t// Indices of the vertex nodes for VOut\n\t\t\tno0 = VOut->VeInd[0];\n\t\t\tno1 = VOut->VeInd[1];\n\t\t\tno2 = VOut->VeInd[2];\n\t\t\tno3 = VOut->VeInd[3];\t\n\n\t\t\t// Faces for VOut\n\t\t\tfo[0][0] = no0; fo[0][1] = no1;\n\t \tfo[1][0] = no1; fo[1][1] = no2;\n\t \tfo[2][0] = no2; fo[2][1] = no3;\n\t \tfo[3][0] = no3; fo[3][1] = no0;\n\n\t\t\tif (VIn != VOut){\n\t\t\t\t// Have two non-common volumes\n\n\t\t\t\tif(BC != NULL){\n\t\t\t\t\t// \t\t\tPeriodic Faces\n\t\t\t\t\t// Consider the periodic faces here. 
If a periodic face\n\t\t\t\t\t// does exist between these elements, then set the \n\t\t\t\t\t// VOut volume to have the same nodes as the VIn volume for\n\t\t\t\t\t// the corresponding face in order for a match to be found.\n\t\t\t\t\t// Recall: Periodic data given as\n\t\t\t\t\t// Vol1_index Vol2_index Face1_index Face2_index\n\n\t\t\t\t\tfor(i=0; i<BC->nBC_Con_row; i++){\n\t\t\t\t\t\tperLIndex = BC->BC_Con[BC->nBC_Con_col*i];\n\t\t\t\t\t\tperRIndex = BC->BC_Con[BC->nBC_Con_col*i+1];\n\t\t\t\t\t\tperLF = BC->BC_Con[BC->nBC_Con_col*i+2];\n\t\t\t\t\t\tperRF = BC->BC_Con[BC->nBC_Con_col*i+3];\n\n\t\t\t\t\t\tif(VIn->index == perLIndex && VOut->index == perRIndex){\n\t\t\t\t\t\t\t// Have two periodic elements\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t// VIn nodes for periodic face\n\t\t\t\t\t\t\tnPer1 = fi[perLF][0];\n\t\t\t\t\t\t\tnPer2 = fi[perLF][1];\n\n\t\t\t\t\t\t\t// Set VOut face nodes to be same as VIn due to periodicity\n\t\t\t\t\t\t\tfo[perRF][0] = nPer1;\n\t\t\t\t\t\t\tfo[perRF][1] = nPer2;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif(VIn->index == perRIndex && VOut->index == perLIndex){\n\t\t\t\t\t\t\t// Have two periodic elements\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t// VIn nodes for periodic face\n\t\t\t\t\t\t\tnPer1 = fi[perRF][0];\n\t\t\t\t\t\t\tnPer2 = fi[perRF][1];\n\n\t\t\t\t\t\t\t// Set VOut face nodes to be same as VIn due to periodicity\n\t\t\t\t\t\t\tfo[perLF][0] = nPer1;\n\t\t\t\t\t\t\tfo[perLF][1] = nPer2;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Loop through the faces to find the common inner faces\n\t\t\t\tfor(fi_index=0; fi_index<4; fi_index++){\n\t\t\t\t\tfor(fo_index=0; fo_index<4; fo_index++){\n\t\t\t\t\t\t\n\t\t\t\t\t\t// Flag that will become 1 if a face has been found\n\t\t\t\t\t\tcheck = 0;\n\n\t\t\t\t\t\tif(((fi[fi_index][0] == fo[fo_index][0]) && (fi[fi_index][1] == fo[fo_index][1])) ||\n\t\t\t\t\t\t ((fi[fi_index][1] == fo[fo_index][0]) && (fi[fi_index][0] == fo[fo_index][1]))){\n\t\t\t\t\t\t\t// Found a matching face\n\t\t\t\t\t\t\tcheck = 1;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif(check==1){\n\t\t\t\t\t\t\t// Have a matching face\n\n\t\t\t\t\t\t\tif(VIn->FIndFound[fi_index]==0 && VOut->FIndFound[fo_index]==0){\n\t\t\t\t\t\t\t\t// If this face on the volume has not been processed yet\n\n\t\t\t\t\t\t\t\tVIn->FIndFound[fi_index] = 1;\n\t\t\t\t\t\t\t\tVOut->FIndFound[fo_index] = 1;\n\n\t\t\t\t\t\t\t\tFACE = New_FACE();\n\n\t\t\t\t\t\t\t\tif(DB.FACE_HEAD == NULL){\n\n\t\t\t\t\t\t\t\t\t// This is the first face being processed\n\t\t\t\t\t\t\t\t\tFACE->parent = NULL;\n\t\t\t\t\t\t\t\t\tDB.FACE_HEAD = FACE;\n\t\t\t\t\t\t\t\t\tPrevFACE = DB.FACE_HEAD;\n\n\t\t\t\t\t\t\t\t} else{\n\n\t\t\t\t\t\t\t\t\tFACE->parent = PrevFACE;\n\t\t\t\t\t\t\t\t\tPrevFACE->next = FACE;\n\t\t\t\t\t\t\t\t\tPrevFACE = FACE;\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tFACE->fin = fi_index;\n\t\t\t\t\t\t\t\tFACE->fout = fo_index;\n\n\t\t\t\t\t\t\t\tFACE->VIn = VIn;\n\t\t\t\t\t\t\t\tFACE->VOut = VOut;\n\n\t\t\t\t\t\t\t\t//Store face references\n\t\t\t\t\t\t\t\tVIn->FACE[fi_index] = FACE;\n\t\t\t\t\t\t\t\tVOut->FACE[fo_index] = FACE;\n\n\t\t\t\t\t\t\t\t// Face Properties:\n\t\t\t\t\t\t\t\tFACE->P = VIn->P;\n\t\t\t\t\t\t\t\tFACE->Boundary = 0;\n\t\t\t\t\t\t\t\tFACE->BCType = BC_INTERNAL;\n\n\t\t\t\t\t\t\t\t// Create any array structures needed by the face\n\t\t\t\t\t\t\t\tFACE->n_fI = malloc((FACE->P+1)*DB.d* sizeof *FACE->n_fI);\n\t\t\t\t\t\t\t\tFACE->C_fI = malloc((FACE->P+1)*4* sizeof *FACE->C_fI); \n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t} // End If Check\n\n\t\t\t\t\t} // End for fo_index\n\t\t\t\t} // End for 
fi_index\n\n\t\t\t} // End If VIn != VOut\n\t\t} // End for VOut\n\t} // End for VIn\n\n\n\tif(PrevFACE == NULL || FACE == NULL){\n\t\tprintf(\"Need at least one internal face \\n\");\n\t\texit(1);\n\t}\n\n\t// Set external boundary faces\n\tfor(BC = DB.BC_HEAD; BC; BC = BC->next){\n\t\tif(BC->BCType != BC_PERIODIC){\n\t\t\t// Build faces for all other non periodic faces\n\t\t\t\n\t\t\tfor(i=0; i<BC->nBC_Con_row; i++){\n\t\t\t\tExternalBCIndex = BC->BC_Con[BC->nBC_Con_col*i];\n\t\t\t\tExternalBCF = BC->BC_Con[BC->nBC_Con_col*i + 1];\n\n\t\t\t\t// Find the volume and create this face\n\t\t\t\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\t\t\t\tif(VOLUME->index == ExternalBCIndex){\n\n\t\t\t\t\t\t// Create the Face\n\t\t\t\t\t\tFACE = New_FACE();\n\t\t\t\t\t\tFACE->parent = PrevFACE;\n\t\t\t\t\t\tPrevFACE->next = FACE;\n\t\t\t\t\t\tPrevFACE = FACE;\n\n\t\t\t\t\t\tFACE->fin = ExternalBCF;\n\t\t\t\t\t\tFACE->fout = -1;\n\n\t\t\t\t\t\tFACE->VIn = VOLUME;\n\t\t\t\t\t\tFACE->VOut = NULL;\n\n\t\t\t\t\t\t//Store face references\n\t\t\t\t\t\tVOLUME->FACE[ExternalBCF] = FACE;\n\t\t\t\t\t\tVOLUME->FIndFound[ExternalBCF] = 1;\n\n\t\t\t\t\t\t// Face Properties:\n\t\t\t\t\t\tFACE->P = VOLUME->P;\n\t\t\t\t\t\tFACE->Boundary = 1;\n\t\t\t\t\t\tFACE->BCType = BC->BCType;\n\n\t\t\t\t\t\t// Create any array structures needed by the face\n\t\t\t\t\t\tFACE->n_fI = malloc((FACE->P+1)*DB.d* sizeof *FACE->n_fI);\n\t\t\t\t\t\tFACE->C_fI = malloc((FACE->P+1)*4* sizeof *FACE->C_fI); \n\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\n\tif(DB.Testing == 1 || DB.Testing == 2){\n\t\ti=0;\n\t\tfor(FACE = DB.FACE_HEAD; FACE; FACE = FACE->next){\n\t\t\tprintf(\"FACE: %d \\n\", i);\n\n\t\t\tprintf(\"\tfi: %d fo: %d \\n\", FACE->fin, FACE->fout);\n\n\t\t\tprintf(\"\tBoundary : %d \\n\", FACE->Boundary);\n\t\t\tprintf(\"\tBCType : %d \\n\", FACE->BCType);\n\n\t\t\tif(!FACE->Boundary){\n\t\t\t\tprintf(\"\tVIn: \");\n\t\t\t\tfor(j=0; j<4; j++){\n\t\t\t\t\tprintf(\" %d \", FACE->VIn->VeInd[j]);\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\");\n\t\t\t\tprintf(\"\tVOut: \");\n\t\t\t\tfor(j=0; j<4; j++){\n\t\t\t\t\tprintf(\" %d \", FACE->VOut->VeInd[j]);\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\");\n\t\t\t}\n\n\t\t\ti++;\n\t\t}\n\t}\n\n\n}\n\n" }, { "alpha_fraction": 0.746666669845581, "alphanum_fraction": 0.746666669845581, "avg_line_length": 23.77777862548828, "blob_id": "91e1ae294d7873add94f3c3cbc846be36de2d6d5", "content_id": "98ab9cfe3a17b23f0fb3f7fc7a3b408e6489d39d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 225, "license_type": "no_license", "max_line_length": 51, "num_lines": 9, "path": "/include/update_VOLUMEs.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_update_VOLUMEs_h__INCLUDED\n#define DG_update_VOLUMEs_h__INCLUDED\n\n#include \"S_VOLUME.h\"\n\nvoid compute_inverse_mass(struct S_VOLUME *VOLUME);\nvoid update_VOLUME_Ops(void);\n\n#endif // DG_update_VOLUMEs_h__INCLUDED\n\n\n" }, { "alpha_fraction": 0.6826087236404419, "alphanum_fraction": 0.6913043260574341, "avg_line_length": 27.625, "blob_id": "a95261d3427677b792e1f01aa22c93bccb95d67b", "content_id": "c11294d199a0872778a81d2f02117cef860ceb0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 230, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/include/euler_flux.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef 
DG__euler_flux_h__INCLUDED\n#define DG__euler_flux_h__INCLUDED\n\n\nvoid euler_flux_2D(double *W, double *F, double *G);\nvoid euler_flux_2D_matrix(double *W, double *F, double *G, int N);\n\n#endif //DG__euler_flux_h__INCLUDED\n\n" }, { "alpha_fraction": 0.6486803293228149, "alphanum_fraction": 0.6563050150871277, "avg_line_length": 28.34482765197754, "blob_id": "6ff9279dbbbbd5e593858f273d1b2273373d41d1", "content_id": "1cd384713aa389912510841c3c3a9e4fb2957ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1705, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/include/S_VOLUME.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#ifndef DG__S_VOLUME_h__INCLUDED\n#define DG__S_VOLUME_h__INCLUDED\n\nstruct S_VOLUME {\n\n\t// Properties:\n\tint index, P, d, update; // The index of this volume in the mesh (from \n\t\t\t\t// connectivity file) starting from 0\n\n\t// Geometry Information:\n\tint VeInd[4], // (Ve)rtices (Ind)eces in mesh vertices node list\n\t\tFIndFound[4]; // (F)aces (I)eces Found. Will be 1 if that face has been found\n\t\t\t\t\t// before and 0 otherwise\n\n\tint NvnG, // (N)umber of (v)olume (n)odes for (g)eometry\n\t\tNvnP, // (N)umber of (v)olume (n)odes for (p)lotting\n\t\tNvnS, // (N)umber of (v)olume (n)odes for (s)olution\n\t\tNfnI; \n\n\tdouble\t*XYZ, // Geometry Node Points matrix in column major form \n\t\t\t*XYZ_S, // Solution Node Points matrix in column major form\n\t\t\t*XYZ_P; // Plotting Node Points matrix in column major form\n\n\t// structs:\n\t//\t- Linked List of volumes for the mesh\n\tstruct S_VOLUME *next, // Pointer to next volume in the L List\n\t\t\t\t\t*parent; // Pointer to parent in L List\n\n\tstruct S_FACE **FACE;\n\n\n\t//\tMetric Terms:\n\t/*\n\tC = y_eta -y_xi\n\t\t-x_eta\tx_xi\n\n\tJ = x_xi * y_eta - y_xi * x_eta\n\n\t*/\n\tdouble \t*detJV_vS, // Jacobian at solution nodes as a vector\n\t\t\t*C_vS; // metric terms at solution nodes at a matrix. At\n\t\t\t\t\t// ith solution node (row of matrix) ordering \n\t\t\t\t\t// of column elements is [C11, C12, C21, C22]\n\n\t// Solution Data Structures:\n\tint \tNVar;\n\tdouble \t*What; \t// Modal coefficients. 
Ordering is in same ordering in which\n\t\t\t\t\t// solution points are arranged in XYZ_vS.\n\n\t// Solving: \n\t// \t- Data structures used during the explicit solving.\n\tdouble \t*RHS, *RES, *RHS_VOL, *RHS_FACE, *MInv,\n\t\t\t*F_Comm; // matrix of F_comm values at all the integration nodes\n\t\n};\n\n\n#endif // DG__S_VOLUME_h__INCLUDED\n\n" }, { "alpha_fraction": 0.5074385404586792, "alphanum_fraction": 0.5253881216049194, "avg_line_length": 22.957365036010742, "blob_id": "ff6012420edbd9f8fbda533b9cf3355fa7b04464", "content_id": "0f85ab547f1b80cb372909dfa4f761f5e9eaf88a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6184, "license_type": "no_license", "max_line_length": 136, "num_lines": 258, "path": "/src/solver_explicit.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"solver_explicit.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"update_VOLUMEs.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n\n#include \"explicit_VOLUME_info.h\"\n#include \"explicit_FACE_info.h\"\n#include \"finalize_RHS.h\"\n#include \"output_solution.h\"\n\n\nvoid solver_explicit(void){\n\n\t/*\n\tPurpose:\n\t\tSolve the flow using the explicit solver.\n\t*/\n\n\tprintf(\"Start Solver \\n\");\n\n\t// First, setup the mass matrix for each volume\n\tupdate_VOLUME_Ops();\n\n\tint time_step_type = 1; // 0 = euler, 1 = rk\n\n\tint t, i, rk;\n\tdouble *RHS_VOL, *RHS_FACE, maxRHS;\n\tint nBasis, iVar, iEq;\n\tstruct S_VOLUME *VOLUME;\n\n\tdouble *RES, *RHS, *What;\n\tint NvnS, iMax;\n\tint Neq = 4;\n\n\tdouble dt = DB.dt;\n\n\tint output_solutionT = 2;\n\t// Temporary for testing purposes\n\tif (DB.numTimeSteps != 1){\n\t\toutput_solutionT = (double)(DB.numTimeSteps)/3.;\n\t}\n\n\n\t// Loop through all time steps (for now do only one step)\n\tfor(t=0; t<DB.numTimeSteps; t++){\n\n\t\tif (time_step_type == 0){\n\n\t\t\t// Compute the RHS:\n\t\t\t//\t- Volume contribution\n\t\t\tprintf(\"T = %d \", t);\n\t\t\tprintf(\"V\"); explicit_VOLUME_info();\n\t\t\t//\t- Face contribution\n\t\t\tprintf(\"F\"); explicit_FACE_info();\n\t\t\t// \t- Combining both contributions\n\t\t\tprintf(\"F \"); maxRHS = finalize_RHS();\n\n\t\t\t// Perform the time stepping\n\t\t\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\t\t\t\t// Euler Explicit:\n\t\t\t\tfor(i = 0; i < VOLUME->NvnG; i++){\n\t\t\t\t\tfor(iEq = 0; iEq < 4; iEq ++){\n\t\t\t\t\t\tVOLUME->What[iEq*(VOLUME->NvnG) + i] = \n\t\t\t\t\t\t\tVOLUME->What[iEq*(VOLUME->NvnG) + i] + dt*VOLUME->RHS[iEq*(VOLUME->NvnG) + i];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (DB.Testing == 1){\n\t\t\t\t\tif (VOLUME->index == 1){\n\t\t\t\t\tRHS_VOL = VOLUME->RHS_VOL;\n\n\t\t\t\t\tprintf(\"VOLUME : %d \\n \", VOLUME->index);\n\t\t\t\t\tprintf(\"\tRHS VOLUME: \\n\");\n\t\t\t\t\tfor(nBasis=0; nBasis<VOLUME->NvnG; nBasis++){\n\t\t\t\t\t\tfor(iVar = 0; iVar < VOLUME->NVar; iVar++){\n\t\t\t\t\t\t\tprintf(\" %.15f \", RHS_VOL[iVar*VOLUME->NvnG + nBasis]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\t\t\t\t\t}\n\n\t\t\t\t\tRHS_FACE = VOLUME->RHS_FACE;\n\t\t\t\t\tprintf(\"\tRHS FACE: \\n\");\n\t\t\t\t\tfor(nBasis=0; nBasis<VOLUME->NvnG; nBasis++){\n\t\t\t\t\t\tfor(iVar = 0; iVar < VOLUME->NVar; iVar++){\n\t\t\t\t\t\t\tprintf(\" %.15f \", RHS_FACE[iVar*VOLUME->NvnG + nBasis]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\t\t\t\n\n\t\t\tprintf(\" maxRHS (no MInv): %.3e \", maxRHS);\n\n\t\t\tprintf(\"\\n\");\n\n\t\t\t// Output 2 solutions 
in the middle of the run\n\t\t\tif(t%output_solutionT == 0){\n\t\t\t\toutput_tecplot(t);\n\t\t\t}\n\n\t\t\tif(maxRHS < DB.exit_tol){\n\t\t\t\tprintf(\"maxRHS Below %.4e. Exiting\\n\", DB.exit_tol);\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t}\n\n\n\n\t\tif (time_step_type == 1){\n\n\t\t\tprintf(\"T = %d \", t);\n\t\t\t\n\t\t\tfor (rk = 0; rk < 3; rk++) {\n\t\t\t\t// Build the RHS (== -Residual)\n\t\t\t\tprintf(\"V\"); explicit_VOLUME_info();\n\t\t\t\tprintf(\"F\"); explicit_FACE_info();\n\t\t\t\tprintf(\"F \"); maxRHS = finalize_RHS();\n\n\t\t\t\t// Update What\n\t\t\t\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next) {\n\t\t\t\t\tNvnS = VOLUME->NvnS;\n\n\t\t\t\t\tRES = VOLUME->RES;\n\t\t\t\t\tRHS = VOLUME->RHS;\n\t\t\t\t\tWhat = VOLUME->What;\n\n\t\t\t\t\tif (rk == 0) {\n\t\t\t\t\t\tfor (iMax = Neq*NvnS; iMax--; ) {\n\t\t\t\t\t\t\t*RES++ = *What;\n\t\t\t\t\t\t\t*What++ += dt*(*RHS++);\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (rk == 1) {\n\t\t\t\t\t\tfor (iMax = Neq*NvnS; iMax--; ) {\n\t\t\t\t\t\t\t*What = 0.25*(3.0*(*RES++) + *What + dt*(*RHS++));\n\t\t\t\t\t\t\tWhat++;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (rk == 2) {\n\t\t\t\t\t\tfor (iMax = Neq*NvnS; iMax--; ) {\n\t\t\t\t\t\t\t*What = (1.0/3.0)*(*RES++ + 2.0*(*What) + 2.0*dt*(*RHS++));\n\t\t\t\t\t\t\tWhat++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tprintf(\" maxRHS (no MInv): %.3e \", maxRHS);\n\n\t\t\tprintf(\"\\n\");\n\n\t\t\tif(maxRHS < DB.exit_tol){\n\t\t\t\tprintf(\"maxRHS Below %.4e. Exiting\\n\", DB.exit_tol);\n\t\t\t\tbreak;\n\t\t\t}\n\n\n\t\t}\n\n\t\tif (DB.Testing){\n\t\t\tprintf(\"\\n \");\n\t\t\tprintf(\"====================================================================== \\n\");\n\t\t\tprintf(\"\t\t\t\t\tStart Test \\n\");\n\n\n\t\t\tstruct S_VOLUME *VOLUME_TEST;\n\n\t\t\tint numC, iTest, jTest;\n\t\t\tdouble *C11, *C12, *C21, *C22;\n\n\t\t\tfor(VOLUME_TEST = DB.VOLUME_HEAD; VOLUME_TEST; VOLUME_TEST = VOLUME_TEST->next){\n\t\t\t\t\n\t\t\t\tprintf(\"VOLUME : \\n\");\n\t\t\t\t// Print the volume grid point locations:\n\t\t\t\tfor(iTest = 0; iTest < VOLUME_TEST->NvnG; iTest++){\n\t\t\t\t\tprintf(\"\t(x,y)_g : (%.15f, %.15f) \\n\", VOLUME_TEST->XYZ[iTest], VOLUME_TEST->XYZ[VOLUME_TEST->NvnG +iTest]);\n\t\t\t\t}\n\n\t\t\t\t// Print the metric terms\n\t\t\t\tnumC = (VOLUME_TEST->P+1)*(VOLUME_TEST->P+1);\n\n\t\t\t\tC11 = &VOLUME_TEST->C_vS[0];\n\t\t\t\tC21 = &VOLUME_TEST->C_vS[2*numC];\n\t\t\t\tC12 = &VOLUME_TEST->C_vS[1*numC];\n\t\t\t\tC22 = &VOLUME_TEST->C_vS[3*numC];\n\n\t\t\t\tif(1){\n\t\t\t\t\tprintf(\"- Metric Terms \\n\");\n\t\t\t\t\tfor(iTest=0; iTest<numC; iTest++){\n\t\t\t\t\t\tprintf(\"\tC : (%.15f, %.15f, %.15f, %.15f) \\n\", \n\t\t\t\t\t\t\tC11[iTest], C21[iTest], C12[iTest], C22[iTest]);\n\t\t\t\t\t\tprintf(\"\tdet J : %.15f \\n\", VOLUME_TEST->detJV_vS[iTest]);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif(1){\n\t\t\t\t\tprintf(\"\tRHS_VOL: \\n\");\n\t\t\t\t\tfor(iTest = 0; iTest<VOLUME_TEST->NvnG; iTest++){\n\t\t\t\t\t\t// Loop through the rows\n\t\t\t\t\t\tfor(jTest=0; jTest<VOLUME_TEST->NVar; jTest++){\n\t\t\t\t\t\t\t// Loop through the columns\n\t\t\t\t\t\t\tprintf(\"%.15f \", VOLUME_TEST->RHS_VOL[jTest*VOLUME_TEST->NvnG + iTest]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\n\t\t\t\t\t}\n\n\t\t\t\t\tprintf(\"\tRHS_FACE: \\n\");\n\t\t\t\t\tfor(iTest = 0; iTest<VOLUME_TEST->NvnG; iTest++){\n\t\t\t\t\t\t// Loop through the rows\n\t\t\t\t\t\tfor(jTest=0; jTest<VOLUME_TEST->NVar; jTest++){\n\t\t\t\t\t\t\t// Loop through the columns\n\t\t\t\t\t\t\tprintf(\"%.15f \", 
VOLUME_TEST->RHS_FACE[jTest*VOLUME_TEST->NvnG + iTest]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\n\t\t\t\t\t}\n\n\t\t\t\t\tprintf(\"\tRHS_VOL - RHS_FACE: \\n\");\n\t\t\t\t\tfor(iTest = 0; iTest<VOLUME_TEST->NvnG; iTest++){\n\t\t\t\t\t\t// Loop through the rows\n\t\t\t\t\t\tfor(jTest=0; jTest<VOLUME_TEST->NVar; jTest++){\n\t\t\t\t\t\t\t// Loop through the columns\n\t\t\t\t\t\t\tprintf(\"%.15f \", VOLUME_TEST->RHS_VOL[jTest*VOLUME_TEST->NvnG + iTest] - VOLUME_TEST->RHS_FACE[jTest*VOLUME_TEST->NvnG + iTest]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\n\t\t\t\t\t}\n\n\t\t\t\t\tprintf(\"\tRHS: \\n\");\n\t\t\t\t\tfor(iTest = 0; iTest<VOLUME_TEST->NvnG; iTest++){\n\t\t\t\t\t\t// Loop through the rows\n\t\t\t\t\t\tfor(jTest=0; jTest<VOLUME_TEST->NVar; jTest++){\n\t\t\t\t\t\t\t// Loop through the columns\n\t\t\t\t\t\t\tprintf(\"%.15f \", VOLUME_TEST->RHS[jTest*VOLUME_TEST->NvnG + iTest]);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tprintf(\"\\n\");\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tprintf(\"\t\t\t\t\tEnd Test \\n\");\n\t\t\tprintf(\"====================================================================== \\n\");\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t}\n\n\n}\n\n\n" }, { "alpha_fraction": 0.7431694269180298, "alphanum_fraction": 0.7431694269180298, "avg_line_length": 29.16666603088379, "blob_id": "5becd1a9427b275f7ac087ebc4e83ac0c22bb98d", "content_id": "ef930cb38296244a7ee2556000bbf7bd0831cea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 183, "license_type": "no_license", "max_line_length": 80, "num_lines": 6, "path": "/include/cubature.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_cubature_h__INCLUDED\n#define DG_cubature_h__INCLUDED\n\nvoid cubature_literature(int P, char *NodeType, double *Nodes, double *Weights);\n\n#endif // DG_cubature_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 22.428571701049805, "blob_id": "ef8685843ba419026344d7a686efd6522a91403b", "content_id": "9829956252df1be637ab2421e2349b25bce7cb44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 164, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/include/explicit_FACE_info.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__explicit_FACE_info_h__INCLUDED\n#define DG__explicit_FACE_info_h__INCLUDED\n\n\nvoid explicit_FACE_info(void);\n\n#endif //DG__explicit_FACE_info_h__INCLUDED\n" }, { "alpha_fraction": 0.7514451146125793, "alphanum_fraction": 0.7514451146125793, "avg_line_length": 23.571428298950195, "blob_id": "be341ad473c1370f535439dd4e7da9a205c1ce5c", "content_id": "3b2e36bf11e88ea97aaeca4dafa092fcbb454c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 173, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/include/explicit_VOLUME_info.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__explicit_VOLUME_info_h__INCLUDED\n#define DG__explicit_VOLUME_info_h__INCLUDED\n\n\nvoid explicit_VOLUME_info(void);\n\n#endif //DG__explicit_VOLUME_info_h__INCLUDED\n\n" }, { "alpha_fraction": 0.7450980544090271, "alphanum_fraction": 0.7450980544090271, "avg_line_length": 24.375, "blob_id": "259eb5680ad65acbad67a28abef1b71b7d935f94", "content_id": "29b6c259656a1dadc5c9fd11c30372dab6fc3764", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 204, "license_type": "no_license", "max_line_length": 49, "num_lines": 8, "path": "/include/setup_geom_factors.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_setup_geom_factors_h__INCLUDED\n#define DG_setup_geom_factors_h__INCLUDED\n\n#include \"S_VOLUME.h\"\n\nvoid setup_geom_factors(struct S_VOLUME *VOLUME);\n\n#endif // DG_setup_geom_factors_h__INCLUDED\n\n" }, { "alpha_fraction": 0.5838061571121216, "alphanum_fraction": 0.6148988604545593, "avg_line_length": 34.44545364379883, "blob_id": "ae2886a79485afee4d36fc243d0cd1c743876244", "content_id": "3abdc4f0b5ff6f2f02c021dcf5249d9b1df45cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31197, "license_type": "no_license", "max_line_length": 282, "num_lines": 880, "path": "/meshes/MeshGenerator/MeshGeneratorPolynomial_2D.py", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\"\"\"\n Mesh Generator for the IGA-DG code (2D)\n \nInputs:\n - Dimension of the grid (along x and y directions)\n - Order of the solution (P)\n - Number of processors the mesh will be run on\n - The domain of the mesh\n - Type of flow to be solved:\n - Periodic Vortex\n - Inviscid Channel\n\nOutputs:\n - A mesh file and solution point file will be output that\n satisfy the given input parameters.\n - If configured (set CONST_Plot to True), a figure of the mesh will\n be shown using Matplotlib on a seperate window once the grid has been \n generated.\n \nSupported BCs:\n - Periodic\n - SlipWall\n - TotalTemperaturePressure\n - BackPressure\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport math\n \n\n#=================================================================\n\n #INPUT PARAMETERS\n\nCONST_DIMENSION_X = 6 #Number of elements along x coordinate direction\nCONST_DIMENSION_Y = 1 #Number of elements along y coordinate direction\n\n# This gives the order p of the solution approximation.\n# (ex: If p=2, then 3 solution points created along each\n# coordinate direction for the element)\nCONST_P = 1\n\n# Number of processors that will be used with the mesh.\n# The number of processors must be a power of 2.\nCONST_NumProcessors = 4\n\n# The domain of the rectangular (undeformed) mesh\nCONST_xMin = -4.0\nCONST_xMax = 4.0\nCONST_yMin = 0.0\nCONST_yMax = 1.5\n\n# Set to True if the user wants a visual representation of the mesh (needs matplotlib)\nCONST_Plot = True\nCONST_PlotXRange = [-4.2, 4.2]\nCONST_PlotYRange = [0.0, 4]\n\n# If the mesh will be deformed\nCONST_Deform = True\n\n# If a label needs to be added to the mesh's file name\nCONST_Label = \"\"\n\nCONST_CaseType = \"InviscidChannel\" # PeriodicVortex, InviscidChannel\n#=================================================================\n\n\n\n\n\n\n\n#================================================================\n\n #THE DEFORMATION FUNCTION\n\n# The sinusoidal perturbation function that is used to perturb to\n# deform the rectangular mesh. It has as parameters a (which is\n# either x or y) . 
In addition, the parameters n, A and Lo define the\n# number of periods and the curvature of the perturbation.\n\nCONST_n = 4.\nCONST_A = 0.4\nCONST_Lo = 16.\ndef sinPertrubFunction(a):\n return CONST_A*math.sin((CONST_n/CONST_Lo)*math.pi*a)\n\n\nCONST_a = 0.5\nCONST_b = 0.0\nCONST_c = 0.6\ndef gaussianBumpPerturbFunction(x, j, numJ):\n\n \"\"\"\n j = j index (starting with 0 being bottom most node)\n \"\"\"\n\n aVal = CONST_a*(numJ-j-1)/(numJ-1)\n\n return aVal*math.exp(-1.*(((x-CONST_b)**2)/(2.*CONST_c**2)))\n\n\n# This method is called if the grid must be deformed. It will loop over\n# all the solution points and node points of the grid.\ndef perturbGrid(Mesh, MeshNumPointsX, MeshNumPointsY):\n \n for i in range(MeshNumPointsX):\n for j in range(MeshNumPointsY):\n \n xOld = Mesh[i][j][0]\n yOld = Mesh[i][j][1]\n \n \"\"\"\n dx = sinPertrubFunction(yOld)\n dy = sinPertrubFunction(xOld)\n \"\"\"\n\n \"\"\"\n dy = gaussianBumpPerturbFunction(xOld, j, MeshNumPointsY)\n dx = 0.0\n\n xNew = xOld + dx\n yNew = yOld + dy\n \n Mesh[i][j][0] = xNew\n Mesh[i][j][1] = yNew\n \"\"\"\n\n exactPointTuple = gaussianBumpExact(i,j)\n Mesh[i][j][0] = exactPointTuple[0]\n Mesh[i][j][1] = exactPointTuple[1]\n\n# The exact mesh case for the 1x6 mesh for the inviscid channel (P=1)\n\nExactPointsList = [\n [(-1.594143975246173, 0.187500000000000),(-1.062762650165489, 0.187500000000000),(-0.531381325083476, 0.187500000000000),(0.000000000000000, 0.187500000000000),(0.531381325080713, 0.187500000000000),(1.062762650162713, 0.187500000000000),(1.594143975246173, 0.187500000000000)],\n [(-1.594143975246173, 0.000000000000001),(-1.061329516525977, 0.000000047962662),(-0.528527827678252, 0.001902931798748),(0.000000000000000, 0.062500000000000),(0.528527827674931, 0.001902931798832),(1.061329516522372, 0.000000047962662),(1.594143975246173, 0.000000000000001)],\n]\n\ndef gaussianBumpExact(i,j):\n\n \"\"\"\n Set the exact value for the geometry node points for the P1 mesh\n j = 1 is at the highest point, and j = max is at the minimum y \n point. 
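(That is, ExactPointsList stores the top row of nodes in row 0 and the bottom row in row 1, so the input index is flipped through jExact = 1-j below.) 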
However, j input is such that j minimum is at the minimum y point\n """\n\n jExact = 1-j\n\n return ExactPointsList[jExact][i]\n\n#================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n# the thickness of each element along each coordinate direction\nCONST_dx = (CONST_xMax - CONST_xMin)/(CONST_DIMENSION_X)\nCONST_dy = (CONST_yMax - CONST_yMin)/(CONST_DIMENSION_Y)\n\n\n# Note that the 1D GLL points must be placed in ascending sorted order\nCONST_GaussLobattoRootsAndCoefficients = {\n 2: [[-1,1],[0,0]],\n 3: [[-1., 0, 1.], [0.3333333333333333333333, 1.333333333333333333333, 0.3333333333333333333333]],\n 4: [[-1, -math.sqrt(5.)/5., math.sqrt(5.)/5., 1],\n [0.1666666666666666666667, 0.833333333333333333333, 0.833333333333333333333, 0.1666666666666666666667]],\n 5: [[-1., -math.sqrt(21.)/7., 0.0, math.sqrt(21.)/7., 1.0],[0,0,0,0,0]]\n}\n\n\n# Creating the mesh's file name\nCONST_Version = \"V4.2\"\nmeshfileName = str(CONST_DIMENSION_X) + \"x\" + \\\n str(CONST_DIMENSION_Y) + \"_\" + \"P\" + str(CONST_P)+\"_\"\n\nif(CONST_Deform == True):\n meshfileName = meshfileName + \"Deform_\" + str(CONST_CaseType) + \"_\" + CONST_Version \\\n + \".msh\"\nelse:\n meshfileName = meshfileName + \"Rect_\" + str(CONST_CaseType) + \"_\" + CONST_Version \\\n + \".msh\"\n\nCONST_MeshFileName = meshfileName\n\n\n# This is the data structure for holding all the information about an element.\n# It will hold arrays for its solution points and vertices that will be \"pointers\"\n# to a global 2D Mesh array\nclass Element(object):\n\n def __init__(self, Mesh, iMin, jMin):\n # iMin and jMin are the indices for the point in\n # the mesh with the min (x,y) values of the element\n \n # A 2D matrix that holds the vertices of the element. As\n # usual, i=0,j=0 index of this matrix holds the xMin,yMin\n # vertex\n self.Vertices = []\n for i in range(2):\n rowArray = []\n for j in range(2):\n rowArray.append(None)\n self.Vertices.append(rowArray)\n \n for i in range(2):\n for j in range(2):\n # The location of the vertices for the element in\n # 2D mesh array\n iMesh = iMin + i*CONST_P\n jMesh = jMin + j*CONST_P\n \n self.Vertices[i][j] = Mesh[iMesh][jMesh]\n \n #A 2D matrix that holds the solution points for the element.\n # Same as usual, the i=0, j=0 index holds the minX minY dof.\n self.GeometryNodePoints = []\n for i in range(CONST_P+1):\n rowArray = []\n for j in range(CONST_P+1):\n rowArray.append(None)\n self.GeometryNodePoints.append(rowArray)\n \n #fill the dof data\n for i in range(CONST_P+1):\n for j in range(CONST_P+1):\n iMesh = iMin + i\n jMesh = jMin + j\n \n self.GeometryNodePoints[i][j] = Mesh[iMesh][jMesh]\n\n self.createGeometryNodePoints()\n \n # Place the vertices/nodes into a 1D array. 
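(For a quad this gives [bottom-left, bottom-right, top-right, top-left], i.e. counter-clockwise starting from the bottom-left vertex.) 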
This is the order in which\n # the nodes will be placed into the triangle->node array in the CPR code.\n # The ordering of the nodes has been made to be consistent with what was\n # done with the old mesh generators.\n self.NodeArray = []\n\n self.NodeArray.append(self.Vertices[0][0]) # Bottom Left Node\n self.NodeArray.append(self.Vertices[1][0]) # Bottom Right Node\n self.NodeArray.append(self.Vertices[1][1]) # Top Right Node\n self.NodeArray.append(self.Vertices[0][1]) # Top Left Node\n\n # Store all the geometry node points into a 1D array for all j = 0, then j = 1, etc.\n self.GeometryNodeArray = []\n for j in range(CONST_P+1):\n for i in range(CONST_P+1):\n self.GeometryNodeArray.append(self.GeometryNodePoints[i][j])\n \n #For storing the indices of the node (in the connectivity file)\n self.NodeArrayConnectivityFileIndeces = []\n \n\n self.IMEX = 0\n self.Partition = 0 #The partition number for the element\n \n\n # The method that is used for locating the positions of the solution points\n # (or DOFs) for the element based on the locations of the vertices.\n def createGeometryNodePoints(self):\n \n dxi = 2./CONST_P\n deta = 2./CONST_P\n\n #Create a 2D tensor product of the 1D GLL points.\n for i in range(CONST_P+1):\n for j in range(CONST_P+1):\n # The (xi,eta) value of the solution point in Computational Domain\n #xi = CONST_GaussLobattoRootsAndCoefficients[CONST_P+1][0][i]\n #eta = CONST_GaussLobattoRootsAndCoefficients[CONST_P+1][0][j]\n \n xi = i*dxi + -1.\n eta = j*deta + -1.\n\n # The mapped (x,y) value of the point in the Physical Domain\n (x,y) = self.mapParentToPhysicalRectangular(xi,eta)\n \n # Set the (x,y) value of the mapped solution point\n self.GeometryNodePoints[i][j][0] = x\n self.GeometryNodePoints[i][j][1] = y\n\n\n # Given a (xi,eta) value on the computational domain, this method\n # will give the (x,y) value of the point on the physical domain as\n # if the element were rectangular.\n def mapParentToPhysicalRectangular(self,xi,eta):\n \n # The width (dx) and length (dy) of the element (when it is in its\n # original rectangular state)\n deltaX = self.Vertices[1][1][0] - self.Vertices[0][0][0] #xMax - xMin\n deltaY = self.Vertices[1][1][1] - self.Vertices[0][0][1] #yMax - yMin\n \n xFactor = (xi-(-1.))/2.\n yFactor = (eta-(-1.))/2.\n \n #Mapped location of point on physical domain\n x = self.Vertices[0][0][0] + (deltaX)*(xFactor)\n y = self.Vertices[0][0][1] + (deltaY)*(yFactor)\n \n return (x,y)\n\n # Given two nodes, get the face of the element. As input, this\n # method takes the pointers to the Mesh's point/node.\n def getFaceIndex(self,node1,node2):\n \"\"\"\n The convention used in the code for denoting what the index\n of a face should be based on the two nodes (n) it is in between\n \n f=1\n n2 ---- n1\n f=2 | | f=0\n n3 ---- n0\n f=3\n \n \"\"\"\n \n faceIndex = -1\n # search through the node array. 
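(For example, nodes at NodeArray positions 1 and 2 give f = 1, while the wrap-around pair at positions 3 and 0 gives f = 3.) 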
Break when the first match is found\n # with one of the nodes\n index1 = self.NodeArray.index(node1)\n index2 = self.NodeArray.index(node2)\n \n if (index1 <= index2):\n faceIndex = index1\n else:\n faceIndex = index2\n \n # because the nodes loop, add a condition that if the two points\n # are the last and first of the array, then faceIndex shouldn't be 0,\n # it should be the index of the last point of the array\n \n if((index1 == 0 and index2 == (len(self.NodeArray)-1)) or \\\n (index2 == 0 and index1 == (len(self.NodeArray)-1))):\n faceIndex = len(self.NodeArray)-1\n\n return faceIndex\n\n\n#Takes as input the Element matrix and plots all the\n# elements\ndef plotElements(ElementObjects, MeshVerticesArray):\n xVector = []\n yVector = []\n for i in range(CONST_DIMENSION_X):\n for j in range(CONST_DIMENSION_Y):\n elemObject = ElementObjects[i][j]\n \n for iDof in range(CONST_P+1):\n for jDof in range(CONST_P+1):\n #loop through all the solution points for the element\n xVector.append(elemObject.GeometryNodePoints[iDof][jDof][0])\n yVector.append(elemObject.GeometryNodePoints[iDof][jDof][1])\n\n plt.scatter(xVector,yVector,s=10,c='b')\n plt.grid()\n\n # create the lines around all the elements\n for i1 in range(CONST_DIMENSION_X):\n for j1 in range(CONST_DIMENSION_Y):\n xVector = []\n yVector = []\n \n elemObject = ElementObjects[i1][j1]\n \n # bottom edge\n for i in range(CONST_P+1):\n xVector.append(elemObject.GeometryNodePoints[i][0][0])\n yVector.append(elemObject.GeometryNodePoints[i][0][1])\n \n # right edge\n for i in range(CONST_P+1):\n xVector.append(elemObject.GeometryNodePoints[CONST_P][i][0])\n yVector.append(elemObject.GeometryNodePoints[CONST_P][i][1])\n \n # top edge\n for i in range(CONST_P+1):\n xVector.append(elemObject.GeometryNodePoints[CONST_P-i][CONST_P][0])\n yVector.append(elemObject.GeometryNodePoints[CONST_P-i][CONST_P][1])\n \n # left edge\n for i in range(CONST_P+1):\n xVector.append(elemObject.GeometryNodePoints[0][CONST_P-i][0])\n yVector.append(elemObject.GeometryNodePoints[0][CONST_P-i][1])\n \n plt.plot(xVector,yVector)\n\n # Put annotation boxes next to all the vertices points\n deltaX = (CONST_xMax-CONST_xMin)*0.01\n deltaY = (CONST_yMax-CONST_yMin)*0.01\n\n placedLabels = []\n for i in range(CONST_DIMENSION_X):\n for j in range(CONST_DIMENSION_Y):\n elemObject = ElementObjects[i][j]\n\n for node in elemObject.NodeArray:\n if node not in placedLabels:\n placedLabels.append(node)\n indexVal = MeshVerticesArray.index(node)\n textVal = str(indexVal)\n plt.text(node[0]+deltaX, node[1]+deltaY, textVal)\n\n if CONST_PlotXRange is not None and CONST_PlotYRange is not None:\n plt.gca().set_xlim(CONST_PlotXRange)\n plt.gca().set_ylim(CONST_PlotYRange)\n\n plt.show(block=True)\n\n\n# For printing the mesh file that will be read by the DG code. 
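The sections below are written in order: the header counts and P, the vertex coordinates, the\n# vertex connectivity, the geometry node coordinates, the geometry node connectivity, and finally the\n# boundary condition blocks.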
\ndef PrintDGMeshFile(MeshNodesArray, MeshVerticesArray, ElementObjectsList, \n BoundaryConditionsDict):\n \n file = open(CONST_MeshFileName, \"w\")\n \n #Print some information in the header of the file\n file.write(\"Number of grid points: \\n\")\n file.write(str(len(MeshNodesArray)) + \"\\n\")\n\n file.write(\"Number of vertices points: \\n\")\n file.write(str(len(MeshVerticesArray))+ \"\\n\")\n \n file.write(\"Number of QUADS: \\n\")\n file.write(str(len(ElementObjectsList)) + \"\\n\")\n\n file.write(\"P: \\n\")\n file.write(str(CONST_P) + \"\\n\")\n\n file.write(\"Vertices Nodes Coordinates: \\n\")\n for point in MeshVerticesArray:\n file.write(\"%.15e %.15e \\n\" %(point[0], point[1]))\n\n file.write(\"Connectivity Vertices Nodes: \\n\")\n for elementObject in ElementObjectsList:\n connectivityString = \"\"\n for node in elementObject.NodeArray:\n indexValue = MeshVerticesArray.index(node) # Index starts from 0\n connectivityString = connectivityString + \\\n str(indexValue) + \" \"\n\n connectivityString = connectivityString + \"\\n\"\n file.write(connectivityString)\n\n file.write(\"Geometry Nodes Coordinates: \\n\")\n #print all the node coordinates with 15 digits of accuracy:\n for point in MeshNodesArray:\n file.write(\"%.15e %.15e \\n\" %(point[0], point[1]))\n\n file.write(\"Connectivity Geometry Nodes QUAD: \\n\")\n \n # Now, go through all the elements and print the information about the geometry node \n # points. Points will be printed for all j = 0, then j = 1, ... Therefore, to check\n # vertex connectivity, simply access every P+1 point\n for elementObject in ElementObjectsList:\n connectivityString = \"\"\n for node in elementObject.GeometryNodeArray:\n indexValue = MeshNodesArray.index(node) # Index starts from 0\n connectivityString = connectivityString + \\\n str(indexValue) + \" \"\n\n connectivityString = connectivityString + \"\\n\"\n file.write(connectivityString)\n\n\n # Print all the boundary conditions now. The code will need to know\n # what the types of boundary conditions there are in the mesh\n file.write(\"Boundary Conditions : \\n%d \\n\" % len(BoundaryConditionsDict))\n\n for k in BoundaryConditionsDict:\n file.write(\"%s \\n%d \\n\" % (k, len(BoundaryConditionsDict[k])))\n\n for BCString in BoundaryConditionsDict[k]:\n file.write(\"%s \\n\" % BCString)\n\n file.close()\n\n\n#For filling the geometry nodes of all the elements into a 1D array\ndef Fill1DGeometryNodeArray(Mesh, NodeArray1D, MeshNumPointsX, MeshNumPointsY):\n # loop through all the node points and place them into\n # a 1D array. Place the points starting from the bottom left, moving\n # up and then repeat by moving to the right.\n \n\n for j in range(MeshNumPointsY):\n for i in range(MeshNumPointsX):\n NodeArray1D.append(Mesh[i][j])\n\n\n#Given a number of processors, this method is in charge of partitioning\n#the grid\ndef PartitionGrid(ElementObjectsMatrix):\n \n \"\"\"\n \n How the grid is partitioned in the case when the number of processors is 4\n \n 2 3\n \n 0 1\n \n \n \"\"\"\n \n # get the number of divisions needed along each coordinate direction of the grid\n numDivisions = int(CONST_NumProcessors**(1./2.))\n \n # Note that this may be an underestimate if there are an odd number\n # of elements along a coordinate direction. 
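(For example, CONST_NumProcessors = 4 gives numDivisions = 2, so the 6x1 grid above has numElementsX = 3 but numElementsY = 0.) 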
To account for this, the\n # last block along the given direction will then have one more element.\n numElementsX = int(CONST_DIMENSION_X/numDivisions)\n numElementsY = int(CONST_DIMENSION_Y/numDivisions)\n \n \n # ni is the ith block in i direction and nj is the jth block in the j direction\n for ni in range(numDivisions):\n for nj in range(numDivisions):\n \n partitionNumber = ni + numDivisions*nj;\n \n # Calculate the number of elements along the i direction and j\n # direction to set their partition number\n \n iElemMin = ni*numElementsX\n jElemMin = nj*numElementsY\n \n iElemMax = iElemMin + numElementsX\n jElemMax = jElemMin + numElementsY\n \n # If we are on the last block along a coordinate direction, set the\n # partition number of all elements up to the edge of the mesh.\n if(ni == numDivisions-1):\n iElemMax = CONST_DIMENSION_X\n if(nj == numDivisions-1):\n jElemMax = CONST_DIMENSION_Y\n \n \n for i in range(iElemMin, iElemMax):\n for j in range(jElemMin, jElemMax):\n ElementObjectsMatrix[i][j].Partition = partitionNumber\n\n\n\n#This method is for computing the periodic Boundary conditions of the elements.\ndef SetupPeriodicVortexBoundaryConditions(BoundaryConditionsDict, \\\n ElementObjectsMatrix, ElementObjectsList):\n \n \"\"\"\n A sample grid\n 1\n -----\n 2 | | 4\n -----\n 3\n \"\"\"\n \n PeriodicBoundaryConditionsList = []\n\n #compute the periodic connections between the bottom and top faces of mesh\n # (i.e. side 1 and 3).\n for i in range(CONST_DIMENSION_X):\n elementTopRow = ElementObjectsMatrix[i][CONST_DIMENSION_Y-1]\n elementTopRowIndex = ElementObjectsList.index(elementTopRow)\n elementBotRow = ElementObjectsMatrix[i][0]\n elementBotRowIndex = ElementObjectsList.index(elementBotRow)\n \n #Get the grid points that are on the \"open\" side of the element\n \n #get the top node points for elementTopRow\n elementTopRowGP1 =elementTopRow.Vertices[0][1]\n elementTopRowGP2 =elementTopRow.Vertices[1][1]\n \n #get the bottom node points for elementTopRow\n elementBotRowGP1 =elementBotRow.Vertices[0][0] #bottom left grid point\n elementBotRowGP2 =elementBotRow.Vertices[1][0]\n \n #get the face index values for the elements\n elementTopRowFaceIndex = elementTopRow.getFaceIndex(elementTopRowGP1,elementTopRowGP2)\n elementBotRowFaceIndex = elementBotRow.getFaceIndex(elementBotRowGP1, elementBotRowGP2)\n \n #store the data in a tuple\n dataTuple = (elementTopRowIndex, elementBotRowIndex, \\\n elementTopRowFaceIndex, elementBotRowFaceIndex)\n \n PeriodicBoundaryConditionsList.append(dataTuple)\n \n \n #compute the periodic connections between the left and right faces of the mesh\n # (i.e. 
side 2 and 4).\n for j in range(CONST_DIMENSION_Y):\n elementLeftCol = ElementObjectsMatrix[0][j]\n elementLeftColIndex = ElementObjectsList.index(elementLeftCol)\n elementRightCol = ElementObjectsMatrix[CONST_DIMENSION_X-1][j]\n elementRightColIndex = ElementObjectsList.index(elementRightCol)\n \n \n #get the Left node points for elementLeftCol\n elementLeftColGP1 =elementLeftCol.Vertices[0][0]\n elementLeftColGP2 =elementLeftCol.Vertices[0][1]\n \n #get the right node points for elementRightCol\n elementRightColGP1 = elementRightCol.Vertices[1][0]\n elementRightColGP2 = elementRightCol.Vertices[1][1]\n \n #get the face indices for the two elements\n elementLeftColFaceIndex = elementLeftCol.getFaceIndex(elementLeftColGP1, elementLeftColGP2)\n elementRightColFaceIndex = elementRightCol.getFaceIndex(elementRightColGP1, elementRightColGP2)\n \n #store the data in a tuple\n dataTuple = (elementLeftColIndex, elementRightColIndex, elementLeftColFaceIndex, elementRightColFaceIndex)\n \n #print dataTuple\n PeriodicBoundaryConditionsList.append(dataTuple)\n\n # Convert each tuple of BC information into the string and place all strings into a list that \n # the dictionary keyword will point to\n PeriodicBoundaryConditionsStringList = []\n\n for tup in PeriodicBoundaryConditionsList:\n s = \"\"\n for t in tup:\n s = s + str(t) + \" \"\n PeriodicBoundaryConditionsStringList.append(s)\n\n BoundaryConditionsDict[\"Periodic\"] = PeriodicBoundaryConditionsStringList\n\n\n\ndef Fill1DVerticesNodeArray(Mesh, MeshVerticesArray):\n\n for i in range(CONST_DIMENSION_X+1):\n for j in range(CONST_DIMENSION_Y+1):\n iNode = i*(CONST_P)\n jNode = j*(CONST_P)\n\n MeshVerticesArray.append(Mesh[iNode][jNode])\n\n\ndef SetupInviscidChannelBoundaryConditions(BoundaryConditionsDict, ElementObjectsMatrix, \n ElementObjectsList):\n \n \"\"\"\n Setup the Boundary Conditions for the Inviscid Channel case\n Will have the following boundary conditions:\n - SlipWall at the bottom and top of mesh\n - Total Temperature and Pressure at left of mesh\n - Back Pressure at right of mesh\n \"\"\"\n\n SlipWallBC = []\n\n # Top Surface:\n for i in range(CONST_DIMENSION_X):\n elementTopRow = ElementObjectsMatrix[i][CONST_DIMENSION_Y-1]\n elementTopRowIndex = ElementObjectsList.index(elementTopRow)\n \n #Get the grid points that are on the \"open\" side of the element\n \n #get the top node points for elementTopRow\n elementTopRowGP1 =elementTopRow.Vertices[0][1]\n elementTopRowGP2 =elementTopRow.Vertices[1][1]\n \n #get the face index values for the elements\n elementTopRowFaceIndex = elementTopRow.getFaceIndex(elementTopRowGP1,elementTopRowGP2)\n \n # Get the string for the BC information:\n bcString = str(elementTopRowIndex) + \" \" + str(elementTopRowFaceIndex)\n \n SlipWallBC.append(bcString)\n\n # Bottom Surface:\n for i in range(CONST_DIMENSION_X):\n elem = ElementObjectsMatrix[i][0]\n elemIndex = ElementObjectsList.index(elem)\n \n #Get the grid points that are on the \"open\" side of the element\n \n #get the bottom node points for elem\n elemGP1 =elem.Vertices[0][0]\n elemGP2 =elem.Vertices[1][0]\n \n #get the face index values for the elements\n elemFaceIndex = elem.getFaceIndex(elemGP1,elemGP2)\n \n # Get the string for the BC information:\n bcString = str(elemIndex) + \" \" + str(elemFaceIndex)\n \n SlipWallBC.append(bcString)\n\n\n BoundaryConditionsDict[\"SlipWall\"] = SlipWallBC\n\n TotalTemperaturePressureBC = []\n\n # Left Surface:\n for j in range(CONST_DIMENSION_Y):\n elem = ElementObjectsMatrix[0][j]\n elemIndex = 
ElementObjectsList.index(elem)\n \n #Get the grid points that are on the \"open\" side of the element\n \n #get the left node points for elem\n elemGP1 =elem.Vertices[0][0]\n elemGP2 =elem.Vertices[0][1]\n \n #get the face index values for the elements\n elemFaceIndex = elem.getFaceIndex(elemGP1,elemGP2)\n \n # Get the string for the BC information:\n bcString = str(elemIndex) + \" \" + str(elemFaceIndex)\n \n TotalTemperaturePressureBC.append(bcString)\n\n BoundaryConditionsDict[\"TotalTemperaturePressure\"] = TotalTemperaturePressureBC\n\n BackPressureBC = []\n\n # Right Surface:\n for j in range(CONST_DIMENSION_Y):\n elem = ElementObjectsMatrix[CONST_DIMENSION_X-1][j]\n elemIndex = ElementObjectsList.index(elem)\n \n #Get the grid points that are on the \"open\" side of the element\n \n #get the right node points for elem\n elemGP1 =elem.Vertices[1][0]\n elemGP2 =elem.Vertices[1][1]\n \n #get the face index values for the elements\n elemFaceIndex = elem.getFaceIndex(elemGP1,elemGP2)\n \n # Get the string for the BC information:\n bcString = str(elemIndex) + \" \" + str(elemFaceIndex)\n \n BackPressureBC.append(bcString)\n\n BoundaryConditionsDict[\"BackPressure\"] = BackPressureBC\n\n\n\n# The Main method for the script.\n\n# IMPORTANT: This code works by using implicit references to array elements.\n# So, once the Mesh list is created and pointers to values in this list are\n# created in the Element objects, they cannot be reassigned to another point\n# but only changed. So the = operator cannot be used.\ndef main():\n \n # First, create the 2D matrix that will hold all the Mesh points.\n # This will include the vertices as well as the dofs\n \n MeshNumPointsX = CONST_DIMENSION_X*(CONST_P) + 1\n MeshNumPointsY = CONST_DIMENSION_Y*(CONST_P) + 1\n \n # Create the 2D Mesh object and initialize all the elements\n # in it to be [0,0]. 
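(With the inputs above this gives MeshNumPointsX = 7 and MeshNumPointsY = 2, since CONST_P = 1.) 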
The mesh object will have at i=0,j=0 the\n # xMin, YMin point and at i=Max, j=Max the xMax, YMax point.\n Mesh = []\n for i in range(MeshNumPointsX):\n rowArray = []\n for j in range(MeshNumPointsY):\n rowArray.append([0,0])\n Mesh.append(rowArray)\n \n for i in range(CONST_DIMENSION_X+1):\n for j in range(CONST_DIMENSION_Y+1):\n\n VertexXValue = i*CONST_dx + CONST_xMin\n VertexYValue = j*CONST_dy + CONST_yMin\n \n MeshIndexI = i*(CONST_P)\n MeshIndexJ = j*(CONST_P)\n \n Mesh[MeshIndexI][MeshIndexJ][0] = VertexXValue #x value of point\n Mesh[MeshIndexI][MeshIndexJ][1] = VertexYValue #y value of point\n\n\n # Create the element objects\n ElementObjectsMatrix = [] #The 2D array that will hold all the element objects\n \n # The i=0, j=0 index will refer to the element in the bottom\n # left of the grid.\n for i in range(CONST_DIMENSION_X):\n rowArray = []\n for j in range(CONST_DIMENSION_Y):\n rowArray.append(None)\n ElementObjectsMatrix.append(rowArray)\n\n for i in range(CONST_DIMENSION_X):\n for j in range(CONST_DIMENSION_Y):\n # Starting from the bottom left of the grid, create all the\n # element objects\n \n # the index of the min (x,y) value of the vertex for the element in\n # question point on the Mesh element\n iMin = i*(CONST_P)\n jMin = j*(CONST_P)\n \n ElementObjectsMatrix[i][j] = Element(Mesh,iMin,jMin)\n\n #Fill a 1D array with all the geometry node points\n MeshNodesArray = []\n Fill1DGeometryNodeArray(Mesh, MeshNodesArray, MeshNumPointsX, MeshNumPointsY)\n\n # Fill a 1D array with the vertices node points\n MeshVerticesArray = []\n Fill1DVerticesNodeArray(Mesh, MeshVerticesArray)\n\n # Fill a 1D array with all the mesh elements:\n ElementObjectsList = []\n for i in range(CONST_DIMENSION_X):\n for j in range(CONST_DIMENSION_Y):\n # Creating pointers in this 1D list to the element objects. This is\n # the order in which the elements will be printed into the connectivity\n # file.\n ElementObjectsList.append(ElementObjectsMatrix[i][j])\n\n # Dictionary that will hold all the boundary conditions. 
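(For the inviscid channel the keys are \"SlipWall\", \"TotalTemperaturePressure\" and \"BackPressure\"; for the periodic vortex, the single key \"Periodic\".) 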
It will have along\n # with each keyword the list of strings that will be printed for the boundary \n # condition.\n BoundaryConditionsDict = {}\n\n # All that will change for the mesh will depend on what case is being run\n if CONST_CaseType == \"PeriodicVortex\":\n # Set up the periodic BC information for the periodicVortex case\n\n SetupPeriodicVortexBoundaryConditions(BoundaryConditionsDict, \\\n ElementObjectsMatrix, ElementObjectsList)\n\n\n elif CONST_CaseType == \"InviscidChannel\":\n SetupInviscidChannelBoundaryConditions(BoundaryConditionsDict, \\\n ElementObjectsMatrix, ElementObjectsList)\n \n else:\n print \"Case Not Available\"\n exit(0)\n \n # Partition the grid properly:\n PartitionGrid(ElementObjectsMatrix)\n\n # If the mesh needs to be deformed\n if CONST_Deform:\n perturbGrid(Mesh, MeshNumPointsX, MeshNumPointsY)\n\n # Print all the data for the elements into the connectivity file\n PrintDGMeshFile(MeshNodesArray, \n MeshVerticesArray, \n ElementObjectsList, \n BoundaryConditionsDict)\n\n # Plot the Mesh\n if CONST_Plot:\n plotElements(ElementObjectsMatrix, MeshVerticesArray)\n\nmain()\n\n\n\"\"\"\n \n The sign convention used in the arrays is that increasing i means\n moving to the right and increasing j means moving up in the element object\n matrix, solution points matrix, etc ...\n \n ^\n |\n j|\n ----> i\n \n\"\"\"\n\n\n\n\n" }, { "alpha_fraction": 0.7200736403465271, "alphanum_fraction": 0.7274401187896729, "avg_line_length": 33, "blob_id": "b4a5973c2882abcc648225e51f2588cd268d85ca", "content_id": "773961dd22d17b69baf33f900dc451eb828b2870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 543, "license_type": "no_license", "max_line_length": 101, "num_lines": 16, "path": "/include/bases.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_bases_h__INCLUDED\n#define DG_bases_h__INCLUDED\n\ndouble basis_TP_Lagrange_2D(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta);\n\ndouble *basis_TP_Lagrange_2D_Grad(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta);\n\ndouble basis_TP_NURBS_2D(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta);\n\ndouble *basis_TP_NURBS_2D_Grad(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta);\n\n#endif" }, { "alpha_fraction": 0.6519736647605896, "alphanum_fraction": 0.6605263352394104, "avg_line_length": 26.089284896850586, "blob_id": "9815d2bfb93950be9d76ea2673801cafbe28e16d", "content_id": "a6f5cb7a331fead17f9e292b5711f8092dd69b27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 95, "num_lines": 56, "path": "/include/S_ELEMENT.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_S_ELEMENT_h__INCLUDED\n#define DG_S_ELEMENT_h__INCLUDED\n\nstruct S_ELEMENT {\n\t// Structure for Reference Element and holding all\n\t// operators of the reference element\n\n\t/*\n\tNotation:\n\tI_(1)(2)_(3)(4) : (I)nterpolation operator from (1) nodes of type (2) to (4) nodes of type (5)\n \t\t(1/3): (v)olume, (f)ace\n\t\t(2/4): (P)lotting, (G)eometry, (I)ntegration, (S)olution\n\t*/\n\n\t// Cubature:\n\tdouble\t*nodes_xi, // Location of 1D cubature points\n\t\t\t*nodes_wi; // Weights at each node\n\n\t// Properties:\n\tint NvnG, NvnS, NvnP, NfnI, //Note: (N)umber of 
(f)ace (n)odes for (I)ntegration for one face\n\t\td, P;\n\n\t// Operators:\n\t// - Chi Operators\n\tdouble \t*Chi_vS, // Vandermonde\n\t\t\t*ChiInv_vS, // Inverse Vandermonde\n\t\t\t*Chi_vG,\n\t\t\t*Chi_vP,\n\t\t\t*Chi_fI;\n\n\t// - Derivative Matrices: (on computational domain)\n\tdouble \t*GradChi_vS_xi, // Derivative of basis function evaluated at solution nodes.\n\t\t\t*GradChi_vS_eta,\n\t\t\t*GradChi_fI_xi, // Derivative of basis function evaluated at face integration\n\t\t\t*GradChi_fI_eta; // nodes\n\n\t// - Interpolation Operators:\n\tdouble\t*I_vG_vP, // Not implemented yet\n\t\t\t*I_vG_vS, \n\t\t\t*I_vS_vG,\n\t\t\t*I_vS_fI;\n\n\t// Geometry:\n\tdouble\t*XiEtaZeta_S, // coordinates Comp. Elem. Solution Pts.\n\t\t\t*XiEtaZeta_G, // coordinates Comp. Elem Geometry Pts.\n\t\t\t*XiEtaZeta_P, // coordinates Comp. Elem Plotting Pts.\n\t\t\t*XiEtaZeta_F;\n\n\t// NURBS Basis\n\t// The xi and eta vector for the reference element using the order\n\t// of the mesh\n\tdouble *xiVector, *etaVector;\n\n};\n\n#endif // DG_S_ELEMENT_h__INCLUDED\n\n\n" }, { "alpha_fraction": 0.6556291580200195, "alphanum_fraction": 0.6556291580200195, "avg_line_length": 12.636363983154297, "blob_id": "7e833dede2468f87c42720a7432d077b781bb1d1", "content_id": "e9be907b06890da912e1aedea36e7aff71408320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 755, "license_type": "no_license", "max_line_length": 36, "num_lines": 55, "path": "/src/memory_constructors.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"memory_constructors.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_FACE.h\"\n#include \"S_ELEMENT.h\"\n#include \"S_BC.h\"\n\nstruct S_ELEMENT *New_ELEMENT(void){\n\n\tstruct S_ELEMENT *ELEMENT;\n\tELEMENT = malloc(sizeof *ELEMENT);\n\n\treturn ELEMENT;\n\n}\n\nstruct S_VOLUME *New_VOLUME(void){\n\n\tstruct S_VOLUME *VOLUME;\n\tVOLUME = malloc(sizeof *VOLUME);\n\n\t//Structs\n\tVOLUME->next = NULL;\n\tVOLUME->parent = NULL;\n\n\treturn VOLUME;\n\t\n}\n\nstruct S_FACE *New_FACE(void){\n\n\tstruct S_FACE *FACE;\n\tFACE = malloc(sizeof *FACE);\n\n\t//Structs\n\tFACE->next = NULL;\n\tFACE->parent = NULL;\n\n\treturn FACE;\n\t\n}\n\nstruct S_BC *New_BC(void){\n\tstruct S_BC *BC;\n\tBC = malloc(sizeof *BC);\n\n\t//Structs\n\tBC->next = NULL;\n\n\treturn BC;\n}\n\n\n\n" }, { "alpha_fraction": 0.5977215766906738, "alphanum_fraction": 0.6045891642570496, "avg_line_length": 27.113636016845703, "blob_id": "577e4bfec453d4b6b262b7e1e3111cf473657ab4", "content_id": "275774b3c3923f289e81213f96302044260cd1d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12377, "license_type": "no_license", "max_line_length": 91, "num_lines": 440, "path": "/src/explicit_FACE_info.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"explicit_FACE_info.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include \"S_DB.h\"\n#include \"S_FACE.h\"\n#include \"S_VOLUME.h\"\n#include \"S_ELEMENT.h\"\n#include \"matrix_functions.h\"\n#include \"euler_flux.h\"\n#include \"fluxes_inviscid.h\"\n#include \"boundary_conditions.h\"\n#include \"Parameters.h\"\n\nstatic void flipMatrixOrder(double *A, int rowNum, int colNum){\n\t/*\n\tPurpose:\n\t\tFlip the row order of a matrix A\n\t*/\n\n\tint r, c, i;\n\n\tdouble *TempMatrix;\n\n\tTempMatrix = malloc(rowNum*colNum* sizeof *TempMatrix); // 
free\n\n\tfor(r=0; r<rowNum; r++){\n\t\t// Loop over the rows\n\n\t\tfor(c=0; c<colNum; c++){\n\t\t\t// Loop over the variables\n\n\t\t\tTempMatrix[c*rowNum + r] = A[c*rowNum + rowNum-r-1];\n\t\t}\n\t}\n\n\tfor(i=0; i<rowNum*colNum; i++){\n\t\tA[i] = TempMatrix[i];\n\t}\n\n\tfree(TempMatrix);\n\n}\n\n\nvoid explicit_FACE_info(void){\n\n\t/*\n\tPurpose:\n\t\tCompute RHS_FACE for all volumes\n\t*/\n\n\t// General Variables:\n\tint ifI, i, j, iVar, i_face;\n\tint iBasis, jBasis, nBasis, nodeF_index, numfI; \n\n\t// Temporary vectors for finding flux at integration nodes\n\tdouble WNode[4], FNode[4], GNode[4];\n\n\tdouble \t*C_fI_all, // Matrix of interpolated metric terms at all faces of Volume\n\t\t\t*Face_integral_met, // integral metric term multiplier from change of variable\n\t\t\tmet1, met2, // metric terms arising in line integral \n\t\t\t*Chi_f, *F_comm_1Var, cumulFaceIntVal; \n\n\tdouble *fI_wi; // The weights at the integration nodes (one face)\n\n\tdouble *F_comm; // Common (numerical) flux\n\n\tstruct S_FACE *FACE;\n\tstruct S_VOLUME *VOLUME;\n\tstruct S_ELEMENT *ELEMENT;\n\n\t// VIn Variables:\n\tstruct S_VOLUME *VIn;\n\tint fin;\n\t// - Matrix of variables at face of inner volume\n\tdouble *What_in_fAll, *W_fIn, *F_in, *G_in; \n\n\t// VOut Variables:\n\tstruct S_VOLUME *VOut;\n\tint fout;\n\t// - Matrix of variables at face of outer volume\n\tdouble *What_out_fAll, *W_fOut, *F_out, *G_out; \n\n\n\t// Set up arrays and structures:\n\tELEMENT = DB.ELEMENT;\n\n\t// Loop over all faces\n\tfor(FACE = DB.FACE_HEAD; FACE; FACE = FACE->next){\n\t\tif (FACE->Boundary != 1){\n\t\t\t// Pointers to the inner and outer volumes of the face\n\t\t\tVIn = FACE->VIn;\n\t\t\tVOut = FACE->VOut;\n\n\t\t\t// What face this is for each volume\n\t\t\tfin = FACE->fin;\n\t\t\tfout = FACE->fout;\n\n\t\t\t// Number of face integration nodes for this face\n\t\t\tnumfI = (VIn->P+1);\n\n\t\t\t// VIn:\n\t\t\t// - Get solution matrix W_fIn at the face integration nodes\n\t\t\t// - Get the flux vector at each integration node using this vector\n\n\t\t\tWhat_in_fAll = malloc(VIn->NfnI*VIn->NVar* sizeof *What_in_fAll); // free\n\t\t\tW_fIn = malloc(numfI*VIn->NVar* sizeof *W_fIn); // free\n\t\t\tF_in = malloc(numfI*VIn->NVar* sizeof *F_in); // free\n\t\t\tG_in = malloc(numfI*VIn->NVar* sizeof *G_in); // free\n\n\t\t\tmm_CNN(VIn->NfnI, VIn->NvnG, VIn->NVar, ELEMENT->Chi_fI, VIn->What, What_in_fAll);\n\n\t\t\t// Fill the matrices of the solution and flux at the integration nodes\n\t\t\t// of the face for the inner volume\n\t\t\tfor(ifI=0; ifI<numfI; ifI++){\n\t\t\t\t// Loop over the face integration nodes for ONE face\n\n\t\t\t\tfor(iVar=0; iVar < VIn->NVar; iVar++){\n\t\t\t\t\t// Loop over the variables at the integration node\n\n\t\t\t\t\tW_fIn[iVar*numfI + ifI] = \n\t\t\t\t\t\tWhat_in_fAll[iVar*(VIn->NfnI) // column\n\t\t\t\t\t\t\t\t\t+ fin*numfI + ifI]; // row\n\n\t\t\t\t\tWNode[iVar] = W_fIn[iVar*numfI + ifI];\n\n\t\t\t\t}\n\n\t\t\t\t// Compute the flux vector at each integration node\n\t\t\t\teuler_flux_2D(WNode, FNode, GNode);\n\n\t\t\t\t// Fill the F and G flux vectors for all equations \n\t\t\t\t// for this integration node.\n\t\t\t\tfor(iVar=0; iVar<VIn->NVar; iVar++){\n\t\t\t\t\tF_in[iVar*numfI + ifI] = FNode[iVar];\n\t\t\t\t\tG_in[iVar*numfI + ifI] = GNode[iVar];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// VOut:\n\t\t\t// - Get solution matrix W_fOut at the face integration nodes\n\t\t\t//\t\t- Flip the ordering of the nodes since we need the integration\n\t\t\t//\t\t\tnodes to line up between the faces.\n\t\t\t// - 
Get the flux vector at each integration node using this vector\n\n\t\t\tWhat_out_fAll = malloc(VOut->NfnI*VOut->NVar* sizeof *What_out_fAll); // free\n\t\t\tW_fOut = malloc(numfI*VOut->NVar* sizeof *W_fOut); // free\n\t\t\tF_out = malloc(numfI*VOut->NVar* sizeof *F_out); // free\n\t\t\tG_out = malloc(numfI*VOut->NVar* sizeof *G_out); // free\n\n\t\t\tmm_CNN(VOut->NfnI, VOut->NvnG, VOut->NVar, ELEMENT->Chi_fI, VOut->What, What_out_fAll);\n\n\t\t\t// Fill the matrices of the solution and flux at the integration nodes\n\t\t\t// of the face for the inner volume\n\t\t\tfor(ifI=0; ifI<numfI; ifI++){\n\t\t\t\t// Loop over the face integration nodes for ONE face\n\n\t\t\t\tfor(iVar=0; iVar < VIn->NVar; iVar++){\n\t\t\t\t\t// Loop over the variables at the integration node\n\n\t\t\t\t\tW_fOut[iVar*numfI + ifI] = \n\t\t\t\t\t\tWhat_out_fAll[iVar*(VOut->NfnI) // column\n\t\t\t\t\t\t\t\t\t+ fout*numfI + ifI]; // row\n\n\t\t\t\t\tWNode[iVar] = W_fOut[iVar*numfI + ifI];\n\n\t\t\t\t}\n\n\t\t\t\t// Compute the flux vector at each integration node\n\t\t\t\teuler_flux_2D(WNode, FNode, GNode);\n\n\t\t\t\t// Fill the F and G flux vectors for all equations \n\t\t\t\t// for this integration node.\n\t\t\t\tfor(iVar=0; iVar<VIn->NVar; iVar++){\n\t\t\t\t\tF_out[iVar*numfI + ifI] = FNode[iVar];\n\t\t\t\t\tG_out[iVar*numfI + ifI] = GNode[iVar];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Flip order of all VOut matrices\n\t\t\tflipMatrixOrder(W_fOut, numfI, VOut->NVar);\n\t\t\tflipMatrixOrder(F_out, numfI, VOut->NVar);\n\t\t\tflipMatrixOrder(G_out, numfI, VOut->NVar);\n\n\t\t\t// Compute the Numerical Flux\n\t\t\tF_comm = malloc(numfI*VIn->NVar* sizeof *F_comm); // free\n\t\t\tflux_LF(W_fIn, W_fOut, F_in, F_out, G_in, G_out, F_comm, FACE->n_fI, VIn->P, VIn->NVar);\n\n\t\t\tif (DB.Testing == 1){\n\t\t\t\t// Print the common flux at the face\n\t\t\t\t\n\t\t\t\tprintf(\"NEW FACE F_Comm: \\n\");\n\t\t\t\tfor(i=0; i<numfI; i++){\n\t\t\t\t\t// Row loop\n\t\t\t\t\tfor(j=0; j<(VIn->NVar); j++){\n\t\t\t\t\t\t// Column loop\n\t\t\t\t\t\tprintf(\" %.14f \", F_comm[j*(VIn->P+1) + i]);\n\t\t\t\t\t}\n\t\t\t\t\tprintf(\"\\n\");\n\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Store the F_Comm values for this face for both volumes\n\t\t\tfor(i=0; i<numfI; i++){\n\t\t\t\t// Row loop\n\t\t\t\tfor(j=0; j<(VIn->NVar); j++){\n\t\t\t\t\t// Column loop\n\n\t\t\t\t\tVIn->F_Comm[j*VIn->NfnI + fin*numfI + i] = F_comm[j*numfI + i];\n\n\t\t\t\t\t// order must be switched for outer volume\n\t\t\t\t\tVOut->F_Comm[j*VOut->NfnI + fout*numfI + i] = -1*F_comm[j*numfI + numfI-i-1];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Free temporary vectors\n\t\t\tfree(F_comm);\n\n\t\t\tfree(What_in_fAll);\n\t\t\tfree(W_fIn);\n\t\t\tfree(F_in);\n\t\t\tfree(G_in);\n\n\t\t\tfree(What_out_fAll);\n\t\t\tfree(W_fOut);\n\t\t\tfree(F_out);\n\t\t\tfree(G_out);\n\t\t} else{\n\t\t\t// External Boundary Face:\n\n\t\t\t// Pointer to the inner volume\n\t\t\tVIn = FACE->VIn;\n\t\t\tfin = FACE->fin;\n\t\t\t\n\t\t\t// Number of face integration nodes for this face\n\t\t\tnumfI = (VIn->P+1);\n\n\t\t\t// VIn:\n\t\t\t// - Get solution matrix W_fIn at the face integration nodes\n\t\t\t// - Get the flux vector at each integration node using this vector\n\n\t\t\tWhat_in_fAll = malloc(VIn->NfnI*VIn->NVar* sizeof *What_in_fAll); // free\n\t\t\tW_fIn = malloc(numfI*VIn->NVar* sizeof *W_fIn); // free\n\t\t\tF_in = malloc(numfI*VIn->NVar* sizeof *F_in); // free\n\t\t\tG_in = malloc(numfI*VIn->NVar* sizeof *G_in); // free\n\n\t\t\tmm_CNN(VIn->NfnI, VIn->NvnG, VIn->NVar, ELEMENT->Chi_fI, VIn->What, What_in_fAll);\n\n\t\t\t// 
Fill the matrices of the solution and flux at the integration nodes\n\t\t\t// of the face for the inner volume\n\t\t\tfor(ifI=0; ifI<numfI; ifI++){\n\t\t\t\t// Loop over the face integration nodes for ONE face\n\n\t\t\t\tfor(iVar=0; iVar < VIn->NVar; iVar++){\n\t\t\t\t\t// Loop over the variables at the integration node\n\n\t\t\t\t\tW_fIn[iVar*numfI + ifI] = \n\t\t\t\t\t\tWhat_in_fAll[iVar*(VIn->NfnI) // column\n\t\t\t\t\t\t\t\t\t+ fin*numfI + ifI]; // row\n\n\t\t\t\t\tWNode[iVar] = W_fIn[iVar*numfI + ifI];\n\n\t\t\t\t}\n\n\t\t\t\t// Compute the flux vector at each integration node\n\t\t\t\teuler_flux_2D(WNode, FNode, GNode);\n\n\t\t\t\t// Fill the F and G flux vectors for all equations \n\t\t\t\t// for this integration node.\n\t\t\t\tfor(iVar=0; iVar<VIn->NVar; iVar++){\n\t\t\t\t\tF_in[iVar*numfI + ifI] = FNode[iVar];\n\t\t\t\t\tG_in[iVar*numfI + ifI] = GNode[iVar];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// VOut:\n\t\t\t// - Get solution matrix W_fOut at the face integration nodes\n\t\t\t// \tusing the proper boundary condition.\n\t\t\t//\t\t- No flipping needed for matrix since boundary condition\n\t\t\t//\t\t\tcomes in correct order\n\t\t\t// - Get the flux vector at each integration node using this vector\n\n\t\t\tW_fOut = malloc(numfI*VOut->NVar* sizeof *W_fOut); // free\n\t\t\tF_out = malloc(numfI*VOut->NVar* sizeof *F_out); // free\n\t\t\tG_out = malloc(numfI*VOut->NVar* sizeof *G_out); // free\n\n\t\t\tif(FACE->BCType == BC_SLIPWALL){\n\t\t\t\tboundary_SlipWall(W_fIn, FACE->n_fI, W_fOut, F_out, G_out, numfI);\n\t\t\t} else if (FACE->BCType == BC_BACKPRESSURE){\n\t\t\t\tboundary_BackPressure(W_fIn, FACE->n_fI, W_fOut, F_out, G_out, numfI);\n\t\t\t} else if (FACE->BCType == BC_TOTAL_TP){\n\t\t\t\tboundary_Total_TP(W_fIn, FACE->n_fI, W_fOut, F_out, G_out, numfI);\n\t\t\t} else{\n\t\t\t\tprintf(\"Unsupported BC Type \\n\");\n\t\t\t\texit(1);\n\t\t\t}\n\n\t\t\t// Compute the Numerical Flux\n\t\t\tF_comm = malloc(numfI*VIn->NVar* sizeof *F_comm); // free\n\t\t\tflux_LF(W_fIn, W_fOut, F_in, F_out, G_in, G_out, F_comm, FACE->n_fI, VIn->P, VIn->NVar);\n\n\t\t\tif (DB.Testing == 1){\n\t\t\t\t// Print the common flux at the face\n\t\t\t\t\n\t\t\t\tprintf(\"NEW FACE F_Comm: \\n\");\n\t\t\t\tfor(i=0; i<numfI; i++){\n\t\t\t\t\t// Row loop\n\t\t\t\t\tfor(j=0; j<(VIn->NVar); j++){\n\t\t\t\t\t\t// Column loop\n\t\t\t\t\t\tprintf(\" %.14f \", F_comm[j*(VIn->P+1) + i]);\n\t\t\t\t\t}\n\t\t\t\t\tprintf(\"\\n\");\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Store the F_Comm values for this face for both volumes\n\t\t\tfor(i=0; i<numfI; i++){\n\t\t\t\t// Row loop\n\t\t\t\tfor(j=0; j<(VIn->NVar); j++){\n\t\t\t\t\t// Column loop\n\n\t\t\t\t\tVIn->F_Comm[j*VIn->NfnI + fin*numfI + i] = F_comm[j*numfI + i];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Free temporary vectors\n\t\t\tfree(F_comm);\n\n\t\t\tfree(What_in_fAll);\n\t\t\tfree(W_fIn);\n\t\t\tfree(F_in);\n\t\t\tfree(G_in);\n\n\t\t\tfree(W_fOut);\n\t\t\tfree(F_out);\n\t\t\tfree(G_out);\n\n\t\t}\n\t}\n\n\tC_fI_all = malloc(4*DB.VOLUME_HEAD->NfnI* sizeof *C_fI_all); // free\n\tFace_integral_met = malloc((DB.VOLUME_HEAD->P+1) * sizeof *Face_integral_met); // free\n\tfI_wi = DB.ELEMENT->nodes_wi;\n\n\t// Compute the surface integral for each volume using the F_Comm\n\t// terms:\n\tint r,c;\n\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\n\n\t\t// Interpolate the metric terms to the face integration nodes of all the\n\t\t// faces and get the metric jacobian term that comes from the transformation\n\t\t// of the line integral\n\t\tmm_CNN(VOLUME->NfnI, VOLUME->NvnS, 4, 
ELEMENT->I_vS_fI, VOLUME->C_vS, C_fI_all);\n\n\t\tif (DB.Testing == 1){\n\t\t\tprintf(\"\\n C_fI : \\n\");\n\t\t\tfor(i=0; i<VIn->NfnI; i++){\n\t\t\t\tfor(j=0; j<4; j++){\n\t\t\t\t\tprintf(\"%.14f \", C_fI_all[j*VIn->NfnI + i]);\n\t\t\t\t}\n\t\t\t\tprintf(\"\\n\");\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\t// Compute the RHS_FACE components for the volume\n\t\tfor(iVar=0; iVar<VIn->NVar; iVar++){\n\t\t\t// Loop through the variables\n\n\t\t\tnBasis = 0;\n\t\t\tfor(jBasis=0; jBasis<VIn->P+1; jBasis++){\n\t\t\t\tfor(iBasis=0; iBasis<VIn->P+1; iBasis++){\n\t\t\t\t\t// Loop through the basis functions (nbasis = row of RHS_FACE)\n\t\t\t\t\t\n\t\t\t\t\t// Value of the i,j th basis function at the face integration\n\t\t\t\t\t// nodes\n\t\t\t\t\tChi_f = &DB.ELEMENT->Chi_fI[nBasis*VOLUME->NfnI];\n\t\t\t\t\t\n\t\t\t\t\tcumulFaceIntVal = 0;\n\t\t\t\t\tfor(i_face = 0; i_face < 4; i_face++){\n\t\t\t\t\t\t// Loop over all the faces\n\t\t\t\t\t\tnumfI = VOLUME->P+1;\n\t\t\t\t\t\t\n\t\t\t\t\t\tF_comm_1Var = malloc(numfI*1* sizeof *F_comm_1Var); // free\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor(i=0; i<numfI; i++){\n\t\t\t\t\t\t\t// Loop over the integration nodes\n\n\t\t\t\t\t\t\t// Get the face integration metrics for the nodes for the current face\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t// Choose the correct metric terms depending on the face. \n\t\t\t\t\t\t\t// Care needs to be taken in choosing the correct row in the matrix\n\t\t\t\t\t\t\tif (i_face == 0 || i_face == 2){\n\t\t\t\t\t\t\t\t// Constant eta faces. Take the xi metric terms\n\t\t\t\t\t\t\t\tmet1 = C_fI_all[1*VOLUME->NfnI + i_face*numfI + i];\n\t\t\t\t\t\t\t\tmet2 = C_fI_all[3*VOLUME->NfnI + i_face*numfI + i];\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// Constant xi faces. Take the eta metric terms\n\t\t\t\t\t\t\t\tmet1 = C_fI_all[0*VOLUME->NfnI + i_face*numfI + i];\n\t\t\t\t\t\t\t\tmet2 = C_fI_all[2*VOLUME->NfnI + i_face*numfI + i];\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tFace_integral_met[i] = sqrt(met1*met1 + met2*met2);\n\n\t\t\t\t\t\t\t// Get the F_Comm values for the integration nodes for this\n\t\t\t\t\t\t\t// equation\n\t\t\t\t\t\t\tF_comm_1Var[i] = VOLUME->F_Comm[iVar*VOLUME->NfnI + // col\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ti_face*numfI + i]; // row\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Add integral of this face to the integral value\n\t\t\t\t\t\tfor(i=0; i < numfI; i++){\n\t\t\t\t\t\t\tcumulFaceIntVal = cumulFaceIntVal + \n\t\t\t\t\t\t\tChi_f[i_face*numfI + i]*F_comm_1Var[i]*Face_integral_met[i]*fI_wi[i];\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfree(F_comm_1Var);\n\n\t\t\t\t\t}\n\n\t\t\t\t\t// Assign value to RHS_FACE\n\t\t\t\t\tVOLUME->RHS_FACE[iVar*VOLUME->NvnG + nBasis] = cumulFaceIntVal;\n\t\t\t\t\t\n\t\t\t\t\tnBasis++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tfree(C_fI_all);\n\tfree(Face_integral_met);\n\n}\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5732337832450867, "alphanum_fraction": 0.5919011831283569, "avg_line_length": 27.727272033691406, "blob_id": "22dc5805af5619c52d36d81a1b190c87fbce97ed", "content_id": "442c203b2a3509b1997ccd3a89d36d73348922b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3482, "license_type": "no_license", "max_line_length": 122, "num_lines": 121, "path": "/src/initialization.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"initialization.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <math.h>\n\n#include \"S_DB.h\"\n#include \"Parameters.h\"\n\n/*\n* Purpose:\n*\tModule to be used to read in the control file 
when we have one.\n* \tFor now, we will simply set everything using constants.\n\n* Notation:\n *\n *\t\tCode Parameters\n *\t\t\tDimension : d = 2 (Will add d=3 after)\n *\t\t\tMeshType : Optional\n *\t\t\t\t\t\t\t- Will have a flag to curve the mesh potentially if needed. Most \n *\t\t\t\t\t\t\t\tlikely will do this in the mesh generation file itself.\n *\n *\t\t\tForm : Form of the equations (i.e. how many times they are integrated by parts)\n *\t\t\t Options: Weak (Only implement the weak form for now)\n *\n *\t\t\tNodeType : Type of VOLUME nodes to use for different element types\n *\t\t\t Options: (T)ensor(P)roduct : (G)auss(L)egendre -> Only option implemented currently\n *\t\t\t (G)auss(L)obatto(L)egendre\n *\t\t\tBasisType : Type of basis functions\n *\t\t\t Options: Modal\n *\t\t\tP : Polynomial order to be used (not used if p-adaptation is enabled)\n *\n *\t\t\tTesting : Run tests for standard checks.\n *\t\t\t Options: 0 (No testing)\n *\t\t\t 1 (Testing)\n*/\n\nvoid initialization(int nargc, char **argv){\n\n\t/*\n\tRead and process the control file. The control file\n\tshould be given as a command line argument to the solver\n\t\n\tParameters:\n\t\tint nargc: Number of command line arguments\n\t\tchar **argv: Array of strings for each command line argument\n\t\n\tReturn:\n\n\t*/\n\n\t// Validation:\n\t// - There should be two arguments\n\n\tif (nargc != 2){\n\t\tprintf(\"NEED 2 COMMAND LINE ARGUMENTS \\n\");\n\t\texit(1);\n\t}\n\n\tchar *CtrlFile; \n\n\t// Setup Control File Parameters\n\tCtrlFile = malloc(200 * sizeof *CtrlFile); // free\n\tstrcpy(CtrlFile, argv[1]);\n\tprintf(\"Control File : %s \\n\", CtrlFile);\n\n\t// -----------------------------------------------------------------\n\t//\t\t\t\t\t\tRead Control File\n\t// -----------------------------------------------------------------\n\n\t// Setup the parameters for the test\n\tDB.NodeType = malloc(200 * sizeof *DB.NodeType); // keep\n\tDB.MeshFileName = malloc(200 * sizeof *DB.MeshFileName); // keep\n\tDB.BasisType = malloc(200 * sizeof *DB.BasisType); // keep\n\tDB.TestType = malloc(200* sizeof *DB.TestType); // keep\n\n\tstrcpy(DB.NodeType, \"GL\");\n\tstrcpy(DB.MeshFileName, \"/Users/jm-034232/Documents/McGill/Thesis/IGA-DG/meshes/6x1_P1_Deform_InviscidChannel_V4.2.msh\");\n\tstrcpy(DB.BasisType, \"Polynomial\"); // NURBS or Polynomial\n\tstrcpy(DB.TestType, \"InviscidChannel\"); // PeriodicVortex, InviscidChannel\n\n\t// Start reading the initialization file\n\tprintf(\"COMPLETED READING \\n\");\n\texit(0);\n\n\tDB.d = 2;\n\tDB.Testing = 0;\n\tDB.numTimeSteps = 100000;\n\tDB.printSolFreq = 50;\n\tDB.shapeFuncType = 0;\n\n\tDB.dt = 1e-2;\n\tDB.exit_tol = 1e-5;\n\n\n\t//TODO: Place this in method initialize_test_case_parameters\n\t// \tin this same file\n\n\t// -----------------------------------------------------------------\n\t//\t\t\t\t\t\tSetup Flow Properties\n\t// -----------------------------------------------------------------\n\n\t// Flow Properties: (used for the subsonic channel in Philip's Code)\n\n\tDB.p_Total = 1.0;\n\tDB.T_Total = 1.0;\n\tDB.Rg = 1.0;\n\tDB.pBack = 0.99*DB.p_Total;\n\n\tDB.rhoInf = DB.p_Total/(DB.Rg*DB.T_Total);\n\tDB.pInf = DB.p_Total;\n\n\tDB.MInf = sqrt(2.0/GM1*(pow((DB.pBack/DB.p_Total),-GM1/GAMMA)-1.0));\n\tDB.cInf = sqrt(GAMMA*DB.pInf/DB.rhoInf);\n\n\n\t// Free Memory:\n\tfree(CtrlFile); \n\n}\n\n\n\n\n\n" }, { "alpha_fraction": 0.6542916893959045, "alphanum_fraction": 0.6705281138420105, "avg_line_length": 25.98768424987793, "blob_id": 
"f097bd9a591e7797cc76f0620502028a63dcdd26", "content_id": "3b428083805a174c3cb70d2af89754c508cb6fac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10963, "license_type": "no_license", "max_line_length": 101, "num_lines": 406, "path": "/src/bases.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"bases.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n/*\n *\tPurpose:\n *\t\tCompute the value of the chosen basis function at the\n *\t\tgiven points on the computational domain\n *\n *\tComments:\n *\t\t- First, only the polynomial Lagrange Basis will be implemented.\n *\t\t- Once this is complete, the NURBS basis will be added.\n *\n *\tNotation:\n *\t\n *\n *\tReferences:\n */\n\n\nstatic double Lagrange_iPrime(int iBasis, double x, double *nodeList, int numNodes);\nstatic double Lagrange_i(int iBasis, double x, double *nodeList, int numNodes);\ndouble basis_TP_Lagrange_2D(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta);\ndouble *basis_TP_Lagrange_2D_Grad(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta);\n\nstatic double N_ip(int iBasis, int P, double xi, double *xiVector);\nstatic double N_ipPrime(int iBasis, int P, double xi, double *xiVector);\ndouble basis_TP_NURBS_2D(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta);\ndouble *basis_TP_NURBS_2D_Grad(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta);\n\n\n\nstatic double Lagrange_iPrime(int iBasis, double x, double *nodeList, int numNodes){\n\t/*\n\tCompute the value of the ith Lagrange basis function (1D) evaluated at\n\tpoint x. 
3 cases for computing the derivative:\n\t\t1) If x is an arbitrary point on the domain.\n\t\t2) If x is one of the node points (but not the ith node where basis is 1)\n\t\t3) If the x is at the ith node point\n\n\t:param iBasis : Which basis function is being used\n\t:param x : The value along the real number line to evaluate the basis function at\n\t:param *nodeList : The array of the location of the node points along the 1D number line.\n\t:param numNodes : The number of nodes\n\n\t:return value of the derivative of the basis function at x\n\t*/\n\n\tint i, j, k, commonIndex;\n\tdouble factor, w_i, w_j, val;\n\n\tcommonIndex = -1;\n\t// Check what case this is\n\tfor(i=0; i<numNodes; i++){\n\t\tif (fabs(x - nodeList[i]) < 1E-7){\n\t\t\tcommonIndex = i;\n\t\t\tbreak;\n\t\t} \n\t}\n\n\tif(commonIndex == -1){\n\n\t\t// Case 1:\n\n\t\tfactor = 0.0;\n\n\t\tfor(i=0; i<numNodes; i++){\n\t\t\tif(i!=iBasis){\n\t\t\t\tfactor = factor + 1./(x-nodeList[i]);\n\t\t\t}\n\t\t}\n\n\t\treturn factor*Lagrange_i(iBasis, x, nodeList, numNodes);\n\n\t} else{\n\n\t\tif (commonIndex != iBasis){\n\n\t\t\t// Case 2:\n\n\t\t\tj = iBasis;\n\t\t\ti = commonIndex;\n\n\t\t\tw_i = 1.0;\n\t\t\tfor (k=0; k<numNodes; k++){\n\t\t\t\tif (i != k){\n\t\t\t\t\tw_i = w_i*(nodeList[i] - nodeList[k]);\n\t\t\t\t}\n\t\t\t}\n\t\t\tw_i = 1./w_i;\n\n\t\t\tw_j = 1.;\n\t\t\tfor(k=0; k<numNodes; k++){\n\t\t\t\tif(j != k){\n\t\t\t\t\tw_j = w_j*(nodeList[j] - nodeList[k]);\n\t\t\t\t}\n\t\t\t}\n\t\t\tw_j = 1./w_j;\n\n\t\t\treturn (w_j/w_i)/(nodeList[i] - nodeList[j]);\n\n\t\t} else{\n\n\t\t\t// Case 3:\n\n\t\t\tval = 0;\n\n\t\t\tfor(j=0; j<numNodes; j++){\n\t\t\t\tif(j != commonIndex){\n\t\t\t\t\tval = val + Lagrange_iPrime(j, x, nodeList, numNodes);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn val*-1.;\n\n\t\t}\n\n\t}\n\n}\n\nstatic double Lagrange_i(int iBasis, double x, double *nodeList, int numNodes){\n\t/*\n\tCompute the value of the ith 1D Lagrange basis function evaluated\n\tat point x.\n\n\t:param iBasis : Which basis function is being used\n\t:param x : The value along the real number line to evaluate the basis function at\n\t:param *nodeList : The array of the location of the node points along the 1D number line.\n\t:param numNodes: The number of nodes\n\n\t:return value of the basis function at x\n\t*/\n\n\tint i;\n\tdouble numer, denom;\n\n\tnumer = 1.0;\n\tdenom = 1.0;\n\n\tfor(i=0; i<numNodes; i++){\n\t\tif(i!=iBasis){\n\t\t\tnumer = numer*(x-nodeList[i]);\n\t\t\tdenom = denom*(nodeList[iBasis] - nodeList[i]);\n\t\t}\n\t}\n\n\treturn (numer/denom);\n\n}\n\ndouble basis_TP_Lagrange_2D(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta){\n\n\t/*\n\tCompute the lagrange shape function (2D) at the xi, eta value on the computational \n\tdomain. Xi and eta are the coordinate axes on the computational domain.\n\n\t:param P : The order of the shape functions\n\t:param basis_i : The i index of the basis \n\t:param basis_j : The j index of the basis \n\t:param *nodeLocations : The matrix of the value of the xi,eta points at the nodes.\n\t:param xi : The xi value at which to evaluate chosen basis\n\t:param eta : The eta value at which to evaluate chosen basis function\n\n\t:return value of the shape function at the chosen point\n\t*/\n\n\tdouble *nodeVectorXi, *nodeVectorEta, *nodeLocations_xi, *nodeLocations_eta;\n\tint i;\n\n\t// This is a tensor product element. 
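(The value returned below is the product of two 1D Lagrange polynomials, Lagrange_i in xi times Lagrange_i in eta.) 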
Find the node location \n\t// along each coordinate axis (xi and eta)\n\tnodeVectorXi = malloc((P+1)* sizeof *nodeVectorXi); //free\n\tnodeVectorEta = malloc((P+1)* sizeof *nodeVectorEta); //free\n\n\t// Recall: The nodeLocations matrix is stored with all eta = -1, ... It is \n\t// also in column major form.\n\tnodeLocations_xi = &nodeLocations[0];\n\tnodeLocations_eta = &nodeLocations[(P+1)*(P+1)];\n\n\tfor(i=0; i<(P+1); i++){\n\t\tnodeVectorXi[i] = nodeLocations_xi[i];\n\t\tnodeVectorEta[i] = nodeLocations_eta[i*(P+1)];\n\t}\n\n\tval = Lagrange_i(basis_i, xi, nodeVectorXi, P+1)*Lagrange_i(basis_j, eta, nodeVectorEta, P+1);\n\n\tfree(nodeVectorXi);\n\tfree(nodeVectorEta);\n\n\treturn val;\n\n}\n\ndouble *basis_TP_Lagrange_2D_Grad(int P, int basis_i, int basis_j, double *nodeLocations, \n\tdouble xi, double eta){\n\n\t/*\n\tCompute the gradient of the Lagrange shape function (2D) at the xi, eta value on the\n\tcomputational domain. Xi and eta are the coordinate axes on the computational domain.\n\n\t:param P : The order of the shape functions\n\t:param basis_i : The i index of the basis \n\t:param basis_j : The j index of the basis \n\t:param *nodeLocations : The matrix of the value of the xi,eta points at the nodes.\n\t:param xi : The xi value at which to evaluate the chosen basis function\n\t:param eta : The eta value at which to evaluate the chosen basis function\n\n\t:return value of the gradient of the shape function at the chosen point\n\t*/\n\n\tdouble del_by_del_xi, del_by_del_eta, *grad;\n\tdouble *nodeVectorXi, *nodeVectorEta, *nodeLocations_xi, *nodeLocations_eta;\n\tint i;\n\n\tgrad = malloc(2* sizeof *grad);\n\n\t// This is a tensor product element. Find the node location \n\t// along each coordinate axis (xi and eta)\n\tnodeVectorXi = malloc((P+1)* sizeof *nodeVectorXi); //free\n\tnodeVectorEta = malloc((P+1)* sizeof *nodeVectorEta); //free\n\n\t// Recall: The nodeLocations matrix is stored with all eta = -1, ... 
It is \n\t// also in column major form.\n\tnodeLocations_xi = &nodeLocations[0];\n\tnodeLocations_eta = &nodeLocations[(P+1)*(P+1)];\n\n\tfor(i=0; i<(P+1); i++){\n\t\tnodeVectorXi[i] = nodeLocations_xi[i];\n\t\tnodeVectorEta[i] = nodeLocations_eta[i*(P+1)];\n\t}\n\n\tdel_by_del_xi = Lagrange_i(basis_j, eta, nodeVectorEta, P+1)*Lagrange_iPrime(basis_i,\n\t\t xi, nodeVectorXi, P+1);\n\tdel_by_del_eta = Lagrange_i(basis_i, xi, nodeVectorXi, P+1)*Lagrange_iPrime(basis_j,\n\t\t eta, nodeVectorEta, P+1);\n\n\tgrad[0] = del_by_del_xi;\n\tgrad[1] = del_by_del_eta;\n\n\tfree(nodeVectorXi);\n\tfree(nodeVectorEta);\n\n\treturn grad;\n\n}\n\n\n\n\n\n\n\n// B-Spline Basis Functions\nstatic double N_ipPrime(int i, int p, double xi, double *xiVector){\n\t/*\n\tCompute the value of the derivative of the ith B-spline basis function (1D)\n\tevaluated at point xi on the mapped domain.\n\n\t:param i : The index of the basis function\n\t:param p : The degree of the basis function\n\t:param xi : The value on the mapped domain at which to evaluate the derivative\n\t:param *xiVector : The knot vector\n\n\t:return value of the derivative of the basis function at xi\n\t*/\n\n\n\tdouble xi_i, xi_iPlusP, xi_iPlusPPlus1, xi_iPlus1;\n\tdouble numer1, denom1, term1, numer2, denom2, term2;\n\n\txi_i = xiVector[i];\n\txi_iPlusP = xiVector[i+p];\n\txi_iPlusPPlus1 = xiVector[i+p+1];\n\txi_iPlus1 = xiVector[i+1];\n\n\tnumer1 = p*N_ip(i, p-1, xi, xiVector);\n\tdenom1 = (xi_iPlusP - xi_i);\n\n\tif(fabs(denom1) < 1E-14){\n\t\tterm1 = 0;\n\t} else{\n\t\tterm1 = numer1/denom1;\n\t}\n\n\tnumer2 = p*N_ip(i+1, p-1, xi, xiVector);\n\tdenom2 = (xi_iPlusPPlus1 - xi_iPlus1);\n\tif(fabs(denom2) < 1E-14){\n\t\tterm2 = 0;\n\t} else{\n\t\tterm2 = numer2/denom2;\n\t}\n\n\treturn term1 - term2;\n\n}\n\nstatic double N_ip(int i, int p, double xi, double *xiVector){\n\t/*\n\tCompute the value of the ith 1D B-spline basis function of degree p evaluated\n\tat point xi. The B-spline domain is from -1 to 1.\n\n\t:param i : The index of the basis function\n\t:param p : The degree of the basis function\n\t:param xi : The value on the mapped domain at which to evaluate the basis function\n\t:param *xiVector : The knot vector\n\n\t:return value of the basis function at xi\n\t*/\n\n\tdouble xi_i, xi_iPlus1, xi_iPlusP, xi_iPlusPPlus1;\n\n\tdouble num1, denom1, num2, denom2, \n\t\tterm1, term2; // Numerators and denominators of the B-spline recursion\n\n\n\tif (p == 0){\n\t\t// Base Case\n\t\t\n\t\tif(xi < xiVector[i+1] && xi >= xiVector[i]){\n\t\t\treturn 1.;\n\t\t} else{\n\t\t\treturn 0.;\n\t\t}\n\n\t} else{\n\t\t// Recursive Case\n\n\t\txi_i = xiVector[i];\n\t\txi_iPlus1 = xiVector[i+1];\n\t\txi_iPlusP = xiVector[i+p];\n\t\txi_iPlusPPlus1 = xiVector[i+p+1];\n\n\t\t// - First Term\n\t\tnum1 = (xi - xi_i)*(N_ip(i,p-1, xi, xiVector));\n\t\tdenom1 = xi_iPlusP - xi_i;\n\n\t\tif (fabs(num1) < 1E-14 && fabs(denom1) < 1E-14){\n\t\t\tterm1 = 0;\n\t\t} else{\n\t\t\tterm1 = num1/denom1;\n\t\t}\n\n\t\t// - Second Term\n\t\tnum2 = (xi_iPlusPPlus1-xi)*N_ip(i+1,p-1,xi,xiVector);\n\t\tdenom2 = xi_iPlusPPlus1 - xi_iPlus1;\n\t\tif(fabs(num2) < 1E-14 && fabs(denom2) < 1E-14){\n\t\t\tterm2 = 0;\n\t\t} else{\n\t\t\tterm2 = num2/denom2;\n\t\t}\n\n\t\treturn term1 + term2;\n\t}\n\n}\n\ndouble basis_TP_NURBS_2D(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta){\n\n\t/*\n\tCompute the NURBS shape function (2D) at the xi, eta value on the computational \n\tdomain. 
Xi and eta are the coordinate axes on the computational domain.\n\n\t:param P : The order of the shape functions\n\t:param basis_i : The i index of the basis \n\t:param basis_j : The j index of the basis \n\t:param *xiVector : The knot vector along the xi direction\n\t:param *etaVector : The knot vector along the eta direction\n\t:param xi : The xi value at which to evaluate the chosen basis function\n\t:param eta : The eta value at which to evaluate the chosen basis function\n\n\t:return value of the shape function at the chosen point\n\t*/\n\n\treturn N_ip(basis_i, P, xi, xiVector)*N_ip(basis_j, P, eta, etaVector);\n\n}\n\ndouble *basis_TP_NURBS_2D_Grad(int P, int basis_i, int basis_j, double *xiVector, double *etaVector, \n\tdouble xi, double eta){\n\n\t/*\n\tCompute the gradient of the NURBS shape function (2D) at the xi, eta value on the\n\tcomputational domain. Xi and eta are the coordinate axes on the computational domain.\n\n\t:param P : The order of the shape functions\n\t:param basis_i : The i index of the basis \n\t:param basis_j : The j index of the basis \n\t:param *xiVector : The knot vector along the xi direction\n\t:param *etaVector : The knot vector along the eta direction\n\t:param xi : The xi value at which to evaluate the chosen basis function\n\t:param eta : The eta value at which to evaluate the chosen basis function\n\n\t:return value of the gradient of the shape function at the chosen point\n\t*/\n\n\tdouble *grad;\n\tdouble del_by_del_xi, del_by_del_eta;\n\n\tgrad = malloc(2* sizeof *grad);\n\n\tdel_by_del_xi = N_ip(basis_j, P, eta, etaVector)*N_ipPrime(basis_i, P, xi, xiVector);\n\tdel_by_del_eta = N_ip(basis_i, P, xi, xiVector)*N_ipPrime(basis_j, P, eta, etaVector);\n\n\tgrad[0] = del_by_del_xi;\n\tgrad[1] = del_by_del_eta;\n\n\treturn grad;\n\n}\n\n\n\n\n" }, { "alpha_fraction": 0.6846796870231628, "alphanum_fraction": 0.6891365051269531, "avg_line_length": 26.15151596069336, "blob_id": "015fb3dacb5bb8ade28696cf8d9e771615533317", "content_id": "f3600329669c2c0ce6dfb82f90db0aa448f96def", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1795, "license_type": "no_license", "max_line_length": 73, "num_lines": 66, "path": "/include/S_DB.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#ifndef DG__S_DB_h__INCLUDED\n#define DG__S_DB_h__INCLUDED\n\nstruct S_DB {\n\t// Time\n\tdouble time_total;\n\n\tchar *NodeType, // GL or GLL\n\t\t *BasisType, // Polynomial or NURBS\n\t\t *MeshFileName, // Path to the mesh file\n\t\t *TestType; // Type of test being completed\n\n\n\tint d, // Dimension of the mesh\n\t\tP, // Order of the solution polynomials\n\t\t// 0 = No, 1 = All, 2 = Structures, 3 = Operators, 4 = SetupGeometry\n\t\t// 5 = Solver Explicit\n\t\tTesting, \n\t\tnumTimeSteps, // Total number of time steps\n\t\tprintSolFreq, // Every how many time steps to print solution\n\t\tshapeFuncType; // 0 = Polynomial, 1 = NURBS\n\n\tdouble exit_tol;\n\n\t// Mesh Information:\n\tint NV; // Number of Volumes\n\t\n\tdouble dt; // Size of one time step\n\n\t// Connectivity Temporary data structures:\n\tint NGP, // Total number of geometry node points\n\t\tNVe; // Total number of vertices\n\n\tdouble\t*XYZ_G,\t// The matrix, stored in column major form,\n\t\t\t\t\t// of all geometry node points.\n\t\t\t*XYZ_Ve; // Vertices in column major form\n\n\tint\tNGConPerV;\t// Number of geometry connectivity ints per\n\t\t\t\t\t// element\n\n\tint \t*GeoCon, // Connectivity information for geometry nodes\n\t\t\t*VeCon; // Connectivity 
information for vertices\n\n\t// Flow Properties:\n\tdouble \tp_Total, T_Total, Rg, pBack, \n\t\t\trhoInf, pInf, MInf, cInf;\n\n\n\t// Structs\n\tstruct S_VOLUME *VOLUME_HEAD; // Pointer to the first volume.\n\tstruct S_FACE *FACE_HEAD; // Pointer to the first face in linked list\n\tstruct S_ELEMENT *ELEMENT; // Pointer to the reference element\n\tstruct S_BC *BC_HEAD; // Pointer to the structs with the BC information\n\n\n\t// NURBS Basis\n\t// The xi and eta vector for the reference element using the order\n\t// of the mesh\n\tdouble *xiVector_ref, *etaVector_ref;\n\n};\n\n// Make this database globally accessible.\nextern struct S_DB DB;\n\n#endif // DG__S_DB_h__INCLUDED\n\n" }, { "alpha_fraction": 0.5787694454193115, "alphanum_fraction": 0.5970250368118286, "avg_line_length": 26.36111068725586, "blob_id": "8e321a5e8348bc5a0c568f71480ab0be072bb871", "content_id": "73c0bb16ea2de0f9927ff7004678ddc1559d240a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2958, "license_type": "no_license", "max_line_length": 93, "num_lines": 108, "path": "/src/setup_normals.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_normals.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include \"S_ELEMENT.h\"\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"matrix_functions.h\"\n\nvoid setup_normals(struct S_FACE *FACE){\n\n\t/*\n\tPurpose:\n\t\tSet up the physical normal vectors at each face integration point.\n\t\tOutward normals will be set up for the VIn volume.\n\t\tTo do this the following will be done\n\t\t\t1) Compute the metric terms at the face integration nodes. \n\t\t\t\tStore these values at in a structure.\n\t\t\t2) Use the metric terms at the face integration nodes to compute\n\t\t\t\tthe normal vector to each point. The convention used is that \n\t\t\t\tall vertices will be ordered in counterclockwise order in the\n\t\t\t\tmapping so the rotation matrix to be used for each tangent vector\n\t\t\t\tis simply a function of which mapped face is being worked with.\n\t*/\n\n\tint fin, i;\n\tdouble *n_fI, *n_fI_x, *n_fI_y, norm_n;\n\tdouble y_eta, min_y_xi, min_x_eta, x_xi; \n\n\tstruct S_ELEMENT *ELEMENT;\n\tstruct S_VOLUME *VIn;\n\n\t// Metric terms at the face integration nodes for this face\n\tdouble *C_fI, *xy_xi, *xy_eta;\n\n\tfin = FACE->fin;\n\tVIn = FACE->VIn;\n\n\tC_fI = FACE->C_fI;\n\txy_xi = malloc(VIn->NfnI*2* sizeof *xy_xi); // free\n\txy_eta = malloc(VIn->NfnI*2* sizeof *xy_eta); // free\n\n\tmm_CNN(DB.ELEMENT->NfnI, DB.ELEMENT->NvnG, 2, DB.ELEMENT->GradChi_fI_xi, VIn->XYZ, xy_xi);\n\tmm_CNN(DB.ELEMENT->NfnI, DB.ELEMENT->NvnG, 2, DB.ELEMENT->GradChi_fI_eta, VIn->XYZ, xy_eta);\n\n\t// Store the values of the metric for the current face\n\tfor(i=0; i<FACE->P+1; i++){\n\t\t// There are P+1 face integration nodes to loop over. 
Use fIn to \n\t\t// find the correct sequence of nodes\n\n\t\t// Store order to be C matrix in row major form\n\t\tC_fI[i] = xy_eta[VIn->NfnI + fin*(VIn->P+1) + i]; // C11 = y_eta\n\t\tC_fI[i+1*(FACE->P+1)] = -1*xy_xi[VIn->NfnI + fin*(VIn->P+1) + i]; // C12 = - y_xi\n\t\tC_fI[i+2*(FACE->P+1)] = -1*xy_eta[fin*(VIn->P+1) + i]; // C21 = - x_eta\n\t\tC_fI[i+3*(FACE->P+1)] = xy_xi[fin*(VIn->P+1) + i]; // C22 = x_xi\n\n\t}\n\n\t// Compute outward normal vector (to VIn) using metric terms\n\tn_fI = FACE->n_fI;\n\tn_fI_x = &n_fI[0];\n\tn_fI_y = &n_fI[(FACE->P+1)];\n\n\tfor(i=0; i<(FACE->P+1); i++){\n\n\t\ty_eta = C_fI[i];\n\t\tmin_y_xi = C_fI[i+1*(FACE->P+1)];\n\t\tmin_x_eta = C_fI[i+2*(FACE->P+1)];\n\t\tx_xi = C_fI[i+3*(FACE->P+1)];\n\n\t\tif(fin == 0){\n\t\t\t// f = 0 face : Constant eta line (eta = -1)\n\t\t\tn_fI_x[i] = -1*min_y_xi;\n\t\t\tn_fI_y[i] = -1*x_xi;\n\n\t\t}else if(fin == 1){\n\t\t\t// f = 1 face : Constant xi line (xi = 1)\n\t\t\tn_fI_x[i] = y_eta;\n\t\t\tn_fI_y[i] = min_x_eta;\n\n\n\t\t} else if(fin == 2){\n\t\t\t// f = 2 face : Constant eta line (eta = 1)\n\t\t\tn_fI_x[i] = min_y_xi;\n\t\t\tn_fI_y[i] = x_xi;\n\n\n\t\t} else{\n\t\t\t// f = 3 face : Constant xi line (xi = -1)\n\t\t\tn_fI_x[i] = -1*y_eta;\n\t\t\tn_fI_y[i] = -1*min_x_eta;\n\t\t}\n\n\t\t// Normalize Normal Vector\n\t\tnorm_n = n_fI_x[i]*n_fI_x[i] + n_fI_y[i]*n_fI_y[i];\n\t\tnorm_n = sqrt(norm_n);\n\n\t\tn_fI_x[i] = n_fI_x[i]/norm_n;\n\t\tn_fI_y[i] = n_fI_y[i]/norm_n;\n\n\t}\n\n\tfree(xy_xi);\n\tfree(xy_eta);\n\n}\n\n\n" }, { "alpha_fraction": 0.2947903573513031, "alphanum_fraction": 0.6149936318397522, "avg_line_length": 23.952381134033203, "blob_id": "0f543e3d311f223403719cc8160af890ec9bf2dd", "content_id": "745640ae9313231cb977b32ef1e07ce038fc3f62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1574, "license_type": "no_license", "max_line_length": 121, "num_lines": 63, "path": "/src/cubature.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"cubature.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\n\nvoid cubature_literature(int P, char *NodeType, double *Nodes, double *Weights){\n\t/*\n\t *\tPurpose:\n\t *\t\tReturn the 1D nodes and weights for the chosen node type\n\t */\n\n\tint i;\n\n\tif(strstr(NodeType, \"GL\")){\n\t\t\n\t\tif (P+1 == 2){\n\t\t\tdouble xi[2] = {-0.5773502691896257, 0.5773502691896257};\n\t\t\tdouble wi[2] = { 1.0000000000000000, 1.0000000000000000};\n\n\t\t\tfor(i=0; i<(P+1); i++){\n\t\t\t\tNodes[i] = xi[i];\n\t\t\t\tWeights[i] = wi[i];\n\t\t\t}\n\t\t} else if(P+1 == 3){\n\n\t\t\tdouble xi[3] = {-0.7745966692414834, 0.0000000000000000, 0.7745966692414834};\n\t\t\tdouble wi[3] = { 0.5555555555555556, 0.8888888888888888, 0.5555555555555556};\n\n\t\t\tfor(i=0; i<(P+1); i++){\n\t\t\t\tNodes[i] = xi[i];\n\t\t\t\tWeights[i] = wi[i];\n\t\t\t}\n\t\t} else if(P+1 == 4){\n\n\t\t\tdouble xi[4] = {-0.8611363115940526, -0.3399810435848563, 0.3399810435848563, 0.8611363115940526};\n\t\t\tdouble wi[4] = { 0.3478548451374538, 0.6521451548625461, 0.6521451548625461, 0.3478548451374538};\n\n\t\t\tfor(i=0; i<(P+1); i++){\n\t\t\t\tNodes[i] = xi[i];\n\t\t\t\tWeights[i] = wi[i];\n\t\t\t}\n\t\t} else if(P+1 == 5){\n\n\t\t\tdouble xi[5] = {-0.9061798459386640, -0.5384693101056831, 0.0000000000000000, 0.5384693101056831, 0.9061798459386640};\n\t\t\tdouble wi[5] = { 0.2369268850561891, 0.4786286704993665, 0.5688888888888889, 0.4786286704993665, 0.2369268850561891};\n\n\t\t\tfor(i=0; i<(P+1); 
i++){\n\t\t\t\tNodes[i] = xi[i];\n\t\t\t\tWeights[i] = wi[i];\n\t\t\t}\n\t\t} else {\n\t\t\tprintf(\"UNSUPPORTED CUBATURE N : %d \\n\", P+1);\n\t\t\texit(1);\n\t\t}\n\n\t} else{\n\t\tprintf(\"UNSUPPORTED CUBATURE TYPE \\n\");\n\t\texit(1);\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.714324414730072, "alphanum_fraction": 0.7148659825325012, "avg_line_length": 34.81553268432617, "blob_id": "c647c0a5818c2c10435148131e7db0cc6735c119", "content_id": "72501ccc942d5e977e72d75bf7a4762275d76fcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3693, "license_type": "no_license", "max_line_length": 122, "num_lines": 103, "path": "/Makefile", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "# Makefile for most code functionality\n\n# References\n# GNU Make manual\n\n# clean: Will remove the executable, object files, and dependency files\n\n# C Compiler\nCC := gcc\n\n# C standard\nCSTD := -std=c11\n\n# Directories for the program\nSRCDIR := src\nINCDIR := include\nOBJDIR := obj\nDEPDIR := depend\nEXECDIR := bin\n\n# Name given to the Executable generated. addprefix\n# will prepend the executable directory to the file name\nEXECUTABLE := DGSolver.exe\nEXECUTABLE := $(addprefix $(EXECDIR)/,$(EXECUTABLE))\n\nLOCAL_INC := -I./include \nLIBS := -framework Accelerate \nINCS := $(LOCAL_INC) \n\n# Here, wildcard will create a list of all files that match the given\n# pattern. This way we will get all src and header files.\n\n# get the list of all .c files in src\nSOURCES := $(wildcard $(SRCDIR)/*.c)\n\n# get the list of all .h files in include\nHEADERS := $(wildcard $(INCDIR)/*.h)\n\n# Here we are doing path replacement. That is, any filename\n# that matches the pattern with the % stem on the left\n# has everything other than the stem replaced with\n# the form on the right.\nOBJECTS := $(SOURCES:$(SRCDIR)/%.c=$(OBJDIR)/%.o)\nDEPENDS := $(SOURCES:$(SRCDIR)/%.c=$(DEPDIR)/%.d)\n\n# Virtual path to directories that make will search for a prereq file\n# that was not found. \nVPATH = ./src:./include\n\n# @ = suppresses the echoing of the command (printing twice)\n# $^ = place all prerequisites in the line with spaces in between them\n\n# Compile executable file (Default goal)\n$(EXECUTABLE) : $(OBJECTS)\n\t@echo\n\t@echo Creating/updating: $@\n\t@$(CC) -o $@ $^ $(INCS) $(LIBS)\n\t@echo\n\n# Include dependencies (Must be placed after default goal otherwise some \n# random file will be the default target). Here we are including all the \n# dependency lines by placing the lines in each dependency file here which\n# gives us how each object file relates to other header files. Included as the \n# target in each rule is also the dependency file which must be regenerated, like the \n# object file, if any prerequisites change.\ninclude $(DEPENDS)\n\n# include Depends will create a list of rules with the targets as object and\n# depend files and prerequisites being the files they both depend on. No recipe has\n# been defined yet however. Although a target can have multiple rules, it can only have \n# one recipe. When make executes, all prereqs from all rules are combined and the ONE\n# recipe that the target depends on (if a prereq changes) is run if needed. So, from\n# here to the end, specify the recipe to generate the object file and depend file, but\n# no prereqs are needed here since they are defined in the depends portion.\n\n# Using patterns in the rule will treat each form with the given stem (%) \n# using the format specified (i.e. 
it's the same as if each file was copied and pasted\n# in % and the whole rule was copied and pasted for each file)\n\n# $< = The name of the first prerequisite.\n# $@ = Variable for the target\n\n# Create object files\n$(OBJDIR)/%.o : %.c\n\t@echo Creating/updating: $@\n\t@$(CC) $(CSTD) -c -o $@ $< $(INCS)\n\n# Create the recipes for how to create the dependency file for each source file here. \n# Recall % is a pattern element so each dependency file's matching .c file can \n# be found easily using the pattern.\n\n# -MM = do not include files found in system directories (built in libraries that are linked)\n# -MG = assume missing header files are generated files and add them to the dependency list without raising an error.\n\n$(DEPDIR)/%.d : %.c\n\t@$(CC) -MM -MG $< > $@; # Use first prerequisite only as other prereqs are included from the existing %.d file\n\t@sed -i -e 's|.*:|$(OBJDIR)/$*.o $(DEPDIR)/$*.d:|' $@\n\t@echo Creating/updating: $@\n\n# Phony targets\n.PHONY: clean\nclean:\n\trm $(EXECUTABLE) $(OBJECTS) $(DEPENDS)\n\n\n\n\n" }, { "alpha_fraction": 0.49202126264572144, "alphanum_fraction": 0.5279255509376526, "avg_line_length": 18.518518447875977, "blob_id": "2319e552c62531617e206ed4540bcdb97439ce95", "content_id": "b12c5048ca06c8f865973d3c4c9323196216f955", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 91, "num_lines": 81, "path": "/src/exact_solutions.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"exact_solutions.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include \"Parameters.h\"\n#include \"S_DB.h\"\n\nvoid uniform_solution_InternalSubsonic(double *XYZ, double*W){\n\t\n\tdouble rho, u, v, w, P, eTot;\n\n\trho = DB.rhoInf;\n\tu = DB.MInf*DB.cInf;\n\tv = 0.0;\n\tw = 0.0;\n\tP = DB.pInf;\n\n\teTot = P/((GAMMA-1)*rho) + (u*u + v*v)/2.;\n\n\t// Compute the state vector\n\tW[0] = rho;\n\tW[1] = rho*u;\n\tW[2] = rho*v;\n\tW[3] = rho*eTot;\n\n}\n\nvoid exact_solution_IsentropicVortex(double *XYZ, double *W){\n\n\t/*\n\tPurpose:\n\t\t- Compute the W vector at the given location\n\n\tParameters:\n\t\tXYZ : Vector for the point on the physical domain\n\t\tW : The returned state vector at the XYZ point\n\t*/\n\n\tdouble \tCONST_Gamma, eStrength, x0, y0, x, y,\n\t\t\tr, uinf, vinf, u, v, delT, ro, P, eTot;\n\n\tCONST_Gamma = 1.4;\n\n\t//-------------------------------------------------\n\t//\t\t\t\tVortex Properties\n\n\teStrength = 5.0;\n\n\tx0 = 0.0;\n\ty0 = 0.0;\n\n\t//-------------------------------------------------\n\n\n\tx = XYZ[0];\n\ty = XYZ[1];\n\n\tr = sqrt((x-x0)*(x-x0) + (y-y0)*(y-y0));\n\n\tuinf = 1.0;\n\tvinf = 0.0;\n\n\tu = uinf - ((eStrength)/(2.*M_PI))*exp(0.5*(1.-r*r))*(y-y0);\n\tv = vinf + ((eStrength)/(2.*M_PI))*exp(0.5*(1.-r*r))*(x-x0);\n\n\tdelT = -(((CONST_Gamma-1.)*(eStrength*eStrength))/(8.*CONST_Gamma*M_PI*M_PI))*exp(1.-r*r);\n\n\tro = pow((1.+delT),(1./(CONST_Gamma-1)));\n\tP = pow(ro,(CONST_Gamma));\n\n\teTot = P/((CONST_Gamma-1)*ro) + (u*u + v*v)/2.;\n\n\t// Compute the state vector\n\tW[0] = ro;\n\tW[1] = ro*u;\n\tW[2] = ro*v;\n\tW[3] = ro*eTot;\n\n}\n\n\n" }, { "alpha_fraction": 0.7102137804031372, "alphanum_fraction": 0.7102137804031372, "avg_line_length": 29.14285659790039, "blob_id": "56b056fc64b2488bd2fed602abcbcd4fc9e44b38", "content_id": "c76936d07fe8ff42f3e4cb6c015579635a0bcc96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 421, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/include/boundary_conditions.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__boundary_conditions_h__INCLUDED\n#define DG__boundary_conditions_h__INCLUDED\n\n\nvoid boundary_SlipWall(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n);\n\nvoid boundary_BackPressure(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n);\n\nvoid boundary_Total_TP(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n);\n\n#endif // DG__boundary_conditions_h__INCLUDED" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.7804877758026123, "avg_line_length": 34.28571319580078, "blob_id": "daeb5daf8e38713b2681d52b9c23c32aba43bd22", "content_id": "c9e3e4a3416582e2ecb387556bb1a28341c40dea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 246, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/include/exact_solutions.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_exact_solutions_h__INCLUDED\n#define DG_exact_solutions_h__INCLUDED\n\nvoid exact_solution_IsentropicVortex(double *XYZ, double *W);\nvoid uniform_solution_InternalSubsonic(double *XYZ, double *W);\n\n#endif // DG_exact_solutions_h__INCLUDED" }, { "alpha_fraction": 0.7702702879905701, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 23.83333396911621, "blob_id": "61ff5c1b8ef0a91dba27a8b29994be2512b246da", "content_id": "0d0b6347f9b4bb58b42e12111a1402d85e62a5cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 148, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/include/solver_explicit.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG_solver_explicit_h__INCLUDED\n#define DG_solver_explicit_h__INCLUDED\n\nvoid solver_explicit(void);\n\n#endif // DG_solver_explicit_h__INCLUDED" }, { "alpha_fraction": 0.7175572514533997, "alphanum_fraction": 0.7175572514533997, "avg_line_length": 31.875, "blob_id": "a364d2252f6a5618f99555676485e3b158d1dcf1", "content_id": "2092ce027c0e6795d36c533a478597bba6be466e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 262, "license_type": "no_license", "max_line_length": 79, "num_lines": 8, "path": "/include/fluxes_inviscid.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#ifndef DG__fluxes_inviscid_h__INCLUDED\n#define DG__fluxes_inviscid_h__INCLUDED\n\n\nvoid flux_LF(double *WIn, double *WOut, double *FIn, double *FOut, double *GIn,\n\tdouble *GOut, double *FComm, double *nL, int P, int Neq);\n\n#endif //DG__fluxes_inviscid_h__INCLUDED" }, { "alpha_fraction": 0.5331471562385559, "alphanum_fraction": 0.5531811714172363, "avg_line_length": 20.493473052978516, "blob_id": "fd88670f338c5de38c1fc5aa1707563b6388d7cb", "content_id": "2d61009786a0c741fa4c7a1d506fae8289664894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8236, "license_type": "no_license", "max_line_length": 113, "num_lines": 383, "path": "/src/boundary_conditions.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"boundary_conditions.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include 
\"euler_flux.h\"\n#include \"S_DB.h\"\n#include \"Parameters.h\"\n\nvoid boundary_SlipWall(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n){\n\n\t/*\n\tImplement the slip wall boundary condition at the wall. This is done\n\tweakly through the numerical flux by making the state in a ghost cell\n\tflipped relative to the wall for the velocity so that the flux in\n\tvelocity normal to the wall is 0.\n\t- Note, due to how the boundary state is computed, it will not need\n\t\tto be flipped when computing the numerical flux.\n\n\tn = number of integration nodes for this face\n\t*/\n\n\n\t// Standard datatypes\n\tint i, j;\n\n\t// The boundary state\n\tdouble \t*rhoB, *rhouB, *rhovB, *rhoEB, rhoVL;\n\t// The inner volume state\n\tdouble\t*rhoL, *rhouL, *rhovL, *rhoEL;\n\n\t// For computing the euler flux vector at each boundary integration node point\n\tdouble W[4], F[4], G[4];\n\n\tdouble *nL_x, *nL_y;\n\n\tnL_x = &nL[0];\n\tnL_y = &nL[n];\n\n\t// Conservative State Vector at each integration node\n\trhoL = &WL[n*0];\n\trhouL = &WL[n*1];\n\trhovL = &WL[n*2];\n\trhoEL = &WL[n*3];\n\n\trhoB = &WB[n*0];\n\trhouB = &WB[n*1];\n\trhovB = &WB[n*2];\n\trhoEB = &WB[n*3];\n\n\t// Density and total energy are equivalent in ghost cell\n\tfor (i = 0; i < n; i++) {\n\t\trhoB[i] = rhoL[i];\n\t\trhoEB[i] = rhoEL[i];\n\t}\n\n\t// Set velocity components of the boundary condition\n\tfor (i = 0; i < n; i++) {\n\t\trhoVL = nL_x[i]*rhouL[i]+nL_y[i]*rhovL[i];\n\n\t\trhouB[i] = rhouL[i]-2.0*rhoVL*nL_x[i];\n\t\trhovB[i] = rhovL[i]-2.0*rhoVL*nL_y[i];\n\t}\n\t\n\t// Set up the flux vectors for the boundary state\n\tfor (i=0; i<n; i++){\n\t\t\n\t\tW[0] = rhoB[i];\n\t\tW[1] = rhouB[i];\n\t\tW[2] = rhovB[i];\n\t\tW[3] = rhoEB[i];\n\n\t\teuler_flux_2D(W, F, G);\n\n\t\tfor(j=0; j<4; j++){\n\t\t\tFB[j*n + i] = F[j];\n\t\t\tGB[j*n + i] = G[j];\n\t\t}\n\t}\n\n\tif(DB.Testing == 1){\n\t\t// Print the state at the wall:\n\t\tprintf(\"Boundary State (SLIPWALL): \\n\");\n\n\t\tfor (i=0; i<n; i++){\n\t\t\tprintf(\"\tFace Node : %d \\n\", i);\n\t\t\tprintf(\"\t\t[nx, ny] : [%f, %f] \\n\", nL_x[i], nL_y[i]);\n\t\t\tprintf(\"\t\trhoL, rhoB : %f, %f \\n\", rhoL[i], rhoB[i]);\n\t\t\tprintf(\"\t\trhouL, rhouB : %f, %f \\n\", rhouL[i], rhouB[i]);\n\t\t\tprintf(\"\t\trhovL, rhovB : %f, %f \\n\", rhovL[i], rhovB[i]);\n\t\t\tprintf(\"\t\trhoEL, rhoEB : %f, %f \\n\", rhoEL[i], rhoEB[i]);\n\t\t}\n\n\t}\n\n\n}\n\nvoid boundary_BackPressure(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n) {\n\n\t/*\n\t *\tPurpose:\n\t *\t\tImpose back Pressure (outflow) boundary condition.\n\t *\n\t *\tReferences:\n\t *\t\tCarlson(2011): 2.4\n\t */\n\n\n\t// Standard datatypes\n\tint i, j;\n\n\t// The boundary state\n\tdouble \t*rhoB, *rhouB, *rhovB, *rhoEB;\n\t// The inner volume state\n\tdouble\t*rhoL, *rhouL, *rhovL, *rhoEL;\n\n\tdouble T_i, p_i, rho_i, rho_i_inv, \n\t\t\tV_i, u_i, v_i, eTot_i, p_b, rho_b;\n\n\tdouble c_i, c_i_2;\n\n\t// For computing the euler flux vector at each boundary integration node point\n\tdouble W[4], F[4], G[4];\n\n\tdouble *nL_x, *nL_y;\n\n\tnL_x = &nL[0];\n\tnL_y = &nL[n];\n\n\t// Conservative State Vector at each integration node\n\trhoL = &WL[n*0];\n\trhouL = &WL[n*1];\n\trhovL = &WL[n*2];\n\trhoEL = &WL[n*3];\n\n\trhoB = &WB[n*0];\n\trhouB = &WB[n*1];\n\trhovB = &WB[n*2];\n\trhoEB = &WB[n*3];\n\n\tfor (i=0; i<n; i++){\n\t\t// Loop over all the n face integration nodes\n\n\n\t\t// Get inner volume's states\n\t\trho_i = rhoL[i];\n\t\trho_i_inv = 1./rho_i;\n\n\t\tu_i = 
rhouL[i]*rho_i_inv;\n\t\tv_i = rhovL[i]*rho_i_inv;\n\n\t\tV_i = u_i*u_i + v_i*v_i;\n\t\tV_i = sqrt(V_i);\n\n\t\teTot_i = rhoEL[i]*rho_i_inv;\n\n\t\tp_i = (GAMMA-1)*rho_i*(eTot_i - 0.5*V_i*V_i);\n\n\t\tc_i_2 = GAMMA*p_i/rho_i; // speed of sound squared\n\t\tc_i = sqrt(c_i_2);\n\n\t\tif(fabs(V_i) >= c_i){\n\t\t\t// Supersonic Flow\n\t\t\t// \t\tp_b = p_i (set pressure has no influence)\n\t\t\tp_b = p_i;\n\n\t\t} else{\n\t\t\t// Subsonic Flow\n\t\t\t// \t\tp_b = p_set\n\n\t\t\tp_b = DB.pBack;\n\t\t}\t\n\n\t\t// Set the boundary state:\n\t\t//\t- T_b = T_i (adiabatic flow with V constant)\n\n\t\trho_b = GAMMA*p_b/c_i_2;\n\n\t\trhoB[i] = rho_b;\n\t\trhouB[i] = rho_b*u_i;\n\t\trhovB[i] = rho_b*v_i;\n\n\t\trhoEB[i] = rho_b*(p_b/((GAMMA-1)*rho_b) + 0.5*V_i*V_i);\n\n\t}\n\t\n\t// Set up the flux vectors for the boundary state\n\tfor (i=0; i<n; i++){\n\t\t\n\t\tW[0] = rhoB[i];\n\t\tW[1] = rhouB[i];\n\t\tW[2] = rhovB[i];\n\t\tW[3] = rhoEB[i];\n\n\t\teuler_flux_2D(W, F, G);\n\n\t\tfor(j=0; j<4; j++){\n\t\t\tFB[j*n + i] = F[j];\n\t\t\tGB[j*n + i] = G[j];\n\t\t}\n\t}\n\n\tif(DB.Testing == 1){\n\t\t// Print the state at the wall:\n\t\tprintf(\"Boundary State (BACK PRESSURE): \\n\");\n\n\t\tfor (i=0; i<n; i++){\n\t\t\tprintf(\"\tFace Node : %d \\n\", i);\n\t\t\tprintf(\"\t\t[nx, ny] : [%f, %f] \\n\", nL_x[i], nL_y[i]);\n\t\t\tprintf(\"\t\trhoL, rhoB : %f, %f \\n\", rhoL[i], rhoB[i]);\n\t\t\tprintf(\"\t\trhouL, rhouB : %f, %f \\n\", rhouL[i], rhouB[i]);\n\t\t\tprintf(\"\t\trhovL, rhovB : %f, %f \\n\", rhovL[i], rhovB[i]);\n\t\t\tprintf(\"\t\trhoEL, rhoEB : %f, %f \\n\", rhoEL[i], rhoEB[i]);\n\t\t}\n\n\t}\n\n}\n\nvoid boundary_Total_TP(double *WL, double *nL, double *WB, \n\tdouble *FB, double *GB, int n){\n\n\t/*\n\t *\tPurpose:\n\t *\t\tImpose total (P)ressure/(T)emperature (inflow) boundary condition.\n\t *\n\t *\tComments:\n\t *\t\teq. (38/47) in Carlson(2011) implies that the velocity should be normal to the boundary. 
As the direction of\n\t *\t\tthe flow velocity cannot be known, this implies that this boundary condition is not physically correct...\n\t *\n\t *\tReferences:\n\t *\t\tCarlson(2011): 2.7\n\t *\t\tToro(2009): (3.9), (8.58)\n\t */\n\n\n\t// Standard datatypes\n\tint i, j;\n\n\t// The boundary state\n\tdouble \t*rhoB, *rhouB, *rhovB, *rhoEB;\n\t// The inner volume state\n\tdouble\t*rhoL, *rhouL, *rhovL, *rhoEL;\n\n\tdouble T_i, p_i, rho_i, rho_i_inv, \n\t\t\tV_i, Vn_i, u_i, v_i, eTot_i;\n\n\tdouble H_i, R_i; \n\n\tdouble \tp_Total = DB.p_Total,\n\t\t\tT_Total = DB.T_Total;\n\n\tdouble c_i, c_i_2;\n\n\t// For computing the Euler flux vector at each boundary integration node point\n\tdouble W[4], F[4], G[4];\n\n\tdouble *nL_x, *nL_y;\n\n\tnL_x = &nL[0];\n\tnL_y = &nL[n];\n\n\t// Conservative State Vector at each integration node\n\trhoL = &WL[n*0];\n\trhouL = &WL[n*1];\n\trhovL = &WL[n*2];\n\trhoEL = &WL[n*3];\n\n\trhoB = &WB[n*0];\n\trhouB = &WB[n*1];\n\trhovB = &WB[n*2];\n\trhoEB = &WB[n*3];\n\n\tfor (i=0; i<n; i++){\n\t\t\n\t\t// Get inner volume's states\n\t\trho_i = rhoL[i];\n\t\trho_i_inv = 1./rho_i;\n\n\t\tu_i = rhouL[i]*rho_i_inv;\n\t\tv_i = rhovL[i]*rho_i_inv;\n\n\t\tV_i = u_i*u_i + v_i*v_i;\n\t\tV_i = sqrt(V_i);\n\n\t\teTot_i = rhoEL[i]*rho_i_inv;\n\n\t\tp_i = (GAMMA-1)*rho_i*(eTot_i - 0.5*V_i*V_i);\n\n\t\tc_i_2 = GAMMA*p_i/rho_i; // speed of sound squared\n\t\tc_i = sqrt(c_i_2);\n\n\t\t// Total Enthalpy (constant outside domain as well)\n\t\tH_i = (p_i/rho_i)*(GAMMA/(GAMMA-1)) + 0.5*V_i*V_i;\n\n\t\t// Normal component of velocity \n\t\tVn_i = u_i*nL_x[i] + v_i*nL_y[i];\n\n\t\tR_i = Vn_i + 2.0*c_i/(GAMMA-1);\n\n\t\t// Solve for c_b\n\n\t\tdouble \taQ, bQ, cQ, term1, term2, cM, cP, \n\t\t\t\tc_b, V_b, M_b, T_b, p_b, rho_b, u_b, v_b, eTot_b;\n\n\t\taQ = 1+2.0/(GAMMA-1);\n\t\tbQ = -2.0*R_i;\n\t\tcQ = 0.5*(GAMMA-1)*(R_i*R_i - 2.0*H_i);\n\n\t\tterm1 = -bQ/(2.0*aQ);\n\t\tterm2 = sqrt(bQ*bQ-4.0*aQ*cQ)/(2.0*aQ);\n\n\t\tcM = term1-term2;\n\t\tcP = term1+term2;\n\n\t\t// c = max(cM,cP)\n\t\tif (cM > cP){\n\t\t\tc_b = cM;\n\t\t} else{\n\t\t\tc_b = cP;\n\t\t}\n\n\t\t// The Riemann invariant of the boundary state is set equal to that of the inner state\n\t\tV_b = R_i - 2.0*c_b/(GAMMA-1);\n\n\t\t// Mach number of boundary flow.\n\t\tM_b = V_b/c_b;\n\n\t\t// Use isentropic relations to get static conditions now at the boundary\n\t\tT_b = T_Total/(1.0+0.5*(GAMMA-1)*M_b*M_b);\n\t\tp_b = p_Total*pow(T_b/T_Total, GAMMA/(GAMMA-1));\n\n\t\trho_b = p_b/(DB.Rg*T_b);\n\t\tu_b = V_b*nL_x[i];\n\t\tv_b = V_b*nL_y[i];\n\n\t\teTot_b = p_b/((GAMMA-1)*rho_b) + 0.5*(u_b*u_b + v_b*v_b);\n\n\t\t// Get boundary state (conservative variables)\n\t\trhoB[i] = rho_b;\n\t\trhouB[i] = rho_b*u_b;\n\t\trhovB[i] = rho_b*v_b;\n\t\trhoEB[i] = rho_b*eTot_b;\n\n\t}\n\t\n\t// Set up the flux vectors for the boundary state\n\tfor (i=0; i<n; i++){\n\t\t\n\t\tW[0] = rhoB[i];\n\t\tW[1] = rhouB[i];\n\t\tW[2] = rhovB[i];\n\t\tW[3] = rhoEB[i];\n\n\t\teuler_flux_2D(W, F, G);\n\n\t\tfor(j=0; j<4; j++){\n\t\t\tFB[j*n + i] = F[j];\n\t\t\tGB[j*n + i] = G[j];\n\t\t}\n\t}\n\n\tif(DB.Testing == 1){\n\t\t// Print the state at the boundary:\n\t\tprintf(\"Boundary State (TOTAL TP): \\n\");\n\n\t\tfor (i=0; i<n; i++){\n\t\t\tprintf(\"\tFace Node : %d \\n\", i);\n\t\t\tprintf(\"\t\t[nx, ny] : [%f, %f] \\n\", nL_x[i], nL_y[i]);\n\t\t\tprintf(\"\t\trhoL, rhoB : %f, %f \\n\", rhoL[i], rhoB[i]);\n\t\t\tprintf(\"\t\trhouL, rhouB : %f, %f \\n\", rhouL[i], rhouB[i]);\n\t\t\tprintf(\"\t\trhovL, rhovB : %f, %f \\n\", rhovL[i], rhovB[i]);\n\t\t\tprintf(\"\t\trhoEL, rhoEB : %f, %f 
\\n\", rhoEL[i], rhoEB[i]);\n\t\t}\n\n\t}\n\n\n}\n\n\n\n" }, { "alpha_fraction": 0.5733333230018616, "alphanum_fraction": 0.5843333601951599, "avg_line_length": 22.77777862548828, "blob_id": "9e188112363c32fdf2321a84503d08c14eb86ce0", "content_id": "98852df68ea6d3b9a8f746e788c8302aea771fc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3000, "license_type": "no_license", "max_line_length": 94, "num_lines": 126, "path": "/src/output_solution.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"output_solution.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\n#include \"S_VOLUME.h\"\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n\n#include \"matrix_functions.h\"\n\n\nvoid output_tecplot(int t){\n\t/*\n\tPurpose:\n\t\tOutput the solution in tecplot format in order to \n\t\tvisualize it\n\t*/\n\n\tstruct S_VOLUME *VOLUME;\n\tint i;\n\tdouble *XVals, *YVals, *W_Sol_P, *roVec, *ro_uVec, *ro_vVec, *eVec, x, y, \n\t\tro, u, v, e_tot;\n \tFILE *fp;\n\n \tchar *filename;\n \tfilename = malloc(200 * sizeof *filename);\n \tchar buffer[100];\n\tsprintf(buffer, \"%d\", t);\n\n \tstrcpy(filename, \"../output_tecplot/output\");\n\tstrcat(filename,buffer);\n\tstrcat(filename,\".dat\");\n\n \tfp = fopen(filename, \"w+\");\n\n \tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n \t\tfprintf(fp,\"VARIABLES = \\\"X\\\", \\\"Y\\\", \\\"ro\\\", \\\"u\\\", \\\"v\\\", \\\"e_tot\\\"\");\n \t\tfprintf(fp, \" \\n\");\n \t\tfprintf(fp,\"ZONE I=%d, J=%d, DATAPACKING=POINT\\n\",VOLUME->P+1,VOLUME->P+1);\n\n \t\tXVals = &VOLUME->XYZ_P[0];\n \t\tYVals = &VOLUME->XYZ_P[VOLUME->NvnP];\n\n\t\tW_Sol_P = malloc(VOLUME->NvnP*VOLUME->NVar* sizeof *W_Sol_P); // free\n\t\tmm_CNN(VOLUME->NvnP, VOLUME->NvnG, VOLUME->NVar, DB.ELEMENT->Chi_vP, VOLUME->What, W_Sol_P);\n\n\t\t// Get the solution wHat at the solution points\n\t\troVec = &(W_Sol_P[0]);\n\t\tro_uVec = &(W_Sol_P[1*VOLUME->NvnP]);\n\t\tro_vVec = &(W_Sol_P[2*VOLUME->NvnP]);\n\t\teVec = &(W_Sol_P[3*VOLUME->NvnP]);\n\n\t\t// Loop over all the solution nodes on this volume and print the \n\t\t// the w vector at each solution node.\n\t\tfor (i=0; i<VOLUME->NvnP; i++){\n\t\t\tx = XVals[i];\n\t\t\ty = YVals[i];\n\n\t\t\tro = roVec[i];\n\t\t\tu = ro_uVec[i]/ro;\n\t\t\tv = ro_vVec[i]/ro;\n\t\t\te_tot = eVec[i]/ro;\n\n\t\t\tfprintf(fp, \"%e %e %e %e %e %e\\n\", x,y,ro,u,v,e_tot);\n\t\t}\n\n\t\tfree(W_Sol_P);\n \t}\n \tfclose(fp);\n\n \tfree(filename);\n\n}\n\n\nvoid outputWSol(void){\n\n\t/*\n\tPurpose:\n\t\tOutput the solution at each solution node point\n\t*/\n\n\tstruct S_VOLUME *VOLUME;\n\tint i;\n\tdouble *XVals, *YVals, *roVec, *ro_uVec, *ro_vVec, *eVec, x, y, \n\t\t\tro, ro_u, ro_v, e;\n\tdouble *W_Sol;\n\t\n\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next) {\n\n\t\t// Solution node locations. 
Recall that values are stored in \n\t\t// column major form.\t\n\t\tXVals = &(VOLUME->XYZ_S[0]);\n\t\tYVals = &(VOLUME->XYZ_S[1*VOLUME->NvnS]);\n\n\t\tW_Sol = malloc(VOLUME->NvnS*VOLUME->NVar* sizeof *W_Sol);\n\n\t\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, VOLUME->NVar, DB.ELEMENT->Chi_vS, VOLUME->What, W_Sol);\n\n\t\t// Get the solution wHat at the solution points\n\t\troVec = &(W_Sol[0]);\n\t\tro_uVec = &(W_Sol[1*VOLUME->NvnS]);\n\t\tro_vVec = &(W_Sol[2*VOLUME->NvnS]);\n\t\teVec = &(W_Sol[3*VOLUME->NvnS]);\n\n\t\t// Loop over all the solution nodes on this volume and print the \n\t\t// the w vector at each solution node.\n\t\tfor (i=0; i<VOLUME->NvnS; i++){\n\t\t\tx = XVals[i];\n\t\t\ty = YVals[i];\n\n\t\t\tro = roVec[i];\n\t\t\tro_u = ro_uVec[i];\n\t\t\tro_v = ro_vVec[i];\n\t\t\te = eVec[i];\n\n\t\t\tprintf(\"x: %.14f, y: %.14f \\n\", x, y);\n\t\t\tprintf(\" [ro, ro*u, ro*v, e] = [%.14f, %.14f, %.14f, %.14f] \\n\", ro, ro_u, ro_v, e);\n\n\t\t}\n\n\t\tfree(W_Sol);\n\t}\n}\n\n\n\n" }, { "alpha_fraction": 0.6029850840568542, "alphanum_fraction": 0.6761193871498108, "avg_line_length": 18.705883026123047, "blob_id": "fa26d7e9848331c8a9a56a7b77f272e8d3a12b13", "content_id": "35801f576514096dd5f8e9cb24e18b2a4be1d6a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 670, "license_type": "no_license", "max_line_length": 36, "num_lines": 34, "path": "/include/Parameters.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG__Parameters_h__INCLUDED\n#define DG__Parameters_h__INCLUDED\n\n\n// Common variables\n#define PI 3.1415926535897932\n#define GAMMA 1.4\n#define GM1 0.4\n#define GM3 -1.6\n\n// Boundary conditions\n#define BC_RIEMANN 1\n#define BC_SLIPWALL 2\n#define BC_BACKPRESSURE 3\n#define BC_TOTAL_TP 4\n#define BC_SUPERSONIC_IN 5\n#define BC_SUPERSONIC_OUT 6\n\n#define BC_NOSLIP_T 7\n#define BC_NOSLIP_ADIABATIC 8\n\n#define BC_DIRICHLET 11\n#define BC_NEUMANN 12\n\n#define BC_INFLOW 13\n#define BC_OUTFLOW 14\n\n#define BC_PERIODIC\t\t15\n#define BC_INTERNAL\t\t16\n\n// Solution\n#define EPS 1.0e-15\n\n#endif // DG__Parameters_h__INCLUDED" }, { "alpha_fraction": 0.7532467246055603, "alphanum_fraction": 0.7532467246055603, "avg_line_length": 24.33333396911621, "blob_id": "62cd834528f88957d0bb79e91a1d6cb646ed167f", "content_id": "198f1d6367c0ac7594f81ceccc9ec4ff665b02cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 154, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/include/compute_errors.h", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#ifndef DG_compute_errors_h__INCLUDED\n#define DG_compute_errors_h__INCLUDED\n\nvoid compute_errors_global(void);\n\n#endif // DG_compute_errors_h__INCLUDED\n\n" }, { "alpha_fraction": 0.6015081405639648, "alphanum_fraction": 0.6235498785972595, "avg_line_length": 24.701492309570312, "blob_id": "0c9070557ec94895ef1dfa696a1f164cdfc8cb9b", "content_id": "21e489f9c119a906b2128b06c49de6a03b7494d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1724, "license_type": "no_license", "max_line_length": 69, "num_lines": 67, "path": "/src/setup_geom_factors.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_geom_factors.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n#include 
\"matrix_functions.h\"\n\n\nvoid setup_geom_factors(struct S_VOLUME *VOLUME){\n\n\t/*\n\tPurpose:\n\t\tSetup the metric terms and the Jacobian. For now, everything is\n\t\tcollocated so the metric terms will be found at the integration\n\t\tpoints which are at the solution nodes.\n\n\t*/\n\n\tint i;\n\tdouble *GradChi_vS_xi, *GradChi_vS_eta;\n\tdouble *x_xi, *x_eta, *y_xi, *y_eta;\n\tdouble *XYZ_x, *XYZ_y;\n\tdouble *C_vS, *C_vS_11, *C_vS_12, *C_vS_21, *C_vS_22, *detJV_vS;\n\n\tXYZ_x = &VOLUME->XYZ[0];\n\tXYZ_y = &VOLUME->XYZ[VOLUME->NvnG];\n\n\tGradChi_vS_xi = DB.ELEMENT->GradChi_vS_xi;\n\tGradChi_vS_eta = DB.ELEMENT->GradChi_vS_eta;\n\n\tx_xi = malloc(VOLUME->NvnS*1* sizeof *x_xi); // free\n\tx_eta = malloc(VOLUME->NvnS*1* sizeof *x_xi); // free\n\ty_xi = malloc(VOLUME->NvnS*1* sizeof *x_xi); // free\n\ty_eta = malloc(VOLUME->NvnS*1* sizeof *x_xi); // free\n\n\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, 1, GradChi_vS_xi, XYZ_x, x_xi);\n\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, 1, GradChi_vS_xi, XYZ_y, y_xi);\n\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, 1, GradChi_vS_eta, XYZ_x, x_eta);\n\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, 1, GradChi_vS_eta, XYZ_y, y_eta);\n\n\t// Store the results in the Cofactor matrix and Jacobian vector\n\n\tC_vS_11 = &VOLUME->C_vS[0];\n\tC_vS_12 = &VOLUME->C_vS[1*VOLUME->NvnS];\n\tC_vS_21 = &VOLUME->C_vS[2*VOLUME->NvnS];\n\tC_vS_22 = &VOLUME->C_vS[3*VOLUME->NvnS];\n\n\tfor(i=0; i<VOLUME->NvnS; i++){\n\t\tC_vS_11[i] = y_eta[i];\n\t\tC_vS_12[i] = -y_xi[i];\n\t\tC_vS_21[i] = -x_eta[i];\n\t\tC_vS_22[i] = x_xi[i];\n\n\t\tVOLUME->detJV_vS[i] = x_xi[i]*y_eta[i] - y_xi[i]*x_eta[i];\n\t}\n\n\t\n\n\tfree(x_xi);\n\tfree(x_eta);\n\tfree(y_xi);\n\tfree(y_eta);\n\n}\n\n" }, { "alpha_fraction": 0.6451063752174377, "alphanum_fraction": 0.6502127647399902, "avg_line_length": 19.928571701049805, "blob_id": "4f3eabf950d6c785b112239d84ee66ad63ea7817", "content_id": "67b5f15f26bb03f66af21674b86efef0030e4237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 88, "num_lines": 56, "path": "/src/finalize_RHS.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "#include \"finalize_RHS.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n#include \"S_VOLUME.h\"\n#include \"matrix_functions.h\"\n\ndouble finalize_RHS(void){\n\n\t/*\n\tPurpose:\n\t\tCompute the RHS for the volume using the RHS_FACE and RHS_VOL\n\t\tfor each volume. 
Then, multiply the RHS by MInv to get \n\tthe rate of change of each modal coefficient.\n\t*/\n\n\tint i;\n\tstruct S_VOLUME *VOLUME;\n\n\tdouble maxRHS = 0;\n\n\tdouble *temp_RHS; // temporary RHS matrix (before MInv is applied)\n\n\tdouble resVec[4];\n\n\tfor(i=0; i<4; i++){\n\t\tresVec[i] = 0;\n\t}\n\n\tfor(VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next){\n\n\t\ttemp_RHS = malloc(VOLUME->NvnG*VOLUME->NVar* sizeof *temp_RHS); // free\n\n\t\t// Add both RHS contributions\n\t\tfor(i=0; i<VOLUME->NvnG*VOLUME->NVar; i++){\n\t\t\ttemp_RHS[i] = VOLUME->RHS_VOL[i] - VOLUME->RHS_FACE[i];\t\n\t\t\tif(fabs(temp_RHS[i]) > maxRHS && i<VOLUME->NvnG){\n\t\t\t\t// Compute the LInf Norm of RHS for the density\n\t\t\t\tmaxRHS = fabs(temp_RHS[i]);\n\t\t\t}\n\t\t}\n\n\t\t// Multiply MInv to get RHS\n\t\tmm_CNN(VOLUME->NvnG, VOLUME->NvnG, VOLUME->NVar, VOLUME->MInv, temp_RHS, VOLUME->RHS);\n\n\t\tfree(temp_RHS);\n\n\t}\n\n\treturn maxRHS;\n\n}\n\n\n\n" }, { "alpha_fraction": 0.5848178267478943, "alphanum_fraction": 0.5947095155715942, "avg_line_length": 28.933937072753906, "blob_id": "c5dcfd1783613d8fee7b4dab6c1c7bb4693cf5c3", "content_id": "d53f9d1e8cd0338eb5b988456b2c791221aeb76d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 22342, "license_type": "no_license", "max_line_length": 105, "num_lines": 772, "path": "/src/setup_operators.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"setup_operators.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n#include <string.h>\n\n#include \"memory_constructors.h\"\n#include \"cubature.h\"\n#include \"S_DB.h\"\n#include \"S_ELEMENT.h\"\n#include \"bases.h\"\n#include \"matrix_functions.h\"\n\n\n/*\n * Purpose:\n *\tSet up operators to be used throughout the code for the reference \n *\telements.\n * \n *\tNotation:\n *\n * (Grad)Chi(Ref)(1)_(3)(4) : \n *\t\t(Grad)ient (optional) (Ref)erence (optional) Basis functions (Chi) of type\n *\t\t\t(1) evaluated at (3) nodes of (4)\n *\t\t\t(1/4): (P)lotting, (G)eometry, (C)ofactor, (I)ntegration, (S)olution\n *\t\t\t(3): (v)olume, (f)ace, (e)dge\n *\n *\tI_(1)(2)_(3)(4) : (I)nterpolation operator from (1) nodes of type (2) to (3) nodes of type (4)\n *\t\t(1/3): (v)olume, (f)ace\n *\t\t(2/4): (P)lotting, (G)eometry, (I)ntegration, (S)olution\n *\t\n */\n\n\nstatic void setup_Interpolation_operators(void){\n\n\t/*\n\tPurpose:\n\t\tSet up the interpolation operators for the reference element\n\t\tusing the Chi operators which have been set up already. \n\n\t\tNOTE: Interpolation always goes from the nodal values to their modal\n\t\tvalues first. Then, the modal values are multiplied by the basis evaluated\n\t\tat a set of points to get the nodal values at those points. 
\n\n\t\t\tnodal_1 -> modal -> nodal_2\n\n\t\tex : Therefore, Interpolation operators are of the form:\n\t\t\tI_vG_vS = Chi_vS * inv(Chi_vG)\n\t*/\n\n\tstruct S_ELEMENT *ELEMENT;\n\tELEMENT = DB.ELEMENT;\n\n\t// -------------------------------------------\n\t//\t\t\tI_vG_vS and I_vS_vG\n\t// -------------------------------------------\n\tdouble *I_vG_vS, *I_vS_vG, *Chi_vS, *ChiInv_vG;\n\n\t// Interpolation Operator\n\tI_vG_vS = ELEMENT->I_vG_vS;\n\n\t// Chi Operators\n\tChi_vS = ELEMENT->Chi_vS;\n\tChiInv_vG = mm_inv_d_alloc(ELEMENT->NvnG, ELEMENT->Chi_vG); // free\n\n\t// I_vG_vS:\n\tmm_CNN(ELEMENT->NvnS, ELEMENT->NvnG, ELEMENT->NvnG, Chi_vS, ChiInv_vG, I_vG_vS);\t\n\t\n\t// I_vS_vG:\n\tmm_inv_d_secondInPlace(ELEMENT->NvnS, I_vG_vS, ELEMENT->I_vS_vG);\n\n\tfree(ChiInv_vG);\n\n\t// -------------------------------------------\n\t//\t\t\t\t\tI_vG_vP\n\t// -------------------------------------------\n\tdouble *I_vG_vP, *Chi_vP;\n\n\t// Interpolation Operator\n\tI_vG_vP = ELEMENT->I_vG_vP;\n\n\t// Chi Operators\n\tChi_vP = ELEMENT->Chi_vP;\n\tChiInv_vG = mm_inv_d_alloc(ELEMENT->NvnG, ELEMENT->Chi_vG); // free\n\n\t// I_vG_vP:\n\tmm_CNN(ELEMENT->NvnP, ELEMENT->NvnG, ELEMENT->NvnG, Chi_vP, ChiInv_vG, I_vG_vP);\t\n\n\tfree(ChiInv_vG);\n\n\t// -------------------------------------------\n\t//\t\t\t\t\tI_vS_fI\n\t// -------------------------------------------\n\tdouble *I_vS_fI, *Chi_fI, *ChiInv_vS;\n\n\t// Interpolation Operator\n\tI_vS_fI = ELEMENT->I_vS_fI;\n\n\t// Chi Operators\n\tChi_fI = ELEMENT->Chi_fI;\n\tChiInv_vS = ELEMENT->ChiInv_vS;\n\n\t// I_vS_fI:\n\tmm_CNN(ELEMENT->NfnI, ELEMENT->NvnG, ELEMENT->NvnG, Chi_fI, ChiInv_vS, I_vS_fI);\t\n\n}\n\n\nstatic void setup_Chi_operators(void){\n\t/*\n\tPurpose:\n\t\tSet up the chi operators (basis function evaluated \n\t\tat different points on the computational domain). These operators\n\t\twill then be used to make the interpolation operators and solve the\n\t\tflow\n\t*/\n\n\tstruct S_ELEMENT *ELEMENT;\n\tELEMENT = DB.ELEMENT;\n\n\tint basis_i, basis_j, basis_index, node_index,i,j;\n\n\t// -------------------------------------------\n\t//\t\t\tChi_vS and ChiInv_vS\n\t// -------------------------------------------\n\tdouble *Chi_vS, *ChiInv_vS, *XiEtaZeta_S_xi, *XiEtaZeta_S_eta;\n\n\t// Create the Chi_vS (Vandermonde matrix): \n\t// \tV = Interpolation from geometry node points to volume nodes of type solution\n\n\t// There are NvnG basis functions (one for each geometry node point due to the mapping).\n\t// There are NvnS solution points. Therefore, Chi_vS is of size NvnS x NvnG. All matrices\n\t// are stored in column major form.\n\n\tChi_vS = ELEMENT->Chi_vS;\n\t\n\t// The xi,eta values of the solution nodes on comp domain\n\tXiEtaZeta_S_xi = &(ELEMENT->XiEtaZeta_S[0]);\n\tXiEtaZeta_S_eta = &(ELEMENT->XiEtaZeta_S[ELEMENT->NvnS]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0 ;node_index<ELEMENT->NvnS; node_index++){\n\n\t\t\t\t//Loop over the solution points on reference element. Order is all\n\t\t\t\t// j=0, j=1, ...\n\n\t\t\t\t// Store the value in V. 
node_index is the row and basis_index is \n\t\t\t\t// the column of the entry of the matrix.\n\t\t\t\tif(strstr(DB.BasisType,\"Polynomial\")){\n\t\t\t\t\tChi_vS[basis_index*ELEMENT->NvnS + node_index] = basis_TP_Lagrange_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_S_xi[node_index], XiEtaZeta_S_eta[node_index]);\n\t\t\t\t}else if(strstr(DB.BasisType,\"NURBS\")){\n\t\t\t\t\tChi_vS[basis_index*ELEMENT->NvnS + node_index] = basis_TP_NURBS_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->xiVector, ELEMENT->etaVector, XiEtaZeta_S_xi[node_index], XiEtaZeta_S_eta[node_index]);\n\t\t\t\t} else{\n\t\t\t\t\tprintf(\"UNRECOGNIZED BASIS \\n\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n\tChiInv_vS = ELEMENT->ChiInv_vS;\n\n\tfor(i=0; i<ELEMENT->NvnS*ELEMENT->NvnG; i++){\n\t\tChiInv_vS[i] = ELEMENT->Chi_vS[i];\n\t}\n\n\tmm_inv_d(ELEMENT->NvnG, ChiInv_vS);\n\n\t// -------------------------------------------\n\t//\t\tGradChi_vS_xi and GradChi_vS_eta\n\t// -------------------------------------------\n\n\tdouble *GradChi_vS_xi, *GradChi_vS_eta;\n\tdouble *grad;\n\n\tGradChi_vS_xi = ELEMENT->GradChi_vS_xi;\n\tGradChi_vS_eta = ELEMENT->GradChi_vS_eta;\n\n\t// The xi,eta values of the solution nodes on comp domain. Already\n\t// declared when setting the previous operators (Chi_vS)\n\tXiEtaZeta_S_xi = &(ELEMENT->XiEtaZeta_S[0]);\n\tXiEtaZeta_S_eta = &(ELEMENT->XiEtaZeta_S[ELEMENT->NvnS]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0; node_index<ELEMENT->NvnS; node_index++){\n\n\t\t\t\t//Loop over the solution points on reference element. 
Order is all\n\t\t\t\t// j=0, j=1, ...\n\n\t\t\t\t// grad is allocated in function\n\t\t\t\tif(strstr(DB.BasisType,\"Polynomial\")){\n\t\t\t\t\tgrad = basis_TP_Lagrange_2D_Grad(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_S_xi[node_index], XiEtaZeta_S_eta[node_index]);\n\t\t\t\t}else if(strstr(DB.BasisType,\"NURBS\")){\n\t\t\t\t\tgrad = basis_TP_NURBS_2D_Grad(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->xiVector, ELEMENT->etaVector, XiEtaZeta_S_xi[node_index], XiEtaZeta_S_eta[node_index]);\n\t\t\t\t} else{\n\t\t\t\t\tprintf(\"UNRECOGNIZED BASIS \\n\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\n\t\t\t\tGradChi_vS_xi[basis_index*ELEMENT->NvnS + node_index] = grad[0];\n\t\t\t\tGradChi_vS_eta[basis_index*ELEMENT->NvnS + node_index] = grad[1];\n\n\t\t\t\tfree(grad);\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n\t// -------------------------------------------\n\t//\t\tGradChi_fI_xi and GradChi_fI_eta\n\t// -------------------------------------------\n\n\t// Compute gradient at the face integration nodes (for finding\n\t// the metric terms)\n\tdouble *GradChi_fI_xi, *GradChi_fI_eta, *XiEtaZeta_F_xi, *XiEtaZeta_F_eta;\n\n\tGradChi_fI_xi = ELEMENT->GradChi_fI_xi;\n\tGradChi_fI_eta = ELEMENT->GradChi_fI_eta;\n\n\t// Xi Eta values at face integration nodes on reference domain\n\tXiEtaZeta_F_xi = &(ELEMENT->XiEtaZeta_F[0]);\n\tXiEtaZeta_F_eta = &(ELEMENT->XiEtaZeta_F[ELEMENT->NfnI]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0; node_index<ELEMENT->NfnI; node_index++){\n\n\t\t\t\t//Loop over the face integration points on reference element.\n\n\t\t\t\t// grad is allocated in function\n\t\t\t\tif(strstr(DB.BasisType,\"Polynomial\")){\n\t\t\t\t\tgrad = basis_TP_Lagrange_2D_Grad(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_F_xi[node_index], XiEtaZeta_F_eta[node_index]);\n\t\t\t\t}else if(strstr(DB.BasisType,\"NURBS\")){\n\t\t\t\t\tgrad = basis_TP_NURBS_2D_Grad(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->xiVector, ELEMENT->etaVector, XiEtaZeta_F_xi[node_index], XiEtaZeta_F_eta[node_index]);\n\t\t\t\t} else{\n\t\t\t\t\tprintf(\"UNRECOGNIZED BASIS \\n\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\n\t\t\t\tGradChi_fI_xi[basis_index*ELEMENT->NfnI + node_index] = grad[0];\n\t\t\t\tGradChi_fI_eta[basis_index*ELEMENT->NfnI + node_index] = grad[1];\n\n\t\t\t\tfree(grad);\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n\n\n\t// -------------------------------------------\n\t//\t\t\tChi_vG (not used in NURBS)\n\t// -------------------------------------------\n\n\tdouble *Chi_vG, *XiEtaZeta_G_xi, *XiEtaZeta_G_eta;\n\n\tChi_vG = ELEMENT->Chi_vG;\n\n\t// The xi,eta values of the geometry nodes on comp domain\n\tXiEtaZeta_G_xi = &(ELEMENT->XiEtaZeta_G[0]);\n\tXiEtaZeta_G_eta = &(ELEMENT->XiEtaZeta_G[ELEMENT->NvnG]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0; node_index<ELEMENT->NvnG; node_index++){\n\n\t\t\t\t//Loop over the solution points on reference element. Order is all\n\t\t\t\t// j=0, j=1, ...\n\n\t\t\t\t// Store the value in V. 
node_index is the row and basis_index is \n\t\t\t\t// the column of the entry of the matrix.\n\t\t\t\tChi_vG[basis_index*ELEMENT->NvnG + node_index] = basis_TP_Lagrange_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_G_xi[node_index], XiEtaZeta_G_eta[node_index]);\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n\t// -------------------------------------------\n\t//\t\t\t\t\tChi_vP\n\t// -------------------------------------------\n\n\tdouble *Chi_vP, *XiEtaZeta_P_xi, *XiEtaZeta_P_eta;\n\n\tChi_vP = ELEMENT->Chi_vP;\n\n\t// The xi,eta values of the geometry nodes on comp domain\n\tXiEtaZeta_P_xi = &(ELEMENT->XiEtaZeta_P[0]);\n\tXiEtaZeta_P_eta = &(ELEMENT->XiEtaZeta_P[ELEMENT->NvnP]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0; node_index<ELEMENT->NvnP; node_index++){\n\n\t\t\t\t//Loop over the solution points on reference element. Order is all\n\t\t\t\t// j=0, j=1, ...\n\n\t\t\t\t// Store the value in V. node_index is the row and basis_index is \n\t\t\t\t// the column of the entry of the matrix.\n\t\t\t\tif(strstr(DB.BasisType,\"Polynomial\")){\n\t\t\t\t\tChi_vP[basis_index*ELEMENT->NvnP + node_index] = basis_TP_Lagrange_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_P_xi[node_index], XiEtaZeta_P_eta[node_index]);\n\t\t\t\t}else if(strstr(DB.BasisType,\"NURBS\")){\n\t\t\t\t\tChi_vP[basis_index*ELEMENT->NvnP + node_index] = basis_TP_NURBS_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->xiVector, ELEMENT->etaVector, XiEtaZeta_P_xi[node_index], XiEtaZeta_P_eta[node_index]);\n\t\t\t\t} else{\n\t\t\t\t\tprintf(\"UNRECOGNIZED BASIS \\n\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n\t// -------------------------------------------\n\t//\t\t\t\t\tChi_fI\n\t// -------------------------------------------\n\tdouble *Chi_fI;\n\n\tChi_fI = ELEMENT->Chi_fI;\n\n\t// The xi,eta values of the geometry nodes on comp domain\n\tXiEtaZeta_F_xi = &(ELEMENT->XiEtaZeta_F[0]);\n\tXiEtaZeta_F_eta = &(ELEMENT->XiEtaZeta_F[ELEMENT->NfnI]);\n\n\tbasis_index = 0;\n\tfor(basis_j=0; basis_j<(ELEMENT->P+1); basis_j++){\n\t\tfor(basis_i=0; basis_i<(ELEMENT->P+1); basis_i++){\n\t\t\t// Loop over all the basis functions (order is same as ordering\n\t\t\t// of geometry node points (all j=0, j=1, ...))\n\n\t\t\tfor(node_index=0; node_index<ELEMENT->NfnI; node_index++){\n\n\t\t\t\t//Loop over the solution points on reference element. Order is all\n\t\t\t\t// j=0, j=1, ...\n\n\t\t\t\t// Store the value in V. 
node_index is the row and basis_index is \n\t\t\t\t// the column of the entry of the matrix.\n\t\t\t\tif(strstr(DB.BasisType,\"Polynomial\")){\n\t\t\t\t\tChi_fI[basis_index*ELEMENT->NfnI + node_index] = basis_TP_Lagrange_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->XiEtaZeta_G, XiEtaZeta_F_xi[node_index], XiEtaZeta_F_eta[node_index]);\n\t\t\t\t} else if(strstr(DB.BasisType,\"NURBS\")){\n\t\t\t\t\tChi_fI[basis_index*ELEMENT->NfnI + node_index] = basis_TP_NURBS_2D(ELEMENT->P, basis_i, basis_j, \n\t\t\t\t\t\tELEMENT->xiVector, ELEMENT->etaVector, XiEtaZeta_F_xi[node_index], XiEtaZeta_F_eta[node_index]);\n\t\t\t\t} else{\n\t\t\t\t\tprintf(\"UNRECOGNIZED BASIS \\n\");\n\t\t\t\t\texit(1);\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tbasis_index++;\n\t\t}\n\t}\n\n}\n\nstatic void setup_reference_element(){\n\t\n\t/*\n\tPurpose:\n\t\tSet up the reference element struct (S_ELEMENT)\n\t*/\n\n\tint i,j,k;\n\tstruct S_ELEMENT *ELEMENT;\n\tELEMENT = DB.ELEMENT;\n\tdouble \t*XiEtaZeta_S, // Reference Element Nodes at Solution points\n\t\t\t*XiEtaZeta_S_x, *XiEtaZeta_S_y,\n\t\t\t*XiEtaZeta_G, // Reference Element Nodes at Geometry points\n\t\t\t*XiEtaZeta_G_x, *XiEtaZeta_G_y,\n\t\t\t*XiEtaZeta_P, // Reference Element Nodes at Plotting points\n\t\t\t*XiEtaZeta_P_x, *XiEtaZeta_P_y,\n\t\t\t*XiEtaZeta_F, // Reference Element Nodes at Face Integration points\n\t\t\t*XiEtaZeta_F_x, *XiEtaZeta_F_y;\n\n\n\tdouble xi, eta, deltaXi_G, deltaEta_G, deltaXi_P, deltaEta_P;\n\n\t// -----------------------------------\n\t//\t\t\tProperties\n\t// -----------------------------------\n\n\tELEMENT->P = DB.P;\n\tELEMENT->d = DB.d;\n\n\t// Get number of geometry and solution nodes\n\tELEMENT->NvnG = (ELEMENT->P+1);\n\tfor(i=0; i<ELEMENT->d-1; i++){\n\t\tELEMENT->NvnG = (ELEMENT->NvnG)*(ELEMENT->P+1);\n\t}\n\tELEMENT->NvnS = ELEMENT->NvnG;\n\tELEMENT->NvnP = ELEMENT->NvnG;\n\tELEMENT->NfnI = 4*(ELEMENT->P+1);\n\n\t// -----------------------------------\n\t//\t\t\tSolution Points\n\t// -----------------------------------\n\n\tdouble *Cubature_xi, *Cubature_wi;\n\tCubature_xi = malloc((ELEMENT->P+1)* sizeof *Cubature_xi);\n\tCubature_wi = malloc((ELEMENT->P+1)* sizeof *Cubature_wi);\n\n\tcubature_literature(ELEMENT->P, DB.NodeType, Cubature_xi, Cubature_wi);\n\n\t// Compute Solution Point Locations on Ref. Elem. This is a matrix of \n\t// size NvnS x d stored in column major form.\n\tXiEtaZeta_S = malloc(ELEMENT->NvnS*ELEMENT->d* sizeof *XiEtaZeta_S);\n\n\tXiEtaZeta_S_x = &XiEtaZeta_S[0];\n\tXiEtaZeta_S_y = &XiEtaZeta_S[ELEMENT->NvnS];\n\n\t// Ordering of reference solution nodes is all eta = -1, then increase eta\n\tk = 0;\n\tfor(j=0; j<(ELEMENT->P+1); j++){\n\t\tfor(i=0; i<(ELEMENT->P+1); i++){\n\n\t\t\txi = Cubature_xi[i];\n\t\t\teta = Cubature_xi[j];\n\n\t\t\tXiEtaZeta_S_x[k] = xi;\n\t\t\tXiEtaZeta_S_y[k] = eta;\n\n\t\t\tk++;\n\t\t}\n\t}\n\n\t// -----------------------------------\n\t//\tGeometry Points (Used in polynomial basis)\n\t// -----------------------------------\n\n\t// Compute Geometry Node locations on reference element. Geometry\n\t// node points are equally spaced out on the element. The ordering of\n\t// the points is all eta = -1,...\n\n\tXiEtaZeta_G = malloc(ELEMENT->NvnG*ELEMENT->d* sizeof *XiEtaZeta_G);\n\tXiEtaZeta_G_x = &XiEtaZeta_G[0];\n\tXiEtaZeta_G_y = &XiEtaZeta_G[ELEMENT->NvnG];\n\n\tdeltaXi_G = (2.)/((double)(ELEMENT->P));\n\tdeltaEta_G = (2.)/((double)(ELEMENT->P));\n\n\tk = 0;\n\tfor(j=0; j<(ELEMENT->P+1); j++){\n\t\tfor(i=0; i<(ELEMENT->P+1); i++){\n\n\t\t\txi = -1. + i*deltaXi_G;\n\t\t\teta = -1. 
+ j*deltaEta_G;\n\n\t\t\tXiEtaZeta_G_x[k] = xi;\n\t\t\tXiEtaZeta_G_y[k] = eta;\n\n\t\t\tk++;\n\t\t}\n\t}\n\n\t// -----------------------------------\n\t//\t\t\tKnot Vectors\n\t// -----------------------------------\n\n\t// Even if the mesh is not using a NURBS basis, set up the knot vector as if it \n\t// were being used, based on the mesh. Now, to make the NURBS work on the\n\t// reference element, simply set the limits to be at -1 to 1 with the correct\n\t// multiplicity. Note that we widen the knot vector range by a small amount\n\t// in case we are evaluating points exactly at the limits of the element\n\n\tdouble *xiVector, *etaVector;\n\txiVector = malloc((ELEMENT->P+1)*2* sizeof *xiVector);\n\tetaVector = malloc((ELEMENT->P+1)*2* sizeof *etaVector);\n\n\tfor(i=0; i<ELEMENT->P+1; i++){\n\t\txiVector[i] = -1 - 1E-14;\n\t\tetaVector[i] = -1 - 1E-14;\n\t}\n\n\tfor(i=0; i<ELEMENT->P+1; i++){\n\t\txiVector[i+ELEMENT->P+1] = 1 + 1E-14;\n\t\tetaVector[i+ELEMENT->P+1] = 1 + 1E-14;\n\t}\n\n\n\n\t// -----------------------------------\n\t//\t\t\tPlotting Points\n\t// -----------------------------------\n\n\t// Compute the plotting point locations on the reference element. There\n\t// will be P+1 plotting points along each coordinate direction spaced equally.\n\n\tXiEtaZeta_P = malloc(ELEMENT->NvnP*ELEMENT->d* sizeof *XiEtaZeta_P);\n\tXiEtaZeta_P_x = &XiEtaZeta_P[0];\n\tXiEtaZeta_P_y = &XiEtaZeta_P[ELEMENT->NvnP];\n\n\tdeltaXi_P = (2.)/((double)(ELEMENT->P));\n\tdeltaEta_P = (2.)/((double)(ELEMENT->P));\n\n\tk = 0;\n\tfor(j=0; j<(ELEMENT->P+1); j++){\n\t\tfor(i=0; i<(ELEMENT->P+1); i++){\n\n\t\t\txi = -1. + i*deltaXi_P;\n\t\t\teta = -1. + j*deltaEta_P;\n\n\t\t\tXiEtaZeta_P_x[k] = xi;\n\t\t\tXiEtaZeta_P_y[k] = eta;\n\n\t\t\tk++;\n\t\t}\n\t}\n\n\t// -----------------------------------\n\t//\t\tFace Integration Points\n\t// -----------------------------------\n\n\t// Compute the face integration point locations. 
Order of the nodes\n\t// in the array is counterclockwise starting from (xi,eta) = (-1, -1).\n\t// Note, cubature array is in increasing order.\n\n\tXiEtaZeta_F = malloc(ELEMENT->NfnI*ELEMENT->d* sizeof *XiEtaZeta_F);\n\tXiEtaZeta_F_x = &XiEtaZeta_F[0];\n\tXiEtaZeta_F_y = &XiEtaZeta_F[ELEMENT->NfnI];\n\t \n\tint faceNodeCumul, face, faceNode;\n\n\tfaceNodeCumul = 0;\n\tfor(face = 0; face<4; face++){\n\t\tfor(faceNode = 0; faceNode < ELEMENT->P+1; faceNode++){\n\n\t\t\tif(face == 0){\n\t\t\t\txi = Cubature_xi[faceNode];\n\t\t\t\teta = -1;\n\n\t\t\t} else if(face == 1){\n\t\t\t\txi = 1;\n\t\t\t\teta = Cubature_xi[faceNode];\n\n\t\t\t} else if(face == 2){\n\t\t\t\txi = -Cubature_xi[faceNode];\n\t\t\t\teta = 1;\n\n\t\t\t} else{\n\t\t\t\txi = -1;\n\t\t\t\teta = -Cubature_xi[faceNode];\n\n\t\t\t}\n\n\t\t\tXiEtaZeta_F_x[faceNodeCumul] = xi;\n\t\t\tXiEtaZeta_F_y[faceNodeCumul] = eta;\n\t\t\tfaceNodeCumul++;\n\t\t}\n\t}\n\n\t// -----------------------------------\n\t//\t\t\t\tTesting\n\t// -----------------------------------\n\n\tif (DB.Testing == 1 || DB.Testing == 4){\n\t\t// Print the coordinates of the solution nodes on the reference element\n\t\tprintf(\"Geometry: \\n\");\n\t\tfor(k=0; k<ELEMENT->NvnG; k++){\n\t\t\tprintf(\"(xi,eta) = (%f, %f) \\n\", \tXiEtaZeta_G[k], \n\t\t\t\t\t\t\t\t\t\t\t\tXiEtaZeta_G[ELEMENT->NvnG+k]);\n\t\t}\n\n\t\tprintf(\"Solution: \\n\");\n\t\tfor(k=0; k<ELEMENT->NvnS; k++){\n\t\t\tprintf(\"(xi,eta) = (%f, %f) \\n\", \tXiEtaZeta_S[k], \n\t\t\t\t\t\t\t\t\t\t\t\tXiEtaZeta_S[ELEMENT->NvnS+k]);\n\t\t}\n\n\t\tprintf(\"Plotting: \\n\");\n\t\tfor(k=0; k<ELEMENT->NvnP; k++){\n\t\t\tprintf(\"(xi,eta) = (%f, %f) \\n\", \tXiEtaZeta_P[k], \n\t\t\t\t\t\t\t\t\t\t\t\tXiEtaZeta_P[ELEMENT->NvnP+k]);\n\t\t}\n\n\t}\n\n\t// Allocate arrays for the different operators to be used\n\n\t// Chi Operators\n\tELEMENT->Chi_vS = malloc(ELEMENT->NvnS*ELEMENT->NvnG* sizeof *ELEMENT->Chi_vS);\n\tELEMENT->ChiInv_vS = malloc(ELEMENT->NvnS*ELEMENT->NvnG* sizeof *ELEMENT->ChiInv_vS);\n\tELEMENT->Chi_vG = malloc(ELEMENT->NvnG*ELEMENT->NvnG* sizeof *ELEMENT->Chi_vG);\n\tELEMENT->Chi_vP = malloc(ELEMENT->NvnP*ELEMENT->NvnG* sizeof *ELEMENT->Chi_vP);\n\tELEMENT->Chi_fI = malloc(ELEMENT->NvnG*ELEMENT->NfnI* sizeof *ELEMENT->Chi_fI);\n\n\t// Derivative Operators\n\tELEMENT->GradChi_vS_xi = malloc(ELEMENT->NvnS*ELEMENT->NvnG* sizeof *ELEMENT->GradChi_vS_xi);\n\tELEMENT->GradChi_vS_eta = malloc(ELEMENT->NvnS*ELEMENT->NvnG* sizeof *ELEMENT->GradChi_vS_eta);\t\n\tELEMENT->GradChi_fI_xi = malloc(ELEMENT->NfnI*ELEMENT->NvnG* sizeof *ELEMENT->GradChi_fI_xi);\n\tELEMENT->GradChi_fI_eta = malloc(ELEMENT->NfnI*ELEMENT->NvnG* sizeof *ELEMENT->GradChi_fI_eta);\n\n\t// Interpolation Operators\n\tELEMENT->I_vG_vP = malloc(ELEMENT->NvnP*ELEMENT->NvnG* sizeof *ELEMENT->I_vG_vP);\n\tELEMENT->I_vG_vS = malloc(ELEMENT->NvnG*ELEMENT->NvnS* sizeof *ELEMENT->I_vG_vS);\n\tELEMENT->I_vS_vG = malloc(ELEMENT->NvnS*ELEMENT->NvnG* sizeof *ELEMENT->I_vS_vG);\n\tELEMENT->I_vS_fI = malloc(ELEMENT->NvnS*ELEMENT->NfnI* sizeof *ELEMENT->I_vS_fI);\n\n\t// Save reference element node locations and other cubature data\n\tELEMENT->nodes_xi = Cubature_xi;\n\tELEMENT->nodes_wi = Cubature_wi;\n\n\tELEMENT->XiEtaZeta_S = XiEtaZeta_S;\n\tELEMENT->XiEtaZeta_G = XiEtaZeta_G;\n\tELEMENT->XiEtaZeta_P = XiEtaZeta_P;\n\tELEMENT->XiEtaZeta_F = XiEtaZeta_F;\n\n\tELEMENT->xiVector = xiVector;\n\tELEMENT->etaVector = etaVector;\n\n}\n\n\nvoid setup_operators(void){\n\n\tprintf(\"Setup Operators \\n\");\n\n\tstruct S_ELEMENT *ELEMENT;\n\tELEMENT = 
New_ELEMENT();\n\tDB.ELEMENT = ELEMENT;\n\n\tsetup_reference_element();\n\n\tsetup_Chi_operators();\n\n\tsetup_Interpolation_operators();\n\n\tif(DB.Testing == 1 || DB.Testing == 3){\n\t\t//Print V (Vandermonde matrix)\n\n\t\tint r, c;\n\n\t\tprintf(\"Chi_vS : Vandermonde \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnS; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %.15f \", ELEMENT->Chi_vS[c*ELEMENT->NvnS + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"ChiInv_vS : Inverse Vandermonde \\n\");\n\n\t\tfor(r=0; r<ELEMENT->NvnS; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %.15f \", ELEMENT->ChiInv_vS[c*ELEMENT->NvnS + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"GradChi_vS_xi : Gradient Computational Domain \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnS; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->GradChi_vS_xi[c*ELEMENT->NvnS + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"GradChi_vS_eta : Gradient Computational Domain \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnS; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->GradChi_vS_eta[c*ELEMENT->NvnS + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"Chi_vG : \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnG; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->Chi_vG[c*ELEMENT->NvnG + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"Chi_vP : \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnP; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->Chi_vP[c*ELEMENT->NvnP + r]);\n\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"Chi_fI : \\n\");\n\t\tfor(r=0; r<ELEMENT->NfnI; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->Chi_fI[c*ELEMENT->NfnI + r]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"I_vG_vS : \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnS; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->I_vG_vS[c*ELEMENT->NvnS + r]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"I_vS_vG : \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnG; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnS; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->I_vS_vG[c*ELEMENT->NvnG + r]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"I_vG_vP : \\n\");\n\t\tfor(r=0; r<ELEMENT->NvnP; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnG; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->I_vG_vP[c*ELEMENT->NvnP + r]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t\tprintf(\"I_vS_fI : \\n\");\n\t\tfor(r=0; r<ELEMENT->NfnI; r++){\n\t\t\tfor(c=0; c<ELEMENT->NvnS; c++){\n\t\t\t\tprintf(\" %f \", ELEMENT->I_vS_fI[c*ELEMENT->NvnS + r]);\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\n\t}\n\n}\n\n\n\n\n" }, { "alpha_fraction": 0.5786485075950623, "alphanum_fraction": 0.599659264087677, "avg_line_length": 25.238805770874023, "blob_id": "678860f849d889bfd97f774599f8ceedd1c84d85", "content_id": "0cbc645b80d177c16768e553b7422fc5e080be37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1761, "license_type": "no_license", "max_line_length": 79, "num_lines": 67, "path": "/src/fluxes_inviscid.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"fluxes_inviscid.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\nvoid flux_LF(double *WIn, double *WOut, double *FIn, double *FOut, double *GIn,\n\tdouble *GOut, double *FComm, double *nIn, int P, int 
Neq){\n\n\t/*\n\tPurpose:\n\t\tCompute the numerical flux at all the integration nodes on the face\n\t\tusing the Lax-Friedrichs numerical flux. The result returned is a matrix\n\t\tholding the numerical flux for all equations at each integration point:\n\t\tmatrix of size nSol x nEq\n\t*/\n\n\tint sol_i, eq_i;\n\n\tdouble GAMMA = 1.4;\n\n\tdouble FAvg, GAvg, diffW;\n\tdouble roIn, uIn, vIn, eIn, PIn;\n\tdouble roOut, uOut, vOut, eOut, POut;\n\tdouble lambdaIn, lambdaOut, lambda;\n\tdouble nx, ny;\n\n\tfor(sol_i=0; sol_i<(P+1); sol_i ++){\n\t\t// Loop over the integration nodes\n\n\t\troIn = WIn[sol_i];\n\t\tuIn = WIn[sol_i + 1*(P+1)]/roIn;\n\t\tvIn = WIn[sol_i + 2*(P+1)]/roIn;\n\t\teIn = WIn[sol_i + 3*(P+1)]/roIn;\n\t\tPIn = (GAMMA-1)*roIn*(eIn - 0.5*(uIn*uIn + vIn*vIn));\n\n\t\troOut = WOut[sol_i];\n\t\tuOut = WOut[sol_i + 1*(P+1)]/roOut;\n\t\tvOut = WOut[sol_i + 2*(P+1)]/roOut;\n\t\teOut = WOut[sol_i + 3*(P+1)]/roOut;\n\t\tPOut = (GAMMA-1)*roOut*(eOut - 0.5*(uOut*uOut + vOut*vOut));\n\n\t\tlambdaIn = sqrt(uIn*uIn + vIn*vIn) + sqrt(fabs(GAMMA*PIn/roIn));\n\t\tlambdaOut = sqrt(uOut*uOut + vOut*vOut) + sqrt(fabs(GAMMA*POut/roOut));\n\n\t\tif(lambdaIn > lambdaOut){\n\t\t\tlambda = lambdaIn;\n\t\t} else{\n\t\t\tlambda = lambdaOut;\n\t\t}\n\n\t\tnx = nIn[sol_i];\n\t\tny = nIn[sol_i + (P+1)];\t\t\n\n\t\tfor(eq_i=0; eq_i<Neq; eq_i ++){\n\t\t\t// Loop over the equations\n\n\t\t\tFAvg = 0.5*(FIn[eq_i*(P+1) + sol_i] + FOut[eq_i*(P+1) + sol_i]);\n\t\t\tGAvg = 0.5*(GIn[eq_i*(P+1) + sol_i] + GOut[eq_i*(P+1) + sol_i]);\n\t\t\tdiffW = WIn[eq_i*(P+1) + sol_i] - WOut[eq_i*(P+1) + sol_i];\n\n\t\t\tFComm[eq_i*(P+1) + sol_i] = nx*FAvg + ny*GAvg + 0.5*lambda*diffW;\n\n\t\t}\n\t}\n\n}\n\n\n" }, { "alpha_fraction": 0.6916666626930237, "alphanum_fraction": 0.6916666626930237, "avg_line_length": 14.565217018127441, "blob_id": "cf8b6de91ced3085d4935080292515f9b753e146", "content_id": "f828053f1ee026ebf6da17c1c4942c6d0a85d8a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 360, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/src/memory_destructors.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"memory_destructors.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n\n#include \"S_DB.h\"\n\n\nvoid memory_destructor_E (struct S_ELEMENT *ELEMENT){\n\tfree(ELEMENT);\n}\n\nvoid memory_destructor_V (struct S_VOLUME *VOLUME){\n\tfree(VOLUME);\n}\n\nvoid memory_destructor_F (struct S_FACE *FACE){\n\tfree(FACE);\n}\n\nvoid memory_destructor_BC (struct S_BC *BC){\n\tfree(BC);\n}\n" }, { "alpha_fraction": 0.592052161693573, "alphanum_fraction": 0.6004346609115601, "avg_line_length": 18.490909576416016, "blob_id": "dd273abb3d7da8c5622a80d595663b7c7933d93f", "content_id": "df2704855778a807fe4b81777e4983bd960cec58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3221, "license_type": "no_license", "max_line_length": 79, "num_lines": 165, "path": "/src/matrix_functions.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n\n#include \"matrix_functions.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <Accelerate/Accelerate.h>\n\n\n\nvoid mm_CNN(int m, int k, int n, double *A, double *B, double *C){\n\n\t/*\n\n\tA = m x k\n\tB = k x n\n\tC = m x n\n\n\tPurpose:\n\t\tCompute matrix-matrix and matrix-vector products:\n\t\t\t0) return C = A*B\n\t\t\t1) Column-major storage of A\n\t\t\t2) Column-major storage of B and 
C\n\t\t\n\t\tComments:\n\t\t\tThe 'CNN' function name refers to:\n\t\t\t\tC : Column-major output\n\t \t\t\tN : No Transpose of A\n\t \t\t\tN : No transpose of B\n\n\t*/\n\n\tint lda = m;\n\tint ldb = k;\n\tint ldc = m;\n\n\tcblas_dgemm (102, 111, 111, m, n, k, 1.0, A, lda, B, ldb, 0.0, C, ldc);\n\n}\n\nvoid mm_transposeR_d(int m, int n, double *A){\n\t\n\t/*\n\tPurpose:\n\t\tCompute the transpose of the matrix A (in column major form) which is of size\n\t\tm x n. The result is written back into A in column major form.\n\t*/\n\n\tdouble *transA;\n\tint i,j,k;\n\n\ttransA = malloc(m*n* sizeof *transA);\n\n\tk=0;\n\tfor(i=0; i<m; i++){\n\t\tfor(j=0; j<n; j++){\n\t\t\t// keep row fixed, loop through columns\n\n\t\t\ttransA[k++] = A[j*m + i];\n\t\t}\n\t}\n\n\t// Copy the transpose back into A so the caller actually sees the result;\n\t// assigning A = transA would only change the local pointer and leak transA\n\tfor(i=0; i<m*n; i++){\n\t\tA[i] = transA[i];\n\t}\n\n\tfree(transA);\n\n}\n\nvoid mm_inv_d(int N, double *matrix) {\n\n\t/*\n\tPurpose:\n\t\tCompute the inverse of a matrix using LAPACK. \n\t\tMatrix is in column major form and result is \n\t\treturned in column major form as well.\n\t*/\n\n    int error=0;\n    int *pivot = malloc(N*sizeof(int));\n    double *workspace = malloc(N*sizeof(double));\n\n    /* LU factorisation */\n    dgetrf_(&N, &N, matrix, &N, pivot, &error);\n\n    if (error != 0) {\n        free(pivot);\n        free(workspace);\n        return;\n    }\n\n    /* matrix inversion */\n    dgetri_(&N, matrix, &N, pivot, workspace, &N, &error);\n\n    if (error != 0) {\n        free(pivot);\n        free(workspace);\n        return;\n    }\n\n    // Now, inversion was done in row major form (transpose was inverted)\n    // Compute the transpose of the inverse to get the inverse in column\n    // major form\n    mm_transposeR_d(N, N, matrix);\n\n    free(pivot);\n    free(workspace);\n}\n\nvoid mm_inv_d_secondInPlace(int N, double *matrix, double *matrixInv) {\n\n\t/*\n\tPurpose:\n\t\tCompute the inverse of a matrix using LAPACK. \n\t\tMatrix is in column major form and result is \n\t\treturned in column major form as well.\n\t*/\n\n\tint i;\n\tfor(i=0; i<N*N; i++){\n\t\tmatrixInv[i] = matrix[i];\n\t}\n\n    int error=0;\n    int *pivot = malloc(N*sizeof(int));\n    double *workspace = malloc(N*sizeof(double));\n\n    /* LU factorisation */\n    dgetrf_(&N, &N, matrixInv, &N, pivot, &error);\n\n    if (error != 0) {\n        free(pivot);\n        free(workspace);\n        return;\n    }\n\n    /* matrix inversion */\n    dgetri_(&N, matrixInv, &N, pivot, workspace, &N, &error);\n\n    if (error != 0) {\n        free(pivot);\n        free(workspace);\n        return;\n    }\n\n    // Now, inversion was done in row major form (transpose was inverted)\n    // Compute the transpose of the inverse to get the inverse in column\n    // major form\n    mm_transposeR_d(N, N, matrixInv);\n\n    free(pivot);\n    free(workspace);\n}\n\ndouble *mm_inv_d_alloc(int N, double *matrix){\n\t\n\t/*\n\tPurpose:\n\t\tCompute the inverse of the matrix but do not modify\n\t\tthe one that is sent as an argument.\n\t*/\n\n\tint i;\n\tdouble *matrix_inv;\n\tmatrix_inv = malloc(N*N * sizeof *matrix_inv);\n\n\tfor(i=0; i<N*N; i++){\n\t\tmatrix_inv[i] = matrix[i];\n\t}\n\n\tmm_inv_d(N, matrix_inv);\n\n\treturn matrix_inv;\n}\n\n\n" }, { "alpha_fraction": 0.5681318640708923, "alphanum_fraction": 0.58681321144104, "avg_line_length": 20.341176986694336, "blob_id": "fa7a7d3131a77a14323a197cd71940791ddf5787", "content_id": "9ff72efc2eed9e6345cb062b2643fa17140b5b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1820, "license_type": "no_license", "max_line_length": 92, "num_lines": 85, "path": "/src/compute_errors.c", "repo_name": "manmeetb/IGA-DG-Deprecated", "src_encoding": "UTF-8", "text": "\n#include \"compute_errors.h\"\n\n#include <stdlib.h>\n#include <stdio.h>\n#include 
<math.h>\n\n#include \"S_DB.h\"\n#include \"S_VOLUME.h\"\n#include \"S_ELEMENT.h\"\n\n#include \"exact_solutions.h\"\n#include \"matrix_functions.h\"\n\nvoid compute_errors_global(){\n\n\t/*\n\tPurpose:\n\t\tCompute the error of the flow against the exact solution\n\t*/\n\n\tstruct S_VOLUME *VOLUME;\n\tint i, j, r, c;\n\tdouble *XVals, *YVals, *roVec, *ro_uVec, *ro_vVec, *eVec, x, y, \n\t\t\tro, ro_u, ro_v, e;\n\tdouble *W_Sol;\n\tdouble total_dof;\n\n\tdouble error_L2[4];\n\tfor(i=0; i<4; i++){\n\t\terror_L2[i] = 0;\n\t}\n\n\tdouble W_exact[4], XYZ_vec[2];\n\n\ttotal_dof = 0;\n\n\tfor (VOLUME = DB.VOLUME_HEAD; VOLUME; VOLUME = VOLUME->next) {\n\n\t\ttotal_dof = total_dof + VOLUME->NvnS;\n\n\t\t// Solution node locations. Recall that values are stored in \n\t\t// column major form.\t\n\t\tXVals = &(VOLUME->XYZ_S[0]);\n\t\tYVals = &(VOLUME->XYZ_S[1*VOLUME->NvnS]);\n\n\t\tW_Sol = malloc(VOLUME->NvnS*VOLUME->NVar* sizeof *W_Sol);\n\n\t\tmm_CNN(VOLUME->NvnS, VOLUME->NvnG, VOLUME->NVar, DB.ELEMENT->Chi_vS, VOLUME->What, W_Sol);\n\n\t\t// Loop over all the solution nodes on this volume and get the difference\n\t\ti = 0;\n\t\tfor(r = 0; r<VOLUME->P+1; r++){\n\t\t\tfor(c=0; c<VOLUME->P+1; c++){\n\t\t\t\tx = XVals[i];\n\t\t\t\ty = YVals[i];\n\n\t\t\t\tXYZ_vec[0] = x;\n\t\t\t\tXYZ_vec[1] = y;\n\n\t\t\t\texact_solution_IsentropicVortex(XYZ_vec, W_exact);\n\n\t\t\t\tfor(j=0; j<4; j++){\n\t\t\t\t\terror_L2[j] = error_L2[j] + \n\t\t\t\t\t\t(W_Sol[j*VOLUME->NvnS + i] - W_exact[j])*(W_Sol[j*VOLUME->NvnS + i] - W_exact[j])*\n\t\t\t\t\t\tDB.ELEMENT->nodes_wi[r]*DB.ELEMENT->nodes_wi[c]*VOLUME->detJV_vS[i];\n\t\t\t\t}\n\t\t\t\ti++;\n\t\t\t}\n\t\t}\n\n\t\tfree(W_Sol);\n\t}\n\n\tprintf(\"L2 ERROR : \\n\");\n\tprintf(\"	numDOF : %f \\n\", total_dof);\n\t// Normalize the error (the 16.*16. divisor is the domain area) and print the results:\n\tfor(i=0; i<4; i++){\n\t\terror_L2[i] = sqrt(error_L2[i]/(16.*16.));\n\n\t\tprintf(\"	w : %d -> %.14e \\n\", i, error_L2[i]);\n\n\t}\n\n\n}\n\n\n\n\n" } ]
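The quadrature loop in compute_errors_global above evaluates a discrete L2 norm. Written out as a formula, this is a sketch of what the code computes (the 1/(16 x 16) factor mirrors the hard-coded domain-area divisor in the source, which is an inference from the code, not a documented constant):

\[
\|w_j - w_j^{ex}\|_{L^2} \approx \sqrt{ \frac{1}{16 \times 16} \sum_{V} \sum_{r,c} \left( w_{j,h}(\xi_c,\eta_r) - w_j^{ex}(\xi_c,\eta_r) \right)^2 \, \omega_r \, \omega_c \, |J_V(\xi_c,\eta_r)| }
\]

where the outer sum runs over all volumes V, \(\omega_r\) and \(\omega_c\) are the 1D cubature weights (nodes_wi), and \(|J_V|\) is the Jacobian determinant detJV_vS at each solution node.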
66
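For reference, the kernel in flux_LF in the record above assembles the standard local Lax-Friedrichs flux. A sketch of the formula the inner loop evaluates, with notation paraphrased from the code rather than from any project documentation:

\[
F^{*} = \tfrac{1}{2}(F_L + F_R)\,n_x + \tfrac{1}{2}(G_L + G_R)\,n_y + \tfrac{1}{2}\,\lambda\,(W_L - W_R), \qquad \lambda = \max_{s \in \{L,R\}} \left( \sqrt{u_s^2 + v_s^2} + \sqrt{\gamma P_s / \rho_s} \right)
\]

with \(\gamma = 1.4\) and the pressure \(P_s\) recovered from the conserved variables exactly as in the code.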
Iriday/JBA_HyperJob-Agency
https://github.com/Iriday/JBA_HyperJob-Agency
816add40bafc85bec9f74514295e84b57da6aec4
142aa48f0c06ee1427091539d00ff0bf6c00948d
a28a936e245a87f02c2e010e40bff063b57cc1b6
refs/heads/master
2022-11-21T03:42:20.595353
2020-07-17T11:41:34
2020-07-17T12:14:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6682847738265991, "alphanum_fraction": 0.6682847738265991, "avg_line_length": 33.33333206176758, "blob_id": "6eafd700ecded0809d4e027c7469827326662c13", "content_id": "9f7195dca4f850b4194d6a68fa90dc2ef37469df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 97, "num_lines": 18, "path": "/mainapp/views.py", "repo_name": "Iriday/JBA_HyperJob-Agency", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import View\nfrom django.core.exceptions import PermissionDenied\n\n\n# Create your views here.\nclass MainPageView(View):\n def get(self, request, *args, **kwargs):\n return render(request, template_name=\"mainapp/main_page.html\")\n\n\nclass ProfileView(View):\n def get(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n raise PermissionDenied\n\n return render(request, template_name=(\"vacancy/new_vacancy.html\" if request.user.is_staff\n else \"resume/new_resume.html\"))\n" }, { "alpha_fraction": 0.7147147059440613, "alphanum_fraction": 0.7147147059440613, "avg_line_length": 32.29999923706055, "blob_id": "bdad7740afa79f32986a3619830b67c90e42b156", "content_id": "1fb97779541ebf4a101ba4de0f3669093c2ac5bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 87, "num_lines": 20, "path": "/resume/views.py", "repo_name": "Iriday/JBA_HyperJob-Agency", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom resume.models import Resume, get_all_resumes\nfrom django.core.exceptions import PermissionDenied\n\n\n# Create your views here.\n\nclass ResumesView(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"resume/resumes.html\", {\"resumes\": get_all_resumes()})\n\n\nclass NewResumeView(View):\n def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated or request.user.is_staff:\n raise PermissionDenied\n\n Resume(author=request.user, description=request.POST.get(\"description\")).save()\n return redirect(\"/home\")\n" }, { "alpha_fraction": 0.7748091816902161, "alphanum_fraction": 0.7748091816902161, "avg_line_length": 29.823530197143555, "blob_id": "19a15a8322d8af5268529afa936d428e2e8baacb", "content_id": "7ceee9db6397778a41eb464987be1f008389327e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 67, "num_lines": 17, "path": "/reg_and_auth/views.py", "repo_name": "Iriday/JBA_HyperJob-Agency", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.views.generic import CreateView\nfrom django.contrib.auth.views import AuthenticationForm, LoginView\n\n\n# Create your views here.\nclass MySignupView(CreateView):\n form_class = UserCreationForm\n success_url = \"/login\"\n template_name = \"reg_and_auth/signup.html\"\n\n\nclass MyLoginView(LoginView):\n form_class = AuthenticationForm\n redirect_authenticated_user = True\n template_name = \"reg_and_auth/login.html\"\n" }, { "alpha_fraction": 0.7211678624153137, "alphanum_fraction": 0.7211678624153137, "avg_line_length": 33.25, "blob_id": "8dad3d7d7cb03dc6e8456124da460328f20714cd", "content_id": "b3a4b0d5a5b758b05a4402d5e1256900585ef62a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 92, "num_lines": 20, "path": "/vacancy/views.py", "repo_name": "Iriday/JBA_HyperJob-Agency", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom vacancy.models import Vacancy, get_all_vacancies\nfrom django.core.exceptions import PermissionDenied\n\n\n# Create your views here.\n\nclass VacanciesView(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"vacancy/vacancies.html\", {\"vacancies\": get_all_vacancies()})\n\n\nclass NewVacancyView(View):\n def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated or not request.user.is_staff:\n raise PermissionDenied\n\n Vacancy(author=request.user, description=request.POST.get(\"description\")).save()\n return redirect(\"/home\")\n" } ]
4
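The view files in the record above each repeat the same manual request.user.is_authenticated / is_staff check before raising PermissionDenied. A minimal sketch of the equivalent guard using Django's built-in auth mixins; the class name is an illustrative stand-in and this refactor is not part of the source repository:

import django
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import render
from django.views import View


class StaffOnlyVacancyView(LoginRequiredMixin, UserPassesTestMixin, View):
    # raise_exception=True makes both mixins raise PermissionDenied (HTTP 403)
    # instead of redirecting to the login page, matching the original behaviour.
    raise_exception = True

    def test_func(self):
        # Mirrors the manual "must be staff" check in NewVacancyView.
        return self.request.user.is_staff

    def get(self, request, *args, **kwargs):
        return render(request, "vacancy/new_vacancy.html")

The mixin approach centralizes the permission logic, so adding a post() handler later cannot accidentally skip the check the way a per-method manual guard can.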
MyWishPlatform/rubic_exchange_backend
https://github.com/MyWishPlatform/rubic_exchange_backend
58d4a6e5fe174c5f0a9b6044639a739036f14e6a
721c0f2b0fd6a19c50ce73111e8639781540168e
d973bf85fd2d515740e8cf95f9ab9159a6970be1
refs/heads/main
2022-12-25T04:42:00.777385
2020-10-13T19:49:07
2020-10-13T19:49:07
303,812,902
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7375565767288208, "alphanum_fraction": 0.7375565767288208, "avg_line_length": 23.55555534362793, "blob_id": "ad3844419aaf253501aded70c8b9f710db35a3ab", "content_id": "a1392f8bc9cac42dedb304ab476bc78aad799ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/rubic_exchange/orderbook/serializers.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom rubic_exchange.orderbook.models import OrderBook\n\n\nclass OrderBookSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrderBook\n fields = '__all__'\n" }, { "alpha_fraction": 0.7412499785423279, "alphanum_fraction": 0.7537500262260437, "avg_line_length": 39, "blob_id": "dd8d541f00aaeb3096e589a91ef8213b7afa16bd", "content_id": "58c185f00b074cc47c683f2753f655746ad9f034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1600, "license_type": "no_license", "max_line_length": 91, "num_lines": 40, "path": "/rubic_exchange/orderbook/models.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\n\n\nfrom rubic_exchange.consts import MAX_WEI_DIGITS\n\n\nclass Network(models.Model):\n name = models.CharField(max_length=128, db_index=True)\n\n\nclass OrderContractId(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass OrderBook(models.Model):\n order_contract = models.OneToOneField(OrderContractId, on_delete=models.CASCADE)\n network = models.ForeignKey(Network, on_delete=models.CASCADE, default=1)\n\n owner_address = models.CharField(max_length=50)\n base_address = models.CharField(max_length=50)\n quote_address = models.CharField(max_length=50)\n\n base_limit = models.DecimalField(max_digits=MAX_WEI_DIGITS, decimal_places=0)\n quote_limit = models.DecimalField(max_digits=MAX_WEI_DIGITS, decimal_places=0)\n expiration_timestamp = models.BigIntegerField()\n base_only_investor = models.CharField(max_length=50)\n min_base_investment = models.DecimalField(max_digits=MAX_WEI_DIGITS, decimal_places=0)\n min_quote_investment = models.DecimalField(max_digits=MAX_WEI_DIGITS, decimal_places=0)\n broker_address = models.CharField(max_length=50)\n broker_base_percent = models.IntegerField()\n broker_quote_percent = models.IntegerField()\n refund_delay_seconds = models.BigIntegerField()\n public = models.BooleanField(default=True)\n\n state = models.CharField(max_length=63, default='CREATED')\n\n created_at = models.DateTimeField(auto_now_add=True)\n changed_at = models.DateTimeField(auto_now=True)\n state_changed_at = models.DateTimeField(default=timezone.now)\n" }, { "alpha_fraction": 0.7199327349662781, "alphanum_fraction": 0.7199327349662781, "avg_line_length": 33.97058868408203, "blob_id": "b70e228bc9a075415f9bb203a07ce9df4f729f46", "content_id": "4339cd3fa3e04d63b1dd7981227cd679d7e19b80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/rubic_exchange/orderbook/views.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.request 
import Request\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework import mixins\n\nfrom rubic_exchange.orderbook.api import make_new_id\nfrom rubic_exchange.orderbook.models import OrderBook\nfrom rubic_exchange.orderbook.serializers import OrderBookSerializer\n\n\n@api_view(http_method_names=['GET'])\ndef generate_order_id(request: Request):\n order_id = make_new_id()\n return Response({'id': order_id})\n\n\nclass OrderBookViewSet(mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n GenericViewSet):\n queryset = OrderBook.objects.filter(public=True)\n serializer_class = OrderBookSerializer\n permission_classes = []\n\n def retrieve(self, request: Request, *args, **kwargs):\n order_id = kwargs.get('pk')\n try:\n order = OrderBook.objects.get(order_contract__id=order_id)\n except OrderBook.DoesNotExist:\n raise NotFound\n\n serializer = self.get_serializer(order)\n return Response(serializer.data)\n" }, { "alpha_fraction": 0.8443396091461182, "alphanum_fraction": 0.8443396091461182, "avg_line_length": 29.285715103149414, "blob_id": "d352f4cadf0c126c65b234ffe9c1def54cd992e6", "content_id": "f34bf72805645d3314df72c81f19409c281d0b93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 79, "num_lines": 7, "path": "/rubic_exchange/orderbook/admin.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom rubic_exchange.orderbook.models import OrderBook, OrderContractId, Network\n\n\nadmin.site.register(OrderBook)\nadmin.site.register(OrderContractId)\nadmin.site.register(Network)\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 33, "blob_id": "6babcaa180d909224b71fdeaddd0bb8facbbdbd6", "content_id": "7e82584e8104765041c8f39056f5dcaf7e4ce2ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 33, "num_lines": 1, "path": "/rubic_exchange/consts.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "MAX_WEI_DIGITS = len(str(2**256))\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 30, "blob_id": "77541a5d977de5c4dfd12e3047cbc77eb741e84f", "content_id": "f11de9c96b230cc73edb2ddd4a9836c1f1b2e289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 70, "num_lines": 10, "path": "/rubic_exchange/orderbook/api.py", "repo_name": "MyWishPlatform/rubic_exchange_backend", "src_encoding": "UTF-8", "text": "from rest_framework.exceptions import ValidationError\n\nfrom rubic_exchange.orderbook.models import OrderContractId, OrderBook\nfrom rubic_exchange.orderbook.serializers import OrderBookSerializer\n\n\ndef make_new_id():\n order_contract = OrderContractId()\n order_contract.save()\n return order_contract.id\n" } ]
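The record above defines generate_order_id and OrderBookViewSet but shows no URL configuration. A hypothetical urls.py wiring them together; every path and route name here is an assumption for illustration, not taken from the repository:

from django.urls import include, path
from rest_framework.routers import DefaultRouter

from rubic_exchange.orderbook.views import OrderBookViewSet, generate_order_id

router = DefaultRouter()
router.register(r'orders', OrderBookViewSet, basename='orderbook')

urlpatterns = [
    # Literal route first, so it is not captured by the
    # router's 'orders/<pk>/' detail pattern.
    path('orders/new-id/', generate_order_id),
    path('', include(router.urls)),
]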
6
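A client-side sketch of the intended two-step flow (reserve an order-contract id, then fetch the stored order once it exists); the base URL and routes follow the hypothetical urls.py above and are assumptions:

import requests

BASE = "http://localhost:8000"  # assumed development server

# Step 1: reserve a fresh OrderContractId (backed by make_new_id()).
new_id = requests.get(f"{BASE}/orders/new-id/").json()["id"]
print("reserved order id:", new_id)

# Step 2: fetch the public order record; OrderBookViewSet.retrieve
# looks it up via order_contract__id and returns 404 until it exists.
resp = requests.get(f"{BASE}/orders/{new_id}/")
if resp.status_code == 200:
    print(resp.json()["state"])  # e.g. 'CREATED'
else:
    print("order not created yet:", resp.status_code)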
IbroCalculus/Python-Codes-Snippets-for-Reference
https://github.com/IbroCalculus/Python-Codes-Snippets-for-Reference
03a0778d56ea07267bdbb4cf462daedb3df0667b
36cfb59847520724e2329c93233773ca7c1b4f08
f5ea5b66a58cc48c31122ec5cbfcb33944c3b9d9
refs/heads/main
2023-05-09T23:54:24.578258
2021-06-08T13:56:29
2021-06-08T13:56:29
375,029,913
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6381909251213074, "alphanum_fraction": 0.6457286477088928, "avg_line_length": 27.464284896850586, "blob_id": "99ed42ee52f79007c04845a9d119f761b73af15c", "content_id": "a6a2388cfb0bd5bc56c8cd23888d1cf7eba33368", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/CSV.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import csv\n\n\n#CSV -> Comma separated Values\n#What separated the values is called a delimeter\n\n#READING FROM A CSV FILE\nwith open('csvTestFile.csv', 'r') as csvFileObj:\n #csvReader = csv.reader(csvFileObj, delimiter=\",\") #Returns value in list format\n csvReader = csv.DictReader(csvFileObj, delimiter=\",\") #Returns value in dictionary format\n\n count = 0\n for line in csvReader:\n if count != 0:\n print(f'{count}. {line}')\n count += 1\n\n\n#WRITING TO A CSV FILE\n\nwith open('csvTestFile.csv', 'r') as csvFileObj:\n csvReader = csv.reader(csvFileObj, delimiter=\",\")\n\n with open('csvTestFile2.csv', 'w') as csvFileObj2:\n csvWriter = csv.writer(csvFileObj2, delimiter=\"%\")\n\n for line in csvReader:\n csvWriter.writerow(line)" }, { "alpha_fraction": 0.8088235259056091, "alphanum_fraction": 0.8088235259056091, "avg_line_length": 67, "blob_id": "48f4412ac645c83d705815877c68cf1aed6f7381", "content_id": "8a91f8155afa80a47032a7160acf6dca013e41a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 95, "num_lines": 2, "path": "/HashTable.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#HashTables store data in key-value pairs and the values are generated using a hashing function\n#It is the same as dictionary in python" }, { "alpha_fraction": 0.626163125038147, "alphanum_fraction": 0.6327312588691711, "avg_line_length": 31.625, "blob_id": "f3a674f985b45d5abd848a5d2ebfe82b5c33b95b", "content_id": "d718535d179d4da8d9a4e327a151df24230e85bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1827, "license_type": "no_license", "max_line_length": 128, "num_lines": 56, "path": "/SQLite2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import sqlite3\n\n#1. CREATE DATABASE\n'''\n conn = sqlite3.connect(\"MyDataBase.db\") #extension can be anything. i.e .db, .sqlite3, .dbase, .ibrahim etc, preferrably .db\n cur = conn.cursor() #Handle for working with the database\n \n #Close database when you are done with the dbase file\n cur.close()\n'''\n\n#2. CREATE A TABLE\n'''\n cur.execute (\"\"\" CREATE TABLE IF NOT EXISTS employee_records(\n ID INT PRIMARY KEY NOT NULL,\n NAME TEXT NOT NULL,\n DIVISION TEXT NOT NULL,\n STARS INT NOT NULL\n )\n \"\"\")\n'''\n\n#3. ADD DATA TO DATABASE TABLE\n'''\n cur.execute(\"\"\" INSERT INTO employee_records(ID,NAME,DIVISION,STARS)\n VALUES(1,\"Ibrahim\",\"Software\",5)\n \"\"\")\n # Alternatively, use: cur.execute(\"\"\" INSERT INTO employee_records(*) \"\"\") where * signifies ALL\n # or specify the particular fields to insert into if not all\n # But NOT NULL -> every field must have values, hence All i.e ID,NAME,DIVISION,STARS\n \n #Apply changes to db using commit\n conn.commit()\n'''\n\n#4. 
#ADD DATA TO DATABASE USING A FUNCTION WITH PARAMETERS\n'''\n    def insert_record(ID,NAME,DIVISION,STARS):\n        cur.execute(\"\"\" INSERT INTO employee_records(ID,NAME,DIVISION,STARS)\n        VALUES(?,?,?,?)\"\"\",(ID,NAME,DIVISION,STARS))\n        # Alternatively, omit the column list entirely: cur.execute(\"\"\" INSERT INTO employee_records VALUES(?,?,?,?) \"\"\", ...)\n        # or specify the particular fields to insert into if not all\n        # But NOT NULL -> every field must have values, hence All i.e ID,NAME,DIVISION,STARS\n        \n        #Apply changes to db using commit\n        conn.commit()\n        print(\"Changes applied\")\n    \n    insert_record(5,\"Muhammad\",\"Soldier\",9)\n    \n    \n    #Close database when you are done with the dbase file\n    cur.close()\n'''\n\n#5. #READ DATA FROM DATABASE\n" }, { "alpha_fraction": 0.6427640318870544, "alphanum_fraction": 0.6805736422538757, "avg_line_length": 32.39130401611328, "blob_id": "3872b6f6c501e83", "content_id": "76b0a659766c97596d518ecacef6f4d74fe972b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 113, "num_lines": 23, "path": "/Secrets Module.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import secrets\n\n#It's also used for random number generation, but is more secure; the others are: random and numpy(Numpy2)\n#It only has 3 functions\n#Used for e.g: passwords, security tokens, account authentication\n#DISADVANTAGE: Its algorithms take more time to run\n\nfor i in range(3):\n    a = secrets.randbelow(10) #Generates random int between 0 and 10, 10 not included\n    print(f'RANDBELOW: {a}')\n\nprint()\n\nfor i in range(3):\n    b = secrets.randbits(4) #Generates a random int of 4 bits, i.e. 0000 - 1111 (0 - 15), 15 included\n    print(f'RANDBITS: {b}')\n\nprint()\n\nfor i in range(3):\n    nameList = [\"Ibrahim\", \"Musa\", \"Suleiman\"]\n    c = secrets.choice(nameList) #Returns a random choice from a list\n    print(f'CHOICE: {c}')" }, { "alpha_fraction": 0.7971380352973938, "alphanum_fraction": 0.8038720488548279, "avg_line_length": 33.94117736816406, "blob_id": "6596b664df7030c8c45b910f4907f98e0205a20b", "content_id": "1d4d083df8204fbcfcbbb483fedaaba26267ef65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 190, "num_lines": 34, "path": "/Cryptography/Basics.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "#pip install cryptography\n\n\n#TERMINOLOGIES OF CRYPTOGRAPHY\n\n#PLAIN TEXT - Text which is readable and understandable by all users.\n\n#CIPHER TEXT - The message obtained after applying cryptography on plain text.\n\n#ENCRYPTION - The process of converting plain text to cipher text. It is also called ENCODING.\n\n#DECRYPTION - The process of converting cipher text to plain text. 
It is also called DECODING.\n\n\n#BASIC CHARACTERISTICS OF MODERN CRYPTOGRAPHY:\n\n#1 It operates on bit sequences.\n#2 It uses mathematical algorithms for securing the information\n#3 It requires parties interested in a secure communication channel to achieve privacy\n\n\n#DOUBLE STRENGTH ENCRYPTION\n#Double strength encryption, also known as MULTIPLE ENCRYPTION, is the process of encrypting an already encrypted text one or more times, either with the same or different algorithm/pattern.\n\n#LEVELS OF DOUBLE STRENGTH ENCRYPTION\n#First layer of encryption, Second and Third layer of encryption. CHECK REFERENCE.\n\n#HYBRID CRYPTOGRAPHY\n#Is the process of using multiple ciphers of different types together, combining the benefits of each cipher.\n\n#TYPES OF CIPHERS\n#1. Reverse cipher\n#2. Caesar cipher\n#3. ROT13 cipher\n" }, { "alpha_fraction": 0.7427577972412109, "alphanum_fraction": 0.7439165711402893, "avg_line_length": 42.150001525878906, "blob_id": "2276596c37c45da02895cbca02a8779e55138594", "content_id": "a86cf4152c6b9ad4564d7a5b37a1ee8d4f3924e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 103, "num_lines": 20, "path": "/Logging.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import logging\n\n#Logging is a means of tracking events that happen when some software runs.\n#Logging is important for software development, debugging and running.\n#If you don't have any logging record and your program crashes, there is very little chance\n# that you will find the cause of the problem\n\n#PRINTING / LOGGING\n#Printing may be used for logging purposes in simple scripts, but will fail for complex scripts.\n\n#Can log to 5 different log levels. 
They indicate the severity of the event\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    datefmt='%m/%d/%Y %H:%M:%S')\n\nlogging.debug(\"This is a debug message\")\nlogging.info(\"This is an info message\")\nlogging.warning(\"This is a warning message\")\nlogging.error(\"This is an error message\")\nlogging.critical(\"This is a critical message\")\n" }, { "alpha_fraction": 0.6464646458625793, "alphanum_fraction": 0.6498316526412964, "avg_line_length": 21.769229888916016, "blob_id": "adf2218541f9c2266ed10191e58b2e9e4d788c21", "content_id": "6da3ade362c65ac7edf614427150c6bef274bc20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 130, "num_lines": 13, "path": "/Cryptography/Caesar Cipher.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#In this type of cipher: Each letter of the plain text is replaced by the letter a fixed number of positions down the alphabet.\n#E.g. with the shift of 5 used below: ABC = FGH\n\nmsg = input(\"Enter message: \")\n\ncipherText = ''\n\nfor i in msg:\n    x = ord(i)\n    x += 5\n    cipherText += chr(x)\n\nprint(\"CIPHER: \", cipherText)\n" }, { "alpha_fraction": 0.6836419701576233, "alphanum_fraction": 0.6929012537002563, "avg_line_length": 28.5, "blob_id": "29bdd3ed26438dc6103563c4cb9770969dfad867", "content_id": "ea9f852fd21124e4516fb1d2fdeb50493a3230c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/Datetime2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import datetime\n\n#Today's date and present time\nprint(f'TODAYS DATE & PRESENT TIME IS: {datetime.datetime.now()}')\n\n#Today's date only\nprint(f'TODAYS DATE ONLY IS: {datetime.date.today()}')\n\n#Present time only\nprint(f'PRESENT TIME ONLY IS: {datetime.datetime.now().time()}')\n\n\n#Formatting Dates\ndateNow = datetime.datetime.now()\nprint(f'BEFORE FORMATTING: {dateNow}')\n\ndateNowF1 = dateNow.strftime(\"%d/%m/%Y -- %H:%M:%S\")\ndateNowF2 = dateNow.strftime(\"%d/%B/%Y -- %H:%M:%S\")\ndateNowF3 = dateNow.strftime(\"%d/%b/%Y -- %H:%M:%S\")\nprint(f'AFTER FORMATTING: {dateNowF1}')\nprint(f'AFTER FORMATTING: {dateNowF2}')\nprint(f'AFTER FORMATTING: {dateNowF3}')" }, { "alpha_fraction": 0.5409181714057922, "alphanum_fraction": 0.5908183455467224, "avg_line_length": 14.59375, "blob_id": "50845e5b2b368b7bb8f839ac4452", "content_id": "bf6fed5c5ffbfa6bdf1d6b4a0642ab2f401d90ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 92, "num_lines": 32, "path": "/SNIPPETS/timeConverter.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "#Convert seconds to the equivalent hour, min, and seconds\n\ntime = int(input(\"Enter time in seconds: \"))\n\nseconds = 0\nminutes = 0\nhours = 0\nhrs = \"\"\nmins = \"\"\nsecs = \"\"\n\nhours = time//3600\ntime %= 3600\n\nminutes = time//60\ntime %= 60\n\nseconds = time\n\nif hours > 1:\n    hrs = \"hrs\"\nelse: hrs = \"hr\"\n\nif minutes > 1:\n    mins = \"mins\"\nelse: mins = \"min\"\n\nif seconds > 1:\n    secs = \"secs\"\nelse: secs = \"sec\"\n\nprint('The time: {0}{3} : {1}{4} : {2}{5}'.format(hours, minutes, seconds, hrs, 
mins, secs))\n\n\n" }, { "alpha_fraction": 0.6620941758155823, "alphanum_fraction": 0.6716963648796082, "avg_line_length": 27.402597427368164, "blob_id": "4d1e26e4b173503285b8116f62c3aa3400ad44a3", "content_id": "f128867b702949a2f11745c7ce8bd4c734ca8658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 125, "num_lines": 77, "path": "/File.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nimport os\n\n#CHECK OS Module for more on working with files and directories\n\n\n#Check if file exists in a directory\nstatement = \"This file exists\" if os.path.isfile(\"thisfile.csv\") else \"This file does not exist\"\nprint(statement)\n\n#f.seek(5) -> Moves the file cursor to the 5th character, hence can overwrite\n#f.tell() -> Tells the current position within the file\n\n#Reading (r), writing (w), reading and writing (r+) and appending (a) a file or binary file (rb, wb, ab)\n#myFileObj = open(\"myFile.txt\", 'r')\n#myFileObj.close()\n\n#If second argument is not present, it defaults to a read file. i.e myFileObj = open('myFile.txt') == open('myFile.txt', 'r')\n\n\n#CREATE AND WRITE TO A FILE\nmyFileObjWrite = open(\"FileTextFile.txt\", 'w')\nmyFileObjWrite.write(\"This is the first text\")\nmyFileObjWrite.write(\"\\nThis is the second text\")\nmyFileObjWrite.close()\n\n\n#READ FROM A FILE\nmyFileObjRead = open(\"FileTextFile.txt\", 'r')\nprint(myFileObjRead.name)\nprint(myFileObjRead.mode)\n'''for i in myFileObjRead:\n    print(i.strip())\n'''\ncontent = myFileObjRead.read() #.read(100) -> Read the first 100 Characters\nprint(\"CONTENT\", content)\nmyFileObjRead.close()\n\nprint('\\n')\n\n\n#USING CONTEXT MANAGER\nwith open('FileTextFile.txt') as f:\n    contents = f.read() #Good for small files; DON'T OVERLOAD the memory with a large file, instead check below\n    # .read(100) -> Read the first 100 Characters\n    print('CONTENTS:', contents)\n\nprint('\\n\\n')\n\n\nwith open('FileTextFile2.txt', 'w') as f:\n    f.write('This is another first line')\n    f.write('\\nThis is another second line')\n    f.write('\\nThis is another third line')\n\nprint('CONTENT2:')\nwith open('FileTextFile2.txt', 'r') as f:\n    x = f.readlines() #Returns a list of strings, one per line\n    print(x, end='')\n\n\nprint('\\n\\nCONTENT3:')\nwith open('FileTextFile2.txt', 'r') as f:\n    x = f.readline() #Returns the first line\n    print(x, end='')\n\n#Efficient way\nprint('\\n\\nCONTENT4:')\nwith open('FileTextFile2.txt', 'r') as f:\n    for i in f:\n        print(i, end='')\n\n\n#Copying file\nwith open('FileTextFile.txt', 'r') as rf:\n    with open('FileTextFile_Copy.txt', 'w') as wf:\n        for line in rf:\n            wf.write(line)" }, { "alpha_fraction": 0.6489483714103699, "alphanum_fraction": 0.6680688261985779, "avg_line_length": 32.075950622558594, "blob_id": "09648775855fe711469dedbe02d1e108da3d3c32", "content_id": "c065f3825624e70fef47665b912ed24b5e89b3d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": "no_license", "max_line_length": 99, "num_lines": 79, "path": "/Text_To_Speech.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nimport pyttsx3\nimport time\n\n#Works offline\n\n\n#1. 
Basic Usage\ndef basicUsage():\n    msg1 = \"Good day Ibrahim, press Enter to proceed with the launch sequence\"\n    print(msg1)\n    pyttsx3.speak(msg1)\n    input()\n\n    pyttsx3.speak(\"Ready to launch in...\")\n    for i in range(10, 0, -1):\n        print(i)\n        pyttsx3.speak(str(i))\n        time.sleep(0.01)\n\n    msg2 = \"Launch carried out successfully\"\n    print(msg2)\n    pyttsx3.speak(msg2)\n\n#basicUsage()\n\n#2. More\ndef advanceUsage():\n    #Speak\n    engine = pyttsx3.init() # object creation\n    engine.say(\"Hello Ibrahim, what may I do for you today?\")\n    engine.runAndWait()\n\n    #Changing voice, Rate and Volume\n\n    \"\"\" RATE. ie: How fast the voice sounds, normally about 200\"\"\"\n    rate = engine.getProperty('rate')   # getting details of current speaking rate\n    print(f'RATE1: {rate}')  # printing current voice rate\n    engine.setProperty('rate', 325)  # setting up new voice rate\n    rate = engine.getProperty('rate')  # re-read the rate after changing it\n    print(f'RATE2: {rate}')  # printing the updated voice rate\n    engine.say(\"Hello Ibrahim, what may I do for you today?\")\n    engine.runAndWait()\n\n    \"\"\"VOLUME\"\"\"\n    volume = engine.getProperty('volume')  # getting to know current volume level (min=0 and max=1)\n    print(volume)  # printing current volume level\n    engine.setProperty('volume', 0.3)  # setting up volume level between 0 and 1\n    engine.say(\"Hello Ibrahim, Welcome to python text to speech.\")\n    engine.runAndWait()\n\n    \"\"\"VOICE\"\"\"\n    engine.setProperty('volume', 1.0)  # setting up volume level between 0 and 1\n    engine.setProperty('rate', 150)  # setting up new voice rate\n    rate1 = engine.getProperty('rate')  # getting details of current speaking rate\n    print(f'RATE3: {rate1}')\n    voices = engine.getProperty('voices')  # getting details of current voice\n    # engine.setProperty('voice', voices[0].id) #changing index, changes voices. 0 for male\n    engine.setProperty('voice', voices[1].id)  # changing index, changes voices. 
1 for female\n\n engine.say(\"Hello Ibrahim, Welcome to python text to speech.\")\n engine.say('My current speaking rate is ' + str(rate1))\n engine.runAndWait()\n engine.stop()\n\n\n \"\"\"Saving Voice to a file\"\"\"\n\n print(\"Do you want to save a voice recording?, if yes, press enter to proceed\")\n engine.say(\"Do you want to save a voice recording?, if yes, press enter to proceed\")\n engine.runAndWait()\n input()\n\n # On linux make sure that 'espeak' and 'ffmpeg' are installed\n engine.save_to_file('Hello World, I am Ibrahim Suleiman', 'testTTS.mp3')\n time.sleep(1)\n engine.say(\"File saved successfully\")\n engine.runAndWait()\n\n\nadvanceUsage()\n\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.646258533000946, "avg_line_length": 17.607595443725586, "blob_id": "698ea90a8354d8d6d006dfcf9009c099e153298d", "content_id": "db6bcfdc31dbc74b3b62f62dc3711fc5abd227fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 53, "num_lines": 79, "path": "/Matplotlib.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nfrom matplotlib import style\nstyle.use('ggplot')\n\n#Matplotlib is a python package used for 2D graphics\n\n#Plot graph\nx = [1,2,3,4,5]\ny = [10,20,30,20,10]\nplt.plot(x,y)\nplt.show()\n\n\n#Add label and title to graph\nx = [1,2,3,4,5]\ny = [10,20,30,20,10]\nplt.plot(x,y)\nplt.title('GRAPH TITLE')\nplt.xlabel('X Label')\nplt.ylabel('Y Label')\nplt.grid(False)\nplt.show()\n\n\n#Add style to graph\n#from matplotlib import style\n\nstyle.use(\"ggplot\")\n\nx = [1,2,3,4,5]\ny = [10,20,30,20,10]\na = [1,2,3,4,5]\nb = [1,3,5,7,9]\n\nplt.plot(x,y,'g',label=\"First Line\", linewidth=5)\nplt.plot(a,b,'y', label=\"Second Line\", linewidth=2)\nplt.legend()\nplt.title('GRAPH TITLE')\nplt.xlabel('X Label')\nplt.ylabel('Y Label')\nplt.grid(True,color='#FF0000')\nplt.show()\n\n\n#PLOT A BAR GRAPH\nx = [1,3,5,7,9]\ny = [10,20,30,20,10]\na = [2,4,6,8,10]\nb = [1,3,5,7,9]\n\nplt.bar(x,y, label='First bar graph')\nplt.bar(a,b, label='Second bar graph', color='green')\nplt.legend()\nplt.title('BAR GRAPH TITLE')\nplt.xlabel('HORIZONTAL AXIS')\nplt.ylabel('BAR HEIGHT')\nplt.show()\n\n#SCATTER PLOT\nx = [1,3,5,7,9]\ny = [10,20,30,20,10]\n\nplt.scatter(x,y, label=\"Scatter plot\", color='blue')\nplt.legend()\nplt.title('SCATTER PLOT')\nplt.xlabel('HORIZONTAL AXIS')\nplt.ylabel('BAR HEIGHT')\nplt.show()\n\n#PIE CHART\nITEMS = [\"Orange\", \"Fish\", \"Mango\", 'Garlic', \"Yam\"]\nVALUES = [3,8,2,4,1]\n\nplt.pie(VALUES,\n labels=ITEMS,\n startangle=90,\n shadow=True)\nplt.title(\"PIE CHART\")\nplt.show()\n" }, { "alpha_fraction": 0.6730450987815857, "alphanum_fraction": 0.7028112411499023, "avg_line_length": 36.80356979370117, "blob_id": "7f9ffd3bebb08174dbc9b0d3e56e74f09cb1a40f", "content_id": "04f4a51773c3f9f3c70d1809827918cdabe2410c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4235, "license_type": "no_license", "max_line_length": 126, "num_lines": 112, "path": "/Regex.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import re\n\n#NB: .findall does not work with patterns containing groups. 
i.e: ()\n\n#CREATING PATTERNS AND USING SEARCH & FINDALL METHOD IN re\nregex1 = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\ntext = 'My numbers are 415-555-6543 as well as 657-321-6643.'\nmo = regex1.search(text) #Returns only the first item of the pattern. i.e 415-555-6543\nprint('Phone number found is: ' + mo.group())\nprint('\\n')\nmo2 = regex1.findall(text)\nprint('Phone number found is: ', mo2) #Returns the list of all items that match the pattern\n\n\n#GROUPING WITH PARENTHESES\nregex1 = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d)-(\\d\\d\\d\\d)')\ntext = 'My numbers are 415-555-6543 as well as 657-321-6643.'\nmo = regex1.search(text)\nprint('Phone number found is: ' + mo.group())\nprint('Phone number found is: ' + mo.group(0))\nprint('Phone items in 1st parenthesis are: ' + mo.group(1))\nprint('Phone items in 2nd parenthesis are: ' + mo.group(2))\nprint('The groups are:', mo.groups())\n\n#MATCHING MULTIPLE GROUPS WITH PIPE\nregex2 = re.compile(r'Ibrahim|Musa')\ntext = \"I am Musa, but you can call me Ibrahim\"\nmo = regex2.search(text)\nmo2 = regex2.findall(text)\nprint('THE OUTPUT IS:', mo2)\nprint('\\n')\n\n#OPTIONAL MATCHING WITH THE QUESTION MARK. (Zero or One)\nregex3 = re.compile(r'Bat(wo)?man')\nmo1 = regex3.search('The Adventures of Batman and Batwoman')\nmo2 = regex3.findall('The Adventures of Batman and Batwoman') #With a group in the pattern, .findall returns the group contents ('wo' or ''), not the full match\nprint(mo1.group())\nprint(mo2)\nprint('\\n')\n\n#MATCHING ZERO OR MORE WITH THE STAR (*)\nregex4 = re.compile(r'Bat(wo)*man')\ntext = 'This is Batwowowowowoman'\nmo1 = regex4.search(text)\nmo2 = regex4.findall(text) #With a group in the pattern, .findall returns the group contents, not the full match\nprint(\"THE OUTPUT:\", mo1.group())\nprint(\"THE OUTPUT:\", mo2)\nprint(\"\\n\")\n\n#MATCHING ONE OR MORE WITH THE PLUS (+)\nregex5 = re.compile(r'Bat(wo)+man')\ntext = 'This is a Batwoman and Batwowoman'\nmo1 = regex5.search(text)\nmo2 = regex5.findall(text)\nprint(\"Mo1:\", mo1.group())\nprint(\"Mo2:\", mo2)\nprint(\"\\n\")\n\n#MATCHING SPECIFIC REPETITIONS WITH CURLY BRACKETS\nregex6 = re.compile(r'(wo){2}')\ntext = 'This a Batwowoman and Somwowowo'\nmo1 = regex6.search(text)\nmo2 = regex6.findall(text)\nprint('CURLY:', mo1.group())\nprint('CURLY:', mo2)\nprint('\\n')\n\n#(wo){2,5} -> 2 or 3 or 4 or 5 times wo\n#GREEDY AND NONGREEDY MATCHING\n#GREEDY means that in (wo){2,5}, the regex will match the longest repetition first, i.e. wo x 5. This is the DEFAULT\n#To make it non-greedy, use the syntax: (wo){2,5}?\n\n\n#CHARACTER CLASSES\n# \\d Any numeric digit from 0 to 9\n# \\D Any character that is not a numeric digit from 0 to 9\n# \\w Any letter, numeric digit, or the underscore character\n# \\W Any character that is not a letter, numeric digit, or the underscore character\n# \\s Any space, tab, or newline character.\n# \\S Any character that is not a space, tab, or newline character.\n\n#The character class [0-5] will match only the numbers 0 to 5; this is much shorter than typing (0|1|2|3|4|5).\n\n\n#MAKING YOUR OWN CHARACTER CLASS\n#the character class [aeiouAEIOU] will match any vowel, both lowercase and uppercase.\n#the character class [a-zA-Z0-9] will match all lowercase letters, uppercase letters, and numbers.\n\n#By placing a caret character (^) just after the character class’s opening bracket, you can make a\n# negative character class. 
A negative character class will match all the characters that are not in\n# the character class.\n\n#THE CARET(^) AND DOLLAR($) SIGN CHARACTERS\n# ^ at the start of a regex to indicate that a match must occur at the beginning of the searched text.\n# $ at the end of the regex to indicate the string must end with this regex pattern.\n\n#THE WILDCARD CHARACTER (.)\n# will match any character except for a newline.\n\n#MATCHING EVERYTHING WITH DOT-STAR (.*)\n\n#MATCHING NEWLINES WITH THE DOT CHARACTER BY PASSING re.DOTALL as the second argument to re.compile('')\n#E.g: newlineRegex = re.compile('.*', re.DOTALL)\n\n\n#CASE INSENSITIVE MATCHING BY PASSING re.I or re.IGNORECASE\n#E.g: robocop = re.compile(r'robocop', re.I)\n\n\n#SUBSTITUTING STRINGS WITH THE sub METHOD. Check reference\n\n#FORMAT THE REGEX ON MULTILINE ignoring white space and comments. Check reference." }, { "alpha_fraction": 0.6943521499633789, "alphanum_fraction": 0.7109634280204773, "avg_line_length": 29.100000381469727, "blob_id": "02c30b33e981382de654865dac5f7eaeab2bf587", "content_id": "a86ac5573b0ee16145f40a4b16c573ff7e20db19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 99, "num_lines": 20, "path": "/Modules.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#A module is a file containing python code. It may contain functions, classes etc\n#It is used with modular programming, which is used to separate a program into parts.\n\n#Create and import your own custom module\n#i.e Modules2\n\n#On this machine, it always shows the red for custom module import, don't know why, but still works\n\nimport modules2\nfrom modules2 import timesAll\n\nx = modules2.sumAll(5,6,7)\ny = timesAll(5,4,2)\n\nprint(f'sumAll = {x}, while timesAll = {y}')\n\n\n#================= List all modules available on the computer ===============\nprint(\"LIST OF ALL AVAILABLE MODULES\")\nhelp('modules')" }, { "alpha_fraction": 0.5837438702583313, "alphanum_fraction": 0.5837438702583313, "avg_line_length": 13.464285850524902, "blob_id": "8b12d0d2f0deb012684ed8021c751d8023fbe0ef", "content_id": "54f016bbc67f9d46033a303b29c4f515c5c65688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 51, "num_lines": 28, "path": "/End & Sep.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#USING END\n#Similar\nprint(\"Enter a number: \", end=\"\")\nx = input()\nprint(x)\n\ny = input(\"Enter: \")\nprint(y)\n\n\n#Similar\nprint(\"Enter a number: \", end='\\n\\n')\ny = input()\nprint(y)\n\nx = input(\"Enter: \\n\\n\")\nprint(x)\n\n\n\n#USING SEP\nfname, mname, sname = \"Ibrahim\", \"Musa\", \"Suleiman\"\nprint(fname, mname, sname, sep='---')\n\n#Similarly\nname = \"Ibrahim\"\nfor i in name:\n print('[{}]'.format(i), end='', sep=\"++\")\n" }, { "alpha_fraction": 0.7282127141952515, "alphanum_fraction": 0.7296898365020752, "avg_line_length": 36.55555725097656, "blob_id": "7794e0993901bfdc4d6a764e649d68e78aea08b8", "content_id": "158859edbfaf37f36c30b08ac2a21d6d9da18bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 118, "num_lines": 18, "path": "/Urllib.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", 
"src_encoding": "UTF-8", "text": "\n#Similar reference files: Urllib and Requests.py. Contains urllib and requests module\n\nimport urllib.request, urllib.response, urllib.parse, urllib.error\n\n#Alternatively:\n#from urllib import request, response, parse, error\n\naddress = \"http://data.pr4e.org/romeo.txt\" #NB: Address can be either a file on the webpage or the webpage itself\nresponse = urllib.request.urlopen(address)\nfor content in response:\n print(content.decode().strip())\n\n\n#================ WEB SCRAPING(or SPIDERING or WEB CRAWLING =============\n'''\nWeb scraping is the act of retrieving a web page, extracting and using information from that web page\nA module good for this is \"Beautiful soup.\n'''\n" }, { "alpha_fraction": 0.525383710861206, "alphanum_fraction": 0.5543093085289001, "avg_line_length": 31.576923370361328, "blob_id": "36d21b617a9b0fbcad456d76620a2b5229dee025", "content_id": "aa5781f0ec4579aecb70c8c61993cad642b5e3cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1694, "license_type": "no_license", "max_line_length": 96, "num_lines": 52, "path": "/Sorting.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Sort() method - Used with lists\n#Sort() function - Used with iterables\n\n\n#=============== SORT METHOD (this works with only lists)===================\nlist1 = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Seven\", \"Eight\", \"Nine\", \"Ten\"]\nlist1.sort()\nprint(f'FIRST SORT: {list1}')\n\nlist1.sort(reverse=True)\nprint(f'SECOND SORT: {list1}')\nprint('\\n')\n\n#=============== SORTED FUNCTION (Accept other iterables including lists) ===================\ntuple1 = (\"Alpha\", \"Beta\", \"Gamma\", \"Delta\", \"Epsilon\", \"Zeta\", \"Eta\", \"Theta\", \"Iota\", \"Kappa\")\nlist1_sorted = sorted(tuple1)\nprint(f'THIRD SORT: {list1_sorted}')\n\nlist1_sorted = sorted(tuple1, reverse=True)\nprint(f'FOURTH SORT: {list1_sorted}')\nprint('\\n')\n\n#=========== MORE ON SORT WITH SORT METHOD (works only on lists) =================\nlist2 = [(\"Ibrahim\", \"Suleiman\", 26),\n (\"Abraham\", \"Lincon\", 87),\n (\"Sadiq\", \"Kashmar\", 66),\n (\"Philip\", \"Johnson\", 41),\n (\"Anthony\", \"Mark\", 39),\n (\"Pascal\", \"Blaize\", 42),\n (\"Isaac\", \"Newton\", 78)]\n\nsort_key = lambda sort_value: sort_value[2] #Sort by index 2\nlist2.sort(key=sort_key, reverse=True)\nfor i in list2:\n print(i)\nprint('\\n')\n\n\n#=========== MORE ON SORT WITH SORTED FUNCTION (works only all iterables) =================\ntuple2 = ((\"Ibrahim\", \"Suleiman\", 26),\n (\"Abraham\", \"Lincon\", 87),\n (\"Sadiq\", \"Kashmar\", 66),\n (\"Philip\", \"Johnson\", 41),\n (\"Anthony\", \"Mark\", 39),\n (\"Pascal\", \"Blaize\", 42),\n (\"Isaac\", \"Newton\", 78))\n\nsort_key = lambda sort_value: sort_value[1] #Sort by index 1\nsorted_values = sorted(tuple2, key=sort_key, reverse=True)\nfor i in sorted_values:\n print(i)\nprint('\\n')" }, { "alpha_fraction": 0.6539215445518494, "alphanum_fraction": 0.6676470637321472, "avg_line_length": 25.102563858032227, "blob_id": "8db9c94452475725c2945ea26c1ea197ed9f61db", "content_id": "34cd77111f287883c7fa4adfc9fb8a7ec6a042b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "no_license", "max_line_length": 148, "num_lines": 39, "path": "/Classes.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": 
"\n#CHECK: OOP for more ie Class and Instance variables, Inheritance etc, though many repetitions\n\nimport pyttsx3\n\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\nrate = engine.getProperty('rate')\nprint(rate)\nengine.setProperty('rate', 190)\n\n#Classes - is a blueprint for creating objects\n#NB: Attributes(Properties) and Behaviours(methods)\n\nclass Phone:\n\n def __init__(self, brand, price):\n self.brand = brand # Property(Attribute)\n self.price = price\n\n def call(self):\n print(\"Making a call\")\n\n def sayMyname(self, fname):\n pyttsx3.speak(f'This phone as identified you {fname} as the new owner this phone, of brand, {self.brand}, which costs {self.price} dollars')\n\n #Ctrl + O\n def __str__(self) -> str:\n return f'Brand is: {self.brand}, Price is: {self.price}'\n\n \n\n\nphone1 = Phone(\"Iphone\", 100)\nphone1.call()\nphone1.sayMyname(\"Ibrahim Musa Suleiman\")\n\n#This displays due to def __str__(self) -> str:...\nprint(phone1)\n\n" }, { "alpha_fraction": 0.45348837971687317, "alphanum_fraction": 0.4883720874786377, "avg_line_length": 14.727272987365723, "blob_id": "1d4a274ba52b04aec36d79655fc1f4418a2ef014", "content_id": "c782df554b296b7ded533f52b312463e98fa9e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/SNIPPETS/Fibonacci Sequence.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "def fib():\n a,b = 0,1\n while True:\n yield a\n a,b = b, a+b\n\n#Fibbonaci sequence of less than 50\nfor i in fib():\n if i > 50:\n break\n print(i)" }, { "alpha_fraction": 0.502581775188446, "alphanum_fraction": 0.5387263298034668, "avg_line_length": 28.049999237060547, "blob_id": "a3d86a0a6febcebc66e9a96fc3b47cfa945a6715", "content_id": "ca68084dd380ee70450370a9e6afac3340a671dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 91, "num_lines": 20, "path": "/Filter.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Filter creates a collection of elements from an iterable for which a function returns true\n\n#filter(function, iterable)\n\nnames_ages = [(\"Ibrahim\", 26),\n (\"Salim\", 12),\n (\"Kal\", 19),\n (\"Philip\", 26),\n (\"Muhammad\", 54),\n (\"Mark\", 18),\n (\"Shalom\", 22),\n (\"Qadir\", 4)]\n\nfilter_key = lambda filter_value: filter_value[1] >= 18 #Sort by index 1, greater than 18\n\nAdults = filter(filter_key, names_ages)\n#print(list(filtered_values))\nprint('===== ADULTS =========')\nfor i in Adults:\n print(i)" }, { "alpha_fraction": 0.5190380811691284, "alphanum_fraction": 0.5671342611312866, "avg_line_length": 18.959999084472656, "blob_id": "5347e696d0594bd6c1214819b0515000618b9070", "content_id": "b70a6f2ce8e7e9f5f41794a11d4b30549335dac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 66, "num_lines": 25, "path": "/SNIPPETS/Drawing Star Tree.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "for i in range(1, 6):\n print(\"*\" * i)\n\n\nprint(\"\\n\\n\")\nfor i in range(1, 11, 2):\n print((\"*\"*i).center(10))\n\n\nprint(\"\\n\\n\")\nfor i in range(1, 11, 2):\n 
print((\"*\"*i).center(10, \"=\"))\n\n\nprint(\"\\n\\n\")\nsymbol = input(\"Enter the symbol/character to use in your tree: \")\nheight = int(input(\"Enter the height of the tree: \"))\n\nfor i in range(1, (2*height)+1, 2):\n print((symbol*i).center(2*height))\n\nprint(\"\\n\\n\")\n\nfor i in range(1, (2*height)+1, 2):\n print((symbol*i).center(2*height, \"-\"))\n" }, { "alpha_fraction": 0.6920821070671082, "alphanum_fraction": 0.7170087695121765, "avg_line_length": 28.65217399597168, "blob_id": "0e8471382c24957c46d6d48fa5a898db3b8badfd", "content_id": "0b7ff8c1ad39c26cb584809631a66ef76e63fc08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 89, "num_lines": 23, "path": "/Reduce.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#reduce apply a function to an iterable and reduce it to a single cummulative value.\n#It performs function on first two elements and repeats the process until 1 value remains\n\n#Fibonacci sequence of addition, Factorial of numbers, etc\n\n#reduce(function, iterable)\n\nimport functools\n\nletter = ['I','b','r','a','h','i','m']\naction = lambda x,y: x + y\nword = functools.reduce(action, letter)\nprint(f'WORD: {word}')\n\nnumbers1 = [1,2,3,4,5]\naction = lambda x,y: x*y\nfactorial = functools.reduce(action, numbers1)\nprint(f'FACTORIAL OF 5 = {factorial}')\n\nnumbers2 = [1,2,3,4,5]\naction = lambda x,y: x+y\nfibonacci = functools.reduce(action, numbers2)\nprint(f'FIBONACCI OF 5 = {fibonacci}')" }, { "alpha_fraction": 0.6984478831291199, "alphanum_fraction": 0.7006651759147644, "avg_line_length": 33.69230651855469, "blob_id": "b27175e1a40f491c0ef12308615e1fc946b694e6", "content_id": "83bb9e40f778fd632e76128f452e9565a911c8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/Regex2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import re\n\n#REGULAR EXPRESSIONS QUICK GUIDE\n'''\n ^ - Matches the beginning of a line\n $ - Matches the end of a line\n . - Matches any character\n \\s - Matches whitespace\n \\S - Matches any non-whitespace character\n * - Repeats a character zero or more times\n *? - Repeats a character zero or more times (non-greedy)\n + - Repeats a character one or more times.\n +? - Repeats a character one or more times (non-greedy)\n [aeiou] - Matches a single character in the listed set\n [^XYZ] - Matches a single character not in the listed set\n [a-z0-9] - The set of characters can include a range\n ( - Indicates where string extraction is to start\n ) - Indicates where String extraction is to end\n'''\n\nst = \"This is a string statement, this is a this is which is a this. 
Just for testing regex\"\n\n#USING re.search (NB: there is no re.find; the module offers search, match, findall and finditer)\nif re.search(\"is\", st):\n    print(\"Match found\")\nprint(f'{re.search(\"is\", st)}')\n" }, { "alpha_fraction": 0.6730434894561768, "alphanum_fraction": 0.6852173805236816, "avg_line_length": 23.489795684814453, "blob_id": "c59c8cd17b486744d4608f122a2db0c4a480f756", "content_id": "fbf601e2d0bc1cdc3ce708221fe228a0dd0f22aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 105, "num_lines": 49, "path": "/SNIPPETS/WordCounter.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import time\nimport string\n\n#TYPING DURATION\nprint(\"Enter any text in: \")\nfor i in range(3, 0, -1):\n    print(i)\n    time.sleep(1)\ninput(\"Press Enter and start typing: \")\nstart_time = time.time()\ntext = input(\"Type\\n\")\nend_time = time.time()\nelapsed_time = end_time - start_time\nelapsed_time = round(elapsed_time, 2)\nprint(f\"ELAPSED TIME: {elapsed_time} seconds\")\n\nalphabets = string.ascii_letters\nvowels = 'aeiouAEIOU'\nconsonants = \"\"\nfor i in alphabets:\n    if i not in vowels:\n        consonants += i\nprint(f'STRING OF CONSONANTS ARE: {consonants} of length {len(consonants)}')\n\nvowelCount = 0\nconsonantCount = 0\ncharacterCount = 0\nwordCount = 0\n\nfor i in text:\n    if i in vowels:\n        vowelCount += 1\n    if i in consonants:\n        consonantCount += 1\n    characterCount += 1\n\nprint(\"VOWELS = {}\\nCHARACTERS = {}\\nCONSONANTS = {}\".format(vowelCount, characterCount, consonantCount))\n\n\n#WORD COUNTER\ny = text.split(\" \")\nprint(y)\nprint(f'NUMBER OF WORDS: {len(y)}')\n\ntyping_speed = len(y) / elapsed_time\ntyping_speed_Minutes = 60 * typing_speed\nprint(f'Your typing speed is: {typing_speed} WPS or {typing_speed_Minutes} WPM ')" }, { "alpha_fraction": 0.6394472122192383, "alphanum_fraction": 0.6494975090026855, "avg_line_length": 17.289474487304688, "blob_id": "a625767b7ab0f412b892454cc8b2cfc6ec80ca37", "content_id": "1b4819473d6e16ae005b6b6d34057529b2ebc153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 69, "num_lines": 76, "path": "/Strings.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "#List of alphabets\nimport string\n\nprint(string.ascii_letters)\nprint(string.ascii_lowercase) #Check others\nprint(string.ascii_uppercase)\nprint(string.digits)\nprint(string.punctuation)\nprint(string.hexdigits)\nprint(string.printable)\n\n#Raw Strings - Raw strings ignore escape characters. i.e prints any \\\nprint(r'Hello, I\\'m Ibrahim')\n\n\n#Multiline Strings\n'''\n I am\n Who I am\n '''\nprint(''' I\n am Ibrahim ''')\n\n#Indexing and slicing. 
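A slice s[a:b] runs from index a up to, but not including, index b.\n\n# A short hedged sketch of indexing vs slicing (word is a made-up variable):\nword = \"Ibrahim\"\nprint(word[0]) # 'I' - a single index\nprint(word[0:3]) # 'Ibr' - indices 0, 1 and 2\nprint(word[-3:]) # 'him' - negative indices count from the end\n\n#Slicing 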
syntax = str[begin, end+1]\nmsg = \"This is a test string\"\nprint(msg[5:7])\nprint(msg[0:])\nprint(msg[:])\nprint(msg[:7])\n\n\n#in & not\nmsg = \"This is a test string\"\nif \"test\" not in msg:\n print(\"True\")\nelse: print(\"False\")\n\n#Some methods:\n#lower()\n#upper()\n#islower()\n#isspace()\n#isalnum()\n#isdecimal()\n\n\n#Join - .join(takes in list of items)\nfname, mname, sname = \"Ibrahim\", \"Musa\", \"Suleiman\"\nx = '**'.join([fname, mname, sname])\nprint(x)\n\n#Split - returns a list of items\nx = \"Ibrahim**Musa**Suleiman\"\ny = x.split(\"**\")\nprint(y)\n\n\n#Justifying\nx = 'This'\nprint(x.rjust(10))\nprint(x.rjust(10, \"*\"))\nprint(x.center(10, \"*\"))\n\n\n# Romoving white space. strip(), rstrip(), lstrip()\nx = \" This is a sample \"\nprint(x.strip())\nprint(x.rstrip())\nprint(x.lstrip())\n\ny = \"balaTest this outbala\"\nprint(y.strip(\"bala\"))\n\nx = \"This is a statement\"\nn = x.lower().count('a', 0, len(x)) #from 0 index to len(x) index\nprint(n)\n" }, { "alpha_fraction": 0.5891472697257996, "alphanum_fraction": 0.6443798542022705, "avg_line_length": 25.487178802490234, "blob_id": "d6b087998ec8f607e8553709d817b0613e1fdc13", "content_id": "b8e013aee836b1fcf8f6f0e2ecad4cbb443b05b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 102, "num_lines": 39, "path": "/Turtle Graphics.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import turtle\nimport time\n\ndef runTurtle():\n turtle.title(\"This is the title\")\n turtle.penup()\n turtle.setposition(0,0) #0,0 is center of screen, also -x, -y\n turtle.setheading(45) #setheading(angle): sets the pen to face in a particular direction\n turtle.pencolor(\"#ff0000\")\n turtle.pendown()\n turtle.forward(200) #forward right direction\n turtle.left(90)\n time.sleep(1)\n\n turtle.pencolor(\"#0000ff\")\n turtle.forward(150)\n turtle.left(90)\n time.sleep(1)\n turtle.pensize(4)\n turtle.hideturtle()\n\n turtle.pencolor(\"#00ff00\")\n turtle.forward(200)\n turtle.left(90)\n time.sleep(1)\n turtle.pensize(10)\n\n turtle.pencolor('#123456')\n turtle.forward(150)\n time.sleep(2)\n turtle.exitonclick()\n\nrunTurtle()\n\n#REFERENCE; HALTERMAN\n#Other methods\n#speed(arg) 0 == 'fastest', 10 == 'fast', 6 == 'normal', 3 == 'slow', 1 == 'slowest'\n#setheading(angle): sets the pen to face in a particular direction\n#setposition(x,y) 0,0 is center of screen" }, { "alpha_fraction": 0.6906927824020386, "alphanum_fraction": 0.7144856452941895, "avg_line_length": 39.79999923706055, "blob_id": "34b3ec4ec1f1ba418528bbfa33af3918287774d7", "content_id": "fa026f4f46290bf8d8f88dd104a7ec8a0dc5a317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 139, "num_lines": 35, "path": "/Requests.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import requests\n\n\n#Similar reference files: Urllib.py and Urrlib2.py. Contains urllib module\n\n#Easily make hppt requests to get information from websites\n#NB: requests module is great for getting info from websites, but isn't great for parsing those info,\n#better to use beautifulsoup for parsing.\n\n\n#STATUS CODE & VALUES\n#1. 200 Response OK\n#2. 
300 Redirect\n#3 400 Client error (Trying to access a website you are not authorized to)\n#4 500 Server error\n\n\n#urlTest = \"https://www.google.com\"\nurlTest = 'https://xkcd.com/353/'\nresponse = requests.get(urlTest)\n#response = requests.get('https://www.behance.net/ibrahimsuleiman3', auth=('user', 'pass')) #Requires login details\nprint(f'THE STATUS CODE: {response.status_code}') #Returns response code\nprint(response.ok) #Returns true(<400 status code) or false(400 or 500 status code). Check for only client & server error (main focus)\nprint(f'THE REQUEST HEADERS: {response.headers}')\n\nprint('\\n\\n')\nprint(f'RESPONSE VALUE: {response.text}') #Returns text content of the website in unicode(html,css...)\n\n\n#Download images from website, use content (returns content in byte)\nUrlImage = \"https://imgs.xkcd.com/comics/python.png\"\nresponse1 = requests.get(UrlImage)\nprint(f'IMAGE IN BINARY:\\n {response1.content}') #Save in a file to be able to view\nwith open('Internet3.png', 'wb') as f:\n f.write(response1.content)\n\n" }, { "alpha_fraction": 0.6665441393852234, "alphanum_fraction": 0.7095588445663452, "avg_line_length": 27.01030921936035, "blob_id": "ff0971478d87b54d7bf8d16146a7f4b5c847c901", "content_id": "64305389384f79d024cc0e0c0eb20730672b30ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2720, "license_type": "no_license", "max_line_length": 137, "num_lines": 97, "path": "/Pyautogui.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "#check pyautogui document @pyautogui.readthedocs.io\n\nimport pyautogui as pya\nimport time\n\nDOWN = pya.keyDown('ctrl')\n\npya.FAILSAFE = True #Move mouse to left top corner of monitor, raise exception FaisSafeEsception\n\n#pya.PAUSE = 2 #Pause every pya action for 2 seconds after every action, Another fail safe\n\nprint(pya.size())\nprint(f'width = {pya.size()[0]}px, height = {pya.size()[1]}px')\n\n\n#Moving the mouse\n#pay.moveTo moves the mouse relative to the screen position (0,0)\nfor i in range(1):\n pya.moveTo(100, 100, duration=1, tween=pya.easeInBounce) #tween is mouse transition animation\n pya.moveTo(200, 100, duration=0.75, tween=pya.easeInOutElastic) #Check for other tween options\n pya.moveTo(200, 200, duration=.5, tween=pya.easeInExpo)\n pya.moveTo(100, 200, duration=2, tween=pya.easeOutBounce)\npya.PAUSE = 2\n\n#pya.moveRel(100, 200) Moves the mouse relative to its current position\n\n\n#Getting the mouse position\nprint(\"\\n\",pya.position())\nprint(f'The mouse is positioned @ x= {pya.position()[0]}, y = {pya.position()[1]}')\ntime.sleep(2)\n\n#Clicking the mouse\npya.click(600,100)\ntime.sleep(2)\npya.click(button='right')\n#pya.click(button='right', clicks=2, interval=0.25) will double click @ 0.25s interval\n#similarly, pya.doubleClick(), pya.tripleClick(), pya.rightClick()... with optional parameters\ntime.sleep(3)\n\n# Similarly; pya.mouseDown(x=45, y=65), pya.mouseDown(button='right')\n\n#Drag the mouse\npya.dragTo(900, 300, duration=3)\ntime.sleep(2)\n#Similarly, pya.draRel(200, 200, duration=2)\n\n\n#Scrolling the mouse\npya.scroll(20) #+ve integer scrolls up\ntime.sleep(2)\n\n\n#Getting screenshot\nscreenshot = pya.screenshot()\nprint(screenshot.getpixel((150,230))) #Returns Color of the point in rgb tuple. 
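The tuple is (R, G, B).\n# A hedged sketch - compare that pixel against an expected colour (coordinates and colour are arbitrary):\nprint(screenshot.getpixel((150, 230)) == (255, 255, 255)) #True only if that pixel is pure white\n# 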
No alpha value because screenshots are fully opaque\n\n\n#Analyze screenshot\n#print(pya.pixelMatchesColor(100,200,(83, 62, 64))) #Returns true or false if color matches\n\n#Image recognition\n#pya.locateOnScreen('image.png')\n#pya.locateAllOnScreen('image.png')\n\n\n#Controlling keypboard\ntime.sleep(5)\npya.click(pya.position()[0], pya.position()[1])\npya.typewrite(\"This is a test text\", 0.2)\ntime.sleep(3)\n\n#Key Names\n#E.g ESC = 'esc\n#ENTER = 'enter'\n# use pya.KEYBOARD_KEYS to view list of all keypress\npya.typewrite(['ctrl', 'a', 'right']) #Simulate typing, NOT CTRL + A + RIGHT ARROW\ntime.sleep(2)\n\n\n#Pressing and releasing the keyboards\npya.keyDown('ctrl')\npya.press('a')\npya.keyUp('ctrl')\npya.click(button='right')\ntime.sleep(2)\npya.moveRel(3, 10)\n\n\n#OnScreen()\n#To check if XY coordinates are on the screen\npya.onScreen(0,-1) #-1 is not on the screen, hence false\n\n#Hotkey\npya.hotkey('ctrl', 'a')\ntime.sleep(2)\npya.hotkey('win', 'd')\n\n\n\n" }, { "alpha_fraction": 0.46987950801849365, "alphanum_fraction": 0.47590360045433044, "avg_line_length": 29.18181800842285, "blob_id": "ddee44c0bafdb2eec564612c4d75e512f37247e5", "content_id": "9224c4e7590065a3808167f5bb2a45b4b6a842ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "no_license", "max_line_length": 124, "num_lines": 33, "path": "/Function.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#====================== KEYWORD ARGUMENT =====================================================\ndef fullName(fName, mName, sName):\n x = f\"Your first Name is {fName}\"\n y = f\"Your Middle Name is {mName}\"\n z = f\"Your Surname is {sName}\"\n print(f'{x}\\n{y}\\n{z}')\n\nfullName(sName=\"Suleiman\", fName=\"Ibrahim\", mName=\"Musa\")\n\n\n\n#===================================== PARAMETER PACKING ================================================================\ndef multiplier(*args):\n answer = 1\n for i in args:\n answer *= i\n print(f'THE MULTIPLIED VALUE = {answer}')\n\nmultiplier(2,3,4,10)\n\n\n\n#===================================== **Kwargs ================================================================\n#It is a parameter that will pack all arguments into a dictionary\n\n\n#CHECK MORE, REFERENCE INCOMPLETE\ndef fullID(**kwargs):\n firstName = kwargs['fName']\n lastName = kwargs['lName']\n print(f'FIRST NAME: {firstName} \\nLAST NAME: {lastName}')\n\nfullID(fName=\"Ibrahim\", lName=\"Suleiman\")" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 29, "blob_id": "abfe27f9d72ef04d14eceb4a4364e4ca3baa7ccb", "content_id": "7174d928b1a16064bc358960d00d8c07b10bbabc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/BeauifulSoup.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nfrom bs4 import BeautifulSoup" }, { "alpha_fraction": 0.6133720874786377, "alphanum_fraction": 0.6395348906517029, "avg_line_length": 25.384614944458008, "blob_id": "66e8027b29e84c939b48ca558e65081ee52b9042", "content_id": "81fb9018050ac746c5e845284dc8ab034d4f58c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 84, "num_lines": 13, "path": "/Map.py", 
"repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Map function applies a function to each item in an iterable (ie, lists, tuples etc)\n\n#map(function, iterable)\n\nlist_of_nums = [2,3,4,5,6,7,8,8]\nsquare_func = lambda z: z**2\nsquare_value = map(square_func, list_of_nums)\n#print(list(square_value))\n\nprint('\\n=============')\n\nfor i,j in zip(list_of_nums, square_value):\n print(f'{i} -- {j}')\n" }, { "alpha_fraction": 0.6099071502685547, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 28.363636016845703, "blob_id": "fa8e61b6711ae480cb2bed72fce32697d7a6efa0", "content_id": "4b670e6d67c0f392802b2ea1bcaef0032d46889b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/Lambda.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Lambda square of number\nsquare_of_number = lambda z: z**2\nprint(F'SQUARE: {square_of_number(20)}')\n\n\n#Lambda quadratic equation: y = 3*(X**2) + 4*X + 5\nquad = lambda X: 3*(X**2) + 4*X + 5\nprint(f'QUAD: {quad(3)}')\n\nwelcome = lambda fName, sName: f'You are welcome Mr {fName} {sName}'\nprint(welcome(\"Ibrahim\", \"Suleiman\"))" }, { "alpha_fraction": 0.6079545617103577, "alphanum_fraction": 0.6221590638160706, "avg_line_length": 19.58823585510254, "blob_id": "5b46d6b4028bf199c188d911083251dd6502e258", "content_id": "b7b2b18cf59007836f1ea2754fe29aa14aa6518e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 93, "num_lines": 17, "path": "/Generators.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n #A generator is a programming object that produces (that is, generates) a sequence of values.\n\ndef gen():\n yield \"Hello\"\n yield \"How are you\"\n yield \"Good to know you\"\n yield \"My name is Ibrahim\"\n yield \"The product of 5 and 6 is: \"\n yield \"30\"\n\nx = gen()\nprint(next(x))\nprint(next((x)))\nprint('\\n'*3)\n\nfor i in gen():\n print(i)\n" }, { "alpha_fraction": 0.6394472122192383, "alphanum_fraction": 0.6494975090026855, "avg_line_length": 22.072463989257812, "blob_id": "143f78b793a0772073bcc3239ce361d2735339d4", "content_id": "4ee7bc641b9370addd1d07e358711844a5343abc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1592, "license_type": "no_license", "max_line_length": 98, "num_lines": 69, "path": "/Set.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Does not allow duplicate, and unordered, but mutable.\n#It is faster than a list\n\n#Python supports the standard mathematical set operations of\n# intersection, union, set difference, and symmetric difference.\n\n#DECLARING EMPTY SET\nx= set() #NOTE: x = {} is not an empty set, rather an empty dictionary\n\ns = {10,10,10,3,2}\nprint(s, end='\\n\\n')\n\nL = ['Ibrahim','Ibrahim','Ibrahim',\"Musa\"]\nprint(L,end='\\n')\nS = set(L)\nprint(S)\n\n#Counting characters using set\ntestT = \"Ibrahim Musa Suleiman\"\nprint(f'CHARACTERS PRESENT: {set(testT)}, NUMBER OF CHARACTERS: {len(set(testT))}')\n\n#Add elements to a set\ny = set()\ny.add(\"One\")\ny.add(\"Two\")\ny.add(\"Three\")\ny.add(\"Four\")\nprint(y)\nprint(f'ADDED ELEMENT: {y}')\n\n#Remove element from a 
set\ny.remove(\"Three\") #OR y.discard(\"Three\")\nprint(f'ROM0VED ELEMENT: {y}')\n\n#y.pop() removes the last element in the tuple, but tuple is unordered, ie: removes any element\n\n\n#Clear a set\ny.clear()\n\n#Delete a tuple\ndel y\n\n\n#JOIN TWO SETS\n#Assume Set1 and Set2 are defined, use the Set1.update(Set2). print(Set1)\n\n#SET OPERATIONS\n#UNION A|B\n#INTERSECTION A&B\n#SET DIFFERENCE A-B ELEMENTS IN A BUT NOT IN B\n#SYMETRIC DIFFERENCE A^B ELEMENTS IN A OR B, BUT NOT IN BOTH\n\n#Check reference for more\n\nA = {\"IBRAHIM\", \"IBRAHIM\", \"MUSA\"}\nB = {\"SULEIMAN\", \"20s\", \"IBRAHIM\"}\n\nprint(f'THE UNION: {A|B}')\nprint(f'THE INTERSECT: {A&B}')\nprint(f'THE DIFFERENCE: {A-B}')\nprint(f'THE SYMETRIC DIFFERENCE: {A^B}')\n\nx = (A|B) - (A&B)\n\nprint(f'SIMILARLY: {x}')\n\n#SET QUANTIFICATION WITH ALL AND ANY\n#Check reference" }, { "alpha_fraction": 0.688524603843689, "alphanum_fraction": 0.7153502106666565, "avg_line_length": 32.5, "blob_id": "4f8f5b04b7834635d8e86884f5c95ce13d5461e3", "content_id": "7d5b85fba2d1b886a5ab128ff6af954930b3f47e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 671, "license_type": "no_license", "max_line_length": 89, "num_lines": 20, "path": "/Urllib2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n\n#Similar reference files: Urllib1.py and Requests.py. Contains urllib and requests module\n\n#using urllib request, There's also requests module, check Internet2\nfrom urllib import request\n\n\n#Fetch data from the internet\nresponse = request.urlopen(\"https://www.google.com\")\nprint(response) #Returns a response\nprint(f'STATUS CODE IS: {response.getcode()}')\n\n#STATUS CODE & VALUES\n#1. 200 Response OK\n#2. 
300 Redirect\n#3 400 Client error (Trying to access a website you are not authorized to)\n#4 500 Server error\n\n#Read data from the website\nresponse = request.urlopen(\"https://www.google.com\")\nprint(f'READ DATA: {response.read()}')" }, { "alpha_fraction": 0.6812412142753601, "alphanum_fraction": 0.6826516389846802, "avg_line_length": 38.38888931274414, "blob_id": "a9cf2bf842baabf7547a06d41b7b03d7c04044af", "content_id": "1339be9eef56e853e71ad52c79429e3b16807a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 709, "license_type": "no_license", "max_line_length": 103, "num_lines": 18, "path": "/Executable File.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#NB: Windows defender may prevent running, hence, turn it off\n#Make sure pip and pyinstaller are installed/updated\n\n\n#Copy the python file (and all relevant files, if any) to a new folder\n#Access CMD, with directory being the folder\n\n#Using pyinstaller, Note these commands:\n'''\n -F (Make all in 1 file)\n -w (Remove terminal window, in programs where it is not needed)\n -i iconName.ico (Adds custom icon, must be a .ico)\n -scriptName.py ()\n E.g; C:\\Users....\\New Folder >pyinstaller -F -w -i iconName.ico scriptName.py\n\n Locate exe file with 'dist' folder\n NB: Move the .exe from the 'dist' folder before you can see the icon reflect instead of the default\n'''" }, { "alpha_fraction": 0.5347825884819031, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 14.399999618530273, "blob_id": "6abeb431343e799a9b7ff777b52d388751c676c8", "content_id": "c5efb95630d655bc93ed8e88688afab29fbab9dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/modules2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "def sumAll(*Values):\n return sum(Values)\n\n\ndef timesAll(*Values):\n answer = 1\n for i in Values:\n answer *= i\n return answer\n\n'''\nx = sumAll(3,4,2)\ny = timesAll(2,3,4)\nprint(f'The value of x= {x} and y= {y}')\n'''" }, { "alpha_fraction": 0.6425992846488953, "alphanum_fraction": 0.7292418479919434, "avg_line_length": 20.230770111083984, "blob_id": "eff601e1d49931077923871c3390a8b5db902004", "content_id": "da97e74938606629b4ccf7482c0459996a6079d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/Calendar.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import calendar\n\ncal = calendar.month(2020, 1)\n\nprint(calendar.month(2020,1))\nprint(calendar.calendar(2020))\n\nprint(\"First week\")\nprint(calendar.firstweekday() + 2)\n\nprint('Is this year a leap year? 
: {}'.format(calendar.isleap(2021)))\n\nprint(calendar.monthcalendar(2020, 9))\n\n" }, { "alpha_fraction": 0.6257861852645874, "alphanum_fraction": 0.6305031180381775, "avg_line_length": 20.200000762939453, "blob_id": "4be2417357d4db804609215ed272d21dc71a1c6d", "content_id": "67f0aec1573e4b0e89864999b76df03df378c8f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 88, "num_lines": 30, "path": "/Pygame.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import pygame as p\nimport time\n\np.mixer.init() #Initialize the mixer, FIRST THING TO DO\n\n#Playing Music with Pygame\n\ndef playMusic():\n try:\n paused #Checks if paused variable is initialized\n except NameError:\n p.mixer.music.load(\"Sounds/sound1.mp3\")\n p.mixer.music.play()\n else:\n p.mixer.music.unpause() #If paused is initialized, do the else statement\n\ndef pauseMusic():\n global paused\n paused = True\n p.mixer.music.pause()\n\ndef stopMusic():\n p.mixer.music.stop()\n\n#Volume control\n\nx = input('Enter command:')\nif x == 'play':\n playMusic()\n time.sleep(5)\n" }, { "alpha_fraction": 0.6443940997123718, "alphanum_fraction": 0.6613816618919373, "avg_line_length": 22.83783721923828, "blob_id": "43c3fddb4167b616b765b692e7c4a02d36f2e32f", "content_id": "cdb8a51e0c5bb1953ec766d48e579eb0bd256999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 97, "num_lines": 74, "path": "/Dictionary.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#CREATE A DICTIONARY: 2 METHODS\ncars = {\"Benz\": 2,\n \"Corola\": 1}\n\ncars2 = dict(fname=\"Ibrahim\", mname=\"Musa\", sname=\"Suleiman\")\n\nprint(f'The number of Benz cars are: {cars[\"Benz\"]}')\nprint(f'You are welcome Mr {cars2[\"fname\"]} {cars2[\"mname\"]} {cars2[\"sname\"]}')\n\n#Copy a dictionary\ncars2 = dict(fname=\"Ibrahim\", mname=\"Musa\", sname=\"Suleiman\")\ncars2_copy = cars2.copy()\n\n#Add element\ncars['Toyota'] = 5\nprint(cars)\n\n#Access keys and values\nprint(f'The keys of the dictionary are: {cars.keys()}')\nprint(f'The values of the dictionary are: {cars.values()}')\n\nfor i in (cars.keys()):\n print(f'The number of {i} cars are: {cars.get(i)} OR {cars[i]}') #of cars[i]\nprint(\"\\n\")\n\n\nfor i,j in (cars.items()):\n print(f'The number of {i} cars are {j}')\nprint('\\n')\n\n#Update a dictionary\ndict1 = dict(fname=\"One\", sname=\"Two\", email=\"[email protected]\", phone=41234)\ndict2 = dict(fname=\"One\", sname=\"Three\", email=\"[email protected]\", phone=98765, address=\"My home address\")\ndict1.update(dict2)\nprint(f'DICT_MERGE: {dict1}')\n\n#Dictionary from lists\nkeyList = ['cat', 'Dog', 'Fish', 'Hen']\nvalueList = [2,4,8,3]\n\nfor i in zip(keyList, valueList):\n print(i)\nprint('\\n')\n\nfor i,j in zip(keyList, valueList):\n print(f'The number of {i} are {j}')\nprint('\\n')\n\n\nanimals = dict(zip(keyList, valueList))\nfor i in animals:\n print(f'The value of {i} are {animals[i]}')\nprint('\\n')\n\n#Deleting Elements\nprint(animals)\ndel animals['Dog']\nprint(animals.items())\nprint(animals)\nprint('\\n')\n\n#cars.pop('Benz') #Remove the Benz key value pair\n#cars.popitem() #Removes the last inserted element in the dictionary\n\n#Clear elements from the dictionary\nanimals.clear()\nprint(animals)\nprint('\\n')\n\n#Delete a 
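single key first (a hedged aside; 'Toyota' was added in the Add element step above):\nremoved = cars.pop('Toyota', None) #the second argument is a default, so a missing key cannot raise KeyError\nprint(f'POPPED VALUE: {removed}')\n\n#Delete a whole 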
dictionary\nprint(cars)\ndel cars\n#print(cars) would now raise NameError - del removes the name 'cars' completely\nprint('After del, the name cars no longer exists')\n\n" }, { "alpha_fraction": 0.6706096529960632, "alphanum_fraction": 0.6915377378463745, "avg_line_length": 29.55555534362793, "blob_id": "7bf54e75a6150eea7b1cdadb080631f89ea575a5", "content_id": "e4d71f57a078c162396c0616d1f3cb0e92ebdc4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1099, "license_type": "no_license", "max_line_length": 123, "num_lines": 36, "path": "/Sockets.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import socket\nimport time\n\n#NB: An alternative for reading content from the internet is the urllib module.\n\n'''\n#CREATE A SOCKET. NB: You need internet connection\nsockObject = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsockObject.connect((\"www.google.com\",80))\n'''\n\n\n#CREATE A SOCKET. NB: You need internet connection\nsockObject = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsockObject.connect(('data.pr4e.org',80)) #NB: Remove http(s)\n\n#Send command to receive data\ncommand = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\\r\\n\\r\\n'.encode() #NB .encode() encodes from Unicode to UTF-8\nsockObject.send(command)\nprint(\"Command sent\")\ntime.sleep(3)\nprint(\"Receiving data in...\")\nfor i in range(3,0,-1):\n    print(i, end=\"\\n\")\n    time.sleep(1)\ntime.sleep(1)\n\n#Receive and parse data\nwhile True:\n    data = sockObject.recv(512) #Receives up to 512 bytes at a time\n    if len(data) < 1:\n        break\n    print(data.decode()) #Decodes from UTF-8 to Unicode\nsockObject.close()\n\n#NB: the urllib module can achieve this as well, perhaps more easily" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 36.400001525878906, "blob_id": "9427bb00aed6679105748a815df07095515bb150", "content_id": "92c1913c9ec74ce5fff7c7aad634b1d7acb6fa50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 79, "num_lines": 5, "path": "/Run Python on CMD.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Save python script in a known location. 
E.g save the script as launch_codes.py\n#Navigate to location on windows CMD, or the equivalent on mac.\n#TYPE: python launch_codes.py\n\n#That's all" }, { "alpha_fraction": 0.5634328126907349, "alphanum_fraction": 0.5820895433425903, "avg_line_length": 36.71428680419922, "blob_id": "a3c17ef6a5d30645cf804d3eb3d37320018a3b23", "content_id": "a884d407346d57ffa983f6a3f5d6636aa072c028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 86, "num_lines": 7, "path": "/If_Name_Main.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n# If __name__ == '__main__':\n#Check if module is running directly or indirectly (as an imported module)\n#If directly, __name__ == '__main__', else, if imported module, __name__ != '__main__'\n#CHECK: Python Full Course: 5:25:00\n\nif __name__ == '__main__':\n pass\n\n\n\n" }, { "alpha_fraction": 0.4769230782985687, "alphanum_fraction": 0.5516483783721924, "avg_line_length": 31.464284896850586, "blob_id": "c7f28302968e998d2dcf195fa5840de71de96a0c", "content_id": "569ddb4756b8b126aaf54ae3bd64b76f23397926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/String Formatting.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nfname, mname, sname = \"Ibrahim\", \"Musa\", \"Suleiman\"\nprint(\"Welcome Mr {2}, {0} {1}\".format(fname, mname, sname))\n\n\n# By default, left justified\n# {:>3} => Right-justify the first argument with a width of 3 characters\n\na,b,c,d = 1,10,100,1000\nprint('a={0:>4}, b={1:>4}, c={2:>4}, d={3:>4}'.format(a,b,c,d))\nprint('a={0:>3}, b={1:>3}, c={2:>3}, d={3:>3}'.format(a,b,c,d))\nprint('a={0:>2}, b={1:>2}, c={2:>2}, d={3:>2}'.format(a,b,c,d))\nprint('a={0:>1}, b={1:>1}, c={2:>1}, d={3:>1}'.format(a,b,c,d))\nprint('a={0:>0}, b={1:>0}, c={2:>0}, d={3:>0}'.format(a,b,c,d))\nprint('a={0}, b={1}, c={2}, d={3}'.format(a,b,c,d))\n\n#Second method\nx,y,z = \"Ibrahim\", \"Musa\", \"Suleiman\"\nprint(f'You are welcome Mr {z} {x} {y}')\n\n#Third method\n\nfname, sname = \"Ibrahim\", \"Musa\"\nprint(\"I am %s of surname %s. Thank you.\" % (fname, sname))\n\nx,y = 45, 234.3456\nprint(\"The numbers are: %d and %f\" % (x,y))\n\n#Check reference for more\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7409090995788574, "avg_line_length": 35.5, "blob_id": "2fab31de6cc5125f66787825508da935ee3ac7a4", "content_id": "c5e3dfcb37493987d17a257bea98fcd30b5234cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 100, "num_lines": 6, "path": "/If Statement.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nimport pyttsx3\n\n#Ternary if statement. 
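The form is: value_if_true if condition else value_if_false\n\n# A tiny hedged sketch (age is a made-up variable):\nage = 20\nprint(\"adult\" if age >= 18 else \"minor\") # adult\n\n#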
Use it only where a single if/else expression is needed\n\nfname = input(\"Enter your firstname here: \")\nresponse = pyttsx3.speak(\"Access granted\") if fname == \"Ibrahim\" else pyttsx3.speak(\"Access Denied\")\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 15.615385055541992, "blob_id": "29aeabfbded1748c47baf605bd6e6a3020df7952", "content_id": "546f3c4ce08eea335b97dcc207f91f21f59d46fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/Bitwise Operators.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Bitwise operators operate on the individual bits of integers.\n\n#BITWISE OPERATORS\n'''\n\n& Bitwise AND\n| Bitwise OR\n^ Bitwise XOR\n~ Bitwise NOT\n<< Left Shift\n>> Right Shift\n\n'''" }, { "alpha_fraction": 0.6353166699409485, "alphanum_fraction": 0.6852207183837891, "avg_line_length": 25.100000381469727, "blob_id": "123a123f1eb867d89048661101965471eb4e0407", "content_id": "492d39c22ba0315a1cf4b40cbdb28bf416160656", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 80, "num_lines": 20, "path": "/Copy.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import copy\n\n#Check also: pyperclip and shutil\n\n#NOTE Shallow vs Deepcopy\n#Shallow copy -> one level deep; nested child objects are shared references\n#Deep copy -> a fully independent copy\n\nlistN = [2,3,6,3]\nlistN_copy = copy.copy(listN) #Shallow copy\nlistN[0] = 45\nprint(f'listN: {listN}')\nprint(f'listN_Copy: {listN_copy}')\n\nprint('')\nlistN2 = [[2,3,6,3],[7,2,3]]\nlistN_copy2 = copy.deepcopy(listN2) #Deep copy; a shallow copy would share the inner lists\nlistN2[0][1] = 45\nprint(f'listN2: {listN2}')\nprint(f'listN_Copy2: {listN_copy2}')" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6159420013427734, "avg_line_length": 18.571428298950195, "blob_id": "83437c77ceca2ba3f9896858b1c6c168eb075ae4", "content_id": "edb2be4718f940f6c4cd8d4d98dad17669f6ebc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 66, "num_lines": 7, "path": "/Errors.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#TYPES OF ERRORS\n\n#1. Syntax Error - the code breaks Python's grammar, so it never runs. E.g: if True print('hi')\n\n#2. Run-time Error - raised while the program runs. E.g: 3/0 (ZeroDivisionError) or x + 2 with x undefined (NameError)\n\n#3. Logic Error - the program runs but gives a wrong result. E.g: using + where * was intended\n" }, { "alpha_fraction": 0.6007798314094543, "alphanum_fraction": 0.628974199295044, "avg_line_length": 35.63736343383789, "blob_id": "03d43bb5da68355108a32bcb3736d7a146d12983", "content_id": "feb796991d8e9f9070bf91123f5636d87b06a8ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3334, "license_type": "no_license", "max_line_length": 104, "num_lines": 91, "path": "/Datetime.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import datetime\nimport pytz\n\n#NAIVE vs AWARE DATE&TIME\n#Naive date/time don't have enough info to determine things like: timezone, daylight saving time etc\n#Naive date & time are easier to work with\n#Aware date/time is the opposite of naive date/time\n\n#THERE ARE 3 PARTS: DATETIME.DATE, DATETIME.TIME and DATETIME.DATETIME\n\n#============================================ DATETIME.DATE ==============================\n#CREATE A DATE\ndate = datetime.date(2020, 12, 19)\nprint(f'THE DATE YOU CREATED IS: {date}')\n\n#GET TODAY'S DATE\ntoday = datetime.date.today()\nprint(f\"TODAY'S DATE IS: {today}\")\n\n\n\n#GET YEAR, MONTH OR DAY\ntoday = datetime.date.today()\ntodayDay = today.day\ntodayMonth = today.month\ntodayYear = today.year\nprint(f\"TODAY'S DATE IS; DAY:{todayDay}, MONTH:{todayMonth}, YEAR:{todayYear}\")\n\n#GET THE DAY OF THE WEEK\ntoday = datetime.date.today()\ntodayWeekDay = today.weekday() #Takes Monday as the first day of the week counting from 0 (ie: 0-6)\ntodayWeekDay2 = today.isoweekday() #Takes Monday as the first day of the week counting from 1 (ie: 1-7)\nprint(f\"TODAY'S WEEKDAY IS: {todayWeekDay} AND ALSO: {todayWeekDay2}\")\n\n\n#TIME DELTAS (Difference between two dates or times)\ntoday = datetime.date.today()\n#timeDelta = datetime.timedelta(days=14)\ntimeDelta = datetime.timedelta(days=14, hours=12, minutes=56, seconds=33, milliseconds=2000)\nnextDate = today + timeDelta\npreviousDate = today - timeDelta\nprint(f\"14 DAYS FROM NOW WILL BE: {nextDate}\")\nprint(f\"14 DAYS AGO WAS {previousDate}\")\n\n\n#NOTE: date2 = date1 + timeDelta,\n# and timeDelta = date2 - date1\ndate = datetime.date(2021, 12, 19)\ntoday = datetime.date.today()\ndifference = date - today\nprint(f\"DIFFERENCE BETWEEN DATE TO COME AND TODAY(in days) IS: {difference}, OR:{difference.days}\")\nprint(f\"DIFFERENCE BETWEEN DATE TO COME AND TODAY(in seconds) IS: {difference.total_seconds()}\")\n\n\n#================================== DATETIME.TIME ==============================\n#SET TIME\ntime = datetime.time(9,30,45,1250)\nprint(f\"THE TIME YOU DEFINED IS: {time}\")\n\n#PRESENT TIME\ntime2 = datetime.datetime.now().time()\nprint(f'THE TIME NOW IS: {time2}')\n\n#============================ DATETIME.DATETIME =================================\nt = datetime.datetime(2020,8,28,11,54,33,2000)\nprint(f'THE DATE-TIME YOU DEFINED IS: {t}')\nprint(f'THE DATE ONLY IS: {t.date()}')\nprint(f'THE TIME ONLY IS {t.time()}')\nprint(f'THE YEAR ENTERED IS: {t.year}')\n\n#=================== USING TIME DELTA WITH THE DATETIME.DATETIME ===============\ntimeDelta = datetime.timedelta(hours=26)\ntTime = t + timeDelta\nprint(f'26 HOURS FROM NOW WILL BE: {tTime}')\n\n\n#==================================== SIMILAR DATETIME METHODS ============================\ndt_today = datetime.datetime.today() #Returns current datetime with a timezone of none\ndt_now = datetime.datetime.now() #Returns current datetime with optional timezone\ndt_utcnow = 
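datetime.datetime.utcnow() #NB: even utcnow() is NAIVE (tzinfo is None)\n# A hedged aside - an AWARE utc timestamp via pytz (imported above):\ndt_aware = datetime.datetime.now(tz=pytz.UTC)\nprint(f'AWARE UTC NOW IS: {dt_aware}')\ndt_utcnow2 = 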
datetime.datetime.utcnow()\n\nprint(f'DATETIME.TODAY IS: {dt_today}')\nprint(f'DATETIME.NOW IS: {dt_now}')\nprint(f'DATETIME.UTCNOW IS: {dt_utcnow}')\n\n\n# ====================================== AWARE DATETIME ===================================\n# ======================================= USING PYTZ ========================\n\nt = datetime.datetime(2020,8,28,11,54,33,2000, tzinfo=pytz.UTC)\nprint(f'THE DATETIME USING pytz is: {t}')\n" }, { "alpha_fraction": 0.6754250526428223, "alphanum_fraction": 0.6939721703529358, "avg_line_length": 29.85714340209961, "blob_id": "95d2560d193796866fb7f787a9535ccc3754ec37", "content_id": "7598bcd844a2879a2a5f25cacc8e87eb48a813d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 76, "num_lines": 21, "path": "/Shutil.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import shutil\n\n#Check also; pyperclip\n#in the shutil module, there are 3 basic functions to copying a file: ie\n'''\n1. copyfile() - Copies content of a file\n2. copy() - copyfile() + permission mode + destination can be a directory\n3. copy2() - copy() + copy metadata (file's creation and modification times)\n'''\n\n\n#============= 1. copyfile() ============\n# NB: Clear shutil2 before running this code to observe the change\n\ninput(\"Press enter to copy the contents of Shutil1.txt into Shutil2.txt: \")\n\nshutil.copyfile('Shutil1.txt', 'Shutil2.txt') #src, des\nprint('DONE!')\n\n\n#NB: For both copy and copy2, the arguments are same as that for copyfile" }, { "alpha_fraction": 0.5721694231033325, "alphanum_fraction": 0.5721694231033325, "avg_line_length": 26.547618865966797, "blob_id": "61417bd7cd86976f2e90961d474e31e032f7df92", "content_id": "8c03bd177ca5b0c62f0ec86fd50ab85b0543085f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 123, "num_lines": 42, "path": "/OOP2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nclass Car:\n '''\n def __init__(self): #Constructor\n print('Welcome to this class')\n '''\n\n color = 'Red' #Class variable\n\n def __init__(self, carName, carModel, carPrice): #Constructor\n print('Welcome to this class')\n self.carName = carName #Instance variable\n self.carModel = carModel\n self.carPrice = carPrice\n print(f'CAR NAME: {self.carName}\\nCAR MODLE: {self.carModel}\\nCAR PRICE: {self.carPrice}\\nCAR COLOR: {self.color}')\n\n def identify(self):\n print(f'The name of this car is: {self.carName} and it costs {self.carPrice} dollars, of color {self.color}')\n\n\n#====================== INHERITANCE ==================\nclass Animals:\n alive = True\n id = 'This object is an animal object'\n def __init__(self):\n print(\"This is an animal class\")\n\n def sleep(self):\n print(\"The animal sleeps\")\n\n def eat(self):\n print(\"This animal sleeps\")\n\n\nclass Rabbit(Animals):\n pass\nclass Fish(Animals):\n pass\nclass Hawk(Animals):\n pass\n\nclass RabbitChild(Rabbit): #Multi-level Inheritance\n pass" }, { "alpha_fraction": 0.558309018611908, "alphanum_fraction": 0.5940233469009399, "avg_line_length": 24.867923736572266, "blob_id": "3d295144279574aa764bde4c9991bce73caa302a", "content_id": "63d0fcd27ba3fec9d9bd798e04ae1add10552ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1372, "license_type": "no_license", "max_line_length": 62, "num_lines": 53, "path": "/Classes2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nclass Employee:\n\n salary_increment = 1.05 #.05 (5%) increment\n\n #CREATE A CONSTRUCTOR\n def __init__(self, fName, sName, eMail, salary):\n self.fName = fName\n self.sName = sName\n self.eMail = eMail\n self.salary = salary\n self.fullName = f'{fName} {sName}'\n\n def fullDetails(self):\n print(f'First Name: {self.fName} \\n'\n f'Surname: {self.sName} \\n'\n f'Email: {self.eMail} \\n'\n f'Salary: {self.salary} \\n'\n f'FullName: {self.fullName} \\n')\n\n def apply_increment(self):\n self.salary = int(self.salary * self.salary_increment)\n\nemp_1 = Employee(\"Ibrahim\", \"Suleiman\", \"[email protected]\", \"$95,0000\")\nemp_2 = Employee(\"Musa\", \"Aboy\", \"[email protected]\", \"$115,000\")\n\n'''\n INSTEAD OF:\nemp_1.fName = \"Ibrahim\"\nemp_1.sName = \"Suleiman\"\nemp_1.eMail = \"[email protected]\"\nemp_1.salary = \"$95,000\"\n\nemp_2.fName = \"Musa\"\nemp_2.sName = \"Aboy\"\nemp_2.eMail = \"[email protected]\"\nemp_2.salary = \"$115,000\"\n'''\n\nprint(emp_1.eMail)\nprint(emp_2.eMail)\nprint(f'You are Welcome Mr {emp_1.fullName}')\n\n#CALLING A METHOD INSIDE A CLASS\nprint('\\nFull Details1')\nemp_1.fullDetails()\nprint('\\nFull Details2')\nEmployee.fullDetails(emp_2)\nprint(\"-------------\\n\")\n\nprint(f'Without salary increment: {emp_1.salary}')\n#emp_1.apply_increment()\nprint(f'After salary increment: {emp_1.salary}')\nprint(\"-----------------\\n\")\n" }, { "alpha_fraction": 0.6253822445869446, "alphanum_fraction": 0.6605504751205444, "avg_line_length": 30.14285659790039, "blob_id": "3eced2844002b4587addfe029906611369320083", "content_id": "008c0abebd494df879dcd534705ff02e80bf7012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 104, "num_lines": 42, "path": "/JSON.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\nimport json\n\n#JSON -> JavaScript Object Notation\n#A lightweight data format that is used for data exchange\n\n#Converting of python object to JSON is called Serialization or encoding\n#Converting of json to python object is called Deserialization or decoding\n\ndict1 = {'fName': \"Ibrahim\",\n 'mName': \"Musa\",\n 'sName': \"Suleiman\",\n 'fullName': [\"Ibrahim\",\"Musa\",\"Suleiman\"],\n 'age': 26,\n 'adult': True,\n 'phone Numbers':{\n 'Phone1': '08123456776',\n 'Phone2': '07087654334'\n }\n}\n\n#Converting dict1 to JSON\n\n#dict1JSON = json.dumps(dict1)\ndict1JSON = json.dumps(dict1, indent=4, sort_keys=True)\n#dict1JSON = json.dumps(dict1, indent=4, separators=(\"$\",\"#\"), sort_keys=True) #dumps, s -> string\nprint(f'DICTIONARY: {dict1}')\nprint(f'JSON: {dict1JSON}')\n\n#dump python object into json file\nwith open('JSON.json', 'w') as jsonFileObj:\n json.dump(dict1, jsonFileObj, indent=4, sort_keys=True)\n\n#Converting json file into python object:\ndict1Python = json.loads(dict1JSON)\nprint(f'JSON_TO_PYTHON: {dict1Python}')\n\n#Load json file from json file\nwith open('JSON.json', 'r') as jsonFileObj:\n content = json.load(jsonFileObj)\n #content1 = json.loads(content)\n print(f\"CONTENT: {content}\")\n #print(f\"CONTENT1: {content1}\")" }, { "alpha_fraction": 0.6687697172164917, "alphanum_fraction": 0.6782334446907043, "avg_line_length": 30.600000381469727, "blob_id": 
"e521bc4f8749a6de10ef4cc73eaf24e6ca01985e", "content_id": "3ba71e228d9cb4bfea25274f3bcee4d2d67dcd6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 86, "num_lines": 10, "path": "/tuple.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#TUPLE / LIST: Tuple is less in size, hence better in size management compared to list\n\n#Tuple is immutable. i.e: You can't add or modify a tuple, have to create another\n\nx = (\"One\",) #This is a tuple\nx2 = (\"One\",\"Two\", \"One\")\nprint(x,x2)\n\n#COUNT PARTICULAR ELEMENTS IN A TUPLE\nprint(f'COUNT: {x2.count(\"One\")}')\n" }, { "alpha_fraction": 0.5871621370315552, "alphanum_fraction": 0.5993243455886841, "avg_line_length": 29.8125, "blob_id": "76b2455a42b9c715239344d51ba7bff6d0abbadf", "content_id": "3abe5d0158e75d47468eaaacb120aa91a4dc0f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 104, "num_lines": 48, "path": "/Threading.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#CHECK: Python Full Course: 5:40:00\n\nimport threading as thd\n\n#============= VIEW ACTIVE THREADS ====================\nprint(f'NUMBER OF ACTIVE THREADS: {thd.active_count()}')\n\n#============= VIEW LIST OF ACTIVE THREADS ====================\nprint(f'LIST OF ACTIVE THREADS: {thd.enumerate()}')\n\n\n#===================== EXAMPLE: RUN 3 TASKS CONCURRENTLY ==============\nimport time\ndef firstTimer():\n for i in range(1,10):\n time.sleep(1)\n print(f'FIRST TIMER: {i}\\n')\n\ndef secondTimer():\n for i in range(1,10):\n time.sleep(1)\n print(f'SECOND TIMER: {i}\\n')\n\ndef thirdTimer():\n for i in range(1,10):\n time.sleep(1)\n print(f'THIRD TIMER: {i}\\n')\n\nprint(\"Starting Thread\")\nfirstThread = thd.Thread(target=firstTimer, args=()) #arg=() Optional, if function takes in parameter\nfirstThread.start()\n\nsecondThread = thd.Thread(target=secondTimer, args=())\nsecondThread.start()\n\nthirdThread = thd.Thread(target=thirdTimer, args=())\nthirdThread.start()\n\n\n#============================ CHECK DURATION OF MAIN THREAD ===========================\nprint(f'Main thread ran for {time.perf_counter()} seconds')\n\n\n#==================== Thread Synchronization =============================\n#This is a concept where one thread waits for another to finish execution before it resumes\n\nfirstThread.join() #Main thread must wait for firstThread to finish executing before it can proceed\nprint(f'Main thread ran for {time.perf_counter()} seconds')\n" }, { "alpha_fraction": 0.5401273965835571, "alphanum_fraction": 0.5401273965835571, "avg_line_length": 22.787878036499023, "blob_id": "f54f61ab19f6063330302f1ffff40cb570a7866c", "content_id": "9d080457e6decdef11cb9ca89c4a5f051dcba0e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 75, "num_lines": 33, "path": "/OOP3.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Super Keyword\n\nclass Rectangle:\n def __init__(self, length, width):\n self.length = length\n self.width = width\n\nclass Square(Rectangle):\n def __init__(self, length, width):\n super().__init__(length, width)\n\n '''\n INSTEAD OF:\n self.length = length\n 
self.width = width\n '''\n def area(self):\n print(f'AREA OF SQUARE = {self.width * self.length}')\n\n\nclass Cube(Rectangle):\n def __init__(self, length, width, height):\n super().__init__(length, width)\n self.height = height\n\n '''\n INSTEAD OF:\n self.length = length\n self.width = width\n self.height = height\n '''\n def volume(self):\n print(f'VOLUME OF CUBE = {self.width * self.length * self.height}')" }, { "alpha_fraction": 0.5424657464027405, "alphanum_fraction": 0.6493150591850281, "avg_line_length": 28.96268653869629, "blob_id": "6e2c526860c260a41576a85b5187bb6224e62f7f", "content_id": "c4a67d957161f26ecf1024b60a749e938e62724a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4015, "license_type": "no_license", "max_line_length": 77, "num_lines": 134, "path": "/Numpy.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n#NUMPY VS LIST\n#1. Less memory\n#2. Fast\n#3. Convenient\n\n\n#SINGLE-DIMENSIONAL ARRAY\na = np.array([1,2,3,4])\nprint(a)\n\n#MULTI-DIMENSIONAL ARRAY\nb = np.array([(1,2,3,4),(4,3,2,1)])\nprint(f'MULTIDIMENSIONAL: {b}')\n\n#MULTI-DIMENSIONAL ARRAY TO SINGLE-DIMENSIONAL ARRAY\nb = np.array([(1,2,3,4),(4,3,2,1)])\nx = b.ravel()\nprint(f'RAVELED: {x}')\n\n#RANGE SIMILAR TO RANGE IN FOR LOOP\nz = np.arange(0, 11, 2)\nprint(f'THE RANGE LIST IS: {z}')\n\n\n#Check the dimension of the array, 1 -> single-dimension, 2 -> Multi-dimension\nc = np.array([(1,2,3,4),(4,3,2,1),(14,13,12,11),(24,23,22,21)])\nprint(f'The dimension of the array is {c.ndim}')\n\n#Check size in byte of each element in the array\nc = np.array([(1,2,3,4),(4,3,2,1),(14,13,12,11),(24,23,22,21)])\nprint(f'The array element size is {c.itemsize} bytes')\n\n#Check array element data type\nd = np.array(['Ibrahim','Suleiman'])\nprint(f'The array elements data type is {d.dtype}')\n\n#Check size of the array (Number of elements)\nc = np.array([(1,2,3,4),(4,3,2,1),(14,13,12,11),(24,23,22,21)])\nprint(f'The number of elements in the array is {c.size}')\n\n#Check the shape of the array (Row by Column). DIFFERENT if not n X n\ne = np.array([(1,2,3,4),(4,3,2,1),(14,13,12,11),(24,23,22,21),(34,33,32,31)])\nprint(f'The shape of the array is: {e.shape}')\nprint(f'{e.shape[0]} rows by {e.shape[1]} columns')\n\n#Reshaping an array. 
i.e 2x6 reshaped to 6x2 AND 4,3\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12)])\nreshaped = d.reshape(6,2)\nreshaped1 = d.reshape(4,3)\nprint(f'RESHAPED = {reshaped}')\nprint(F'RESHAPED1 = {reshaped1}')\n\n#slicing elements in array (Similar to list)\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nprint(f'SLICE 1 = {d[0, 2:5]}')\nprint(f'SLICE 2 = {d[1, 2:5]}')\nprint(f'SLICE 3 = {d[0:, 2:5]}') #All the rows\nprint(f'SLICE 4 = {d[0:2, 2:5]}') #First 2 rows\n\n\n#List of values range equally spaced\nf = np.linspace(0,2.5, 5) #Returns 5 values of range of values from 0 to 2.5\nprint(f'LINSPACE VALUE {f}')\n\n\n#Minimum and Maximum value of arrays\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nminValue = d.min()\nmaxValue = d.max()\nprint(f'THE MINIMUM VALUE OF THE NUMPY ARRAY IS: {minValue}')\nprint(f'THE MAXIMUM VALUE OF THE NUMPY ARRAY IS: {maxValue}')\n\n#Sum of array elements\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nsumValue = d.sum()\nprint(F'THE SUM OF THE ELEMENTS IN THE NUMPY ARRAY IS: {sumValue}')\n\n#ARITHMETIC OPERATIONS (+,-,*,/) IN NUMPY OCCUR ELEMENT-WISE\na = np.array([(1,2,3), (3,2,1), (10,9,8)])\nb = np.array([(7,2,7), (5,1,1), (8,3,2)])\nprint(f'THE SUM OF THE ARRAYS = {a+b}')\n\n\n#Axis in arrays. ROWS = axis1, COLUMNS= axis 0\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nrowSum = d.sum(axis= 1)\ncolumnSum = d.sum(axis= 0)\nprint(f'THE SUM OF ELEMENTS IN THE ROWS = {rowSum}')\nprint(f'THE SUM OF ELEMENTS IN THE COLUMN = {columnSum}')\n\n\n#SQUARE ROOT OF ELEMENTS IN A NUMPY ARRAY\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nprint(f'THE SQUARE ROOT OF ELEMENTS IN THE ARRAY ARE: {np.sqrt(d)}')\n\n#STANDARD DEVIATION OF AN ARRAY\nd = np.array([(1,2,3,4,5,6),(7,8,9,10,11,12),(13,14,15,16,17,18)])\nprint(f'THE STANDARD DEVIATION OF ELEMENTS IN THE ARRAY IS: {np.std(d)}')\n\n\n\n#CONCATENATING ARRAYS; HORIZONTAL STACKING AND VERTICAL STACKING\na = np.array([(1,2,3), (3,2,1), (10,9,8)])\nb = np.array([(7,2,7), (5,1,1), (8,3,2)])\n\nhStack = np.hstack((a,b))\nvStack = np.vstack((a,b))\n\nprint(f'HORIZONTAL STACK: {hStack}')\nprint(f'VERTICAL STACK: {vStack}')\n\n\n#SPECIAL FUNCTIONS\n#SINE FUNCTION IN GRAPH PLOTTING VIA MATPLOTLIB\n\nx_axis = np.arange(0, 10, 2)\ny_axis = np.sin(x_axis)\nplt.plot(x_axis, y_axis)\nplt.show()\n\n#EXPONENTIAL VALUE E^X (E RAISED TO THE POWER X)\na = np.array([1,2,3,4])\nprint(f'EXPONENTIAL VALUE {np.exp(a)}')\n\n#NATURAL LOGARITHM\na = np.array([1,200,1000,400])\nprint(f'NATURAL LOG VALUE {np.log(a)}')\n\n#LOGARITHM BASE 10\na = np.array([1,200,1000,400])\nprint(f'LOG BASE 10 VALUE {np.log10(a)}')\n" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 12.5, "blob_id": "fda4f48094320d75973bff9fa0ca8d174139d145", "content_id": "11fa4fd44efed016c2c2d881fd6103c204f07348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/Keyword.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import keyword\n\n\n#Get list of all keywords\nfor i in keyword.kwlist:\n print(i)\n" }, { "alpha_fraction": 0.6602972149848938, "alphanum_fraction": 0.6819533109664917, "avg_line_length": 21.864078521728516, "blob_id": "ccefefd456d6759f2ade80e9ba1a7b38fc8a682a", "content_id": "4f1029707142433988e6a6273afb16b0c8b1d0eb", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2355, "license_type": "no_license", "max_line_length": 86, "num_lines": 103, "path": "/List.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#TUPLE / LIST: Tuple is less in size, hence better in size management compared to list\n\n#NB: List accepts different data types in one list and duplicates\n\nfnames = ['Ibrahim', \"Muhammad\", \"Abdullahi\", \"Zak\"]\nsnames = ['Suleiman', \"Jamilu\", \"Sabo\", \"panshak\"]\ntest = [['one', 'two', 'Three'], [\"Ten\", \"Nine\", \"Eight\"]]\n\nprint(fnames[1])\nprint(fnames[-1])\nprint(fnames)\n\n#Count particular element in a list\nL = [\"A\",\"B\",\"C\",\"A\",\"A\"]\nprint(f'COUNT: {L.count(\"A\")}')\n\n#Index of particular element in a list\nL = [\"A\",\"B\",\"C\",\"A\",\"A\"]\nprint(f'INDEX: {L.index(\"C\")}')\n\n#Create New Empty List\nmyListZZZ = list()\n\n#Number of items in a list\nprint(f'The number of names are: {len(fnames)}')\n\nprint(\"\\n\")\nprint(test)\nprint(test[1][2])\n\n#Append elements to a list\nmyListZZZ.append([\"One\",\"Two\"])\nprint(\"myListZZZ Appended: \",myListZZZ)\n#OR\nmyListZZZ += [\"New name1\", \"New name2\"]\n\n#Add elements at specified position\nmyListZZZ.insert(0, \"New One\")\nprint(\"myListZZZ Appended2: \",myListZZZ)\n\n#Remove items: Check below\n\n#Reverse list\nmyListZZZ.reverse()\nprint(f'REVERSED LIST: {myListZZZ}')\n\n#Sort elements in the list\nyy = [8,5,3,-6,-20, 0]\nfnames.sort()\nprint(f\"SORTED: {fnames}\")\nfnames.sort(reverse=True)\nprint(f\"SORTED2: {fnames}\")\nx5 = sorted(yy, reverse=True)\nprint(f'SORTED3: {x5}')\n\n#Copy a list, 3 Ways\nyy1 = [8,5,3,-6,-20, 0]\nyy_Copy1 = yy1.copy()\nyy_Copy2 = list(yy1)\nyy_Copy3 = yy1[:]\n\n\n#Modified element in list\ntest[0][2] = \"Modified three\"\nprint(test)\n\n#Loop Through\nfor i in reversed(fnames):\n print(i, end=' ')\nprint(\"\")\n\n#adding elements to a list\nfnames += [\"New name1\", \"New name2\"]\nprint(fnames)\n\n#Removing elements from a list via element index\nfnames.pop(1) #remove element at index 1\nfnames.pop() #removes the last element in the list\nprint(\"POPPED list: \",fnames)\n\n#Removing elements via element value\nfnames.remove(\"New name1\") #Accepts only one argument\nprint(fnames)\n\n#Remove all elements in the list\nmyListZZZ.clear()\nprint(f'Value of myListZZZ is {myListZZZ}')\n\n#Remove many elements using list slicing\nprint(snames)\ndel(snames[:2])\nprint(snames)\n\n#List slicing. syntax = str[begin, end+1]. Check String for similar\nprint(fnames[0:2])\n\n#List methods.\n#count(arg), insert(arg), append(), index, reverse, sort. 
Check Halterman\n\n\n#List comprehension\nnumbers = [x for x in range(0,20, 2) if x != 10]\nprint(numbers)" }, { "alpha_fraction": 0.7072550058364868, "alphanum_fraction": 0.7089520692825317, "avg_line_length": 26.418603897094727, "blob_id": "ef0c73f56b30ed8d53d95933f85cda7c81437f9c", "content_id": "5f0f66bf6fb5f0898f275eae5e17da1d3b6031e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2357, "license_type": "no_license", "max_line_length": 104, "num_lines": 86, "path": "/Os Module.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import os\n\nfor i in dir(os):\n print(i, end=' ')\nprint('\\n')\n\n#Get current working directory\nprint(os.getcwd())\n\n#Check if directory or file exists\npath = r'C:\\Users\\Public\\AndroidStudio' #...\\AndroidStudio\\Project1.txt\nprint(f\"Does the directory; Ibrahim Suleiman exists?: ANS: {os.path.exists(path)}\")\n\n#Change working directory\n#os.chdir('Users/...')\n\n#List file and folder in a directory\nprint(os.listdir())\nprint(os.listdir(r'C:\\Users\\Ibrahim Suleiman\\Desktop'))\nNumber_of_items = len(os.listdir(r'C:\\Users\\Ibrahim Suleiman\\Desktop'))\nprint(f'The number of items in the directory are {Number_of_items}')\n\n#Check if file exists in a directory or if location is a file\nstatement = \"This file exists\" if os.path.isfile(\"thisfile.csv\") else \"This file does not exist\"\nprint(statement)\n\n#Check if directory exists if location is a directory\nstatement = \"This is a folder\" if os.path.isdir(\"thisfile.csv\") else \"This is not a folder\"\nprint(statement)\n\n#Move files or replace files\n'''\nsource = text.txt\ndestination = r'C:\\Desktop\\text.txt\nos.replace(source, destination)\n'''\n\n#Move folder or replace folder\n'''\nsource = folder1\ndestination = r'C:\\Desktop\\folder1\nos.replace(source, destination)\n'''\n\n#Create new directory\ntry:\n os.makedirs('NewDir/innerOne/innerTwo/innerThree') #Can create nested directories\n #os.mkdir('NewDir') #Create just a single directory unlike os.makedirs()\n\n with open('NewDir/innerOne/innerTwo/innerThree/myFile.txt', 'w') as f:\n f.write('This is the first line\\n')\n f.write('And this is the second line')\n print('directory created')\nexcept:\n print('Cannot create directory of the same name')\n\n#Deleting files\n'''\nos.remove('file1.txt')\n'''\n\n#Deleting directories\ndef delete_dir():\n #os.rmdir('NewDir/innerOne/innerTwo/innerThree') #Remove a single directory. 
i.e innerThree\n #print('Directory removed')\n os.removedirs('NewDir/innerOne/innerTwo/innerThree') #Remove directory with nested directories\n print('All directories removed')\n\n#delete_dir()\n\n#RENAMING FILE OR FOLDER\n#os.rename('NewDir', 'NewRanamedDir')\n\n#GETTING INFO ABOUT A FILE\nprint(os.stat('NewRanamedDir'))\nfor i in os.stat('NewRanamedDir'):\n print(i)\n\nprint('\\n')\n\n\n#VIEW DIRECTORY TREE\nprint(os.walk('NewDir'))\nfor i,j,k in os.walk('NewDir'):\n print(f'CURRENT PATH {i} \\nDIRECTORIES {j} \\nFILES{k}')\n print('\\n')" }, { "alpha_fraction": 0.6419558525085449, "alphanum_fraction": 0.6514195799827576, "avg_line_length": 22.518518447875977, "blob_id": "62cedfcd04f257a9bcf66b23a9a1a990eb4dd3a0", "content_id": "e144ba247b0b421094aa55a173ce8b9b1b62fdf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 87, "num_lines": 27, "path": "/Exception Handling.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "#Common Standard Exceptions\n\n#Attribute error. E.g: 2/numerator. NO ATTRIBUTE NUMERATOR\n#ZeroDivisionError\n#NameError\n\n\nnum = 5\nden = 0\n\ntry:\n x = num/den\n print('ANSWER =',x)\nexcept: print(\"DIVISION BY ZERO NOT ALLOWED\")\n\n########## MULTIPLE EXCEPTION ###################\nnum = 5\nden = 0\na = 'Hello'\ntry:\n #y = a/2\n x = num/den\n print('ANSWER =',x)\n#except ZeroDivisionError as ze: print(\"You cannot divide by zero. ERROR OF TYPE:\", ze)\nexcept TypeError as te: print('ERROR OCCURRED of type', te)\nexcept Exception as e: print(f'Exception of type: {e} occurred')\nfinally: print(\"Runs always whether exception or not\")" }, { "alpha_fraction": 0.6439393758773804, "alphanum_fraction": 0.6515151262283325, "avg_line_length": 18, "blob_id": "c4842a3eaa7eff5cdb7299b95ddf5c36ffbd0bf7", "content_id": "227e11f13791b0bbeb1965e3bc9da162d3688429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 67, "num_lines": 7, "path": "/Sys.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import sys\n\n#sys.exit(1) To exit the program\n\nx = \"Ibrahim\"\ny = \"M\"\nprint(f\"SIZE: {sys.getsizeof(x)} bytes + {sys.getsizeof(y)} bytes\")" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.6247139573097229, "avg_line_length": 22.947368621826172, "blob_id": "daa6b0479cb0ae44d4ef6347384006043f274959", "content_id": "a9b0acc0be750efc840880ad26bab045ba91bece", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 54, "num_lines": 19, "path": "/Numpy2.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import numpy as np\n\nnp.random.seed(1)\n\n#Random float\na = np.random.rand(3) #3 random floats in 1-D\nb = np.random.rand(3,4) #12 random floats in 3x4\nprint(f'RAND1: {a}')\nprint(f'RAND2:\\n {b}')\n\n#Random int\nc = np.random.randint(2,8, (3,4)) #8 not included\nprint(f'RANDINT:\\n {c}')\n\n#Random shuffle\narr = np.array([[1,2,3],[4,5,6],[7,8,9]])\nprint(f'UNSHUFFLED: \\n {arr}')\nnp.random.shuffle(arr)\nprint(f'SHUFFLED: \\n {arr}')\n\n" }, { "alpha_fraction": 0.690651535987854, "alphanum_fraction": 0.709915041923523, "avg_line_length": 32.96154022216797, "blob_id": 
"944b66095917afb3ec09df6eb5cf672137743ac5", "content_id": "b257a95d6a5ba5d16b003a3789b1b8b32ed28243", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 134, "num_lines": 52, "path": "/Random.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import random\n\n#The random module is not advisable for generating secure random numbers, rather use the secrets module\n\n#They are 3 good modules for random numbers generation\n#1 random module -> For pseudo random numbers generation (Called pseudo random because they are not really random, can be reproduced)\n#2 secrets module -> For cryptographic strong random numbers\n#3 numpy module (Numpy2)\n\n#random.seed(1)\n\nprint(\"Random\")\nfor i in range(5):\n print(random.random()) #returns random numbers <= 0\n\nprint(\"Uniform\")\nfor i in range(5):\n print(random.uniform(1,5)) #returns random float numbers 1 and 5 inclusive\n\nprint(\"\\nRandInt\")\nfor i in range(5):\n print(random.randint(2,4)) #returns random int numbers 2,3 0r 4\n\nprint(\"\\nRandRange\")\nfor i in range(5):\n print(random.randrange(2,4)) #returns random numbers 2 or 3. 4 is not included\n\nprint('\\nNormalVariate')\nfor i in range(3):\n print(random.normalvariate(0,1)) #random.normalvariate(mu,sigma) Good in statistics. mu=mean, sigma=standard deviation\n\nprint(\"\\nChoice\")\nnumber_list = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\nfor i in range(5):\n print(random.choice(number_list)) #Returns any single random element from the list\n\nprint(\"\\nChoices\")\nnumber_list = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\nfor i in range(5):\n print(random.choices(number_list, k=3)) #Returns random group of elements from the list (i.e Repetitions allowed)\n\nprint(\"\\nSample\")\nnumber_list = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\nfor i in range(5):\n print(random.sample(number_list, 3)) #Returns unique random group of elements from the list (i.e no repetition)\n\nprint(\"\\nShuffle\")\nname = \"Ibrahim\"\nnameList = list(name)\nprint(nameList)\nrandom.shuffle(nameList)\nprint(''.join(nameList))" }, { "alpha_fraction": 0.6612451076507568, "alphanum_fraction": 0.6926528215408325, "avg_line_length": 32, "blob_id": "e8ffccafb10ec89377132677dce1de9bfb6a06ff", "content_id": "37b2a64590ae2748dba14ea605c1416dcd87b7c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1783, "license_type": "no_license", "max_line_length": 194, "num_lines": 54, "path": "/time.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import time\n\n#The time.time() (NOT time.clock() which is deprecated) function allows us measure the time of parts of a program's execution. returns a floating point value representing elapsed time in seconds\n#The time.sleep() function gives delay\n#time.time() is time in seconds from the epoch time.\n\n#TIME.TIME()\nprint(time.time()) #Epoch time. ie. from 12:00am jan 1 1970\ninput(\"Press enter to begin \")\nstart_time = time.time()\nname = input(\"Start typing text here: \")\nend_time = time.time()\nelapsted_time = end_time - start_time\nprint(\"ELAPSED TIME: \", elapsted_time)\n\n#TIME.SLEEP()\nstatement = \"Launch begins in t - \"\ndef launch():\n for i in range(10, 0, -1):\n print('{} {} seconds'.format(statement, i))\n time.sleep(1) # time in seconds. 
ie 1 -> 1 second\n print(\"LAUNCH was successful\")\n#launch()\n\n#CURRENT TIME\ncurrentTime = time.localtime(time.time())\ncurrentTime2 = time.localtime(2000)\nprint('The current time is: {}'.format(currentTime))\nprint(\"The current time2 is: {}\".format(currentTime2))\nprint('The current time is: {}'.format(time.ctime()))\nprint('The date, 2000 seconds from the epoch time .ie 1 Jan 1970 is: {}'.format(time.ctime(2000)))\nprint('The time current date is: {}'.format(time.gmtime()))\nprint('The date, 2000 seconds from the epoch time .ie 1 Jan 1970 is: {}'.format(time.gmtime(2000)))\n\nprint('\\n')\n\nformattedTime = time.asctime(currentTime)\nprint('The formatted time is: {}'.format(formattedTime))\n\n\nsplittedTime = formattedTime.split(' ')\nprint(splittedTime)\ndes = [\"Day\", \"Month\", \"Month2\", \"Time\", \"Year\"]\n\nfor i,j in zip(des, splittedTime):\n print(i,j)\n\nprint(\"\\n\")\n\nsplittedTime2 = splittedTime[3].split(\":\")\nprint(splittedTime2)\ndec2 = [\"Hour\",\"Minute\",\"Seconds\"]\nfor i,j in zip(dec2, splittedTime2):\n print(i,j)\n\n" }, { "alpha_fraction": 0.6878980994224548, "alphanum_fraction": 0.6942675113677979, "avg_line_length": 34, "blob_id": "dee31990f3813ab69d88b9f1526c8aa90b04ef9c", "content_id": "ab4ac04e459ef006519eac9c2b50de8579cc494e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/Collections.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import collections as coll\n\n#1 - Counter\ns = 'Ibrahim Musa Suleiman'\nprint(f'COUNTER RESULT: {coll.Counter(s)}')\nprint(f'COUNTER ITEMS: {coll.Counter(s).items()}')\nprint(f'COUNTER KEYS: {coll.Counter(s).keys()}')\nprint(f'COUNTER VALUES: {coll.Counter(s).values()}')\nprint(f'COUNTER MOST COMMON: {coll.Counter(s).most_common(2)}')" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.6605504751205444, "avg_line_length": 12.625, "blob_id": "21fa30dc593bbe3ce8c3edc8c5e6fb5561913926", "content_id": "158d44347b1a6dc357470752550ac26081edcb62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 28, "num_lines": 8, "path": "/Cryptography/Reverse Cipher.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "text = input('Enter text: ')\n\ncipherText = ''\n\nfor i in reversed(text):\n cipherText+=i\n\nprint(cipherText)\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 16.14285659790039, "blob_id": "94dc44f28d4d2a71ed33304a776b3078784f1826", "content_id": "7e4c506d792c4f687c6d1684917afceccb18cb19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/Pyperclip.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import pyperclip as ppclip\n\nx = input(\"Enter your name: \")\nppclip.copy(x)\ninput()\nprint(ppclip.paste())\nprint(\"Pasted\")" }, { "alpha_fraction": 0.5742753744125366, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 18.034482955932617, "blob_id": "579d0cf61885fef475f476fb775c030bbb649acc", "content_id": "0989ddd906e4ef4dfe8fb918380b1bd47e08c1ec", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 63, "num_lines": 29, "path": "/OOP1.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "\n#Create class in seperate module. ie: OOP2\n\n#from OOP2 import Car, Animals #Import Class from module\nfrom OOP2 import * #Import Class from module\ncar1 = Car(\"Honda\", \"Model_4X56YT\", 15000)\ncar1.identify()\nprint('\\n'*2)\n\n#========= Inheritance =============\nanimal1 = Animals()\n\nrabbit1 = Rabbit()\nfish1 = Fish()\nhawk1 = Hawk()\n\nprint(rabbit1.id)\nprint(fish1.id)\n\nrabbit1.sleep()\n\n\n#============ Super keyword =============\nfrom OOP3 import Rectangle, Square, Cube\n\nsquare1 = Square(4,6)\nsquare1.area()\n\ncube1 = Cube(3,4,5)\ncube1.volume()" }, { "alpha_fraction": 0.6577908992767334, "alphanum_fraction": 0.6612426042556763, "avg_line_length": 22.32183837890625, "blob_id": "d5637bbd6c089f2dc806c6e21d0482e8478c5ea9", "content_id": "972b6f7a8a790303a8bef17bb230d37a746b9d3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2028, "license_type": "no_license", "max_line_length": 131, "num_lines": 87, "path": "/SQLite.py", "repo_name": "IbroCalculus/Python-Codes-Snippets-for-Reference", "src_encoding": "UTF-8", "text": "import sqlite3\n\n#Reference: Python for everybody, Timestamp: about 09:00:35\n#For this, we use DB Browser for SQLite\n\n#CREATE DATABASE\n'''\n First, create a database. E.g: testData.db (Done visually in in SQLite. Otherwise\n CREATE DATABASE testData\n'''\n\n#CREATE TABLE IN DATABASE Users\n''' \n Visually; Self-explainable\n \n otherwise:\n\n CREATE TABLE Users (\n \"Name\" TEXT,\n \"Email\" TEXT,\n );\n \n Available datatype: TEXT, INTEGER, BLOB, REAL, NUMERIC\n'''\n\n#INSERT DATA INTO TABLE\n'''\n VISUALLY ON SQLite, Go to \"Browse Data\" Tab, Click on New Record\n \n otherwise: Navigate to \"Execute SQL\" tab:\n \n INSERT INTO Users (Name, Email) VALUES (\"Ibrahim\", \"[email protected]\");\n or\n INSERT INTO Users VALUES (\"Aboy\", \"[email protected]\");\n click the RUN/PLAY icon to submit the changes \n'''\n\n#DELETE A ROW IN A TABLE based on selection criteria\n'''\n VISUALLY: Go to \"Browse Data\" Tab, Highlight the record, Click on Delete record.\n \n otherwise: Navigate to \"Execute SQL\" tab:\n \n DELETE FROM Users WHERE \"Email\"=\"[email protected]\";\n \n NB: If duplicates, it deletes all\n \n'''\n\n#DELETE ALL RECORDS IN THE DATABASE\n'''\n DELETE FROM Users;\n'''\n\n#UPDATE TABLE RECORD\n'''\n UPDATE Users SET Name=\"AAboy\", Email=\"[email protected]\" WHERE Name=\"Aboy\";\n'''\n\n#RETRIEVE ALL RECORDS FROM A TABLE\n'''\n SELECT * FROM Users;\n'''\n\n#RETRIEVE A PARTICULAR COLUMN FROM A TABLE\n'''\n SELECT Name FROM Users;\n'''\n\n#RETRIEVE A PARTICULAR COLUMN WITH CONDITION\n'''\n SELECT Name FROM Users where Name=\"Aboy\";\n'''\n\n#RETRIEVE DISTINCT\n'''\nThe SELECT DISTINCT statement is used to return only distinct (different) values.\nInside a table, a column often contains many duplicate values; and sometimes you only want to list the different (distinct) values.\n SELECT DISTINCT * FROM Users;\n SELECT DISTINCT Name FROM Users;\n SELECT DISTINCT count(Name) FROM Users; #Count the number of distinct values\n'''\n\n#SORTING\n'''\n SELECT * FROM Users ORDER BY Name DESC; //ASC\n'''" } ]
70
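Shutil.py in the files above explains that copy2() is copy() plus file metadata, but only demonstrates copyfile(). A small sketch of the metadata-preserving variant, reusing that snippet's Shutil1.txt source file; the destination name Shutil2_copy.txt is made up for illustration.

import os
import shutil

# copy2() carries metadata across, so the copy should report the same
# modification time as the source (copyfile() and copy() do not promise this).
shutil.copy2('Shutil1.txt', 'Shutil2_copy.txt')

same_mtime = os.stat('Shutil1.txt').st_mtime == os.stat('Shutil2_copy.txt').st_mtime
print('mtime preserved:', same_mtime)  # expected: True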
RichardSchreier/sokoban
https://github.com/RichardSchreier/sokoban
56305768a0ea47d8bb090881d90040dedf4e9071
cdeaa727036e103cf334b29ecb203a8c23c9ed58
d8766a77d62e88bc631f2018214a8c475a2ac0b9
refs/heads/master
2020-04-22T07:14:54.575697
2019-03-23T22:52:02
2019-03-23T22:52:02
170,213,681
0
1
MIT
2019-02-11T22:35:37
2019-02-23T22:50:06
2019-02-25T02:42:58
Python
[ { "alpha_fraction": 0.6158669590950012, "alphanum_fraction": 0.6221374273300171, "avg_line_length": 47.26315689086914, "blob_id": "5b7001f73c4933449f4e8f10ca835804e27f90fb", "content_id": "09359dd2ff19a9afc8af08ae0d5b20953ff9beaf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3668, "license_type": "permissive", "max_line_length": 120, "num_lines": 76, "path": "/a_star.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "import time\nimport math\nfrom PriorityQueue import PriorityQueue\nfrom engineering_notation import eng\n\n\ndef a_star(initial_state, max_cost=1000, max_time=30*60, max_states=50000, progress_report=None):\n \"\"\"\nAn implementation of the A* search algorithm.\nSee https://www.redblobgames.com/pathfinding/a-star/introduction.html\n initial_state an object supporting the following methods:\n cost the cost associated with reaching this state from the initial state\n neighbors an iterator returning neighboring states\n is_goal returns True if the state is a goal state\n heuristic returns an underestimate of the number of moves to reach a goal state\n max_cost search terminates if an underestimate of the cost exceeds max_cost\n max_time search terminates if the search time exceeds max_time\n max_states search terminates if the number of states visited exceeds max_states\n progress_report (progress_fn, progress_interval)\n progress_fn a function called every progress_interval seconds.\n Arguments passed to progress_fn are (current_state, states_reached, priority_queue, elapsed_time).\n If progress_fn returns non-None, the search is terminated.\n\n Returns (solution_state, solution_info)\n\"\"\"\n frontier = PriorityQueue(initial_state, 0)\n states_reached = [initial_state]\n time0 = time.time()\n termination_condition = \"\"\n if progress_report:\n progress_fn, progress_interval = progress_report\n progress_update_time = time0\n else:\n progress_fn, progress_interval = None, None\n progress_update_time = math.inf\n\n while not frontier.empty():\n current = frontier.pop()\n if current.is_goal():\n return current, f\"cost-optimal solution (cost = {current.cost()}) found in {eng(time.time() - time0, 2)}s\" \\\n + f\" after examining {len(states_reached)} states.\"\n # These checks could be in the next_state loop\n if time.time() - time0 > max_time:\n termination_condition += f\"Time limit ({max_time}s) exceeded.\"\n if current.cost() > max_cost:\n termination_condition += f\"Max cost ({max_cost}) exceeded.\"\n if len(states_reached) > max_states:\n termination_condition += f\"Max states ({max_states}) exceeded.\"\n if termination_condition:\n return None, termination_condition\n if time.time() > progress_update_time:\n return_value = progress_fn(current, states_reached, frontier, time.time() - time0)\n if return_value is not None:\n msg = f\"halted after {eng(time.time() - time0, 2)}s after examining {len(states_reached)} states. 
\"\n if type(return_value) is str:\n msg += return_value\n return None, msg\n progress_update_time = time.time() + progress_interval\n\n for next_state in current.neighbors():\n next_added = False\n try:\n i = states_reached.index(next_state)\n if states_reached[i].cost() > next_state.cost():\n states_reached[i] = next_state\n next_added = True\n except ValueError:\n states_reached.append(next_state)\n next_added = True\n if next_added:\n h = next_state.heuristic()\n # if h is 0:\n # print(f'Found a solution with cost = {next_state.cost()} (possibly non-optimal).')\n priority = next_state.cost() + h\n frontier.insert(next_state, priority)\n return None, \"No solution.\"\n" }, { "alpha_fraction": 0.5271883010864258, "alphanum_fraction": 0.5331565141677856, "avg_line_length": 26.418182373046875, "blob_id": "33e00d14730eea6bf24a206b9562fd207352b1ea", "content_id": "e48c034412a42d9f6c6deb6cfd30af07f71cb2aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1508, "license_type": "permissive", "max_line_length": 97, "num_lines": 55, "path": "/Point.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "from math import sqrt\n\n\nclass Point:\n \"\"\"A 2D point class.\"\"\"\n __slots__ = ['x', 'y'] # Prevents the creation of an instance dictionary\n\n def __init__(self, x=None, y=None):\n self.x = x\n self.y = y\n\n def __eq__(self, d): # to make == work\n return self.x == d.x and self.y == d.y\n\n def __hash__(self): # to support dictionary indexing based on the point aot the Point object\n return hash((self.x, self.y))\n\n def __bool__(self): # to support conditionals\n return self.x is not None and self.y is not None\n\n def __iter__(self): # to support tuple(point)\n yield self.x\n yield self.y\n\n def __add__(self, d):\n return Point(self.x + d.x, self.y + d.y)\n\n def __sub__(self, d):\n return Point(self.x - d.x, self.y - d.y)\n\n def __neg__(self): # unary minus\n return Point(-self.x, -self.y)\n\n def __mul__(self, k): # to support point * k\n return Point(self.x * k, self.y * k)\n\n def __rmul__(self, k): # to support k * point\n return Point(self.x * k, self.y * k)\n\n def __str__(self):\n return f\"({self.x},{self.y})\"\n\n def __repr__(self):\n return \"Point\" + self.__str__()\n\n def __lt__(self, p2): # to allow a list of Points to be sorted\n return self.x < p2.x or (self.x == p2.x and self.y < p2.y)\n\n @property\n def l1_norm(self):\n return abs(self.x) + abs(self.y)\n\n @property\n def l2_norm(self):\n return sqrt(self.x ** 2 + self.y ** 2)\n" }, { "alpha_fraction": 0.6895223259925842, "alphanum_fraction": 0.6926040053367615, "avg_line_length": 32.28205108642578, "blob_id": "a1a113ea2d9563694311ae4a929e923902840ad5", "content_id": "d3412539b0831eb2a277dbe86cad755e1a4e26c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1306, "license_type": "permissive", "max_line_length": 107, "num_lines": 39, "path": "/README.md", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "# Sokoban game in Python\nWith simple solver built on David Moreno's sokoban code.\n\n## Screenshots\n![Solved Image](https://raw.githubusercontent.com/RichardSchreier/sokoban/master/Solved.png \"Solved\")\n![Unsolved Image](https://raw.githubusercontent.com/RichardSchreier/sokoban/master/Unsolved.png \"Unsolved\")\n\n## Actions\n```\nKey Action\n←↑↓→ Move worker left, up, down, right\nn/p Next/previous level\nN/P Next/previous unsolved 
level\n>/< Next/previous world\nq Quit\nr Re-start\nR Replay solution\ns If no solution, solve from the current state.\nS Solve from the initial state, even if a solution has been found. \n While the solver is running:\n space pause solve\n esc exit solve\ncmd-S Save the solution as a comment in the world file\nu/U Undo/Re-do\nmouse Move worker to specified square; can push box adjacent to worker\n\nDebug\n^a display annotated_map\n^d toggle DEBUG flag; DEBUG==True prints debug messages during solve()\n^h print heuristic\n^m print move_count_maps\n^n print neighbors\n^s print solution string\n```\n\n## Install & Play\n1. Clone the repository `git clone https://github.com/RichardSchreier/sokoban.git`\n2. Run `pip install -r requirements.txt`\n3. Enjoy! `python3 sokoban.py`\n" }, { "alpha_fraction": 0.48593950271606445, "alphanum_fraction": 0.4898807108402252, "avg_line_length": 43.07511901855469, "blob_id": "ee409ea9f13513179d1eb8543f77cbafc6de2d2b", "content_id": "f491b8820e9ecf3549fd4c71efe86713ef1f5715", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9396, "license_type": "permissive", "max_line_length": 106, "num_lines": 213, "path": "/sokoban.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "#!../bin/python\n# Original code downloaded from https://github.com/morenod/sokoban.git\n\"\"\"\nKey Action\n←↑↓→ Move worker left, up, down, right\nn/p Next/previous level\nN/P Next/previous unsolved level\n>/< Next/previous level file\nq Quit\nr Re-start\nR Replay solution\ns If no solution, solve from the current state.\nS Solve from the initial state, even if a solution has been found.\n While the solver is running:\n space pause solve\n esc exit solve\ncmd-S Save the solution as a comment in the world file\nu/U Undo/Re-do\nmouse Move worker to specified square, can push box adjacent to worker\n\nDebug\n^a display annotated_map\n^d toggle DEBUG flag; DEBUG==True prints debug messages during solve()\n^h print heuristic\n^m print move_count_maps\n^n print neighbors\n^s print solution string\n\"\"\"\nimport sys\nimport os\nimport time\nfrom datetime import datetime\nfrom getpass import getuser\nimport pygame\nfrom Game import Game, GameWorld, UP, DOWN, LEFT, RIGHT\nfrom engineering_notation import eng\n\nWORLD_DIR = \"Worlds\"\nSOKOBAN_INIT = \"sokoban.init\"\n\n\ndef main():\n def initialize_game():\n level_id = world.level_ids[min(level_i, len(world.level_ids))]\n world_id = worlds[min(world_i, len(worlds))]\n return Game(world.levels[level_i], f\"{world_id}-{level_id}\", True, world.solutions[level_i])\n\n def initialize_world():\n return GameWorld(worlds[world_i], WORLD_DIR)\n\n def read_world_i_and_level_i():\n try:\n file = open(SOKOBAN_INIT, \"r\")\n line = file.readline()\n file.close()\n words = line.split()\n return int(words[0]), int(words[1])\n except (FileNotFoundError, IndexError):\n return 1, 0\n\n def save_world_i_and_level_i():\n with open(SOKOBAN_INIT, \"w\") as file:\n file.write(f\"{world_i} {level_i}\")\n\n def check_for_manual_solution():\n move_count = game.current_state.move_count\n if game.solved() and (game.solution_state is None or move_count < game.solution_state.move_count):\n game.solution_state = game.current_state\n date_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n game.solution_info = f\"{date_str}, manually solved by {getuser()} in {move_count} moves.\"\n world.update_solution(level_i, (game.solution_string(), game.solution_info))\n\n pygame.init()\n 
pygame.display.set_icon(pygame.image.load('Images/icon.png'))\n move_dict = {pygame.K_UP: UP, pygame.K_DOWN: DOWN, pygame.K_LEFT: LEFT, pygame.K_RIGHT: RIGHT}\n worlds = os.listdir(WORLD_DIR)\n worlds.sort()\n world_i, level_i = read_world_i_and_level_i()\n world = initialize_world()\n game = initialize_game()\n while 1:\n game.display()\n for event in pygame.event.get():\n if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key is pygame.K_q:\n save_world_i_and_level_i()\n return\n elif event.type == pygame.KEYDOWN:\n if event.key in move_dict and not game.solved():\n game.move(move_dict[event.key])\n check_for_manual_solution()\n elif event.unicode == 'n': # go to next level\n if level_i < len(world.levels) - 1:\n level_i += 1\n game = initialize_game()\n elif world_i < len(worlds) - 1:\n level_i = 0\n world_i += 1\n world = initialize_world()\n game = initialize_game()\n elif event.unicode == 'N': # go to next unsolved level\n saved_vars = level_i, world_i, world\n found_unsolved_game = False\n while not found_unsolved_game:\n if level_i < len(world.levels) - 1:\n level_i += 1\n elif world_i < len(worlds) - 1:\n world_i += 1\n world = initialize_world()\n level_i = 0\n else:\n break\n if not world.solutions[level_i][0]:\n found_unsolved_game = True\n if found_unsolved_game:\n game = initialize_game()\n else:\n level_i, world_i, world = saved_vars\n elif event.unicode == 'p': # go to previous level\n if level_i > 0:\n level_i -= 1\n game = initialize_game()\n elif world_i > 0:\n world_i -= 1\n world = initialize_world()\n level_i = len(world.levels) - 1\n game = initialize_game()\n elif event.unicode == 'P': # go to previous unsolved level\n saved_vars = level_i, world_i, world\n found_unsolved_game = False\n while not found_unsolved_game:\n if level_i > 0:\n level_i -= 1\n elif world_i > 0:\n world_i -= 1\n world = initialize_world()\n level_i = len(world.levels) - 1\n else:\n break\n if not world.solutions[level_i][0]:\n found_unsolved_game = True\n if found_unsolved_game:\n game = initialize_game()\n else:\n level_i, world_i, world = saved_vars\n elif event.unicode == 'r':\n game.restart()\n elif event.unicode == 'R':\n game.replay_solution()\n elif event.unicode == 's' and pygame.key.get_mods() is pygame.KMOD_NONE:\n if game.solution_state is not None:\n game.current_state = game.solution_state\n else:\n game.solve()\n if game.solution_state:\n world.update_solution(level_i, (game.solution_string(), game.solution_info))\n elif event.unicode == 'S':\n game.current_state = game.initial_state\n game.solve()\n if game.solution_state:\n world.update_solution(level_i, (game.solution_string(), game.solution_info))\n elif event.key == pygame.K_s and pygame.key.get_mods() & pygame.KMOD_META: # save\n world.save()\n elif event.unicode == 'u':\n game.undo()\n elif event.unicode == 'U':\n game.redo()\n elif event.unicode == '>':\n world_i = min(world_i + 1, len(worlds) - 1)\n world = initialize_world()\n level_i = 0\n game = initialize_game()\n elif event.unicode == '<':\n world_i = max(world_i - 1, 0)\n world = initialize_world()\n level_i = 0\n game = initialize_game()\n # debug commands\n elif event.key is pygame.K_a and pygame.key.get_mods() & pygame.KMOD_CTRL:\n game.show_annotated_map = True\n elif event.key is pygame.K_d and pygame.key.get_mods() & pygame.KMOD_CTRL:\n game.toggle_debug()\n elif event.key is pygame.K_h and pygame.key.get_mods() & pygame.KMOD_CTRL:\n time0 = time.time()\n heuristic = game.current_state.heuristic()\n dt = time.time() - time0\n 
print(f\"heuristic = {heuristic} computed in {eng(dt, 2)}s\")\n elif event.key is pygame.K_m and pygame.key.get_mods() & pygame.KMOD_CTRL:\n print(\"Move Count Maps:\")\n game.print_move_count_maps()\n elif event.key is pygame.K_n and pygame.key.get_mods() & pygame.KMOD_CTRL:\n print(\"Neighbors:\")\n for i, n in enumerate(game.current_state.neighbors()):\n print(f\"{i}: {len(n.previous_moves)} moves\")\n print(n.full_map)\n print(\"Done\")\n elif event.key is pygame.K_r and pygame.key.get_mods() & pygame.KMOD_CTRL:\n game.show_raw_map = True\n elif event.key is pygame.K_s and pygame.key.get_mods() & pygame.KMOD_CTRL:\n print(game.solution_string())\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n game.show_annotated_map = False\n if event.key == pygame.K_r:\n game.show_raw_map = False\n elif event.type == pygame.MOUSEBUTTONDOWN and not game.solved():\n game.move_to(pygame.mouse.get_pos())\n check_for_manual_solution()\n pygame.display.update()\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n" }, { "alpha_fraction": 0.5553299784660339, "alphanum_fraction": 0.5670050978660583, "avg_line_length": 39.18367385864258, "blob_id": "66f38a43822faa87e91a1f50ed87a5d51e8564e2", "content_id": "28a4be74e8e5e102cb62d8447c73c0a6c5d8dfd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1970, "license_type": "permissive", "max_line_length": 113, "num_lines": 49, "path": "/test_solve.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "\"\"\"Script for testing the Game.solve() function, or for generating solutions.\"\"\"\nimport os\nfrom Game import Game, GameWorld\n\nsave_world = 1\ndo_solve = False\nsolve_all = 0 # 0 to solve only unsolved\nprint_stats = 1\n\nWORLD_DIR = \"Worlds\"\nworld_files = sorted(os.listdir(WORLD_DIR))\n\nif print_stats:\n for world_file in world_files:\n world = GameWorld(world_file, \"Worlds\")\n levels = world.levels\n n_unsolved, n_solved_optimally, n_solved_manually, = 0, 0, 0\n for level_i, solution in enumerate(world.solutions):\n if solution[0] is None:\n n_unsolved += 1\n # print(world.level_ids[level_i])\n elif \"cost-optimal\" in solution[1]:\n n_solved_optimally += 1\n elif \"manual moves\" in solution[1] or \"manually solved\" in solution[1]:\n n_solved_manually += 1\n else:\n print(f\"unintelligible solution info: {solution[1]}\")\n print(f'{world_file} contains {n_unsolved} unsolved, {n_solved_optimally} optimally solved, and ' +\n f'{n_solved_manually} (at least partially) manually-solved puzzles.')\n\n\nif do_solve:\n for world_file in world_files:\n world = GameWorld(world_file, \"Worlds\")\n levels = world.levels\n for level_i in range(len(levels)):\n level_id = world.level_ids[level_i]\n full_id = f\"{world_file}#{level_id}\"\n game = Game(levels[level_i], level_id, False, world.solutions[level_i])\n if solve_all or game.solution_state is None:\n game.solve()\n msg1 = f\"{full_id}: {game.solution_info}\"\n if game.solution_state:\n msg2 = world.check_and_update_solution(level_i, (game.solution_string(), game.solution_info))\n if save_world:\n world.save()\n else:\n msg2 = \"\"\n print(f\"{msg1:110} {msg2}\")\n\n" }, { "alpha_fraction": 0.515954315662384, "alphanum_fraction": 0.521988034248352, "avg_line_length": 38.114158630371094, "blob_id": "d3304aa79604c4599e049bafe9dc189de6ef8fdf", "content_id": "938d5d5fcfdbf40b4f8d0816cdb95456ab67f909", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 35633, "license_type": "permissive", "max_line_length": 120, "num_lines": 911, "path": "/Game.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "\"\"\"Sokoban game class\"\"\"\nimport pygame\nimport os\nimport sys\nimport datetime\nimport time\nfrom itertools import permutations\nfrom copy import deepcopy\nfrom getpass import getuser\nfrom find_path import find_path\nfrom Point import Point\nfrom a_star import a_star\nfrom engineering_notation import eng\n\nWALL = '#'\nSPACE = ' '\nGOAL = '.'\nWORKER = '@'\nWORKER_ON_GOAL = '+'\nBOX = '$'\nBOX_ON_GOAL = '*'\nNO_BOX = 'X' # Only used in annotated_map\nCAUTION_BOX = 'x'\nWORKER_CHARS = WORKER + WORKER_ON_GOAL\nBOX_CHARS = BOX + BOX_ON_GOAL\nOPEN_FOR_BOX = SPACE + GOAL\nOPEN_FOR_WORKER = OPEN_FOR_BOX + NO_BOX\nGOAL_CHARS = GOAL + BOX_ON_GOAL + WORKER_ON_GOAL\nVALID_CHARS = frozenset(WALL + OPEN_FOR_BOX + WORKER_CHARS + BOX_CHARS)\nfile_dict = {WALL: 'wall', SPACE: 'floor', BOX: 'box', BOX_ON_GOAL: 'box_docked', WORKER: 'worker',\n WORKER_ON_GOAL: 'worker_dock', GOAL: 'dock', NO_BOX: 'no_box', CAUTION_BOX: 'caution_box'}\nblit_dict = {key: pygame.image.load(f'Images/{file}.png') for (key, file) in file_dict.items()}\nCELL_SIZE = 32\nUP = Point(0, -1)\nDOWN = Point(0, 1)\nLEFT = Point(-1, 0)\nRIGHT = Point(1, 0)\nMOVE_DIRECTIONS = [UP, DOWN, LEFT, RIGHT]\nMAX_LINE_LENGTH = 120\nINFINITY = sys.maxsize // 2\n\n\nclass GameWorld:\n def __init__(self, filename=None, directory=\"\"):\n self.full_filename = os.path.join(directory, filename)\n self.levels = []\n self.title = None\n self.author = None\n self.contents = []\n self.level_ids = []\n self.level_index = []\n self.solutions = []\n if filename:\n self.read()\n\n def read(self):\n \"\"\"Read a file to define the levels of that world\"\"\"\n # File format:\n # Optional Header consisting of any text\n # ; Title =\n # ; Author =\n # ; URL =\n # Level description (Level name can be specified before or after, with or without ;)\n # ; Solution = {UDLR}\n def line_is_level_description():\n return bool(line) and not bool(set(line).difference(VALID_CHARS))\n\n def rhs_of_assignment():\n ii = line.find('=')\n if ii + 1 < len(line):\n return line[ii + 1:].strip()\n else:\n return \"\"\n\n def add_level():\n if matrix:\n self.level_ids.append(level_id)\n self.levels.append(matrix)\n self.level_index.append(starting_line_number)\n self.solutions.append((solution, solution_info))\n\n with open(self.full_filename, 'r') as file:\n matrix = []\n solution, solution_info = None, None\n level_id = None\n preceding_comment = None\n header_passed = False\n reading_a_level = False\n logical_line_number = 0\n actual_line_number = 1\n for line in file:\n line = line.rstrip('\\n')\n if line_is_level_description():\n header_passed = True\n if not reading_a_level:\n starting_line_number = logical_line_number\n if not level_id and preceding_comment:\n level_id = preceding_comment.lstrip('; ').strip\n reading_a_level = True\n matrix.append(list(line))\n elif line.startswith(';'):\n if line[1:].lstrip().startswith('Solution ='):\n while line.rstrip().endswith('\\\\'): # Support multi-line solution comments\n line = line.rstrip(' \\\\') + file.readline().rstrip(' \\n').lstrip(' ;')\n actual_line_number += 1\n solution = rhs_of_assignment()\n elif line[1:].lstrip().startswith('Solution_info ='):\n solution_info = rhs_of_assignment()\n elif (reading_a_level or header_passed) and not level_id:\n level_id = line.lstrip('; ').rstrip()\n reading_a_level = False\n if not header_passed:\n 
if 'Title' in line:\n self.title = rhs_of_assignment()\n elif 'Author' in line:\n self.author = rhs_of_assignment()\n elif line.strip() == \"\":\n header_passed = True\n add_level()\n matrix = []\n solution, solution_info = None, None\n level_id = None\n preceding_comment = None\n reading_a_level = False\n elif line.startswith(\"Level\"):\n level_id = line[5:].rstrip('\\n').strip()\n assert level_id\n else:\n print(f\"Problem in line {actual_line_number} of {self.full_filename}.\")\n sys.exit(1)\n self.contents.append(line)\n logical_line_number += 1\n actual_line_number += 1\n if self.contents[-1] is not '':\n self.contents.append('')\n add_level()\n\n def save(self):\n with open(self.full_filename, 'w') as file:\n for line in self.contents:\n if len(line) > MAX_LINE_LENGTH and line[0] == ';' and line[1:].lstrip().startswith('Solution ='):\n # Support multi-line solution comments\n while len(line) > MAX_LINE_LENGTH:\n file.write(line[:MAX_LINE_LENGTH - 1] + '\\\\\\n')\n line = '; ' + line[MAX_LINE_LENGTH - 1:]\n file.write(line + '\\n')\n\n def check_and_update_solution(self, level_i, solution):\n \"\"\"Update the solution if it is new or shorter than the existing solution.\n Return a string summarizing the difference between the new and the existing solution\"\"\"\n def parse_solution_info(info_str):\n def parse_value(search_str1, search_str2=\" \"):\n try:\n i = info_str.index(search_str1) + len(search_str1)\n j = info_str.find(search_str2, i)\n return eng(info_str[i:j])\n except (ValueError, IndexError):\n return None\n\n solution_time = parse_value('found in ', 's')\n states_searched = parse_value('after examining ', 'states')\n return solution_time, states_searched\n\n if solution[0]:\n l1 = len(solution[0])\n solution_time1, states_searched1 = parse_solution_info(solution[1])\n if not self.solutions[level_i]:\n summary_str = f\"New solution of length {l1} found in {solution_time1}s\"\n else: # Compare against existing solution\n old_solution = self.solutions[level_i]\n if old_solution[0]:\n l0 = len(old_solution[0])\n solution_time0, states_searched0 = parse_solution_info(old_solution[1])\n if l1 < l0:\n summary_str = f'New solution is shorter ({l1} vs. {l0}).'\n elif l1 > l0:\n summary_str = f'New solution is longer ({l1} vs. 
{l0}).'\n else:\n summary_str = f'Same solution length ({l1}).'\n if solution_time0:\n solution_time_diff = (solution_time1 - solution_time0) / solution_time0\n summary_str += f' Solution time ({eng(solution_time1)}s) is '\n if solution_time_diff < -0.1:\n summary_str += f'{-solution_time_diff * 100:.0f}% faster.'\n elif solution_time_diff > 0.1:\n summary_str += f'{solution_time_diff * 100:.0f}% slower.'\n else:\n summary_str += f'essentially the same.'\n else:\n summary_str = \"New solution.\"\n self.update_solution(level_i, solution)\n return summary_str\n\n def update_solution(self, level_i, solution):\n self.solutions[level_i] = solution\n line_number = self.level_index[level_i]\n solution_line_updated = False\n solution_info_updated = False\n solution_line = f';Solution = {solution[0]}'\n solution_info = f';Solution_info = {solution[1]}'\n while self.contents[line_number]:\n line = self.contents[line_number]\n if line[0] == ';':\n if line[1:].lstrip().startswith('Solution ='):\n self.contents[line_number] = solution_line\n solution_line_updated = True\n elif line[1:].lstrip().startswith('Solution_info ='):\n self.contents[line_number] = solution_info\n solution_info_updated = True\n line_number += 1\n lines_added = 0\n if not solution_info_updated:\n self.contents.insert(line_number, solution_info)\n lines_added += 1\n if not solution_line_updated:\n self.contents.insert(line_number, solution_line)\n lines_added += 1\n if lines_added:\n for i in range(level_i + 1, len(self.level_index)):\n self.level_index[i] += lines_added\n\n\nclass Game:\n debug = False\n \n def __init__(self, full_map, level_id, initialize_screen=True, solution=(None, None)):\n self.raw_map = GameMap(full_map)\n worker, boxes, goals = self.raw_map.make_raw()\n self.annotated_map = deepcopy(self.raw_map)\n self.annotated_map.annotate(worker)\n self.show_annotated_map = False\n self.show_raw_map = False\n self.initial_state = GameState(worker, boxes, self)\n self.current_state = self.initial_state\n self.goals = goals\n self.goals.sort()\n self.level_id = level_id\n if solution[0]:\n self.solution_state = self.verify_solution(solution[0])\n if self.solution_state:\n self.solution_info = solution[1]\n else:\n print(f\"Saved solution for {self.level_id} didn't work!\")\n else:\n self.solution_state = None\n self.solution_info = \"\"\n if initialize_screen:\n self.screen = pygame.display.set_mode(self.size) # Generates KEYUP events for all active keys, incl. SHIFT!\n else:\n self.screen = None\n self._move_count_maps = None\n self._min_move_count_map = None\n\n @property\n def move_count_maps(self):\n \"\"\"Move counts needed to push a box to each goal. 
Uses lazy initialization\"\"\"\n if getattr(self, '_move_count_maps', None) is None:\n self._move_count_maps = []\n for goal in self.goals:\n self._move_count_maps.append(MoveCountMap(self.raw_map, goal))\n return self._move_count_maps\n\n @property\n def min_move_count_map(self):\n if getattr(self, '_min_move_count_map', None) is None:\n if len(self.goals) == 1:\n self._min_move_count_map = self.move_count_maps[0]\n else:\n self._min_move_count_map = deepcopy(self.move_count_maps[0])\n for next_map in self.move_count_maps[1:]:\n for y in range(len(next_map.matrix)):\n for x in range(len(next_map.matrix[y])):\n p = Point(x, y)\n if next_map[p] < self.min_move_count_map[p]:\n self.min_move_count_map[p] = next_map[p]\n return self._min_move_count_map\n\n def print_move_count_maps(self):\n for g_i, g in enumerate(self.goals):\n print(self.move_count_maps[g_i])\n print(\"Minima of above maps:\")\n print(self.min_move_count_map)\n\n @property\n def size(self):\n x, y = self.raw_map.size\n return x * CELL_SIZE, y * CELL_SIZE\n\n @staticmethod\n def grid_point(screen_pos):\n return Point(screen_pos[0] // CELL_SIZE, screen_pos[1] // CELL_SIZE)\n \n @classmethod\n def toggle_debug(cls):\n cls.debug = not cls.debug\n if cls.debug:\n print(\"Debug is on\")\n else:\n print(\"Debug is off\")\n\n def display(self):\n if self.show_annotated_map:\n self.display_annotated_map()\n elif self.show_raw_map:\n self.display_raw_map()\n else:\n self.display_full_map()\n\n def display_full_map(self):\n self.current_state.full_map.display(self.screen)\n if self.solved():\n caption = f\"Solved in {self.current_state.move_count} moves!!\"\n else:\n caption = f\"{self.level_id}, move {self.current_state.move_count}.\"\n pygame.display.set_caption(caption)\n\n def display_annotated_map(self):\n self.annotated_map.display(self.screen)\n pygame.display.set_caption(\"Annotated Map\")\n\n def display_raw_map(self):\n self.raw_map.display(self.screen)\n pygame.display.set_caption(\"Raw Map\")\n\n def solved(self):\n return self.current_state.solved()\n\n def restart(self):\n self.current_state = self.initial_state\n\n def move(self, d):\n new_state = self.current_state.move(d)\n if new_state:\n self.current_state = new_state\n self.solution_state = None\n\n def undo(self):\n if self.current_state.predecessor:\n successor = self.current_state\n self.current_state = self.current_state.predecessor\n self.current_state.successor = successor\n\n def redo(self):\n if self.current_state.successor:\n self.current_state = self.current_state.successor\n\n def move_to(self, screen_pos):\n p = Game.grid_point(screen_pos)\n if self.current_state.full_map[p] in BOX_CHARS and (self.current_state.worker - p).l1_norm == 1:\n self.move(p - self.current_state.worker)\n else:\n new_state = self.current_state.move_to(p)\n if new_state:\n self.current_state = new_state\n\n def replay_solution(self):\n def display(_state):\n _state.full_map.display(self.screen)\n pygame.display.update()\n pygame.event.pump() # Need to ping the event queue to actually update the display\n time.sleep(0.1)\n\n if not self.solution_state:\n return\n solution_states = [self.solution_state]\n while solution_states[-1].predecessor:\n solution_states.append(solution_states[-1].predecessor)\n solution_states.reverse()\n display(solution_states[0])\n for state in solution_states[1:]:\n intermediate_state = state.predecessor\n for move in state.previous_moves[:-1]:\n intermediate_state = intermediate_state.move(move)\n display(intermediate_state)\n display(state)\n 
self.current_state = self.solution_state\n\n    def solve(self):\n        move_count0 = self.current_state.move_count\n        solution, solution_info = self.current_state.solve()\n        self.solution_state = solution\n        if solution is not None:\n            self.current_state = solution\n            date_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n            if move_count0 != 0:\n                self.solution_info = f\"{date_str}, solution with cost = {solution.move_count} \" \\\n                                     + f\"found after {move_count0} manual moves by {getuser()}\"\n            else:\n                self.solution_info = f\"{date_str}, {solution_info}\"\n        if self.debug and self.screen:\n            print(solution_info, self.solution_string())\n\n    def solution_string(self):\n        \"\"\"Convert the solution to a string\"\"\"\n        if self.solution_state is None:\n            return \"\"\n        move_dict = {DOWN: 'd', UP: 'u', RIGHT: 'r', LEFT: 'l'}\n        solution_states = [self.solution_state]\n        while solution_states[-1].predecessor:\n            solution_states.append(solution_states[-1].predecessor)\n        solution_states.reverse()\n        move_string = \"\"\n        if len(solution_states) > 1:\n            for state in solution_states[1:]:\n                intermediate_state = state.predecessor\n                for move in state.previous_moves:\n                    if intermediate_state.full_map[intermediate_state.worker + move] in OPEN_FOR_WORKER:\n                        move_string += move_dict[move]\n                    else:\n                        assert intermediate_state.full_map[intermediate_state.worker + move] in BOX_CHARS\n                        move_string += move_dict[move].upper()\n                    intermediate_state = intermediate_state.move(move)\n        return move_string\n\n    def verify_solution(self, solution_str):\n        move_dict = {'d': DOWN, 'u': UP, 'r': RIGHT, 'l': LEFT}\n        current_state = self.initial_state\n        for c in solution_str:\n            d = move_dict[c.lower()]\n            current_state = current_state.move(d)\n        if current_state.solved():\n            return current_state\n\n\nclass GameState:\n    def __init__(self, worker, boxes, game, predecessor=None, previous_moves=None):\n        self.worker = worker\n        self.boxes = boxes\n        self.boxes.sort()\n        self.game = game\n        self.predecessor = predecessor\n        self.successor = None\n        if predecessor:\n            if previous_moves:\n                self.move_count = predecessor.move_count + len(previous_moves)\n                self.previous_moves = previous_moves\n            else:\n                self.move_count = predecessor.move_count + 1\n                self.previous_moves = [worker - predecessor.worker]\n        else:\n            self.move_count = 0\n            self.previous_moves = None\n        self._full_map = None\n\n    @property\n    def full_map(self):  # use lazy initialization;\n        if getattr(self, '_full_map', None) is None:\n            self._full_map = self.game.raw_map.fill(self.worker, self.boxes)\n        return self._full_map\n\n    def __eq__(self, game_state):  # to make == work and to support set of GameStates\n        return self.worker == game_state.worker and set(self.boxes) == set(game_state.boxes)\n\n    def __hash__(self):  # to make == work and to support set of GameStates\n        return hash((self.worker,))\n\n    def solved(self):\n        return self.boxes == self.game.goals\n\n    def move(self, d):\n        new_worker = self.worker + d\n        if self.full_map[new_worker] in OPEN_FOR_WORKER:\n            return GameState(new_worker, self.boxes, self.game, self)\n        elif self.full_map[new_worker] in BOX_CHARS and self.full_map[new_worker + d] in OPEN_FOR_BOX:\n            return self.move_box(new_worker, d)\n        else:\n            return None\n\n    def move_box(self, new_worker, d, moves=None):\n        i = self.boxes.index(new_worker)\n        new_boxes = self.boxes.copy()\n        new_boxes[i] = new_worker + d\n        return GameState(new_worker, new_boxes, self.game, self, moves)\n\n    def move_to(self, p):\n        moves = find_path(self.worker, p, self.full_map.open_for_worker)\n        if moves:\n            return GameState(p, 
self.boxes, self.game, self, moves)\n\n def check_2x2(self, b, dd):\n \"\"\"Check the 2x2 square with one corner at b and the opposite corner ad b + dd.\n Deadlock has occurred if the square contains only walls or boxes, and at least one box is not on a goal.\"\"\"\n walls = 0\n c = self.full_map[b]\n if c in SPACE + WORKER: # The box will be pushed to b\n boxes = 1\n boxes_on_goal = 0\n else:\n assert c in GOAL + WORKER_ON_GOAL\n boxes = 0\n boxes_on_goal = 1\n for d in [Point(dd.x, 0), Point(dd.x, dd.y), Point(0, dd.y)]:\n c = self.full_map[b + d]\n if c == WALL:\n walls += 1\n elif c == BOX:\n boxes += 1\n elif c == BOX_ON_GOAL:\n boxes_on_goal += 1\n if boxes + boxes_on_goal + walls == 4 and boxes != 0:\n return True\n else:\n return False\n\n def count_boxes_and_goals(self, p0, d):\n \"\"\"Count the number of boxes and goals along the line segment perpendicular to d\"\"\"\n n_boxes = 0\n n_goals = int(self.full_map[p0] is GOAL)\n for dp in [Point(d.y, -d.x), Point(-d.y, d.x)]:\n p = p0\n while True:\n p += dp\n c = self.full_map[p]\n if c in GOAL_CHARS:\n n_goals += 1\n elif c in BOX_CHARS:\n n_boxes += 1\n elif c is WALL:\n break\n return n_boxes, n_goals\n\n def anticipate_deadlock(self, b, d):\n \"\"\"Check if moving the box at b in the direction d causes a deadlock\"\"\"\n b = b + d\n if self.game.annotated_map[b] is NO_BOX:\n return True\n if self.game.annotated_map[b] is CAUTION_BOX and self.game.annotated_map[b - d] is not CAUTION_BOX:\n n_boxes, n_goals = self.count_boxes_and_goals(b, d)\n if n_boxes >= n_goals:\n return True\n # Check two 2x2 boxes in the direction of d\n dp = Point(d.y, -d.x) # perpendicular to d\n for dd in [d + dp, d - dp]:\n if self.check_2x2(b, dd):\n return True\n return False\n\n def number_of_boxes_along_line(self, p, pp):\n \"\"\"Count the number of boxes on the line joining p to pp\"\"\"\n n = 0\n if p.x == pp.x:\n for b in self.boxes:\n if b.x == p.x and (b.y - p.y) * (b.y - pp.y) <= 0:\n n += 1\n else:\n assert p.y == pp.y\n for b in self.boxes:\n if b.y == p.y and (b.x - p.x) * (b.x - pp.x) <= 0:\n n += 1\n return n\n\n def neighbors(self):\n \"\"\"Generate neighboring states by looking for boxes that can be pushed.\"\"\"\n for b_i, b in enumerate(self.boxes):\n for d in MOVE_DIRECTIONS:\n new_worker = b - d\n if self.full_map[b + d] in OPEN_FOR_BOX + WORKER_CHARS and not self.anticipate_deadlock(b, d):\n moves = find_path(self.worker, new_worker, self.full_map.open_for_worker)\n if moves is not None:\n moves.append(d)\n new_state = self.move_box(b, d, moves)\n if not self.predecessor or new_state != self.predecessor:\n yield new_state\n\n def is_goal(self):\n return self.solved()\n\n def cost(self):\n return self.move_count\n\n # heuristic() underestimates the number of moves needed to reach a solution; used by a_star().\n def heuristic(self):\n \"\"\"For each pairing of boxes with goals, sum the move counts found in game.move_count_maps.\"\"\"\n if self.solved():\n return 0\n n = len(self.boxes)\n if n > 6: # Too many permutations\n return self.heuristic1()\n min_move_count = INFINITY\n for p in permutations(range(n)):\n move_count = sum([self.game.move_count_maps[j][self.boxes[i]] for i, j in enumerate(p)])\n min_move_count = min(min_move_count, move_count)\n return min_move_count\n\n def heuristic_using_search(self): # only faster than the above brute-force method for n>=8\n \"\"\"Find the minimum move count sum over all box-goal pairings. 
(Uses search aot examining all permutations.)\"\"\"\n def search_pairings(box_i, goal_indices, current_move_count=0):\n \"\"\"Recursive search equivalent to looking at all permutations.\"\"\"\n nonlocal min_move_count\n box = self.boxes[box_i]\n if box_i == len(goal_indices) - 1: # last pairing\n move_count = current_move_count + move_count_maps[goal_indices[-1]][box]\n min_move_count = min(min_move_count, move_count)\n return\n # Stop recursion if underestimate exceeds the current min_move_count\n underestimate = current_move_count\n for b in self.boxes[box_i:]:\n underestimate += min([move_count_maps[i][b] for i in goal_indices[box_i:]])\n if underestimate >= min_move_count:\n return\n unallocated_indices = sorted(goal_indices[box_i:], key=lambda i: move_count_maps[i][box])\n for i, j in enumerate(unallocated_indices):\n rearranged_goal_indices = goal_indices[0:box_i] + \\\n [unallocated_indices[i]] + unallocated_indices[:i] + unallocated_indices[i+1:]\n new_base_move_count = current_move_count + move_count_maps[j][box]\n search_pairings(box_i + 1, rearranged_goal_indices, new_base_move_count)\n if underestimate >= min_move_count:\n return\n\n if self.solved():\n return 0\n n = len(self.boxes)\n if n > 6: # Too many permutations\n return self.heuristic1()\n move_count_maps = self.game.move_count_maps\n min_move_count = INFINITY\n search_pairings(0, list(range(n)))\n return min_move_count\n\n def heuristic1(self):\n \"\"\"Sum the minimum move counts found in game.min_move_count_map.\"\"\"\n if self.solved():\n return 0\n move_sum = 0\n for b in self.boxes:\n move_sum += self.game.min_move_count_map[b]\n return move_sum\n\n def heuristic0(self):\n \"\"\"Sum the distances from each box to the nearest goal.\"\"\"\n if self.solved():\n return 0\n move_sum = 0\n for b in self.boxes:\n min_move_count = INFINITY\n for g in self.game.goals:\n min_move_count = min(min_move_count, (b - g).l1_norm)\n move_sum += min_move_count\n return move_sum\n\n def solve(self):\n \"\"\"Use the A-star algorithm to search for a solution\"\"\"\n def progress_fn(state, states_seen, pq, elapsed_time):\n if self.game.screen:\n t = f\"{elapsed_time:6.1f}s: \"\n s = f\"{len(states_seen)} states, \"\n q = f\"{len(pq)} queued, \"\n m = f\"{state.move_count + state.heuristic()} moves\"\n if self.game.debug:\n print(t + s + q + m)\n state.full_map.display(self.game.screen)\n pygame.display.set_caption(t + m)\n pygame.display.flip()\n pygame.event.pump() # Need to tap the event queue in order for the display to update\n first_pass = True\n pause = False\n while first_pass or pause:\n first_pass = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key in (pygame.K_q, pygame.K_ESCAPE):\n return False\n elif event.unicode == ' ':\n pause = not pause\n\n return a_star(self, max_cost=1000, max_time=10 * 3600, max_states=1000000, progress_report=(progress_fn, 0.5))\n\n\nclass GameMap:\n def __init__(self, matrix):\n self.matrix = deepcopy(matrix)\n\n def make_raw(self):\n def line_find(chars):\n for i, char in enumerate(line):\n if char in chars:\n yield i\n\n worker = None\n boxes = []\n goals = []\n for y, line in enumerate(self.matrix):\n for x in line_find(WORKER_CHARS):\n assert worker is None # Only one worker is allowed\n p = Point(x, y)\n worker = p\n self.remove_worker(p)\n for x in line_find(BOX_CHARS):\n p = Point(x, y)\n boxes.append(p)\n self.remove_box(p)\n for x in line_find(GOAL_CHARS):\n goals.append(Point(x, y))\n assert worker is 
not None\n assert len(boxes) == len(goals)\n return worker, boxes, goals\n\n def annotate(self, worker):\n def constrained_path(c1, c2):\n return c1 is WALL or c2 is WALL or (c1 is NO_BOX and c2 is NO_BOX)\n\n def check_direction():\n map_modified = False\n pp_ = p + d\n dp_ = Point(d.y, -d.x)\n while self[pp_] in SPACE and constrained_path(self[pp_ + dp_], self[pp_ - dp_]):\n pp_ += d\n if self[pp_] in WALL + NO_BOX and pp_ - d != p:\n pp_ -= d\n map_modified = True\n while pp_ != p:\n self[pp_] = NO_BOX\n pp_ -= d\n return map_modified\n\n self.fill_inaccessible_with_wall(worker)\n # mark inside corners with NO_BOX or CAUTION_BOX\n marker_dict = {SPACE: NO_BOX, GOAL: CAUTION_BOX}\n corners = []\n for y, row in enumerate(self.matrix):\n for x, c in enumerate(row):\n p = Point(x, y)\n if c in SPACE + GOAL and \\\n (self[p + LEFT] == WALL or self[p + RIGHT] == WALL) and \\\n (self[p + UP] == WALL or self[p + DOWN] == WALL):\n self[p] = marker_dict[c]\n corners.append(p)\n # Repeatedly join NO_BOX squares along wall edges or similarly constrained vertical/horizontal paths\n keep_going = True\n while keep_going:\n keep_going = False\n for y, row in enumerate(self.matrix):\n for x, c in enumerate(row):\n if c in NO_BOX:\n p = Point(x, y)\n for d in [DOWN, RIGHT]: # Only need to check in 2 directions\n if check_direction():\n keep_going = True\n # Join corners along constrained vertical/horizontal paths containing goal with CAUTION_BOX\n disallow_box = WALL + NO_BOX\n for p in corners:\n for d in [DOWN, RIGHT]:\n pp = p + d\n dp = Point(d.y, -d.x)\n while self[pp] in SPACE + GOAL and constrained_path(self[pp + dp], self[pp - dp]):\n pp += d\n if self[pp] in disallow_box and pp - d != p:\n pp -= d\n while pp != p:\n self[pp] = CAUTION_BOX\n pp -= d\n\n def fill_inaccessible_with_wall(self, worker):\n accessible_map = deepcopy(self)\n frontier = [worker]\n while frontier:\n p = frontier.pop()\n accessible_map[p] = WORKER\n for d in MOVE_DIRECTIONS:\n pp = p + d\n if accessible_map[pp] in SPACE + GOAL:\n frontier.append(pp)\n for y, row in enumerate(self.matrix):\n for x, c in enumerate(row):\n p = Point(x, y)\n if self[p] is SPACE and accessible_map[p] is not WORKER:\n self[p] = WALL\n\n def fill(self, worker, boxes):\n full_map = deepcopy(self)\n full_map.add_worker(worker)\n for b in boxes:\n full_map.add_box(b)\n return full_map\n\n # query/set map contents using self[point]\n def __getitem__(self, point):\n if point.x < 0 or point.y < 0:\n return WALL\n try:\n return self.matrix[point.y][point.x]\n except IndexError:\n return WALL\n\n def __setitem__(self, point, content):\n if point.x < 0 or point.y < 0:\n raise IndexError\n self.matrix[point.y][point.x] = content\n\n def display(self, screen):\n background = 255, 226, 191\n screen.fill(background)\n for y, row in enumerate(self.matrix):\n for x, c in enumerate(row):\n screen.blit(blit_dict[c], (x * CELL_SIZE, y * CELL_SIZE))\n\n def remove_box(self, p):\n if self[p] == BOX:\n self[p] = SPACE\n else:\n assert self[p] == BOX_ON_GOAL\n self[p] = GOAL\n\n def add_box(self, p):\n if self[p] == SPACE:\n self[p] = BOX\n else:\n assert self[p] == GOAL\n self[p] = BOX_ON_GOAL\n\n def remove_worker(self, p):\n if self[p] == WORKER:\n self[p] = SPACE\n else:\n assert self[p] == WORKER_ON_GOAL\n self[p] = GOAL\n\n def add_worker(self, p):\n if self[p] == SPACE:\n self[p] = WORKER\n else:\n assert self[p] == GOAL\n self[p] = WORKER_ON_GOAL\n\n @property\n def size(self):\n x = 0\n y = len(self.matrix)\n for row in self.matrix:\n if len(row) > x:\n x = 
len(row)\n return x, y\n\n def __str__(self):\n string = \"\"\n for row in self.matrix:\n string += str.join(\"\", row) + '\\n'\n return string\n\n def open_for_worker(self, p):\n return self[p] in OPEN_FOR_WORKER\n\n def open_for_box(self, p):\n return self[p] in OPEN_FOR_BOX\n\n\nclass MoveCountMap:\n \"\"\"Tabulates the minimum number of moves to push a box to a particular goal\"\"\"\n\n def __init__(self, raw_map, goal):\n def worker_moves():\n saved_map_state = raw_map[box]\n raw_map[box] = BOX\n moves = find_path(new_worker, worker, raw_map.open_for_worker)\n raw_map[box] = saved_map_state\n if moves:\n return len(moves)\n else:\n return INFINITY\n\n self.matrix = []\n for row in raw_map.matrix:\n self.matrix.append([INFINITY] * len(row))\n\n frontier = []\n move_counts = {}\n self[goal] = 0\n box = goal\n for d in MOVE_DIRECTIONS:\n worker = box + d\n if raw_map.open_for_worker(worker):\n frontier.append((box, worker, 0))\n move_counts[(box, worker)] = 0\n\n while frontier:\n box, worker, move_count = frontier.pop()\n for d in MOVE_DIRECTIONS:\n new_box = box + d\n new_worker = new_box + d\n if raw_map.open_for_box(new_box) and raw_map.open_for_worker(new_worker):\n new_move_count = move_count + worker_moves()\n new_state = new_box, new_worker\n if new_state not in move_counts or new_move_count < move_counts[new_state]:\n move_counts[new_state] = new_move_count\n frontier.append((new_box, new_worker, new_move_count))\n if new_move_count < self[new_box]:\n self[new_box] = new_move_count\n\n def __str__(self):\n string = \"\"\n for row in self.matrix:\n for c in row:\n if c == INFINITY:\n string += \" * \"\n else:\n string += f\"{c:3} \"\n string += '\\n'\n return string\n\n # query/set map contents using self[point]\n def __getitem__(self, point):\n if point.x < 0 or point.y < 0:\n raise IndexError\n return self.matrix[point.y][point.x]\n\n def __setitem__(self, point, content):\n if point.x < 0 or point.y < 0:\n raise IndexError\n self.matrix[point.y][point.x] = content\n" }, { "alpha_fraction": 0.45706427097320557, "alphanum_fraction": 0.4662998616695404, "avg_line_length": 24.572864532470703, "blob_id": "2a9d944bebf8842219a75abd3e42f05c6be740d0", "content_id": "a5c3dfc26f29a8963b2e56ddbfe60b8481b1b727", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5089, "license_type": "permissive", "max_line_length": 114, "num_lines": 199, "path": "/find_path.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "from PriorityQueue import PriorityQueue\nfrom Point import Point\n\n\ndef find_path(a, b, is_open):\n \"\"\"\n :param a: Start Point\n :param b: Finish Point\n :param is_open: Function returning True if the Point argument is an open square\n :return: A list of Points containing the moves needed to get from a to b\n \"\"\"\n if a == b:\n return []\n if not is_open(b):\n return None\n moves = rectilinear_path(a, b, is_open) or direct_path(a, b, is_open) or find_path_using_a_star(a, b, is_open)\n return moves\n\n\ndef sgn(x):\n if x >= 0:\n return 1\n else:\n return -1\n\n\ndef sign_and_magnitude(x):\n if x >= 0:\n return 1, x\n else:\n return -1, -x\n\n\ndef rectilinear_path(a, b, is_open):\n def check_moves(moves):\n p = a\n for move in moves:\n p += move\n if not is_open(p):\n return None\n return moves\n\n horizontal_moves = [Point(sgn(b.x - a.x), 0)] * abs(b.x - a.x)\n vertical_moves = [Point(0, sgn(b.y - a.y))] * abs(b.y - a.y)\n if abs(a.x - b.x) > abs(a.y - b.y):\n return 
check_moves(horizontal_moves + vertical_moves) or \\\n               (vertical_moves and check_moves(vertical_moves + horizontal_moves))\n    else:\n        return check_moves(vertical_moves + horizontal_moves) or \\\n               (horizontal_moves and check_moves(horizontal_moves + vertical_moves))\n\n\ndef hv45_path(a, b, is_open):\n    \"\"\"Horizontal or vertical path with a 45-degree tail\"\"\"\n    horizontal_move = Point(sgn(b.x - a.x), 0)\n    vertical_move = Point(0, sgn(b.y - a.y))\n    moves = []\n    while a != b:\n        if abs(a.x - b.x) > abs(a.y - b.y):\n            move = horizontal_move\n        else:\n            move = vertical_move\n        a += move\n        if not is_open(a):\n            return None\n        moves.append(move)\n    return moves\n\n\n# I suspect a first-order delta-sigma modulator could give the same result\ndef direct_path(a, b, is_open):\n    dx, n_x = sign_and_magnitude(b.x - a.x)\n    dy, n_y = sign_and_magnitude(b.y - a.y)\n    n = n_x + n_y\n    p = a\n    moves = []\n    m = 0\n    while p != b:\n        m += 1\n        p_m = a + (b - a) * (m / n)\n        if (p + Point(dx, 0) - p_m).l1_norm <= (p + Point(0, dy) - p_m).l1_norm:\n            move = Point(dx, 0)\n        else:\n            move = Point(0, dy)\n        p += move\n        if not is_open(p):\n            return None\n        moves.append(move)\n    return moves\n\n\nUNIT_MOVES = [Point(1, 0), Point(-1, 0), Point(0, 1), Point(0, -1)]\n\n\ndef find_path_using_a_star(a, b, is_open):\n    \"\"\"Find a path from a to b using a uniform-step-cost version of the A* algorithm\"\"\"\n    def successor(p_):\n        for move in UNIT_MOVES:\n            pp = p_ + move\n            if is_open(pp):\n                yield pp\n\n    def traceback():\n        moves = []\n        p_ = b\n        pp = predecessor(p_)\n        while pp:\n            moves.append(p_ - pp)\n            p_ = pp\n            pp = predecessor(p_)\n        moves.reverse()\n        return moves\n\n    def predecessor(p_):\n        return node_dict[p_][0]\n\n    def g(p_):\n        return node_dict[p_][1]\n\n    def h(p_):\n        return (p_ - b).l1_norm\n\n    pq = PriorityQueue(a, h(a))\n    node_dict = {a: (None, 0)}  # dict contents are (predecessor, g)\n    while pq:\n        p0 = pq.pop()\n        if p0 == b:\n            return traceback()\n        g1 = g(p0) + 1\n        for p in successor(p0):\n            f = g1 + h(p)\n            if p not in node_dict:  # or f < existing_f: Not necessary since nodes are on a uniform-cost grid\n                pq.insert(p, f)\n                node_dict[p] = p0, g1\n    return None\n\n\ndef test():\n    def initialize_map():\n        nonlocal a, b, matrix\n        board = \"\"\"\n###############\n#             #\n#  #          #\n#  #          #\n#  #          #\n#a #          #\n#  #          #\n#  #          #\n############# #\n#            b#\n###############\n\"\"\"\n        lines = board.splitlines()\n        y = 0\n        for line in lines:\n            if line:\n                matrix.append(list(line))\n                if not a and 'a' in line:\n                    a = Point(line.index('a'), y)\n                    matrix[a.y][a.x] = ' '\n                if not b and 'b' in line:\n                    b = Point(line.index('b'), y)\n                    matrix[b.y][b.x] = ' '\n                y += 1\n\n    def mark_map():\n        matrix[a.y][a.x] = 'a'\n        matrix[b.y][b.x] = 'b'\n        p = a\n        if moves:\n            for move in moves:\n                p += move\n                if p != b:\n                    matrix[p.y][p.x] = '+'\n        if p != b:\n            matrix[p.y][p.x] = 'X'\n\n    def print_map():\n        for line in matrix:\n            print(str.join(\"\", line))\n\n    def is_open(p):\n        if p.x < 0 or p.y < 0:\n            return False\n        try:\n            return matrix[p.y][p.x] == \" \"\n        except IndexError:\n            return False\n\n    a, b, matrix = Point(), Point(), []\n    initialize_map()\n    moves = find_path(a, b, is_open)\n    mark_map()\n    print_map()\n\n\nif __name__ == '__main__':\n    test()\n" }, { "alpha_fraction": 0.4907497465610504, "alphanum_fraction": 0.49853944778442383, "avg_line_length": 25.33333396911621, "blob_id": "aa4190034f6b354e0f4a63acd71aaff1d52c62cc", "content_id": "aea4c7abf641f607d33e2c6eb338bce8f59a2a94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2054, 
"license_type": "permissive", "max_line_length": 102, "num_lines": 78, "path": "/PriorityQueue.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "class PriorityQueue:\n \"\"\"A queue in which the items are stored in increasing priority\"\"\"\n def __init__(self, item=None, priority=None):\n if priority is None:\n self.queue = []\n elif isinstance(priority, list):\n self.queue = list(zip(priority, item))\n self.check()\n else:\n self.queue = [(priority, item)]\n\n def __len__(self):\n return len(self.queue)\n\n def empty(self):\n return len(self.queue) == 0\n\n def insert_using_linear_search(self, item, priority):\n for i in range(len(self.queue)):\n priority_i, _ = self.queue[i]\n if priority_i >= priority:\n self.queue.insert(i, (priority, item))\n return\n self.queue.append((priority, item))\n\n def insert(self, item, priority):\n # Binary search\n m = -1\n n = len(self.queue)\n while n > m + 1:\n i = (m + n) // 2\n priority_i, _ = self.queue[i]\n if priority_i < priority: # < ensures most recent insertion is above equal-valued items\n m = i\n else:\n n = i\n self.queue.insert(n, (priority, item))\n\n def pop(self):\n try:\n priority, item = self.queue[0]\n del self.queue[0]\n return item\n except IndexError:\n return None\n\n def check(self):\n \"\"\"Verify priorities are non-decreasing\"\"\"\n p0 = None\n for priority, _ in self.queue:\n if p0 is not None:\n assert priority >= p0\n p0 = priority\n self.print()\n\n def print(self):\n for priority, item in self.queue:\n print(f\"{priority:5d}, {item}\")\n print()\n\n\ndef __test__():\n pq = PriorityQueue(\"a\", 5)\n pq.check()\n pq.insert(\"b\", 6)\n pq.check()\n pq.insert(\"c\", 3)\n pq.check()\n pq.insert(\"d\", 3)\n pq.check()\n pq.insert(\"e\", 4)\n pq.check()\n item = pq.pop()\n assert item is \"d\"\n\n\nif __name__ == '__main__':\n __test__()\n" }, { "alpha_fraction": 0.33485639095306396, "alphanum_fraction": 0.40665796399116516, "avg_line_length": 25.413793563842773, "blob_id": "f626c5cb06af0251c1abed7b536168fd362cce04", "content_id": "4b52d93e4365875b688a1ffce93024656a9af044", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "permissive", "max_line_length": 61, "num_lines": 58, "path": "/engineering_notation.py", "repo_name": "RichardSchreier/sokoban", "src_encoding": "UTF-8", "text": "import math\n\nprefixes_n = ['', 'm', 'u', 'n', 'p', 'f', 'a', 'z', 'y']\nprefixes_p = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']\n\n\ndef eng(x, nsd=3):\n if type(x) is str: # return a float\n if x[-1] in prefixes_n:\n multiplier = 10 ** (-3 * prefixes_n.index(x[-1]))\n x = x[:-1]\n elif x[-1] in prefixes_p:\n multiplier = 10 ** (3 * prefixes_p.index(x[-1]))\n x = x[:-1]\n else:\n multiplier = 1\n return float(x) * multiplier\n else: # return a string\n if x >= 0:\n sign = ''\n else:\n sign = '-'\n x = -x\n p = int(math.floor(math.log10(x)))\n x = round(x * 10 ** -p, nsd - 1)\n if x >= 10:\n x /= 10\n p += 1\n p3 = p // 3\n if p3 >= 0:\n p3 = min([p3, len(prefixes_p)])\n prefix = prefixes_p[p3]\n else:\n p3 = -min([-p3, len(prefixes_n)])\n prefix = prefixes_n[-p3]\n dp = p - 3 * p3\n x = x * 10 ** dp\n if nsd > dp:\n fmt = f\"%.{nsd - dp - 1}f\"\n else:\n fmt = \"%g\"\n return sign + (fmt % x) + prefix\n\n\nif __name__ == '__main__':\n print(eng(1, 4))\n print(eng(3.14159))\n print(eng(-31.4159))\n print(eng(314.159, 4))\n print(eng(314.159, 3))\n print(eng(314.159, 2))\n print(eng(314.159, 1))\n print(eng(1000))\n print(eng(999.9, 
4))\n print(eng(999.9, 3))\n print(eng(999.9, 2))\n print(eng(31.416e-6))\n print(eng('31.4k'))\n" } ]
9
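A minimal usage sketch for the find_path() helper in the sokoban record above, which tries a rectilinear path, then a "direct" path, then falls back to a uniform-step-cost A* search. The Point class below is a hypothetical stand-in covering only what this example touches (x/y fields, +, -, l1_norm, hashing); the real repo ships its own Point module, and the grid, coordinates, and import path here are invented for illustration.

from dataclasses import dataclass

@dataclass(frozen=True)
class Point:
    # Hypothetical stand-in for the repo's Point class (assumption).
    x: int = 0
    y: int = 0

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y)

    @property
    def l1_norm(self):
        return abs(self.x) + abs(self.y)

# A tiny test grid: '#' squares are walls, blanks are open floor.
GRID = ["#####",
        "#   #",
        "# # #",
        "#####"]

def is_open(p):
    # Out-of-range points count as walls, mirroring GameMap.__getitem__.
    return 0 <= p.y < len(GRID) and 0 <= p.x < len(GRID[p.y]) and GRID[p.y][p.x] == " "

# With the sokoban sources importable, the call would look like:
#   from find_path import find_path
#   moves = find_path(Point(1, 1), Point(3, 2), is_open)
# On this grid the rectilinear pass already succeeds, so moves would be the
# unit steps [Point(1, 0), Point(1, 0), Point(0, 1)] (right, right, down).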
weizhiyangq/python_train
https://github.com/weizhiyangq/python_train
967b58bd823c939b04331751a20cf162087ee89f
187c1228f1419be35994206d33ff89b1513d5d01
63b01cf9fcfc2667e37a7b6602d19bd009b2023a
refs/heads/master
2020-06-21T09:03:35.128977
2019-08-25T05:44:40
2019-08-25T05:44:40
197,402,484
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.651063859462738, "alphanum_fraction": 0.672340452671051, "avg_line_length": 15.714285850524902, "blob_id": "6f725efb7a4bff787bbe58596b055dbbb48c366e", "content_id": "d1f0ebaed4c0bd6265cb72aa753df89630ecbaea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 31, "num_lines": 14, "path": "/tanh_plot.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nx=np.arange(-4,4,0.01)\ny=[math.tanh(i) for i in x]\nfig=plt.figure()\nax=plt.subplot()\nplt.plot(x,y,'k')\nplt.title('y=tanh(x)')\nplt.ylabel('y')\nplt.xlabel('x')\nplt.show()\n\n" }, { "alpha_fraction": 0.581845223903656, "alphanum_fraction": 0.581845223903656, "avg_line_length": 24.69230842590332, "blob_id": "fc3201ff447145b7d606de3a368308bc5da2be35", "content_id": "7d66343d83a8b8a77f106306cac8ae52ccec08a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 46, "num_lines": 26, "path": "/wenjian.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "\nwith open('pi_digits.txt','a') as file_object:\n #contents=file_object.read()\n #print(contents.rstrip())\n #lines=file_object.readlines()\n #for line in lines:\n # print(line.rstrip())\n file_object.write(\"\\nI LOVE YOU QQ\\n\")\n file_object.write(\"I LOVE YOU WEIXIN\")\n file_object.write(\"我爱你python\")\n'''\nwhile True:\n print (\"beichushu:\")\n beichushu=input()\n if beichushu=='quit':\n break\n print(\"chushu:\")\n chushu=input()\n if chushu=='quit':\n break\n try:\n answer=int(beichushu)/int(chushu)\n except ZeroDivisionError:\n print(\"chushu can't be zero\")\n else:\n print(answer)\n'''\n\n\n\n" }, { "alpha_fraction": 0.5177304744720459, "alphanum_fraction": 0.6170212626457214, "avg_line_length": 13.199999809265137, "blob_id": "92895c3cf2d23503ebd2c0bb42f3de75d9494cdc", "content_id": "ddbd677ef2a9ad7220a2e8515e394332b9b2101f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 35, "num_lines": 10, "path": "/os_train/os_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 09:30:28 2019\n\n@author: YWZQ\n\"\"\"\n\nimport os\nwalk = os.walk('text_page_1')\nprint(list(walk))" }, { "alpha_fraction": 0.5632184147834778, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 21.592592239379883, "blob_id": "21933c04b33f25de5582a03a5bc2e31edf5a7529", "content_id": "576026e3394f10949729fa25af3dfed29ad5bc15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 69, "num_lines": 27, "path": "/hdf_csv_gz/save_csv_gz.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 20 14:20:59 2019\n\n@author: YWZQ\n\"\"\"\n\nimport pandas as pd\n\na = pd.DataFrame(columns=['a','b'])\na['a'] = [[1,3],[2,3],[3,3],[4,8]]\na['b'] =[2,2,2,2]\na.to_csv('a.csv.gz',compression='gzip',index=False)\na.to_hdf('a.hdf','w',index=False)\nprint(a.info())\nprint(a.iloc[0,0][1])\n\n'''\na_read = pd.read_csv('a.csv.gz')\n#print('a info:\\n',a_read.info())\nb = 
pd.read_hdf('a.hdf')\n#print('b info:\\n',b.info())\nprint(a_read)\nprint(a_read.iloc[0,0][0]) #可见,csv保存形式不会保留dataframe中的特定形式,如列表变为了字符串\nprint(b)\nprint(b.iloc[0,0][0]) #可见hdf形式可以保存dataframe中数据的格式,如列表,但hdf保存占内存比csv大\n'''" }, { "alpha_fraction": 0.6530705690383911, "alphanum_fraction": 0.665444552898407, "avg_line_length": 21.81052589416504, "blob_id": "c4cfb16f828a01a67a64a116a37f75a8b9fb49dc", "content_id": "3ef9476d7fe4474308e6724991ab11bfaa4f1bb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2922, "license_type": "no_license", "max_line_length": 123, "num_lines": 95, "path": "/jieba_demo.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 12 09:01:30 2018\n\n@author: YWZQ\n\"\"\"\n'''\n#最简单用法\nimport jieba\n\nseg_list=jieba.cut(\"我来到华南农业大学\",cut_all=True,HMM=False)\nprint(\"full mode:\",','.join(seg_list))\nseg_list=jieba.cut(\"我来到华南农业大学\",cut_all=True,HMM=True)\nprint(\"full mode:\",'/'.join(seg_list))\nseg_list=jieba.cut(\"我来到华南农业大学\",cut_all=False)\nprint(\"default mode\",'/'.join(seg_list))\nseg_list=jieba.cut_for_search(\"我来到华南农业大学\")\nprint('/'.join((seg_list)))\n\n'''\n\n\n'''\n#读写文件与结巴,并且自建划分词\nimport jieba\njieba.load_userdict('结巴自写词典.txt')#把想划分的词放在此文件里,然后会自动使用\n\n\n#jieba.add_word('红海行动') #直接增加划分词,和load_userdict效果差不多\n#jieba.add_word('看了')\n#jieba.del_word('看了') #删除划分词\n#jieba.del_word('红海行动')\n\n\nwith open(u'分词前评论.txt') as f: #读取原始文本,默认为‘r’,也可标注\n lines=f.readlines()\n \n for line in lines:\n print(line)\n seg_list=jieba.cut(line,cut_all=False)\n seg_list='/'.join(seg_list)\n print(seg_list)\n with open('E:\\pythonlianxi\\分词后.txt','a') as file_object: #‘a’和‘w’的区别是,‘w’会覆盖原来文本,‘b’是增添\n file_object.write(seg_list)\n \n #line=f.readline()\nprint('end')\n'''\n\n\n\n'''\n#功能是实现关键词提取\n#注意,结巴并行处理不能再window下进行\nimport jieba\nimport jieba.analyse\n#jieba.enable_parallel(3)\nwith open(r'E:\\pythonlianxi\\jiebaxiaoshuo.txt',encoding='UTF-8') as f:\n lines=f.readlines()\n for line in lines:\n seg_list=jieba.cut(line,cut_all=False)\n seg_list='/'.join(seg_list)\n with open(r'E:\\pythonlianxi\\duancixiaoshuo.txt','a',encoding='UTF-8') as f2:\n f2.write(seg_list)\n str1=''.join(seg_list)\n str1=''.join(lines)\n tags = jieba.analyse.extract_tags(str1,topK=100) #指定提取100个词作为关键词\n \n \n\nprint(','.join(tags))\nprint('end')\n\n'''\n\n'''\n#显示词性\nimport jieba\nimport jieba.posseg as psg\nmessage='他很满足,因为只要庄稼在,就代表他们一家人有吃不完的粮食,而让他更满足的,则是他每次看到自己的儿子时'\nwords=psg.cut(message)\nfor word,flag in words:\n print('%s %s'%(word,flag))\n'''\n\n\nimport jieba.analyse\n\nmessage='尽管他们看不到他,可孟浩还是跪在那里,磕了一个头,目中露出柔和,那双眼内蕴含了曾经无数年的追忆,那曾经的一幕幕画面,在孟浩的脑海里清晰的浮现,妖仙古宗认柯父的情感,那种首次体会到父爱的感情,在这一瞬,于孟浩心中不断地沉淀下来。'\n\njieba.analyse.set_stop_words('E:\\pythonlianxi\\jiebastopword.txt')\n\ntags=jieba.analyse.extract_tags(message)#提取关键词\n\nprint(','.join(tags))\n\n\n\n\n\n\n\n\n\n\n\n\n " }, { "alpha_fraction": 0.7404227256774902, "alphanum_fraction": 0.7443857192993164, "avg_line_length": 34.1860466003418, "blob_id": "beae3d2552bac7d550b8ccbbdc47ba895797916e", "content_id": "7a2fb458ba1411931143c7fa20f88b0efcafde07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 111, "num_lines": 43, "path": "/tfidf.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport jieba\nimport numpy as np\ntext = 
\"\"\"我是一条天狗呀!\n 我把月来吞了,\n 我把日来吞了,\n 天狗把一切的星球来吞了,\n 天狗把全宇宙来吞了。\n 我便是我了!\"\"\"\nsentences = text.split()\n#print(sentences)\nsentence_word = [list(jieba.cut(sentence)) for sentence in sentences]\ndocument = [\" \".join(sentence) for sentence in sentence_word]\n\nprint('document:\\n',document)\n\n#tfidf_model = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b').fit(document)#token_pattern删掉了一个\\\\w,就可以匹配一个字的词语了\n\ntfidf_model = TfidfVectorizer(min_df=0,max_df=0.999,token_pattern=r\"(?u)\\b\\w+\\b\").fit(document)\n\nprint('vocabulary:\\n',tfidf_model.vocabulary_)\nsparse_result = tfidf_model.transform(document)\ndense_result = tfidf_model.transform(document).todense()\nprint('sparse result\\n',sparse_result)\nprint(sparse_result.data)\nprint(sparse_result.indices)\nprint(dense_result)\nfeatureName = tfidf_model.get_feature_names()\nprint('featurename:\\n',featureName) #其实就是tfidf_model.vocabulary_中的各个词组成的一个列表,且是按键号顺序的,不过除去了各个词的键(即对应的数号id)\n\nstr_result = tfidf_model.transform(['一条 天狗 天狗'])\nstr_result_dense = tfidf_model.transform(['一条 天狗 天狗','天狗全宇宙']).todense()\nprint(str_result)\nprint(str_result.data)\nprint(tfidf_model.vocabulary_)\nvoca = tfidf_model.vocabulary_\n#voca_replace = dict([val,key] for key,val in tfidf_model.vocabulary_.items())\nstr_result_dense=np.array(str_result_dense)[0]\n#print(voca_replace)\n\nprint(str_result_dense)\ntfidf_list = np.array([str_result_dense[voca[i]] for i in '一条 天狗 天狗'.split()])\nprint(tfidf_list)\n" }, { "alpha_fraction": 0.6357142925262451, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 14.333333015441895, "blob_id": "1d9c871fcec578ddb94e117cff8d4becd0d5e90a", "content_id": "4506b3c58f118c886b198fc88d19d7dc5b9b49d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/lianxi.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nN=5\ny=[20,14,19,30,12]\n\nindex=np.arange(N)\npl=plt.bar(left=index,height=y)\n\nplt.show()\n\n\n" }, { "alpha_fraction": 0.6039035320281982, "alphanum_fraction": 0.6130884289741516, "avg_line_length": 31.296297073364258, "blob_id": "e54ba5a35ef7d0255ac1b22eef8539181d9c49d6", "content_id": "4c1b0120c1362d07713763addf788e9d385ec456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 871, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/lei.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "class Car():\n def __init__(self,paizi,xinghao,year):\n self.paizi=paizi\n self.xinghao=xinghao\n self.year=year\n self.lucheng=0\n def descriptive(self):\n long_name=str(self.year)+' '+self.paizi+' '+self.xinghao\n\n print(long_name)\n def read_lucheng(self):\n print(\"The car has run \"+str(self.lucheng)+' miles.')\n def update_lucheng(self,miles):\n if miles>=self.lucheng:\n self.lucheng=miles\n elif miles<self.lucheng:\n print(\"You can't roll back\")\n\nclass Electric(Car):\n def __init__(self,paizi,xinghao,year):\n super().__init__(paizi,xinghao,year)\n self.battery_size=100\n def describe_battery(self):\n print(\"The car has a \"+str(self.battery_size)+\" -kwh battery.\")\nmy_tesla=Electric('tesla','model',2013)\nmy_tesla.descriptive()\nmy_tesla.describe_battery()" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6605392098426819, "avg_line_length": 21.69444465637207, 
"blob_id": "efa7e9f926114e4896e1dc71b656a5d9b062f4c1", "content_id": "f39778b3dcce57dd61d655819b7f751c23c9872e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 838, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/libsvm2df.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 11 22:41:29 2019\n\n@author: YWZQ\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.datasets import dump_svmlight_file\n\n\ndf1 = pd.DataFrame()\ndf1['a']= [1,2,3]\ndf1['b'] = [1,1,2]\n#print(df1)\ndf1.to_csv('test_libsvm1.csv',index=None)\n\ndf2= pd.DataFrame()\ndf2['c']=[1,2]\ndf2['d']=[3,3]\ndf2.to_csv('test_libsvm2.csv',index=None,header=None)\ndf_read1 = pd.read_csv('test_libsvm1.csv')\ndf_read2= pd.read_csv('test_libsvm2.csv',header=None)\ndf_read1.columns=range(0,len(df_read1.columns))\n#print(df_read1)\n#print(df_read2)\ndf_concat=pd.concat([df_read1,df_read2])\nprint(df_concat)\n\n'''\ndf = pd.read_csv(\"data.txt\") # 第一个字段为target\ny = df.target # y为数据的label值\ndummy = (df.iloc[:, 1:])\nmat = dummy.as_matrix()\ndump_svmlight_file(mat, y, 'svm_output.libsvm', zero_based=False) \n'''" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.517241358757019, "avg_line_length": 16.600000381469727, "blob_id": "dca2c64ccbdb941186c23dcadd2405e683c1f899", "content_id": "c9e923a6573acb01a48d474a7eab061c557f9204", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/hanshu.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "def hello():\n print(\"hello\")\ndef test():\n hello()\nif __name__=='__main__':hello()" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5693069100379944, "avg_line_length": 13.88888931274414, "blob_id": "0904bc62c54339a40229a1acfa2e87e49f991fde", "content_id": "18ff7b127fc293f5d4e4be7c493663b5bae6c9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/joblib_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 8 22:49:15 2019\n\n@author: YWZQ\n\"\"\"\n\nimport joblib\nfrom joblib import Parallel, delayed\nfrom math import sqrt\n\n\nlist_list = [[1,2,3,4],[-1,3,2,1],[-9,3,-1,-4]]\ncount = 0\ndef my_fun(l):\n global count\n for i in l:\n if i>0:\n count+=1\n\nParallel(n_jobs=2)(delayed(my_fun)(l) for l in list_list)\nprint(count)\n\n'''\ndef fun(dic):\n \n''' " }, { "alpha_fraction": 0.4488501250743866, "alphanum_fraction": 0.49801746010780334, "avg_line_length": 17.761194229125977, "blob_id": "7120b8b59224ed3f4d58816400ef1bd27a1cf977", "content_id": "64fd2f15c3db5a8086e8be414d9a77aa7871d647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 79, "num_lines": 67, "path": "/pandas_re.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 12 07:11:29 2018\n\n@author: YWZQ\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport re\na = [['-', 'DDD', '+-'], ['3+', 'Ⅳ', '++++-'], ['Ⅳ度', '- 0mmol/L', '3']]\ndf = pd.DataFrame(a)\nprint(df)\n\n\n\ndef 
return_class(s):\n p5=r'\\+{3,}|HP|hp|3\\+'\n pattern5=re.compile(p5)\n p4=r'\\+{2,}'\n pattern4=re.compile(p4)\n p3=r'\\+|阳'\n pattern3=re.compile(p3)\n p2=r'\\+\\-'\n pattern2=re.compile(p2)\n p1=r'DDD'\n pattern1=re.compile(p1)\n \n \n if pattern1.findall(s):\n a=1\n elif pattern5.findall(s):\n a=5\n #if re.findall(\"\\+{2,}\",s):\n # a=2\n elif pattern4.findall(s):\n a=4\n elif pattern2.findall(s):#注意,这里先检测2否则2会被3掩盖\n a=2\n elif pattern3.findall(s):\n a=3\n \n if pattern1.findall(s):\n a=666\n \n else:\n a=0\n return (a)\n\n\n\nfor i in df.columns:\n df[i]=df[i].map(return_class)\n #df[i]=df[i].map(lambda s:1 if(re.findall(r\"\\-\",s))else 0)\n\n \n'''\n for j in df[i].index:\n #df[i][j]=''.join(re.findall(r\"\\d+\\.?\\d*\",df[i][j]))\n df[i][j]=(1 if(re.findall(r\"瘤|明|病史|史\",df[i][j]))else 0)\n''' \n \ndf.fillna(0,inplace=True)\ndf.replace('',0,inplace=True)\n\n \nprint(df)\n#print(df_re)\n\n\n\n\n" }, { "alpha_fraction": 0.6225961446762085, "alphanum_fraction": 0.6586538553237915, "avg_line_length": 17.727272033691406, "blob_id": "e9d2be162a6af8596ce41f44224814d2fce06434", "content_id": "6ece3974651e493005ebcb2fb90197b7e79da3f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/read_doc.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 11 13:13:19 2019\n\n@author: YWZQ\n\"\"\"\n\nimport docx\n\n\nimport pandas as pd\n\nfile_write=open('ganjv1.txt',mode='w+')\ndoc=docx.Document('ganjv1.docx') #注意docx只能读取docx文档不能读取doc文档\nprint(len(doc.paragraphs))\ntry:\n for para in doc.paragraphs:\n print(para.text)\n file_write.write(para.text)\n file_write.write('\\n')\nexcept UnicodeEncodeError:\n pass\n " }, { "alpha_fraction": 0.7111178636550903, "alphanum_fraction": 0.722661018371582, "avg_line_length": 30.97087287902832, "blob_id": "5034a1f7f73599bc1620c08286aa1c746e6029a4", "content_id": "5df01e60e550693042311739abce31ebafad768e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3902, "license_type": "no_license", "max_line_length": 147, "num_lines": 103, "path": "/word2vecdemo/word2vecdemo1.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 12:26:58 2018\n\n@author: YWZQ\n\"\"\"\n\n'''\n#例1\n# coding:utf-8\n# 引入 word2vec\nfrom gensim.models import word2vec\n\n# 引入日志配置\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# 引入数据集\nraw_sentences = [\"the quick brown fox jumps back home over the lazy dogs go home dd back home\",\"yoyoyo you and he go home now to sleep back home\"]\n\n# 切分词汇\nsentences= [s.split() for s in raw_sentences]\nprint(sentences)\n\n# 构建模型\nmodel = word2vec.Word2Vec(sentences, min_count=1)\n\n# 进行相关性比较\nprint('similarity between go and home:',model.similarity('go','home'))\n'''\nraw_corpus = [\"Human machine interface for lab abc computer applications\",\n \"A survey of user opinion of computer system response time\",\n \"The EPS user interface management system\",\n \"System and human system engineering testing of EPS\", \n \"Relation of user perceived response time to error measurement\",\n \"The generation of random binary unordered trees\",\n \"The intersection graph of paths in trees\",\n \"Graph minors IV Widths of trees and well quasi 
ordering\",\n \"Graph minors A survey\"]\n\nstoplist = set('for a of the and to in'.split(' '))\ntexts = [[word for word in document.lower().split() if word not in stoplist]\n for document in raw_corpus]\n\nfrom collections import defaultdict\nfrequency = defaultdict(int)\nfor text in texts:\n for token in text:\n frequency[token] += 1\n\nprecessed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]\nfor i in precessed_corpus:\n print(i) #显示每个文档,即每行,单词在所有文档累计个数超过1的单词\n \n\n\nfrom gensim import corpora\ndictionary = corpora.Dictionary(precessed_corpus)#为得到的单词表进行唯一性操作,得到无重复的单词字典\nprint(dictionary)\nfor i in dictionary:\n print(i,dictionary[i])\nprint(dictionary.token2id)#无重复的单词及对应id\nnew_doc = \"human computer interaction\"#准备检测用的文档\nnew_vec = dictionary.doc2bow(new_doc.lower().split())\nprint (new_vec) #显示检测文档单词id及个数,id对应在上述语料库得到的个数超过1的单词id,若检测文档的单词未在id表中出现,则不显示\nbow_corpus = [dictionary.doc2bow(text) for text in precessed_corpus]\n\nprint(bow_corpus)\n\n\n#显示权重\nfrom gensim import models\ntfidf = models.TfidfModel(bow_corpus)\nstring = \"system minors graph graph human\"\nstring_bow = dictionary.doc2bow(string.lower().split())#得到system和minors的id和个数\nstring_tfidf = tfidf[string_bow]#由于system在上面的语料库中出现4次,而minors2次,说明system是更常见的词,所以重要性小,但是,如果将system在需检测的文档中的出现次数提高,也可将他的重要性提高\nprint (string_bow) \nprint (string_tfidf)\n\nfor i in range(len(string_tfidf)):\n print(string_tfidf[i][1])\n\ntfidf_list=[string_tfidf[i][1] for i in range(len(string_tfidf))]\n\nprint(tfidf_list)\n\n\nimport numpy as np\ndtype=[('id',int),('score',float)]\nvalues=np.array(string_tfidf,dtype=dtype) #string_tridf转为数组(方便设置名称),并设置每列的名称\nvalues_sort=np.sort(values,order='score')#根据score列进行排序\nprint('sort:',values_sort)\n\nvalues_sort_list=values_sort.tolist()#转为list是因为list可以使用reverse,进行降序排列\nvalues_sort_list.reverse()\nprint('reverse list:',values_sort_list)\n\nprint(dictionary[values_sort_list[0][0]])#values_sort_list[0]表示最重要的词的元组,values_sort_list[0][0]表示最重要的词元组的id,dictionary[id]则是对应的词语\n\nprint(\"词语重要性排序:\")\nfor i in range(len(values_sort_list)):\n print(dictionary[values_sort_list[i][0]])" }, { "alpha_fraction": 0.6438305974006653, "alphanum_fraction": 0.6526703238487244, "avg_line_length": 18.824817657470703, "blob_id": "af1edb78dc7fda02819483143b163edf84b6d7f5", "content_id": "545d2fd834c67f546d50dc7737b34f7dba4f56b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3011, "license_type": "no_license", "max_line_length": 92, "num_lines": 137, "path": "/chatterbot_train/demo_1.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 09:37:59 2018\n\n@author: YWZQ\n\"\"\"\n'''\nfrom chatterbot import ChatBot\n\n\n# 构建ChatBot并指定Adapter\nbot = ChatBot(\n 'Default Response Example Bot',\n #storage_adapter='chatterbot.storage.StorageAdapter',\n logic_adapters=[\n {\n 'import_path': 'chatterbot.logic.BestMatch'\n },\n {\n 'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n 'threshold': 0.65,\n 'default_response': 'I am sorry, but I do not understand.'\n }\n ],\n trainer='chatterbot.trainers.ListTrainer'\n)\n\n# 手动给定一点语料用于训练\nbot.train([\n 'How can I help you?',\n 'I want to create a chat bot',\n 'Have you read the documentation?',\n 'No, I have not',\n 'This should help get you started: http://chatterbot.rtfd.org/en/latest/quickstart.html'\n])\n\n# 给定问题并取回结果\nquestion = 'How do I make an omelette?'\nprint(question)\nresponse = 
bot.get_response(question)\nprint(response)\n\nprint(\"\\n\")\nquestion = 'how to make a chat bot?'\nprint(question)\nresponse = bot.get_response(question)\nprint(response)\n\n'''\n\n\n\n'''\nfrom chatterbot import ChatBot\n\n\nbot = ChatBot(\n \"yang_wei_zhi_Math & Time Bot\",\n\n logic_adapters=[\n \n\n {\n 'import_path': 'chatterbot.logic.MathematicalEvaluation'\n },\n {\n 'import_path': 'chatterbot.logic.TimeLogicAdapter'\n },\n \n\n {\n 'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n 'threshold': 0.65,\n 'default_response': 'I am sorry, but I do not understand.'\n }\n ],\n \n input_adapter=\"chatterbot.input.VariableInputTypeAdapter\",\n output_adapter=\"chatterbot.output.OutputAdapter\"\n)\n\n# 进行数学计算\nquestion = \"What is 4 + 9?\" #记得输入要标准,如不能写成4+9,中间的空格要有,否则识别不出,就会作为时间问题了\nprint(question)\nresponse = bot.get_response(question)\nprint(response)\n\nprint(\"\\n\")\n\n# 回答和时间相关的问题\nquestion = \"what's the time?\"\nprint(question)\nresponse = bot.get_response(question)\nprint(response)\n\n\n# 进行数学计算\nquestion = \"What is sqrt 2?\"\nprint(question)\nresponse = bot.get_response(question)\nprint(response)\n\nprint(\"\\n\")\nquestion = \"how about sweather\"\n\nprint(question)\nresponse = bot.get_response(question)\nprint(response)\n'''\n#手动设置一些语料\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\n \n \nChinese_bot = ChatBot(\"Training demo\")\nChinese_bot.set_trainer(ListTrainer)\nChinese_bot.train([\n '你好',\n '你好',\n '有什么能帮你的?',\n '想买数据科学的课程',\n '具体是数据科学哪块呢?'\n '机器学习',\n])\n \n# 测试一下\nquestion = '你好ma'\nprint(question)\nresponse = Chinese_bot.get_response(question)\nprint(response)\n\nprint(\"\\n\")\n\nquestion = '请问哪里能数据科学的课程'\nprint(question)\nresponse = Chinese_bot.get_response(question)\nprint(response)" }, { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.523809552192688, "avg_line_length": 22.44444465637207, "blob_id": "ffd39c8b00ffcfd822fe455f371fe2562291fc8a", "content_id": "d8bc38703dfff97183e8c983466cce2a9b459059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/zidian.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "xinxi={'姓氏':'杨','名字':'靓仔','住址':'广东','年龄':str(20),'爱好':'数据挖掘'}\nfor i,j in xinxi.items():\n print('i: '+i)\n print('j: '+j)\n print(\"\\n\")\nprint(xinxi['姓氏'])\nxinxi['学历']='研究生'\nprint(xinxi['学历'])\nprint(xinxi)" }, { "alpha_fraction": 0.5186440944671631, "alphanum_fraction": 0.5355932116508484, "avg_line_length": 20, "blob_id": "8d7175359ab17adb2d2b5fdcd7fcbc97b3933375", "content_id": "a4a2cddd63dcf76b51d28cdb2508e46cc146f637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/lianxiif.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "car='subaru'\nprint(\"请输入您的答案:\")\n#daan=(car==answer)\ni=0\nwhile(i<3):\n answer=input()\n answer=answer.lower()\n i+=1\n if((i!=3)&(answer!=car)):\n print(\"错误,请重新输入:\")\n if((i==3)&(answer!=car)):\n print(\"错误,您的机会已用完\")\n if(answer==car):\n print(\"恭喜您,答案正确,请与工作人员兑换奖品\")\n\n" }, { "alpha_fraction": 0.5129310488700867, "alphanum_fraction": 0.5754310488700867, "avg_line_length": 16.846153259277344, "blob_id": "b8658456c9ac27e84d7b19e7e80685ccf7c6bc01", "content_id": "2463a71d06b6e102f4d2dc9ee58bc4825df80ea2", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/pd.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nd={'1':[7,1,1,0],'2':[3,1,0,1],'3':[0,9,1,2]}\nd_pd=pd.DataFrame(d)\nprint(d_pd)\n\n\nme=d_pd['1'].median()\nd_pd['1']=d_pd['1'].map(lambda x:0 if x<me else x )\ncolumns_name=d_pd.columns.tolist()\n\nrename=['tt'+i for i in columns_name]\nd_pd.columns=rename\n\nprint(d_pd)\n'''\nimport re\npat=re.compile(\".*?([\\u4E00-\\u9FA5])\")\ndef return_0(x):\n x=str(x)\n if pat.findall(x):\n x=0\n return x\nd_pd['1']=d_pd['1'].map(return_0)\nprint(d_pd)\n'''\n" }, { "alpha_fraction": 0.5931734442710876, "alphanum_fraction": 0.6088560819625854, "avg_line_length": 22.586956024169922, "blob_id": "f5c883d19b4f3be379112b66367a06bdc42d4739", "content_id": "829b1572d3155cd3b86d51834f8f117d84706774", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/all_text_textpage_data/all_text.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 09:27:34 2018\n\n@author: YWZQ\n\"\"\"\n\n\n'''\nimport os\nprint(os.listdir(\"text_page_1\"))\nfirst_list=os.listdir(\"text_page_1\")\nfirst_wether_text=[os.path.isfile(\"text_page_1/\"+i) for i in first_list]\nprint(first_wether_text)\nfirst_text_index=[i for i,x in enumerate(first_wether_text) if x==True]\nprint(first_text_index)\nfirst_text=[]\nfirst_text=list([first_list[i] for i in first_text_index])\nprint(first_text)\nfor i in first_text:\n first_list.remove(i)\nprint(first_list)\n'''\nimport os\nclass Solution(object):\n def dir_combine(self,now_dir):\n def dfs(self,now_dir,res):\n if os.path.isfile(now_dir):\n return res.append(now_dir)\n for i in os.listdir(now_dir):\n i=now_dir+\"/\"+i\n dfs(self,i,res)\n res=[]\n dfs(self,now_dir,res)\n return res\nif __name__==\"__main__\":\n res=Solution().dir_combine(\"text_page_1\")\n print(res)\n print(len(res))\n \n''' \n#也可以这样操作,不过后续还要进行一些处理,不一定简单\nimport os\nwalk = os.walk('text_page_1')\nprint(list(walk))\n'''" }, { "alpha_fraction": 0.7057496905326843, "alphanum_fraction": 0.7249154448509216, "avg_line_length": 22.36842155456543, "blob_id": "1aa0ef0bcab0f6b63b38f84576869419fb7ba074", "content_id": "804c6cae6e708ec1f2f1ba6330427fbb03fca1a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 96, "num_lines": 38, "path": "/automl_first_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 29 09:50:31 2019\n\n@author: YWZQ\n\"\"\"\n\nfrom auto_ml import Predictor\nfrom auto_ml.utils import get_boston_dataset\nimport numpy as np\n\nfrom auto_ml.utils_models import load_ml_model\n \ndf_train, df_test = get_boston_dataset()\n#df_train.iloc[1,:]=np.nan\nprint(df_train.info())\n\n\ncolumn_descriptions = {\n 'MEDV': 'output'\n , 'CHAS': 'categorical'\n}\n \nml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)\n \nml_predictor.train(df_train)\n \ntest_score = ml_predictor.score(df_test, df_test.MEDV)\nfile_name = ml_predictor.save()\n \ntrained_model = load_ml_model(file_name)\npredictions = 
trained_model.predict(df_test)\n#print(predictions)\n\ntrain_target_pre = df_train[['MEDV']]\ntrain_predictions = trained_model.predict(df_train)\ntrain_target_pre['pre'] = train_predictions\nprint(train_target_pre.head(100))" }, { "alpha_fraction": 0.4959016442298889, "alphanum_fraction": 0.5942623019218445, "avg_line_length": 12.55555534362793, "blob_id": "1800c21bccd8a17abee2bd95704e4b7f449dc760", "content_id": "0de0b056c63bc0d55e4d245e844737463957bc07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 26, "num_lines": 18, "path": "/test_timelong.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import time\na=list(range(0,100000000))\na_len=len(a)\nnum=0\nstart=time.time()\n'''\nfor i in range(0,a_len):\n if a[i]>10000:\n num+=1\n'''\nfor i in a:\n if i >10000:\n num+=1\n\nstop=time.time()\nlong=stop-start\nprint(long)\nprint(num)\n" }, { "alpha_fraction": 0.4901960790157318, "alphanum_fraction": 0.5085784196853638, "avg_line_length": 22.882352828979492, "blob_id": "f00be670c960070926a1e83da9d87ab2e0bd88e0", "content_id": "bbe05dbeab2bda493953d97a23491df870604910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 141, "num_lines": 34, "path": "/linstnode_structrue.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 6 21:05:43 2018\n\n@author: YWZQ\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n print(head.val)\n cur=None\n pre=None\n while head:\n cur=head\n head=head.next\n print('cur')\n print(cur.val)\n cur.next=pre\n #head=head.next #注意,这句要放在 cur.next=pre前面,否则第一次时,由于pre是None,而cur指向None,head也会跟着指向None,不符合 #while条件,结束\n pre=cur\n return cur\nif __name__=='__main__':\n a=Solution().reverseList(ListNode([5,6,7])).val\n print(a)\n " }, { "alpha_fraction": 0.5941320061683655, "alphanum_fraction": 0.6018744707107544, "avg_line_length": 15.476510047912598, "blob_id": "ef01e71ba5ddf76f800cc56e02494c9612f2a8b1", "content_id": "5ee075f7f5e4be10ce37698cc535f324c37f830c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "no_license", "max_line_length": 70, "num_lines": 149, "path": "/biye/qa/pdf2txt.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 16:54:20 2019\n\n@author: YWZQ\n\"\"\"\n\n\n\n#import pyocr\n\nimport importlib\nimport chardet\n\nimport sys\n\nimport time\nfrom tqdm import *\n\n \n\nimportlib.reload(sys)\n\ntime1 = time.time()\n\n# print(\"初始时间为:\",time1)\n\n \n\nimport os.path\n\nfrom pdfminer.pdfparser import PDFParser,PDFDocument\n\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n\nfrom pdfminer.converter import PDFPageAggregator\n\nfrom pdfminer.layout import LTTextBoxHorizontal,LAParams\n\nfrom pdfminer.pdfinterp import PDFTextExtractionNotAllowed\n\n \n\n\n\n# text_path = r'photo-words.pdf'\npdf_text_path = os.listdir('pdf_text/')\n#print(pdf_text_path)\n \n\ndef parse(i):\n\n '''解析PDF文本,并保存到TXT文件中'''\n\n fp_p = open('pdf_text/'+i,'rb') \n file_data=fp_p.read()\n 
result=chardet.detect(file_data)\n print('result')\n print(result)\n fp=file_data.decode(encoding=result['encoding'])\n\n #用文件对象创建一个PDF文档分析器\n\n parser = PDFParser(fp)\n\n #创建一个PDF文档\n\n doc = PDFDocument()\n\n #连接分析器,与文档对象\n\n parser.set_document(doc)\n\n doc.set_parser(parser)\n\n \n\n #提供初始化密码,如果没有密码,就创建一个空的字符串\n\n doc.initialize()\n\n \n\n #检测文档是否提供txt转换,不提供就忽略\n\n if not doc.is_extractable:\n\n raise PDFTextExtractionNotAllowed\n\n else:\n\n #创建PDF,资源管理器,来共享资源\n\n rsrcmgr = PDFResourceManager()\n\n #创建一个PDF设备对象\n\n laparams = LAParams()\n\n device = PDFPageAggregator(rsrcmgr,laparams=laparams)\n\n #创建一个PDF解释其对象\n\n interpreter = PDFPageInterpreter(rsrcmgr,device)\n\n \n\n #循环遍历列表,每次处理一个page内容\n\n # doc.get_pages() 获取page列表\n\n for page in doc.get_pages():\n\n interpreter.process_page(page)\n\n #接受该页面的LTPage对象\n\n layout = device.get_result()\n\n # 这里layout是一个LTPage对象 里面存放着 这个page解析出的各种对象\n\n # 一般包括LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal 等等\n\n # 想要获取文本就获得对象的text属性,\n\n for x in layout:\n\n if(isinstance(x,LTTextBoxHorizontal)):\n\n with open(r'pdf2text.txt','a') as f:\n\n results = x.get_text()\n results=results.replace('\\n','')\n results=results.replace(' ','')\n \n\n print(results)\n\n f.write(results +\"\\n\")\n\n \n\nif __name__ == '__main__':\n for i in tqdm(pdf_text_path):\n parse(i)\n\n time2 = time.time()\n\n print(\"总共消耗时间为:\",time2-time1)" }, { "alpha_fraction": 0.41532257199287415, "alphanum_fraction": 0.4354838728904724, "avg_line_length": 19.25, "blob_id": "e3ceefa44975157c7c64c099671800da471ca928", "content_id": "4f113a0122f9f02df6e51224a141bb98c08beeb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 64, "num_lines": 12, "path": "/pd_class_num.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport re\na = [['+', '++', '+-'], ['-+', '+++', '++++-'], ['阴', '5', '1']]\ndf = pd.DataFrame(a)\ndict={}\nfor i in df.iloc[:,1]:\n if i in dict:\n dict[i]+=1\n else:\n dict[i]=1\nprint(dict)\n \n" }, { "alpha_fraction": 0.38213133811950684, "alphanum_fraction": 0.3875134587287903, "avg_line_length": 20.511627197265625, "blob_id": "7cc5c86d2346a4a9ebc12608045ba0d0da822b59", "content_id": "125a445ba84592d41047722fd66c542e64fa5db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 991, "license_type": "no_license", "max_line_length": 53, "num_lines": 43, "path": "/caidan.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "hao=1\ncaidan=['米饭','番茄炒鸡蛋','白切鸡','排骨','白菜','茄子']\nwhile True:\n\n num=1\n liao=[]\n print(\"What do you want to add:\\n\"+str(num)+\".\")\n while True:\n zeng=input()\n if zeng in caidan:\n liao.append(zeng)\n num+=1\n print(\"OK\\n\"+str(num)+\".\")\n\n\n elif zeng=='quit':\n\n hao+=1\n print(str(hao)+\"号\"+\"That's all,you need\")\n for u in liao:\n print(u)\n break\n elif zeng=='delete':\n\n\n print(\"Which do you want to delete\")\n while True:\n\n jian=input()\n if jian in liao:\n liao.remove(jian)\n num-=1\n\n elif jian=='back':\n break\n\n\n else:\n print(\"请重新输入\")\n # print(str(num)+\".\")\n else:\n print(\"对不起,没有这个菜\")\n print(str(num)+\".\")\n\n\n\n\n" }, { "alpha_fraction": 0.7007512450218201, "alphanum_fraction": 0.7124373912811279, "avg_line_length": 26.89411735534668, "blob_id": "67af45ee5ac4158341b34c2146e963d2b46993cd", "content_id": 
"d49b62c69ea25de0e1c2e770c79ab24db403e2c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 128, "num_lines": 85, "path": "/word2vecdemo/w2v_chinese.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 16:18:12 2018\n\n@author: YWZQ\n\"\"\"\nimport jieba\n\nraw_corpus=[\"春节电影院是爆满人的\",\n \"很多人去看电影\",\n \"电影是个不错的选择\",\n \"红海行动\",\n \"今年有很多好看的电影\",\n \"好看\",\n \"什么电影好\"]\n\njieba.add_word('红海行动')\n\nraw_list=[]\n\n#汉语分词,jieba\nfor text in raw_corpus:\n seg_list=jieba.cut(text,cut_all=False,HMM=False)\n wen=' '.join(seg_list)\n #print(wen)\n raw_list.append(wen)\nprint(\"law_list\",raw_list)\ntexts = [[word for word in text.split()]\n for text in raw_list]\nprint(texts)\n\n#为各词语分配id等\nfrom gensim import corpora\ndictionary = corpora.Dictionary(texts)#为得到的单词表进行唯一性操作,得到无重复的单词字典\nprint('词典:',dictionary)\nfor i in dictionary:\n print(i,dictionary[i])\nprint(dictionary.token2id)#无重复的单词及对应id列表\nbow_corpus = [dictionary.doc2bow(text) for text in texts]\nprint(\"各句出现在词库中的词及个数:\",bow_corpus)\n\nnew_doc = \"电影红海行动是好看的\"#准备检测用的文档\nnew_doc=jieba.cut(new_doc)\nnew_doc=' '.join(new_doc)\nnew_vec = dictionary.doc2bow(new_doc.split())\nprint (\"检测新语句词语id及个数:\",new_vec) #显示检测文档单词id及个数,id对应在上述语料库得到的个数超过1的单词id,若检测文档的单词未在id表中出现,则不显示\n\n\n#显示权重,词语重要性排序实验\nfrom gensim import models\ntfidf = models.TfidfModel(bow_corpus)\n\nstring = \"红海行动是好看的电影\"\nstring=jieba.cut(string)\nstring=' '.join(string)\nstring_bow = dictionary.doc2bow(string.split())#得到system和minors的id和个数\nstring_tfidf = tfidf[string_bow]#由于system在上面的语料库中出现4次,而minors2次,说明system是更常见的词,所以重要性小,但是,如果将system在需检测的文档中的出现次数提高,也可将他的重要性提高\nprint ('新语句各词语在语库词的id及个数:',string_bow) \nprint ('新语句各词语在语库中的id及重要度tfidf:',string_tfidf)#得到一个列表,列表里是各个元组,元组包括id和重要度\n\nfor i in range(len(string_tfidf)):\n print('单纯显示重要度tfidf:',string_tfidf[i][1])\n'''\n#与上for循环等效\ntfidf_list=[string_tfidf[i][1] for i in range(len(string_tfidf))]\nprint('tfidf_list:',tfidf_list)\n\n'''\n\n#以下代码为依据tfidf大小进行词语排序\nimport numpy as np\ndtype=[('id',int),('score',float)]\nvalues=np.array(string_tfidf,dtype=dtype) #string_tridf转为数组(方便设置名称),并设置每列的名称\nvalues_sort=np.sort(values,order='score')#根据score列进行排序\nprint('sort:',values_sort)\n\nvalues_sort_list=values_sort.tolist()#转为list是因为list可以使用reverse,进行降序排列\nvalues_sort_list.reverse()\nprint('reverse list:',values_sort_list)\n\nprint(dictionary[values_sort_list[0][0]])#values_sort_list[0]表示最重要的词的元组,values_sort_list[0][0]表示最重要的词元组的id,dictionary[id]则是对应的词语\n\nprint(\"词语重要性排序:\")\nfor i in range(len(values_sort_list)):\n print(dictionary[values_sort_list[i][0]])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.621052622795105, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 20.846153259277344, "blob_id": "024bc19e0254c255d709d4bc95e6a34eaa94e259", "content_id": "efdedc0f1c42898fbd62e5dd7c902948932f950d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/numpy_save_restore.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "import numpy as np\n\na = [[1,11,111],[2,22,222],[3,33,333]]\na_array = np.array(a)\n\nnp.save(\"npsave_train_bin.npy\",a)\nnp.savetxt(\"npsave_train_txt.txt\",a)\n\na_numpyload = np.load(\"npsave_train_bin.npy\")\nprint(a_numpyload)\nimport 
pandas as pd\na_df = pd.DataFrame(a_numpyload)\nprint(a_df)\n\n" }, { "alpha_fraction": 0.5830815434455872, "alphanum_fraction": 0.6374622583389282, "avg_line_length": 16.473684310913086, "blob_id": "09062a6d983391fa5674eb58f6c22f15fb4a9408", "content_id": "1a2873286e41d35046736c092e2f3c2aab6f0d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/csv_train/csv_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 11:28:45 2019\n\n@author: YWZQ\n\"\"\"\n\nimport csv\nimport numpy as np\ncsv_file = csv.reader(open('testa.csv'))\n#print(list(csv_file))\n#print(np.array(list(csv_file)))\nfor i in range(10):\n a = next(csv_file)\n print(a)\n\nb=[1,2,3]\nb_filter = filter(lambda x:x,b)\nprint(list(b_filter))" }, { "alpha_fraction": 0.6654719114303589, "alphanum_fraction": 0.6917562484741211, "avg_line_length": 23.558822631835938, "blob_id": "1c4f65caef4834762d261dcdf0447d46a01261c3", "content_id": "5c56d43f23d0f19e9168f6262d8fdf003d3c5393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/lsa_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 25 12:37:32 2019\n\n@author: YWZQ\n\"\"\"\n\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndocs = ['$#%^2fw fsdddg^&@ fsdddg^&@ fsgs7 *&$hfh','dfs@#$ df45 fsdddg^&@ fgs%','fgs bh7 gh &^%']\nvectorizer = TfidfVectorizer(token_pattern=r'\\S+')\nX = vectorizer.fit_transform(docs)\nterms = vectorizer.get_feature_names()\n\nprint(terms)\nprint(X)\nprint(X.todense())\nprint('test:\\n')\nprint(vectorizer.transform(['fsdddg^&@ *&$hfh']).todense())\n\nn_topics = 2\nlsa = TruncatedSVD(n_topics)\nx2 = lsa.fit_transform(X)\nprint(x2)\nprint(lsa.components_) #shape:[n_topics,word]\n\n#from collections import defaultdict\n#term_lsa = defaultdict(list)\nterm_lsa = {}\nposition = 0\nfor i in terms:\n term_lsa[i] = lsa.components_[:,position] \nprint(term_lsa) \n" }, { "alpha_fraction": 0.4674556255340576, "alphanum_fraction": 0.5680473446846008, "avg_line_length": 10.333333015441895, "blob_id": "db9e5e5e9bb9eb8d8862f5084977347e0c2239db", "content_id": "55def4b7098e4a137f7c750532f715b4c53c7e83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/dectorator.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 25 15:02:26 2018\n\n@author: YWZQ\n\"\"\"\n\ndef pri(*args,**kwargs):\n print(args[2])\n print(kwargs)\n\na=1\nb=2\nc=3\npri(a,b,c)" }, { "alpha_fraction": 0.7001044750213623, "alphanum_fraction": 0.7304075360298157, "avg_line_length": 23.538461685180664, "blob_id": "ac7fc49fdcff02e424f0aaac5aace45400a9b357", "content_id": "3667fa652931517d9edf50eed6f344e7e70750f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 107, "num_lines": 39, "path": "/pymysql_train.py", "repo_name": "weizhiyangq/python_train", "src_encoding": "UTF-8", 
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport pymysql\nimport getpass\n\n\n\n\n\nprint(\"输入数据库密码:\")\npasswd=getpass.getpass(\"password:\") #这种方法好像是通过脚本运行,密码才隐藏。直接在控制台输入密码,还是会看得到\n\nconn=pymysql.connect(host='127.0.0.1',port=3306,user='root',passwd=passwd,db='db_demo1',charset='utf8')\n #127.0.0.1也可以写为 localhost\ncursor=conn.cursor(cursor=pymysql.cursors.DictCursor) #默认从mysql读取的数据是元祖形式,这样设置的话,返回就是字典形式\ncursor.execute(\"select * from test_evt\")\nprint('ok')\n\nrow_1=cursor.fetchone() #获取db_demo1数据库中的 test_evt表的第一行数据\nprint(row_1)\ncursor.scroll(-1,mode='relative') #表示相对当前位置往回移动一位。如果mode='absolute'则是直接指定游标到哪个位置\nrow_1_back=cursor.fetchone()\nprint(row_1_back)\nrow_2=cursor.fetchone() #获取db_demo1数据库中的 test_evt表的第二行数据\nprint(row_2)\n\n\n#row_remain=cursor.fetchall() #获取db_demo1数据库中的 test_evt表的除了第一第二行,剩下的数据\n\n\n\n#print(row_remain)\nconn.commit() #提交,不然无法保存新建或者修改的数据\ncursor.close() #关闭游标\nconn.close() #关闭连接\n" } ]
31
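A minimal sketch (not part of the repo above) of the TF-IDF term-ranking idea in w2v_chinese.py: gensim's TfidfModel already yields (term_id, weight) pairs, so the numpy structured-array sort used there can be a plain sorted(). The toy corpus and query are illustrative.

from gensim import corpora, models

# toy tokenized corpus; the real input comes from jieba.cut as in w2v_chinese.py
texts = [["red", "sea", "movie"], ["movie", "ticket"], ["red", "ticket"]]
dictionary = corpora.Dictionary(texts)
tfidf = models.TfidfModel([dictionary.doc2bow(t) for t in texts])

query = dictionary.doc2bow(["red", "sea", "movie"])
weights = tfidf[query]                      # list of (term_id, tfidf) pairs
ranked = sorted(weights, key=lambda p: p[1], reverse=True)
for term_id, weight in ranked:
    print(dictionary[term_id], round(weight, 3))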
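Likewise, a hedged sketch of the term-to-topic-vector mapping attempted at the end of lsa_train.py: the loop there never advances `position`, so every term receives column 0 of `lsa.components_`; enumerate() avoids that. `get_feature_names_out` assumes scikit-learn >= 1.0 (the script itself uses the older `get_feature_names`).

from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["apple banana apple", "banana cherry", "cherry apple"]  # illustrative
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(docs)
lsa = TruncatedSVD(n_components=2).fit(X)

# advance the column index together with the term (the original kept position at 0)
term_lsa = {term: lsa.components_[:, i]
            for i, term in enumerate(vectorizer.get_feature_names_out())}
print(term_lsa)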
johancordoba78/leccion-03-python-tipos-variables-expresiones
https://github.com/johancordoba78/leccion-03-python-tipos-variables-expresiones
9951375adb31d60fa569986b470c2c857d78909a
8b274d5d126019e94a85e76b064ada2f0cf9659e
860a02b524f4353caccfa7a0b5a1758b8b823e97
refs/heads/master
2022-11-30T01:37:40.430208
2020-08-14T00:16:21
2020-08-14T00:16:21
287,400,309
0
0
null
2020-08-13T23:36:45
2020-08-13T06:49:32
2020-08-13T06:49:30
null
[ { "alpha_fraction": 0.6090909242630005, "alphanum_fraction": 0.6090909242630005, "avg_line_length": 16.66666603088379, "blob_id": "8578d00707817b902e1c995a2bf9475d7e5e4ed9", "content_id": "a99421711e2d36444b082e763642b6bc56c26146", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Hola.py", "repo_name": "johancordoba78/leccion-03-python-tipos-variables-expresiones", "src_encoding": "UTF-8", "text": "# Hilera \" Hola Mundo\"\r\n\r\n# Hilera \"Hola mundo\"\r\nprint(\"Hola mundo\")\r\nprint (\"Hola América\")\r\nprint(\"Hola Costa Rica\")" } ]
1
liqiangvip/project2pdf
https://github.com/liqiangvip/project2pdf
27d76ec07c998b9a9dcce66b6f4688faadaf2b86
8a59ece97e7b8e5b6700588e02ba482d6d0ca306
ea0e06722a0b43e580b1f02124988f0707008815
refs/heads/master
2021-05-09T18:07:56.203222
2018-01-27T10:22:11
2018-01-27T10:22:11
119,153,922
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5732659697532654, "alphanum_fraction": 0.5827591419219971, "avg_line_length": 34.1129035949707, "blob_id": "2c7e57e46aaeb6cbc97c0ee5f7ef1018875ba919", "content_id": "3d62826542f9ddbb17b359983ea6fdc903c03e5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6887, "license_type": "no_license", "max_line_length": 89, "num_lines": 186, "path": "/1_project批处理(提取学生单个包并规范命名)-Final.py", "repo_name": "liqiangvip/project2pdf", "src_encoding": "UTF-8", "text": "# 批量转PDF.py\n# Created by liqiang on 10/01/2018.\n# coding=utf-8\n\nimport os, os.path, shutil, zipfile, time, sys\nif sys.platform == 'win32':\n from win32com.client import Dispatch, constants, gencache\n\n\nid2name = {}\nname2id = {}\npdf_count = 0\nword_count = 0\ntotal_count = 0\n\ndef outputFile(filename, output_dir):\n global total_count\n print(f'输出文件:{filename}-->{output_dir}')\n shutil.copy(filename, output_dir+'/'+os.path.basename(filename))\n total_count += 1\n\ndef word2pdf(wordFileName):\n #print('转换word文件:', wordfilename)\n f_name, extend_name = os.path.splitext(wordFileName)\n if extend_name.lower() in ('.docx', '.doc'):\n index = wordFileName.rindex('.')\n pdfFileName = wordFileName[:index] + '.pdf'\n print(f'转换PDF: -->{pdfFileName}')\n pass\n\ndef word2PDF(wordFile, pdfFile):\n print(f'转换PDF: -->{pdfFile}')\n w = gencache.EnsureDispatch('Word.Application')\n doc = w.Documents.Open(wordFile, ReadOnly=1)\n doc.ExportAsFixedFormat(pdfFile,\n constants.wdExportFormatPDF,\n Item=constants.wdExportDocumentWithMarkup,\n CreateBookmarks=constants.wdExportCreateHeadingBookmarks)\n w.Quit(constants.wdDoNotSaveChanges)\n \ndef idStuNameAsFileName(stu_dir, oldfilename):\n stuName = os.path.basename(stu_dir)\n file_name, extend_name = oldfilename.rsplit('.', 1)\n newfileName = os.path.join(stu_dir, name2id[stuName]+stuName+'.'+extend_name.lower())\n print(f\"文件改名: -->{newfileName}\")\n if os.path.exists(newfileName):\n pass\n else:\n os.rename(os.path.join(stu_dir, oldfilename), newfileName)\n return newfileName\n \ndef stuNameAsFileName(stu_dir, oldfilename):\n stuName = os.path.basename(stu_dir)\n file_name, extend_name = oldfilename.rsplit('.', 1)\n newfileName = os.path.join(stu_dir, stuName+'.'+extend_name.lower())\n print(f\"文件改名: -->{newfileName}\")\n os.rename(os.path.join(stu_dir, oldfilename), newfileName)\n return newfileName \n\ndef extract_all(zip_filename, extract_dir, filename_encoding='GBK'):\n zf = zipfile.ZipFile(zip_filename, 'r')\n for file_info in zf.infolist():\n filename = file_info.filename\n try:\n #使用cp437对文件名进行解码还原\n filename = filename.encode('cp437')\n filename = filename.decode(\"gbk\")\n except:\n #如果已被正确识别为utf8编码时则不需再编码\n filename = filename.decode('utf-8')\n pass# 解压调用\n print('解压... 获得...', filename)\n output_filename = os.path.join(extract_dir, filename)\n output_file_dir = os.path.dirname(output_filename)\n if not os.path.exists(output_file_dir):\n os.makedirs(output_file_dir)\n with open(output_filename, 'wb') as output_file:\n shutil.copyfileobj(zf.open(file_info.filename), output_file)\n zf.close()\n #print(f'删除zip文件... 
', zip_filename)\n #os.remove(zip_filename)\n\ndef loadStuInfo():\n global id2name, name2id\n with open('stuinfo.csv', encoding='utf-8') as fp:\n lines = [line.strip().split(',') for line in fp.readlines()]\n id2name = {k.strip():v.strip() for k,v in lines}\n name2id = {v.strip():k.strip() for k,v in lines}\n return \n\ndef processStuHW(stu_dir):\n global output_path\n print(f'处理学生目录:{stu_dir}')\n for f in os.listdir(stu_dir):\n if os.path.isfile(os.path.join(stu_dir, f)):\n index = f.rfind('.')\n if index == -1:\n continue\n extend_name = f[index:]\n if(f[0] == '.'): # 排除隐藏文件\n continue\n elif extend_name.lower() in ('.doc', '.docx'):\n if f.startswith('计算机科学与技术学院'):\n continue\n else:\n wordFileName = idStuNameAsFileName(stu_dir, f)\n if sys.platform == 'win32':\n# index = wordFileName.rindex('.')\n# pdfFileName = wordFileName[:index]+ '.pdf'\n# print(wordFileName, pdfFileName)\n# word2PDF(wordFileName, pdfFileName)\n# time.sleep(0.1)\n# outputFile(pdfFileName, output_path)\n word2pdf(wordFileName)\n outputFile(wordFileName, output_path)\n else:\n word2pdf(wordFileName)\n outputFile(wordFileName, output_path)\n elif extend_name.lower() in ('.pdf', '.zip', '.rar'):\n newfileName = idStuNameAsFileName(stu_dir, f) \n outputFile(newfileName, output_path)\n return\n\ndef processSingleStuDir(path):\n '''\n 对每个文件夹单独处理\n '''\n global name2id\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n if(f[0] == '.'):\n continue\n elif f in name2id:\n processStuHW(os.path.join(path, f))\n else:\n continue\n\ndef exactSingleStuZipFile(path):\n '''\n 处理一次作业或者学习报告的文件夹,里面含有很多个学生的打包作业\n '''\n global output_path\n # 先解压所有子目录中的压缩文件\n for f in os.listdir(path):\n if os.path.isfile(os.path.join(path, f)):\n index = f.rfind('.')\n if index == -1:\n continue\n extend_name = f[index:]\n if(f[0] == '.'): # 排除隐藏文件\n pass\n elif extend_name.lower() == '.zip':\n extract_all(os.path.join(path, f), os.path.join(path, f[:index]))\n time.sleep(0.1)\n\ndef processSingleProject(projectName):\n global output_path, total_count\n projectPath = os.path.join(os.getcwd(), projectName)\n output_path = projectPath+'_pdf'\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n exactSingleStuZipFile(projectPath)\n time.sleep(5)\n processSingleStuDir(projectPath)\n print(f'一共输出文件: {total_count}')\n\ndef processOutputDir(wordfile_dir):\n wordFiles = [fn for fn in os.listdir(wordfile_dir) if fn.endswith(('.doc','.docx'))]\n print(wordFiles)\n for wordFile in wordFiles:\n wordFile = os.path.abspath(wordFile)\n index = wordFile.rindex('.')\n pdfFile = wordFile[:index] + '.pdf'\n word2PDF(wordFile, pdfFile)\n time.sleep(0.1)\n\ndef main():\n global extract_dir, total_count\n loadStuInfo()\n projectNames = ['第1次学习报告(以太网)','第2次学习报告(二层交换)', '第3次学习报告(IS-IS)',\n 'LAB-RIP', 'LAB-VLAN', 'LAB-STP', 'LAB-OSPF']\n for proj in projectNames:\n processSingleProject(proj)\n print('Stage1: DONE!')\n\nmain()\n" }, { "alpha_fraction": 0.593055009841919, "alphanum_fraction": 0.604174792766571, "avg_line_length": 34.36879348754883, "blob_id": "866cf479eec4c798d26c92a55206355e23e49b03", "content_id": "6b57f669c33a767448c790fa5f20cb06a527a41e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5388, "license_type": "no_license", "max_line_length": 98, "num_lines": 141, "path": "/2_output目录批处理解压和转PDF-Final.py", "repo_name": "liqiangvip/project2pdf", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 13 14:12:29 2018\r\n\r\n@author: 
liqiangvip\r\n\"\"\"\r\nimport os, os.path, sys, shutil, zipfile, time, chardet\r\nif sys.platform == 'win32':\r\n from win32com.client import Dispatch, constants, gencache\r\nfrom unrar import rarfile\r\nfrom pathlib import Path\r\nfrom random import randint\r\n\r\nZIP_FILENAME_UTF8_FLAG = 0x800\r\n\r\ndef decodeZipFileName(filename):\r\n '''\r\n 对乱码的文件名进行解码还原\r\n '''\r\n try:\r\n #使用cp437对文件名进行解码还原\r\n filename = filename.encode('cp437')\r\n filename = filename.decode(\"gbk\")\r\n except:\r\n #如果已被正确识别为utf8编码时则不需再编码\r\n# filename = filename.decode('utf-8')\r\n pass# 解压调用\r\n return filename\r\n\r\ndef unzip_file2(zfile_path, unzip_dir, encoding='gbk'):\r\n zf = zipfile.ZipFile(zfile_path, 'r')\r\n if not os.path.exists(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n for file_info in zf.infolist():\r\n filename = file_info.filename\r\n if file_info.flag_bits & ZIP_FILENAME_UTF8_FLAG == 0:\r\n filename_bytes = filename.encode('437')\r\n guessed_encoding = chardet.detect(filename_bytes)['encoding'] or encoding\r\n filename = filename_bytes.decode(guessed_encoding, 'replace')\r\n if file_info.is_dir():\r\n os.mkdir(os.path.join(unzip_dir, filename))\r\n continue\r\n output_filename = os.path.join(unzip_dir, filename)\r\n output_file_dir = os.path.dirname(output_filename)\r\n if not os.path.exists(output_file_dir):\r\n os.makedirs(output_file_dir)\r\n with open(output_filename, 'wb') as output_file:\r\n shutil.copyfileobj(zf.open(file_info.filename), output_file)\r\n zf.close()\r\n\r\ndef unzip_file(zfile_path, unzip_dir):\r\n '''\r\n 解压ZIP文件,基本能正确识别乱码文件名/目录名\r\n '''\r\n zf = zipfile.ZipFile(zfile_path, 'r')\r\n if not os.path.exists(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n for file_info in zf.infolist():\r\n if file_info.is_dir():\r\n os.mkdir(os.path.join(unzip_dir, file_info.filename))\r\n continue\r\n filename = decodeZipFileName(file_info.filename)\r\n output_filename = os.path.join(unzip_dir, filename)\r\n output_file_dir = os.path.dirname(output_filename)\r\n if not os.path.exists(output_file_dir):\r\n os.makedirs(output_file_dir)\r\n with open(output_filename, 'wb') as output_file:\r\n shutil.copyfileobj(zf.open(file_info.filename), output_file)\r\n zf.close()\r\n# os.remove(zfile_path)\r\n \r\ndef unrar_file(rfile_path, unrar_dir):\r\n '''\r\n 解压rar文件\r\n '''\r\n unrarfile = rarfile.RarFile(rfile_path) #这里写入的是需要解压的文件,别忘了加路径\r\n unrarfile.extractall(path=unrar_dir) #这里写入的是你想要解压到的文件夹\r\n\r\ndef word2PDF(wordFile, pdfFile):\r\n print(f'转换pdf: {wordFile}')\r\n w = gencache.EnsureDispatch('Word.Application')\r\n doc = w.Documents.Open(wordFile, ReadOnly=1)\r\n doc.ExportAsFixedFormat(pdfFile,\r\n constants.wdExportFormatPDF,\r\n Item=constants.wdExportDocumentWithMarkup,\r\n CreateBookmarks=constants.wdExportCreateHeadingBookmarks)\r\n w.Quit(constants.wdDoNotSaveChanges)\r\n\r\ndef processOutputDir(output_dir):\r\n # RAR文件解压没问题\r\n rarFiles = [fn for fn in os.listdir(output_dir) if fn.endswith(('.rar', '.RAR'))]\r\n print(rarFiles)\r\n for rarFile in rarFiles:\r\n rfile_path = os.path.join(output_dir ,os.path.basename(rarFile))\r\n unrar_dir = os.path.join(output_dir, rarFile.rsplit('.')[0])\r\n unrar_file(rfile_path, unrar_dir)\r\n time.sleep(0.2)\r\n\r\n # zip解压部分会有乱码问题\r\n zipFiles = [fn for fn in os.listdir(output_dir) if fn.endswith(('.zip', '.ZIP'))]\r\n print(zipFiles)\r\n for zipFile in zipFiles:\r\n zfile_path = os.path.join(output_dir ,os.path.basename(zipFile))\r\n unzip_dir = zfile_path.rsplit('.')[0]\r\n unzip_file(zfile_path, unzip_dir)\r\n time.sleep(0.2)\r\n\r\n wordFiles = 
[fn for fn in os.listdir(output_dir) if fn.endswith(('.doc','.docx'))]\r\n print(wordFiles)\r\n for wordFile in wordFiles:\r\n wordFile = os.path.join(output_dir, os.path.basename(wordFile))\r\n print(wordFile)\r\n index = wordFile.rfind('.')\r\n if index ==-1:\r\n continue\r\n pdfFile = wordFile[:index] + '.pdf'\r\n if os.path.exists(pdfFile):\r\n pdfFile = wordFile[:index] + str(randint(1,999)) + '.pdf'\r\n word2PDF(wordFile, pdfFile)\r\n time.sleep(0.2)\r\n\r\ndef clearOutputDir(output_dir):\r\n delFiles = [fn for fn in os.listdir(output_dir) if fn.endswith(('.doc','.docx','rar', 'zip'))]\r\n for delFile in delFiles:\r\n delFile = os.path.join(output_dir ,os.path.basename(delFile))\r\n os.remove(delFile)\r\n time.sleep(0.1)\r\n\r\ndef main():\r\n global extract_dir, total_count\r\n for f in os.listdir():\r\n if os.path.isdir(f):\r\n if(f[0] == '.'): # 排除隐藏文件\r\n continue\r\n if f.endswith('_pdf'):\r\n processOutputDir(os.path.abspath(f))\r\n time.sleep(1)\r\n clearOutputDir(os.path.abspath(f))\r\n print('Stage2: DONE!')\r\n\r\nmain()" }, { "alpha_fraction": 0.5574065446853638, "alphanum_fraction": 0.5709140300750732, "avg_line_length": 37.69643020629883, "blob_id": "ef5451adc255130c933a6282e54746f77944812c", "content_id": "15b5df9a6fe00ec4ae383040205b4bd17ace4303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2253, "license_type": "no_license", "max_line_length": 102, "num_lines": 56, "path": "/3_output目录递归寻找子目录的文档.py", "repo_name": "liqiangvip/project2pdf", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 13 14:12:29 2018\r\n\r\n@author: liqiangvip\r\n\"\"\"\r\nimport os, os.path, sys, shutil, zipfile, time, chardet\r\nif sys.platform == 'win32':\r\n from win32com.client import Dispatch, constants, gencache\r\nfrom unrar import rarfile\r\nfrom pathlib import Path\r\nfrom random import randint\r\n\r\ndef word2PDF(wordFile, pdfFile):\r\n print(f'转换pdf: {wordFile}')\r\n w = gencache.EnsureDispatch('Word.Application')\r\n doc = w.Documents.Open(wordFile, ReadOnly=1)\r\n doc.ExportAsFixedFormat(pdfFile,\r\n constants.wdExportFormatPDF,\r\n Item=constants.wdExportDocumentWithMarkup,\r\n CreateBookmarks=constants.wdExportCreateHeadingBookmarks)\r\n w.Quit(constants.wdDoNotSaveChanges)\r\n\r\ndef findFilesInOutputDir(output_dir):\r\n for stu_dir in os.listdir(output_dir):\r\n if(stu_dir[0] == '.'): # 排除隐藏文件\r\n continue\r\n for root, dirs, files in os.walk(os.path.join(output_dir, stu_dir)): \r\n for filepath in files:\r\n if filepath.endswith(('.doc', '.docx')):\r\n wordFile = os.path.join(root, filepath)\r\n pdfFile = os.path.join(output_dir, stu_dir) + '.pdf'\r\n word2PDF(wordFile, pdfFile)\r\n time.sleep(0.2)\r\n elif filepath.endswith('.pdf'):\r\n srcFileName = os.path.join(root, filepath)\r\n dstfileName = os.path.join(output_dir, stu_dir) + '.pdf'\r\n if os.path.exists(dstfileName):\r\n dstfileName = os.path.join(output_dir, stu_dir) + str(randint(1,999)) + '.pdf'\r\n shutil.copy(srcFileName, dstfileName)\r\n if os.path.isdir(os.path.join(output_dir, stu_dir)):\r\n print('删除 ...',os.path.join(output_dir, stu_dir))\r\n shutil.rmtree(os.path.join(output_dir, stu_dir))\r\n\r\ndef main():\r\n global extract_dir, total_count\r\n for f in os.listdir():\r\n if os.path.isdir(f):\r\n if(f[0] == '.'): # 排除隐藏文件\r\n continue\r\n if f.endswith('_pdf'):\r\n findFilesInOutputDir(os.path.abspath(f))\r\n time.sleep(1)\r\n print('Stage2: DONE!')\r\n\r\nmain()" } ]
3
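The three project2pdf scripts above all wrestle with the same ZIP quirk: entry names written by Chinese-locale tools come back through zipfile as cp437-mangled text. A condensed, hedged sketch of that repair (the 0x800 flag check mirrors ZIP_FILENAME_UTF8_FLAG in the second script; the GBK fallback is an assumption carried over from it):

import zipfile

ZIP_FILENAME_UTF8_FLAG = 0x800  # set when the archive stored the name as UTF-8

def repaired_names(zip_path, fallback="gbk"):
    """Yield (ZipInfo, readable_name) pairs for a possibly GBK-named archive."""
    with zipfile.ZipFile(zip_path) as zf:
        for info in zf.infolist():
            name = info.filename
            if not info.flag_bits & ZIP_FILENAME_UTF8_FLAG:
                try:
                    # zipfile decoded the raw bytes as cp437; undo that and retry as GBK
                    name = name.encode("cp437").decode(fallback)
                except UnicodeError:
                    pass  # keep the cp437 guess if the GBK decode fails
            yield info, name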
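And the Word-to-PDF step, boiled down to its essentials (Windows-only; needs pywin32 and an installed Word). This mirrors word2PDF in the scripts above, with a Close call added; the abspath step is an assumption, since Word's COM interface resolves relative paths against its own working directory, not the script's:

import os
from win32com.client import constants, gencache

def word_to_pdf(word_path, pdf_path):
    # Word's COM interface is safest with absolute paths
    word_path, pdf_path = os.path.abspath(word_path), os.path.abspath(pdf_path)
    word = gencache.EnsureDispatch("Word.Application")
    try:
        doc = word.Documents.Open(word_path, ReadOnly=1)
        doc.ExportAsFixedFormat(pdf_path, constants.wdExportFormatPDF)
        doc.Close(False)
    finally:
        word.Quit(constants.wdDoNotSaveChanges)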
Jsunmin/2020_C-Python_101
https://github.com/Jsunmin/2020_C-Python_101
6585c503a859e648d47517aaa5cc7beaca6218ed
ac335558b83393a7120105d7f803ab9cfa646d71
025c86747f4fc5e9b359eecb6555dc31c7938c53
refs/heads/master
2022-04-20T14:19:14.803444
2020-04-18T07:20:55
2020-04-18T07:20:55
256,646,171
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4553440809249878, "alphanum_fraction": 0.49633967876434326, "avg_line_length": 27.724637985229492, "blob_id": "eaba30160bf0f027925bfabf3a41a608b2782d80", "content_id": "f4c3372aa571e159b0f6d0967609e23a05e81bdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2649, "license_type": "no_license", "max_line_length": 129, "num_lines": 69, "path": "/c/src/basic01/basic01/array2.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "//#include <stdio.h>\r\n//\r\n//#define SIZE1 2 // alias 개념! 변수처럼 선언할당이 아니라, 시작부터 S=3으로 규정!\r\n//#define SIZE2 5\r\n//int main(void) {\r\n//\tprintf(\"2차 배열\\n\");\r\n//\t// 다차원 배열 --> int a[2][3][4].. N차원 배열\r\n//\t// 배열에 배열을 만듦. --> int a[2][3];\r\n//\t// const int로 사이즈 할당은 불가능하다!!! ( 자세한 것은 밑에 설명 )\r\n//\tint a[SIZE1][SIZE2] = { {1,2,3,4,5}, {6,7,8,9,10} };\r\n//\t// a는 현재 (0,0) - 1이 들어간 주소를 갖고 있음. (메모리주소는 1,2,3,4 ... 10) 형식으로 4씩(int형) 증가할 것임\r\n//\tfor (int i = 0; i < SIZE1; i++) {\r\n//\t\tfor (int j = 0; j < SIZE2; j++) {\r\n//\t\t\tprintf(\"%d\\n\", a[i][j]);\r\n//\t\t}\r\n//\t}\r\n//\t// 직접 사이즈 계산해서 해보기\r\n//\tconst int size1 = sizeof(a) / sizeof(a[0]);\r\n//\tconst int size2 = sizeof(a) / size1 / sizeof(a[0][0]);\r\n//\tprintf(\"연산 재료: %d, %d, %d\\nsize1: %d, size2: %d\\n\", sizeof(a), sizeof(a[0]), sizeof(a[0][0]), size1, size2); // 데이터가 바뀌더라도 불변!\r\n//\tfor (int i = 0; i < size1; i++) {\r\n//\t\tfor (int j = 0; j < size2; j++) {\r\n//\t\t\tprintf(\"%d\\n\", a[i][j]);\r\n//\t\t}\r\n//\t}\r\n//\t// 문자열\r\n//\t// 문자열 출력\r\n//\tchar string[] = \"Is Seoul the capital city of Korea?\";\r\n//\tprintf(\"%s\\n\", string);\r\n//\tint ind = 0;\r\n//\twhile (1) {\r\n//\t\tif (string[ind] == '\\0') {\r\n//\t\t\tbreak;\r\n//\t\t}\r\n//\t\tprintf(\"%c\", string[ind]);\r\n//\t\tind++;\r\n//\t}\r\n//\tprintf(\"\\n\");\r\n//\t// 문자열 길이\r\n//\tint stringLength = 0;\r\n//\twhile (1) {\r\n//\t\tif (string[stringLength] == '\\0') {\r\n//\t\t\tbreak;\r\n//\t\t}\r\n//\t\tstringLength += 1;\r\n//\t}\r\n//\tprintf(\"stringLength: %d\\n\", stringLength);\r\n//\t// 실습\r\n//\t/*int number = 0;\r\n//\tchar[] subjects[3] = { \"국어\", \"영어\", \"수학\" };\r\n//\tint subjectArrSize = sizeof(subjects) / sizeof\r\n//\twhile (1) {\r\n//\t\tfor \r\n//\t\tprintf(\"%d번 학생의 \")\r\n//\t}*/\r\n//\treturn 0;\r\n//}\r\n\r\n// C는 정적할당\r\n// 정적할당: 컴파일시에 사이즈가 정해져있어야 함. & 동적할당: 런타임시에 사이즈 결정\r\n//int s; // 컴파일시 작동 ~ 선언 시점\r\n//s = 3; // 런타임시 작동 ~ 할당 시점\r\n//int a[s] = { ~}; // 정적할당인 C는 컴파일시에 사이즈가 정해져있어야 하므로, s가 null일 때 array 생성 --> 컴파일 에러!\r\n//const int size1 = 2;\r\n//const int size2 = 5;\r\n//int a[size3][size2] = { {1,2,3,4,5}, {6,7,8,9,10} };\r\n\r\n// #define은 변수가 아니라, alias - 대체명 정도! 선언과 할당이 일어나지 않음!\r\n// 동적할당의 경우, #define const int 둘 다 범용해서 쓸 수 있지만, 정적할당인 C인 경우, #define을 통해서밖에 선언 못한다!" 
}, { "alpha_fraction": 0.6032689213752747, "alphanum_fraction": 0.6389301419258118, "avg_line_length": 12.73469352722168, "blob_id": "07d57ff2dcee8d3edfbd525c9fa5913d87173c48", "content_id": "7c21dab668155a4b399022c3660cd89104c96313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 43, "num_lines": 49, "path": "/python/src/ex20200208_sample.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "import turtle as t\n\nweek1 = 1\na = 20\nb = 10\nprint(a + b)\nprint(a ** b)\nprint(a / b)\nprint(a // b) # 나눈 몫\nprint(a % b) # 나눈 나머지\nprint('튜토리얼', week1, '주차') # 띄어쓰기 적용해야 함\nprint('튜토리얼' + str(week1) + '주차') # 띄어쓰기 안됨\n# 짧은 주석은 이렇게 쓰고..\n\n'''\n 긴 주석은 이렇게 쓰자!!\n'''\n\n# 쉽게 따라하는 파이썬\nprint('Hello')\nprint('Sunmin')\n\ndistance = 100\ntriangleDegree = 120\n# 삼각형 그리기\nt.color('red')\nt.fd(distance)\nt.left(triangleDegree)\nt.fd(distance)\nt.left(triangleDegree)\nt.fd(distance)\nt.left(triangleDegree)\n\n# 사각형 그리기\nt.color('green')\nt.pensize(3)\nt.fd(distance)\nt.lt(90)\nt.fd(distance)\nt.lt(90)\nt.fd(distance)\nt.lt(90)\nt.fd(distance)\nt.lt(90)\n\n# 원 그리기\nt.color('blue')\nt.pensize(5)\nt.circle(distance)\n" }, { "alpha_fraction": 0.4162049889564514, "alphanum_fraction": 0.43144044280052185, "avg_line_length": 17.026315689086914, "blob_id": "86c356f9f565f1b725e2c01cdb3e74701d3d5cb8", "content_id": "bc9cfde76520ee61e0505b2994bbd6b88bc3a7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1618, "license_type": "no_license", "max_line_length": 75, "num_lines": 76, "path": "/c/src/basic01/basic01/condition.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n#include <math.h>\r\n\r\n//int main(void) {\r\n\t//if else\r\n\t//int year;\r\n\t//printf(\"연도를 입력하시오: \");\r\n\t//scanf_s(\"%d\", &year);\r\n\t//if ( ( year % 4 == 0 ) && ( year % 100 != 0 ) || ( year % 400 == 0 ) ) {\r\n\t//\tprintf(\"입력된 %d년은 윤년입니다.\\n\", year);\r\n\t//} else {\r\n\t//\tprintf(\"입력된 %d년은 윤년이 아닙니다.\\n\", year);\r\n\t//}\r\n\t//printf(\"%d\", 2^3);\r\n\r\n\t////switch\r\n\t//char grade;\r\n\t//printf(\"당신의 학점은?\");\r\n\t//scanf_s(\" %c\", &grade);\r\n\t//switch (grade) {\r\n\t//case 'A' :\r\n\t//\tprintf(\"Excellent!\\n\");\r\n\t//\tbreak;\r\n\t//case 'B' :\r\n\t//case 'C' :\r\n\t//\tprintf(\"Good\\n\");\r\n\t//\tbreak;\r\n\t//case 'D':\r\n\t//\tprintf(\"Poor..\\n\");\r\n\t//\tbreak;\r\n\t//case 'F':\r\n\t//\tprintf(\"Fail!\\n\");\r\n\t//\tbreak;\r\n\t//default :\r\n\t//\tprintf(\"잘못된 입력입니다.\\n\");\r\n\t//\tbreak;\r\n\t//}\r\n\r\n\t////while\r\n\t//int i = 0;\r\n\t//int whileSum = 0;\r\n\t//while (i <= 100 ) {\r\n\t//\twhileSum += i;\r\n\t//\ti++;\r\n\t//}\r\n\t//printf(\"%d\\n\", whileSum);\r\n\r\n\t//// for\r\n\t//int forSum = 0;\r\n\t//for (int i = 0; i <= 100; i++) {\r\n\t//\tforSum += i;\r\n\t//}\r\n\t//printf(\"%d\\n\", forSum);\r\n\r\n\t//// dowhile\r\n\t//// 보통 사용자 인풋의 검증용으로 자주 활용된다.\r\n\t//// 걍 while 써도 다 처리됨..\r\n\t//char color;\r\n\t//do {\r\n\t//\tprintf(\"신호등 색상을 입력하세요( r, y, g ): \");\r\n\t//\tscanf_s(\"%c\", &color);\r\n\t//\tprintf(\"%c\", color);\r\n\t//\tgetchar();\r\n\t//} while (color != 'r' && color != 'y' && color != 'g');\r\n\t//switch (color) {\r\n\t//case 'r' :\r\n\t//\tprintf(\"정지! \\n\");\r\n\t//\tbreak;\r\n\t//case 'y':\r\n\t//\tprintf(\"조심! \\n\");\r\n\t//\tbreak;\r\n\t//case 'g':\r\n\t//\tprintf(\"진행! 
\\n\");\r\n\t//\tbreak;\r\n\t//}\r\n//}" }, { "alpha_fraction": 0.5305378437042236, "alphanum_fraction": 0.5674567222595215, "avg_line_length": 16.41269874572754, "blob_id": "12017f3e6041e64d2ed6f3779902fb4865890948", "content_id": "d56cbd86ffabe867476b87cd401b721210a36afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2604, "license_type": "no_license", "max_line_length": 64, "num_lines": 126, "path": "/python/src/basic01/week1_book.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "import turtle as t\nimport time\nimport random\n\nfor x in range(10):\n print('Hello') # 들여쓰기를 통해 for 문의 블록을 체크한다\nprint('bye') # for 문 종료\n\n'''\ndistance = 100\ntriangleDegree = 120\n# 삼각형 그리기\nt.color('aqua')\nfor x in range(3):\n t.fd(distance)\n t.left(triangleDegree)\n\n# 사각형 그리기\ntriangleDegree = 90\nt.color('blue')\nfor x in range(4):\n t.fd(distance)\n t.left(triangleDegree)\n \n# range (기본 0부터 시작해서, 2번쨰 숫자의 앞자리까지)\nprint(list(range(5)), list(range(1, 10)))\nprint('[0-4]')\nfor x in range(5):\n print(x)\nprint('[1-10]')\nfor x in range(1, 11):\n print(x)\n\n# 오각형그리기\nn = 5\nt.color('purple')\nt.begin_fill()\nfor x in range(n):\n t.fd(50)\n t.left(360/n)\nt.end_fill()\n\n# 이쁜 원\nn = 50\nt.bgcolor('black')\nt.color('green')\nt.speed(0)\nfor x in range(n):\n t.circle(80)\n t.lt(360/n)\n\n# 이쁜 사각형\nangle = 89 # 각도에 따라 이쁘게 나타남\nt.bgcolor('black')\nt.color('yellow')\nt.speed(0)\nfor x in range(200):\n t.fd(x)\n t.lt(angle)\nt.done() # 화면 안닫고 유지\n'''\n'''\n# 키보드 인풋\nname = input('what is your name?')\nprint('Hello', name)\nx = input('?')\ny = input('?')\nintX = int(x) # 키보드 입력으로 받은 값은 string이다 int로 꼭 바꾸자!\nintY = int(y)\nprint(intX*intY)\n\n# 키보드 인풋 게임\ninput('엔터를 누르고 20초를 셉니다')\nstart = time.time() # 위에서 입력이 들어와야 실행된다!\ninput('20초 후 다시 엔터를 누릅니다')\nend = time.time() # 마찬가지로 위 입력이 안들어오면 실행X!\net = end - start\nprint('실제 시간 : ', et, '초')\nprint('차이 : ', abs(et - 20), '초')\n\n# if문\nx = input(\"1+2 = \")\nintX = int(x)\nif intX == 3:\n print('구우욷')\nelse:\n print('ㅋㅋㅋ')\n\n# random 모듈\nt.shape('turtle')\nt.speed(0)\nfor x in range(500):\n a = random.randint(1, 360)\n b = random.randint(1, 20)\n t.setheading(a)\n t.forward(b)\n\na = random.randint(1, 30)\nb = random.randint(1, 30)\nprint(a, '+', b, '=')\nx = input()\nintX = int(x)\nif a + b == intX:\n print('오오')\nelse:\n print('zzz')\n'''\n\n\ndef play_random_number_game():\n n = random.randint(1, 30)\n try_num = 0\n while True:\n try_num += 1\n assume = input('어떤 숫자일까?? 
(0~30) [시도횟수: %d] ' % try_num)\n assumeInt = int(assume)\n if assumeInt == n:\n print('정답')\n break\n elif assumeInt > n:\n print('응아냐~ 더 작아~')\n else:\n print('응아냐~ 더 커~')\n\n\nplay_random_number_game()\n" }, { "alpha_fraction": 0.46115192770957947, "alphanum_fraction": 0.5013529062271118, "avg_line_length": 32.06578826904297, "blob_id": "162eb4fedada3768e4de32b77467e9fde827fc7f", "content_id": "2409fac7433280830e4a5df3cf1cd94c79088645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3357, "license_type": "no_license", "max_line_length": 118, "num_lines": 76, "path": "/c/src/basic01/basic01/dataNtype.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n\r\n//int main(void) {\r\n//\t// 연산자 : + - * / %\r\n//\tint a = 10; double b = 3; // 만약 b 또한 정수이면, 나누기 값은 정수(몫) & 이를 %f에 할당함으로써 0이 된다.\r\n//\tprintf(\"나누기 : %f\\n\", a / b);\r\n//\tint c = 10, d = 3; \r\n//\tprintf(\"c / d : %f\\n\", (double)c / d); // 정수와 정수의 나누기지만, 강제 형변환을 통해 결과값이 실수에 %f 할당되도록 함\r\n//\tprintf(\"c / d 몫 : %d\\n\", c / d); // 나누기 값은 정수(몫)\r\n//\tprintf(\"c %% d 나머지 : %d\\n\", c % d); // 나누기 값은 정수(나머지)\r\n//\r\n//\t// 각각의 자료형 byte 표시\r\n//\tprintf(\"%d \\n\", sizeof(short));\r\n//\tprintf(\"%d \\n\", sizeof(int));\r\n//\tprintf(\"%d \\n\", sizeof(long));\r\n//\tprintf(\"%d \\n\", sizeof(long long));\r\n//\tprintf(\"%d \\n\", sizeof(char));\r\n//\r\n//\t// 삼항 연산자\r\n//\tint e = 20, f = 10;\r\n//\t// 두 수 중에서 큰 수 출력\r\n//\tint g = (e > f) ? e : f;\r\n//\tprintf(\"큰 값은: %d\\n\", e);\r\n//\t// 참이면 수행 거짓이면 하지 않음!\r\n//\tint h = 10;\r\n//\t(h > 0) ? printf(\"양수\\n\") : printf(\"음수\\n\");\r\n//\t// 짝/홀수 판별기\r\n//\tint i = 7;\r\n//\t(7 % 2 == 1) ? printf(\"홀수\\n\") : printf(\"짝수\\n\");\r\n//\r\n//\t// 정밀도 문제\r\n//\tfloat x = 1.234567890123456789;\r\n//\tdouble y = 1.234567890123456789;\r\n//\tlong double z = 1.234567890123456789;\r\n//\tprintf(\"float의 크기 = %d, x = %.25f \\n\", sizeof(float), x);\r\n//\tprintf(\"double의 크기 = %d, y = %.25f \\n\", sizeof(double), y);\r\n//\tprintf(\"long double의 크기 = %d, z = %.25f \\n\", sizeof(long double), z);\r\n//\r\n//\t// 문자 입출력\r\n//\t//char initial;\r\n//\t//char grade;\r\n//\t//printf(\"영어 이름 첫 글자는 무엇인가요: \");\r\n//\t//scanf_s(\"%c\", &initial);\r\n//\t//printf(\"프로그래밍 과목 학점은요? (A-F): \");\r\n//\t//scanf_s(\" %c\", &grade); // 공백을 둠으로써 엔터키를 구분해서 input을 가져온다! ( 공백 없으면, enter가 바로 들어감;; )\r\n//\t//// keyboard -> c 오는 과정 중에, inputsteam 생성. 보내기 위한 enter 또한 스트링으로 들어감. -> 이렇게 들어간 enter값 소멸을 위해 \" %c\" or getchar();\r\n//\t//printf(\"\\n\");\r\n//\t//printf(\"%c군의 프로그래밍 과목 성적은 %c입니다. \\n\", initial, grade);\r\n//\tchar initial2;\r\n//\tchar grade2;\r\n//\tprintf(\"re 영어 이름 첫 글자는 무엇인가요: \");\r\n//\tscanf_s(\"%c\", &initial2);\r\n//\tchar aw = getchar(); // input stream 에서 enter를 뺀다!\r\n//\tprintf(\"%c, 체크체크\\n\", aw);\r\n//\tprintf(\"re 프로그래밍 과목 학점은요? (A-F): \");\r\n//\tscanf_s(\"%c\", &grade2);\r\n//\tprintf(\"\\n\");\r\n//\tprintf(\"re %c군의 프로그래밍 과목 성적은 %c입니다. \\n\", initial2, grade2);\r\n//\r\n//\t// 온도 연산자\r\n//\tdouble f_temp, c_temp;\r\n//\tprintf(\"화씨온도를 입력하시오: \");\r\n//\tscanf_s(\"%lf\", &f_temp);\r\n//\tc_temp = 5 / 9.0 * (f_temp - 32); // c_temp = 5 / 9 * (f_temp - 32); = 0 ~ 5 / 9 정수형 결과는 0이기 때문이다.\r\n//\tprintf(\"섭씨온도는 %f입니다. \\n\", c_temp);\r\n//\r\n//\t//교재108page 예제\r\n//\tint year, month, day;\r\n//\tprintf(\"오늘의 날짜를 입력(YYYY.MM.DD):\");\r\n//\t//.을 구분자로 해서 입력 ( ' ' , 다 됨! 
)\r\n//\tscanf_s(\"%dw%dw%d\", &year, &month, &day); // int가 바로 안들어가면 튕겨낸다!\r\n//\tprintf(\"년 : %d \\n\", year);\r\n//\tprintf(\"월 : %d \\n\", month);\r\n//\tprintf(\"일 : %d \\n\", day);\r\n//\treturn 0;\r\n//}" }, { "alpha_fraction": 0.42536115646362305, "alphanum_fraction": 0.4686998426914215, "avg_line_length": 29.25, "blob_id": "03cc0b1dec2692fc9d2543391f75a504c2d564ea", "content_id": "00f83ccdd7e8b056fcd601a43f2f3a284b650eac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 895, "license_type": "no_license", "max_line_length": 112, "num_lines": 20, "path": "/c/src/basic01/basic01/20190321.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "// 헤더파일 (.h) - 함수들의 정의 ( 어떤 함수가 정의 되어있는지 목록 ) ~ 메타데이터\r\n#include <stdio.h> \r\n\r\n //무조건 이 프로젝트의 시작과 끝은 main 함수\r\n//int main(void) {\r\n//\t/*\r\n//\t** 이스케이프 문자 : \\n \\\"(특수문자) \\t\r\n//\t*/\r\n//\tprintf(\"Hello \\\"C\\\" World\\n\");\r\n//\tprintf(\"반갑습니다.\");\r\n//\t// 도구 -> 옵션 -> 편집.선택 ( 드래그 주석 처리 및 해제 커스텀 가능 ( ctrl + / && alt + / )\r\n//\r\n//\t// 정수 출력 - printf는 문자열만 받을 수 있음! printf(10) -- X\r\n//\tprintf(\"10은 정수\\n\");\r\n//\tprintf(\"%d은 정수\\n\", 10);\r\n//\tprintf(\"%d + %d = %d\", 50, 10, 50+10);\r\n//\tprintf(\"%d / %d = %d\", 50, 13, 50 / 13.); // 13뒤에 . 붙이기: 정수 / 실수 = 실수 ~ 묵시적 형변환 처리 ( 만약에 . 안붙이면, 정수/정수 = 정수 )\r\n//\tprintf(\"my age is %d\\n\", 20);\r\n//\treturn 0;\r\n//}" }, { "alpha_fraction": 0.4097963273525238, "alphanum_fraction": 0.4548981487751007, "avg_line_length": 25.779220581054688, "blob_id": "605d2b1e9e0043e4bb64efe939074459e723008b", "content_id": "8695900f61cdd81f265af40d2b97513fee2d1dae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2490, "license_type": "no_license", "max_line_length": 58, "num_lines": 77, "path": "/python/src/basic01/week2_class.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "import turtle as t\nimport time\n# 리스트: 인덱스를 이용해 데이터 접근\na = [ 1, 2, 3 ]\nprint(a)\n\n# 튜플: 인덱스를 이용해 데이터 접근 / 값 수정 불가능\nb = (10, 20, 30)\n# 패킹 언패킹\nc = 10, 20, 30 # 소괄호 생략 가능 ( 패킹 )\nprint(b)\nprint(c)\nc1, c2, c3 = c\nprint( c1, c2, c3) # ( 언패킹 )\n\n# 스위치\nb,a = a,b\nprint(a, b) # a와 b를 바꿔서 대입한다\n\n# 딕셔너리: 키를 통해 데이터 접근\nw = { 'a': 1, 'b': 2, 'c': 3 } # 파이썬은 key도 스트링으로 감싸줘야 한다!\n\n# range 함수 # 인자 ( 시작, 끝, 건너뛰는 조건 )\ndef range_study():\n print( list( range(0, 10, 1) ) )\n # for i in range(50, 100):\n # if i % 2 == 1:\n # print(i)\n for i in range(51, 100, 2): # 이 방법이 훨씬 효율적이다!!\n print(i)\n num = int( input('몇 단 보고 싶니?') )\n for x in range(1, 10):\n print( \"%d x %d = %d\" % ( num, x, num * x ) )\n# range_study()\n# 실습\ndef practice():\n # 실습3\n # num1 = input('3의 배수 어디까지?')\n # print( list( range(0, int(num1) + 1, 3) ) )\n # 실습4\n while True:\n num2 = input('숫자1?')\n num3 = input('숫자2?')\n oper1 = input('연산자?')\n if num2 == 'q' or num3 == 'q' or oper1 == 'q':\n print('계산기 종료')\n break\n print( eval( \"%s %s %s\" % (num2, oper1, num3) ) )\n # 실습5\n # dicA = {1: 94, 2: 87, 3: 91, 4: 75, 5: 92}\n # list = ''\n # for key, value in dicA.items():\n # if value >= 90:\n # list += ( str(key) + ' ' )\n # print(list)\n # 실습5-1\n # names = ['홍길동', '이순신', '김순희', '이철수']\n # ans = ''\n # for name in names:\n # score = input('%s의 판매 수량?? ' % name)\n # starScore = '*' * int(score)\n # ans += f'{name} : {starScore}\\n'\n # print(ans)\n # 실습6\n # dicB = {'이름': '', '신장': 0, '몸무게': 0}\n # for key, value in dicB.items():\n # result = input('당신의 %s?? 
' % key)\n # dicB[key] = result\n # ave_wight = ( int(dicB['신장']) - 100 ) * 0.9\n # status = '적정 체중입니다.'\n # print(ave_wight, int(dicB['몸무게']) - ave_wight)\n # if int(dicB['몸무게']) - ave_wight > 5:\n # status = '비만입니다.'\n # elif int(dicB['몸무게']) - ave_wight < -5:\n # status = '너무 말랐네요.'\n # print(f'{dicB[\"이름\"]}님은 {status}')\npractice()\n" }, { "alpha_fraction": 0.5439637303352356, "alphanum_fraction": 0.5715282559394836, "avg_line_length": 29.489360809326172, "blob_id": "3138713a59fef1a016978335777b663d3d911c54", "content_id": "19422400233e42083b7aba0628cce4de1a1dd72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4452, "license_type": "no_license", "max_line_length": 119, "num_lines": 94, "path": "/c/src/basic01/basic01/mainNMemory.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "/*\nmain 함수에 인자 전달\n프로그램 실행시 인자 전달\n\nint main(int argc, char* argv[]) { // 1인자가 인자 수 & 2인자가 인자들의 배열 ( 해당 인자들은 포인터로 들어오게 됨 )\n // ex basic01 i like c -> argc: 4 / argv (length 4) [ basic01의 포인터, i의 포인터, like의 포인터, c의 포인터 ] = ex [ 100, 108, .. ]\n \n // argument 값 출력\n for(int i = 0; i < argc; i++) {\n printf(\"%s\", argv[i]); // basic01 i like c\n }\n \n return 0;\n}\n*/\n\n/*\n전처리기: 본격적인 컴파일에 앞서, 소스 파일의 지시자( #~~ )를 처리하는 일종의 컴파일러\n #define: 매크로 정의\n #include: 파일 포함 - <stdio.h>: 컴파일러가 제공하는 라이브러리 헤더 파일, \"mylib.h\": 사용자가 만든 헤더 파일\n #define: 매크로 정의 - 기호 상수에 상수 리터럴 저장 ( #define PI 3.14 ... r * PI ... )\n\t- 간단한 함수도 저장 가능 \n\t ex1. #define SQUARE(x) ((x) * (x))\n\t ex2. #define SWAP = ( (t) = (x), (x) = (y), (y) = (t) )\n\t ~ 어떤 자료형에도 활용 가능!!\n #ifdef, endif ...\n/*\n 메모리 구분\n ROM - 읽기전용 메모리 공간 / 비휘발 공간\n - Text segment: 프로그램 코드와 초기 상수 (읽기 전용 데이터) 들이 들어 있는 공간.\n\n RAM - 읽기 쓰기 가능 메모리 공간 ~ heap, stack / 휘발 공간\n - Data segment: 프로그램 실행시 필요로 하는 전역, 정적 변수\n - stack memory (임시 메모리): main 블록부터 차례대로 쌓이는 공간 ( 함수 선형 흐름에 따라 쌓이고 처리되는 메모리 공간 )\n - heap memory (사용자 메모리): 동적으로 영역 할당하는 저장공간 (사용자가 직접 메모리 공간 만들 때, malloc)\n*/\n\n\n// in 수업\n//코드 영역 - 실행할 프로그램 코드 저장\n//데이터 영역 - 전역변수, static변수..\n//힙 영역 - 프로그래머가 관리하는 메모리\n//스택 영역 - 지역변수, 매개변수\n\n#include <stdlib.h> // malloc 함수 포함\nint main() {\n\t// 메모리 동적 할당: 프로그램 실행 도중 동적으로 메모리 할당 받는 것 ( 필요시 사용 & 끝나면 반환 )\n\n\t// c는 기본적으로 정적 할당\n\tint a = 3;\n // int arr[a]; // - c는 정적할당으로 컴파일 하는데. 
그 시점에 a는 null임 -> int arr[3] : 정적할당!\n\t\n\t// 프로그램 실행 시점에 사용 메모리 크기 결정하기 (동적 할당)\n\t// malloc(): 힙 영역에 메모리 공간을 할당하는 함수 ( 생성된 동적 메모리 주소를 반환함 )\n\tvoid* p1 = malloc(4); // 4byte 할당 & 형은 아직 모르니 void\n\tvoid* p2 = malloc(8); // 8byte 할당 & 형은 아직 모르니 void\n\t// 형을 모르면, *p로 데이터 가져올 수도 없음;; 어디까지 긁어올지 모르잖아..\n\t//-> 형을 같이 선언해줘야 함!\n\n\t// int* 형으로 메모리 생성\n\t// heap에 4byte 가져오는데, 강제 형변환으로 int 타입화!\n\tint* pInt = (int*)malloc(sizeof(int)); // p는 포인터주소인데, 그 안에 데이터는 int형\n\tdouble* pDoub = (double*)malloc(sizeof(double)); // double형\n\t*pInt = 10;\n\t*pDoub = 3.14; // 동적 메모리 사용!\n\tprintf(\"저장된 값1: %d, %.2f\\n\", *pInt, *pDoub);\n\tfree(pInt); \n\t// printf(\"저장된 값: %d\\n\", *pInt); ~ 반납 후, 동적 메모리 활용 불가!\n\tfree(pDoub);\n\t// 정수 2개 저장\n\tint* p2Int = (int*)malloc( sizeof(int) * 2 );\n\t*p2Int = 11;\n\t*(p2Int+1) = 12;\n\tprintf(\"size: %d // 저장된 값2: %d, %d\\n\", sizeof(p2Int), *p2Int, *(p2Int +1));\n\n\t// 정수 3개 저장\n\tint c = 3;\n\tint* userMem = (int*)malloc(c * sizeof(int)); // 12byte 할당\n\t*userMem = 20;\n\t*(userMem + 1) = 30;\n\t*(userMem + 2) = 40;\n\tprintf(\"저장된 값3: %d, %d, %d\\n\", *userMem, *(userMem + 1), *(userMem + 2)); // 이렇게\n\tprintf(\"저장된 값3: %d, %d, %d\\n\", userMem[0], userMem[1], userMem[2]); // 또는 이렇게 값 출력\n\tfree(p2Int);\n\tfree(userMem); // 동적 메모리 반납!\n\t// 동적 메모리 반납이 안되어서 사용가능한 메모리가 점점 줄어드는 현상: 메모리 누수 (memory leak)\\\n\n\t// 동적 메모리를 통한 배열 사이즈 결정\n\tint* arr = (int*)malloc(sizeof(int) * 10); // 입력받은 (10) 수만 큼 배열 사이즈 결정\n\t// 정확히는, 입력받은 수 만큼 메모리 공간 할당 - 이를 배열처럼 활용!\n\n\t// realloc() : 기존에 존재하는 동적메모리의 메모리 사이즈를 변경해주는 함수\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5255497097969055, "alphanum_fraction": 0.5506348609924316, "avg_line_length": 18.815950393676758, "blob_id": "4762577441baf52418006b817c8500f303cbc08b", "content_id": "71089f493e252d46930b391c66f9425f102dabe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3523, "license_type": "no_license", "max_line_length": 86, "num_lines": 163, "path": "/python/src/basic01/week4_class.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "# 클래스: 객체를 만들기 위한 틀\n# 강아지 클래스\nclass Dog:\n # 클래스 필드\n type = '강아지'\n leg = 4\n\n # 메소드\n def setName(self, name):\n # 개별 인스턴스 필드 설정 ( 인스턴스 = self )\n self.name = name\n\n def setAge(self, age):\n # 개별 인스턴스 필드 설정 ( 인스턴스 = self )\n self.age = age\n\n def run(self):\n print('뛴다')\n\n def eat(self):\n print(self)\n print('먹다')\n\n\n# d1 = Dog()\n# d1.eat()\n# d1.setName('큰북이')\n# d1.setAge(11)\n#\n# d2 = Dog()\n# d2.eat()\n# d2.setName('꼬북이')\n# d2.setAge(9)\n# print(d1.name, d1.age, d1.type)\n# print(d2.name, d2.age, d2.type)\n\n\n# 계산기 클래스\n# 속성: 색, 메이커\n# 기능: 덧셈 (매개변수 필요)\nclass Cal:\n color = 'red'\n maker = 'casio'\n\n def add(self, a, b): # self는 없애면 안된다!\n print(self)\n return a + b\n\n def sub(self, a, b):\n return a - b\n\n\n# c1 = Cal()\n# print(c1.add(3, 2), c1.sub(3, 2), c1.maker) # c1.add('a', 3, 2) ~ 인자 3갠데 4개 줬다는 에러!\n\n\n# 자동차 클래스\nclass Car1:\n type = 'SUV'\n\n def __init__(self, color): # 초기화 메서드\n self.setColor(color)\n self.power = False\n\n def setColor(self, color):\n self.color = color\n\n def setPowerToggle(self):\n self.power = not self.power\n\n\n# c1 = Car1('red')\n# print(c1.color, c1.power)\n# c2 = Car1('blue')\n# print(c2.color, c2.power)\n# c1.setColor('yellow')\n# c1.setPowerToggle()\n# print(c1.color, c1.power)\n# c2.setPowerToggle()\n# c2.setPowerToggle()\n# print(c2.color, c2.power)\n\n\nclass Car2:\n maker = '현기차'\n\n def __init__(self, name): # 생성자 함수\n self.speed = 0\n self.name = 
name\n\n def speedUp(self):\n self.speed += 1\n print(self.name, ':', self.speed)\n\n def speedDown(self):\n self.speed -= 1\n print(self.name, ':', self.speed)\n\n\n# c3 = Car2(input('차명은??'))\n# print(c3.name, c3.speed)\n# while True:\n# no = input('1:Up / 2:Down / 0: 종료 ')\n# if no == '1':\n# c3.speedUp()\n# elif no == '2':\n# c3.speedDown()\n# else:\n# break\n\nclass People:\n state = 'mankind'\n manCount = 0\n\n @classmethod\n def peopleCountUp(cls):\n cls.manCount += 1\n print(cls.manCount, cls.state)\n\n @classmethod\n def peopleCountDown(cls):\n cls.manCount -= 1\n print(cls.manCount, cls.state)\n\n @classmethod\n def peopleCount(cls):\n print(cls.manCount, cls.state, '@@')\n\n\nclass Student(People):\n school = '똥대'\n # 학생 수\n count = 0\n\n @classmethod\n def StudentCountUp(cls):\n cls.count += 1\n super().peopleCount()\n @classmethod\n def StudentCountDown(cls):\n cls.count -= 1\n super().peopleCountDown()\n\n def __init__(self, clsRoom, name):\n self.clsRoom = clsRoom\n self.name = name\n Student.StudentCountUp()\n super().peopleCountUp()\n\n def __del__(self):\n Student.StudentCountDown()\n print('@@')\n # super().state = 'mankinds'\n # super().peopleCountDown()\n # super().peopleCount()\n\nprint( Student.mro() )\ns1 = Student('파이썬반', '민정')\ns2 = Student('C반', '선민')\nprint(s1.school, s1.clsRoom, s1.name, Student.count, Student.manCount)\nprint(s2.school, s2.clsRoom, s2.name, Student.count, Student.manCount)\n# del s2\nprint('지움', s1.school, s1.clsRoom, s1.name, Student.count, Student.manCount)" }, { "alpha_fraction": 0.4283319413661957, "alphanum_fraction": 0.49036043882369995, "avg_line_length": 26.452381134033203, "blob_id": "9bc7e1bb8c1471280d519ea8e2b096ed92975225", "content_id": "ec7c967a6c0f4ad91d1167ae534435ad64d214c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 73, "num_lines": 42, "path": "/c/src/basic01/basic01/variable.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n\r\n// 기호 상수 선언 ( == const PI = 3.141592; )\r\n#define PI 3.141592\r\n//int main(void) {\r\n//\t// 변수 선언\r\n//\tint a;\r\n//\t// 값 할당(대입)\r\n//\ta = 10;\r\n//\tprintf(\"a의 값은 %d\\n\", a);\r\n//\t// 실습) 두개의 정수를 이용해 사칙연산\r\n//\tint b = 10; int c = 20;\r\n//\tprintf(\"%d * %d = %d\\n\", b, c, b*c);\r\n//\r\n//\t// 데이터 형\r\n//\tchar d = 'a';\r\n//\tprintf(\"문자는 %c\\n\", d); // 일반적인 문자\r\n//\tprintf(\"문자는 %d\\n\", d); // ASCII 코드로 반환\r\n//\t\r\n//\t// 실수형 \r\n//\tfloat e = 3.14159212141; // 4byte 보통 6자리까지\r\n//\tdouble f = 3.14159212141; // 8byte 6자리 이상 표현 가능 ( 정밀도 증가 )\r\n//\tprintf(\"e: %.15f \\n\", e); // 정밀도가 낮아 값 이상\r\n//\tprintf(\"f: %.15f \\n\", f); // 값 정확\r\n//\r\n//\t// 실습) 원 면적 구하기\r\n//\tprintf(\"반지름을 입력하시오.\");\r\n//\tdouble radius;\r\n//\tscanf_s(\"%lf\", &radius); // double을 받아서, radius 라는 변수에 대입한다!\r\n//\tconst float pi = 3.141592;\r\n//\tprintf(\"%f, %f\", radius * radius * PI, radius * radius * pi);\r\n//\r\n//\t// 두 수 입력 받기\r\n//\tint num1, num2; // 미리 선언\r\n//\tprintf(\"두 수는?\");\r\n//\tscanf_s(\"%d %d\", &num1, &num2); // 선언한 변수의 주소값을 맵핑 \r\n//\tprintf(\"%d, %d\\n\", num1, num2);\r\n//\tprintf(\"num1, num2 주소: %d %d\\n\", &num1, &num2); // 인자의 메모리 위치를 받아올 때,\r\n//\tprintf(\"num1, num2 주소: %p %p\\n\", &num1, &num2); // 인자의 메모리 위치를 받아올 때,\r\n//\tprintf(\"%d\", &e);\r\n//\treturn 0;\r\n//}" }, { "alpha_fraction": 0.4992428123950958, "alphanum_fraction": 0.5189298391342163, "avg_line_length": 21.05813980102539, "blob_id": 
"b64c7663766dc99663f7127891738deeac331276", "content_id": "3869862457b394c743717dc7cfb6cddcc6386038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2803, "license_type": "no_license", "max_line_length": 76, "num_lines": 86, "path": "/c/src/basic01/basic01/function.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n#include \"accumulate.h\"\r\n\r\n// 전역변수: 모든 함수들이 접근 가능 & 자동 초기화가 됨\r\n// heap 영역에 생성 함수가 호출되는 stack이랑 무관 -> 휘발 X\r\n// 프로그램 시작시 메모리 할당 & 프로그램 종료시 소멸\r\nint globalSum = 0;\r\n\r\n// 리턴값이 있는 함수\r\nint add(int a, int b) {\r\n\treturn a + b;\r\n}\r\n// 리턴값이 없는 함수\r\nvoid addVoid(int a, int b) {\r\n\tprintf(\"합은 %d\\n\", a + b);\r\n}\r\n\r\nint power(int x, int y);\r\nint power(int x, int y, int z);\r\n\r\ndouble divide(unsigned int a, unsigned int b) {\r\n\tif (b == 0) {\r\n\t\treturn 0;\r\n\t}\r\n\treturn a / (double)b;\r\n}\r\n\r\n// 진입점 함수 (void : 없다)\r\n//int main(void) {\r\n//\t// 리턴 제공\r\n//\tint r = add(100, 25);\r\n//\tprintf(\"%d\\n\", r);\r\n//\t// 리턴 x\r\n//\taddVoid(100, 25);\r\n//\t// 나눗셈\r\n//\tdouble r1 = divide(100, 20);\r\n//\t// 2째 자리까지 반올림\r\n//\tprintf(\"%0.2lf\\n\", r1);\r\n//\t// static 활용한 누적함수\r\n//\taccumulate(2);\r\n//\taccumulate(3);\r\n//\taccumulate(4);\r\n//\taccumulate(5);\r\n//\r\n//\t// 실습\r\n//\tint a = 10, b = 20;\r\n//\tprintf(\"a: %d, b: %d\\n\", a, b);\r\n//\tchanger(10, 20);\r\n//\tprintf(\"a: %d, b: %d\\n\", a, b);\r\n//\treturn 0;\r\n//}\r\n\r\nint changer(int a, int b) {\r\n\tprintf(\"a: %d, b: %d\\n\", a, b);\r\n\tint temp = a;\r\n\ta = b;\r\n\tb = temp;\r\n\tprintf(\"a: %d, b: %d\\n\", a, b);\r\n}\r\n// 누적 함수\r\nvoid accumulate(unsigned int a) {\r\n\tprintf(\"---------------------------------\\n\");\r\n\tint sum = 0; // 지역변수 ( 함수 호출이 끝나면, 휘발됨 )\r\n\t// stack 영역에 생성 ( 함수 하나하나 스택에 저장 ) ~ 함수( 블록 { }) 이 끝나면 자동 소멸\r\n\tsum += a;\r\n\tprintf(\"지역변수: %d\\n\", sum);\r\n\tglobalSum += a; // 전역변수를 쓰면, 함수 호출 이후에도 값이 살아있다!\r\n\tprintf(\"전역변수: %d\\n\", globalSum);\r\n\r\n\tstatic int staticSum = 0; // 정적변수 ( 지역변수처럼 함수 내에서만 활용 가능 )\r\n\t// 호출시 처음 한번만 초기화! ( 이미 존재하면 실행하지 않음)\r\n\t// 그러나 전역변수처럼 heap 영역에 생성\r\n\t// -> 전역변수를 많이 쓰면, 복잡도 상승.. --> 함수 호출이 끝나도 유지 & 함수 내 접근 변수 필요시 정적 변수를 활용하자!\r\n\tstaticSum += a;\r\n\tprintf(\"정적변수: %d\\n\", staticSum);\r\n\t\r\n\tprintf(\"---------------------------------\\n\");\r\n}\r\n\r\n\r\n// static변수: 전역변수나 지역변수 앞에 붙일 수 있고.\r\n// 선언 함수 내에서만 접근 가능 (지역변수 처럼)\r\n// 한번만 초기화하고 프로그램 종료시 소멸 (전역변수 처럼)\r\n// cf 전역변수에 static을 붙이면, 해당 파일 내에서만 활용할 수 있도록 처리\r\n\r\n// 전역변수 static변수는 컴파일러가 선언시, 자동 초기화 해줌 0 or null? 그러나 지역변수는 초기화 없다." 
}, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 29, "blob_id": "e0ba7ea79b1742515ba88a81b0c982be5c1b2fdd", "content_id": "cd39ba5794f95f66e1dde4d65eaa29a5e065dec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 60, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/c/src/basic01/basic01/accumulate.h", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "void accumulate(unsigned int a);\r\nint changer(int a, int b);" }, { "alpha_fraction": 0.45287638902664185, "alphanum_fraction": 0.5104039311408997, "avg_line_length": 19.399999618530273, "blob_id": "080c771a0fc1330dc919d089f71f0d9ba6c4dba3", "content_id": "8ab16365cf594d0f227c80ef400fabc593165a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 91, "num_lines": 40, "path": "/python/src/basic01/week1_class.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "import sys\nweek1 = 1\n\nprint('튜토리얼', week1, '주차') # 띄어쓰기 적용해야 함\nprint('튜토리얼' + str(week1) + '주차') # 띄어쓰기 안됨\n# 짧은 주석은 이렇게 쓰고..\n\n'''\n 긴 주석은 이렇게 쓰자!!\n'''\n\n\nprint('\"korea\" = \\'seoul\\'')\nprint(''' \"korea\" =\\|`[{.,/ 'seoul' ''') # ''' ''' ~ 사이에는 자유롭게 따옴표를 쓸 수 있는 듯 (이스케이프문 전부 가능)\n\na = 2\nb = 10\nprint('사칙연산', a + b, a - b, a * b, a / b)\n\nprint(a ** b)\nprint(a // b) # 나눈 몫\nprint(a % b) # 나눈 나머지\nx = 10\ny = (x ** 2) + (2 * x) + 1\nprint('x^2 + 2x + 1 방정식 --> x:', x, 'y:', y)\n\nprint('숫자 1 사이즈:', sys.getsizeof(1))\n\n# 시간 생성기\ntimeSec = 10000\nhour = timeSec // (60 * 60)\nleftTime1 = timeSec % (60 * 60)\nminute = leftTime1 // 60\nsecond = leftTime1 % 60\nprint(hour, '시간', minute, '분', second, '초')\nprint(hour, '시간', minute, '분', second, '초', sep='')\n\n# 스트링 포맷\nprint('%d은(는) 정수입니다.' % 10)\nprint('%d + %d =%d' % (10, 20, 10 + 20))\n\n" }, { "alpha_fraction": 0.6019900441169739, "alphanum_fraction": 0.606965184211731, "avg_line_length": 29.200000762939453, "blob_id": "7753800fb600cae76d93ca913e6a8753461c3b20", "content_id": "f0fc45f60e8f528142a7ee8f527a9298aaf13cca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 68, "num_lines": 20, "path": "/c/src/basic01/basic01/file.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "/*\n파일: 보조기억장치에서 문서, 소리, 그림, 동영상과 같은 자료를 모아놓은 것\n - 파일 안에는 바이트들이 순차적으로 있고, 맨끝에는 EOF 마커가 있다.\n - 위치표시자는 파일 안 맨 처음 바이트!\n - 종류:\n a. 텍스트 파일 : 텍스트가 들어있고 각줄의 끝은 \\n으로 구성\n\tb. 이진 파일 : 이진데이터가 들어있고, 줄 끝 표시가 없음 ( 사운드, 이미지, 실행 파일.. 
)\n\n스트림: 입출력 장치와 프로그램을 연결하는 통신 채널 (통로)\n 스트림을 통해, buffer (연속된 바이트들의 청크) 흐름을 받거나 보냄\n - 입출력 스트림: stdin: 표준 입력 스트림 / stdout: 표준 출력 스트림 / stderr: 표준 에러 스트림\n - 보통 파일을 읽을 때, \n 1, open(입출력 stream 세팅)\n\t2, 스트림에 설정해 놓은 포인터 변수 체크 후, ( FILE* fp; if (fs == Null) { err } )\n\t 읽거나 쓰기 func(stream 통한 버퍼 송수신)\n 3, close(입출력 stream 반환)\n\n이진파일: 이진 데이터가 직접 저장된 파일 - 인간이 보기 어려움 & 따로 후처리가 없어 성능에 좋음\n\n*/" }, { "alpha_fraction": 0.4311642348766327, "alphanum_fraction": 0.47144755721092224, "avg_line_length": 33.88888931274414, "blob_id": "ec431c2739802b6908c30d2493c6fbcf15563d3f", "content_id": "cb0cef0ed85d2e8e96b03871522259b9a4592a05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2933, "license_type": "no_license", "max_line_length": 129, "num_lines": 63, "path": "/c/src/basic01/basic01/array.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n\r\n//int main() {\r\n//\t// 배열 : 같은 타입의 다수의 데이터를 다루는데 활용되는 데이터 구조\r\n//\tint a[3]; // 정수가 들어가는(4byte) 공간 3개가 있는 배열 생성\r\n//\ta[0] = 10;\r\n//\ta[1] = 20;\r\n//\ta[2] = 30;\r\n//\tprintf(\"%d\\n\", a[0]);\r\n//\tprintf(\"%d\\n\", a[1]);\r\n//\tprintf(\"%d\\n\", a[2]);\r\n//\tprintf(\"%d\\n\", &a[0]);\r\n//\tprintf(\"%d\\n\", &a[1]);\r\n//\tprintf(\"%d\\n\", &a[2]);\r\n//\tprintf(\"%d\\n\", a); // a의 메모리 주소를 출력함\r\n//\t// 정확히는 a 배열이 시작하는 첫번째 공간의 주소! ( == printf(\"%d\\n\", &a[0]); )\r\n//\t// a는 해당 주소부터 시작해 12byte까지 활용한다.\r\n//\tprintf(\"---------------------------------\\n\");\r\n//\tdouble b[3];\r\n//\t// 사이즈 설정 (메모리 크기를 통한)\r\n//\tprintf(\"double형 사이즈: %d\\n\", sizeof(double)); // 8\r\n//\tprintf(\"b 사이즈: %d\\n\", sizeof(b)); // 24\r\n//\tint size = sizeof(b) / sizeof(double); // 3\r\n//\t// 보통은 배열 크기로 하지만, 이런식으로 메모리 위치에 대해 생각해보자!\r\n//\tfor (int i = 0; i < size; i++) {\r\n//\t\tprintf(\"%d\\n\", &b[i]);\r\n//\t\tb[i] = i + 1.1;\r\n//\t\tprintf(\"%d\\n\", &b[i]); // 메모리 공간은 생성때 픽스 된다. ( 3번 출력 모두 같음 )\r\n//\t}\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\tprintf(\"%f\\n\", b[i]);\r\n//\t\tprintf(\"%d\\n\", &b[i]);\r\n//\t}\r\n//\tprintf(\"%d\\n\", b); // double의 경우 8byte씩 증가하고 있다.\r\n//\tprintf(\"---------------------------------\\n\");\r\n//\t// 인덱스 위치를 넘어가는 설정은, 컴파일 에러가 나지 않는다. 그러나, 엄연히 컴퓨터가 정한 메모리 주소 바깥에다 설정하는 것으로. 심한 경우, 프로그램이 멈추기도 한다. 
( 해당 메모리 주소의 값이 날라가므로써 )\r\n//\t\r\n//\t// 배열의 선언 & 정의 - 초기화\r\n//\t//int c[5] = { 10, 20, 30, 40, 50 }; // 5 크기\r\n//\t//int c[5] = { 10, 20, 30 }; // 5 크기 [ 10, 20, 30, 0, 0 ]\r\n//\tint c[] = { 10, 20, 30, 40, 50 }; // 5 크기 ( 초기화에 따른 자동 세팅 )\r\n//\tfor (int i = 0; i < 6; i++) {\r\n//\t\tprintf(\"%d, %d\\n, %d\\n\", c[i], &c[i], c);\r\n//\t}\r\n//\t// 245p 문제\r\n//\tint scores[3];\r\n//\tint sizeOfScores = sizeof(scores) / sizeof(int);\r\n//\tprintf(\"%d\", sizeOfScores);\r\n//\tfor (int i = 0; i < sizeOfScores; i++) {\r\n//\t\tprintf(\"게임 %d에서 선수의 득점은?\", i + 1);\r\n//\t\tscanf_s(\"%d\", &scores[i]);\r\n//\t}\r\n//\tfloat sum = 0;\r\n//\tprintf(\"array memory address: %d\\n\", &scores);\r\n//\tfor (int i = 0; i < sizeOfScores; i++) {\r\n//\t\tsum += scores[i];\r\n//\t\tprintf(\"element memory address1: %d\\n\", &scores[i]); // 배열의 메모리 가져오는 방법 1\r\n//\t\tprintf(\"element memory address2: %d\\n\", scores + i); // 배열의 메모리 가져오는 방법 2 ( 배열의 형을 인식 (int)해서 i만큼 넘어감 --> 1번i = 4, 2번i = 8...\r\n//\t}\r\n//\tprintf(\"Total array size: %d\\n\", sizeof(scores));\r\n//\tprintf(\"평균 득점은 %f입니다.\", sum / sizeOfScores);\r\n//\treturn 0;\r\n//}" }, { "alpha_fraction": 0.46631762385368347, "alphanum_fraction": 0.49441537261009216, "avg_line_length": 30.75428581237793, "blob_id": "b66262d2d2b40f2ec01efd7d81a4463988b25383", "content_id": "39162ab5eb6839b806e5606ae7d278c16fbc74dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8226, "license_type": "no_license", "max_line_length": 134, "num_lines": 175, "path": "/c/src/basic01/basic01/pointer.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "// 포인터는 메모리의 주소 값을 가리키는 것\r\n// 포인터를 통해 메모리 접근 가능\r\n\r\n// 포인터를 활용한 Call By Reference\r\nvoid callByRef(int *pa, int *pb) { // 인자는 포인터값!\r\n\t*pa += 1;\r\n\t*pb += 1;\r\n}\r\n\r\n//void main() {\r\n//\tint num = 10;\r\n//\tchar charec = 'a';\r\n//\tdouble doub = 3.14;\r\n//\t// 포인터 변수 : 메모리의 주소 값을 저장하고 있는 변수\r\n//\t// 주소를 저장하는 포인터변수는 4byte의 크기를 가짐.\r\n//\t/*\r\n//\t** 앞에 자료형을 붙여야, 포인터가 메모리 주소에 접근해서 얼마만큼의 주소까지 읽어와야하는건지 알 수 있음!\r\n//\t** -> 저장된 건 float이지만, int *p로 접근하면, int 메모리만큼 잘라서 읽어옴!\r\n//\t*/ \r\n//\tint *pnum = &num; // 자료형 *변수명 ~ & : 주소연산자\r\n//\tchar *pchar = &charec; \r\n//\tdouble *pdoub = &doub;\r\n//\t// 각 데이터의 포인터\r\n//\tprintf(\"int %d: %d, %p // %d, %p\\n\", num, pnum, num, &num, &num); // %p는 주어진 값을 16진수로 나타낼 뿐임. 
-> 실제 쓰이는 목표는 메모리 주소를 16진수로 표현하는데 쓰임!\r\n//\tprintf(\"char %c: %d, %p\\n\", charec, pchar, charec);\r\n//\tprintf(\"double %.2lf: %d, %p\\n\", doub, pdoub, doub);\r\n//\r\n//\t// 포인터 변수(주소)를 통해, 내장된 데이터를 도출\r\n//\tprintf(\"int %d: %d\\n\", num, *pnum); // *포인터 변수 ~ * : 간접 참조 연산자\r\n//\tprintf(\"char %c: %c\\n\", charec, *pchar);\r\n//\tprintf(\"double %.2lf: %lf\\n\", doub, *pdoub);\r\n//\t*pnum = 20; // *포인터 변수를 활용해, 해당 포인터에 위치한 데이터를 바꿈\r\n//\t*pchar = 'b';\r\n//\t*pdoub = 6.28;\r\n//\tprintf(\"int %d: %d\\n\", num, *pnum);\r\n//\tprintf(\"char %c: %c\\n\", charec, *pchar);\r\n//\tprintf(\"double %.2lf: %lf\\n\", doub, *pdoub);\r\n//\r\n//\t// 포인터변수의 주소 변경\r\n//\tint a = 10, b = 20;\r\n//\tint* p = &a;\r\n//\tprintf(\"%d\\n\", *p);\r\n//\tp = &b; // 형이 같기 때문에, 문제없이 데이터를 가져온다!\r\n//\tprintf(\"%d\\n\", *p);\r\n//\r\n//\t// 실습\r\n//\tint q = 10, w = 20, e = 30, r = 100;\r\n//\tint *pq = &q, *pw = &w, *pe = &e, *pr = &r;\r\n//\t*pq += *pr;\r\n//\t*pw += *pr;\r\n//\t*pe += *pr;\r\n//\tprintf(\"%d, %d, %d // %d, %d, %d\\n\", q, w, e, *pq, *pw, *pe);\r\n//\t// 배열로 처리하기\r\n//\tint qwe[3] = { 10, 20, 30 };\r\n//\tint *pqwe = qwe; // & 안붙여도 배열은 바로 메모리주소를 가짐! ( 배열의 0번째 메모리 주소를 갖겠지!! )\r\n//\tprintf(\"%d %d %d %d\\n\", pqwe, pqwe++, pqwe--, *pqwe);\r\n//\t// ㄴ 이게 바로 포인터 연산1\r\n//\tfor (int i = 0; i < sizeof(qwe) / sizeof(qwe[0]); i++) {\r\n//\t\t// 배열의 첫번째 인덱스 메모리 주소 , 메모리주소가 데이터형을 기본유닛으로 더해짐 (int - 4) & 해당 메모리주소에 있는 (배열 각 인덱스의) 데이터\r\n//\t\tprintf(\"%d, %d\\n\", *(pqwe + i), pqwe + i);\r\n//\t\t// ㄴ 이게 바로 포인터 연산2\r\n//\t}\r\n//\r\n//\t// 포인터의 이동\r\n//\tint arr[] = { 10, 20, 30 };\r\n//\tint* parr = arr;\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\tprintf(\"%d, %d\\n\", parr, *parr);\r\n//\t\tparr++;\r\n//\t}\r\n//\t\r\n//\t// 실습\r\n//\tdouble arr2[] = { 1.1, 2.1, 3.1 }; // 상수 ( 변경 불가 )\r\n//\tdouble *parr2 = arr2; // 배열 1번째 주소를 갖는 변수 ( 변경 가능 )\r\n//\tfor (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) {\r\n//\t\tprintf(\"%d, %.1f\\n\", parr, *parr2);\r\n//\t\tparr2++;\r\n//\t}\r\n//\r\n//\t// 포인터 주소로 값을 바꾸는 걸 활용해 call by reference를 이뤄낼 수 있음!\r\n//\tprintf(\"callByRef: %d, %d\\n\", a, b);\r\n//\tcallByRef( &a, &b ); // 함수 인자로 메모리 주소를 보내주고! 
함수 내에서는 메모리주소를 가지고, 아예 해당 주소의 데이터 값을 바꿔버림!\r\n//\tprintf(\"callByRef: %d, %d\\n\", a, b);\r\n//\r\n//\t// 문자열의 포인터\r\n//\tchar arr3[] = \"happy\";\r\n//\tchar *parr3 = arr3;\r\n//\tfor (int i = 0; i < sizeof(arr3) / sizeof(arr3[0]); i++) {\r\n//\t\tprintf(\"%c\", *(parr3 + i)); // 이러면 '\\0'도 찍힘\r\n//\t}\r\n//\tprintf(\" == %s\\n\", arr3); // for 돌면서 '\\0' 전까지 출력!\r\n//\t// 상수 메모리 주소를 갖는 arr3[]은 바로 변경 못하지만, 메모리 주소를 복사해온 변수 parr3은,\r\n//\tparr3 = \"sad\";\r\n//\tprintf(\"parr3: %s\\n\", parr3); // 이렇게 arr 변경이 가능함!\r\n//\r\n//\t// 포인터의 배열 - 주소 값이 저장 가능한 배열 ex) int * 포인터명[3] = { 주소1,주소2,주소3 };\r\n//\t// 정수형 포인터를 저장하는 예 \r\n//\tint num1 = 10, num2 = 20, num3 = 30;\r\n//\tint * pNumArr[3] = { &num1, &num2, &num3 };\r\n//\t// 문자열을 포인터를 저장하는 예 \r\n//\tchar * pStrArr[3] = { \"happy\", \"sad\", \"angry\" };\r\n//\r\n//\t// 문자열 배열1\r\n//\tchar arr4[3][5] = {\r\n//\t\t\"hong\", // 각 원소 사이즈 5 이하의 문자 배열\r\n//\t\t\"kim\",\r\n//\t\t\"park\"\r\n//\t};\r\n//\tchar(*parr4)[5] = arr4; // 포인터 단위 선언 -> parr4 = char[5] ( char 5개로 이루어진 데이터형 )를 데이터 형으로 갖는 포인터\r\n//\tfor (int i = 0; i < sizeof(arr4) / sizeof(parr4); i++) {\r\n//\t\tprintf(\"%s\\n\", *(parr4 + i));\r\n//\t}\r\n//\t// 문자열배열2\r\n//\tchar *parr44[3] = { \"hong\", \"kim\", \"park\" }; // char 형 포인터 3개가 들어 있는 배열 (읽기 전용!)\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\tprintf(\"%s\\n\", *(parr44 + i));\r\n//\t}\r\n//\r\n//\t// 실습\r\n//\tchar colors[3][20];\r\n//\t// 입력 받기\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\tprintf(\"input your color ( %d / 3 ) : \", i + 1);\r\n//\t\t// colors의 각 배열 행에 string[]을 넣어줌!\r\n//\t\t//scanf_s(\"%s\", colors[i]);\r\n//\t\tgets(colors[i]); // 각 문자열배열의 주소에 넣어줌\r\n//\t}\r\n//\tprintf(\"Thanks! check your colors1 : \");\r\n//\t// 출력 하기 1\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\tprintf(\" %s \", &colors[i]);\r\n//\t\t//puts(colors[i]);\r\n//\t}\r\n//\t// 출력 하기 2\r\n//\tprintf(\"\\nThanks! check your colors2 : \");\r\n//\tchar(*pColor)[20] = colors;\r\n//\tfor (int i = 0; i < 3; i++) {\r\n//\t\t//printf(\" %s \", *(pColor + i)); // 포인터의 이동 ( char[20], 즉 1당 20byte씩 이동 )\r\n//\t\t//puts(colors[i]);\r\n//\t\t// 또는\r\n//\t\tputs(pColor++); // 출력하고, 포인터 한단계 이동\r\n//\t}\r\n//}\r\n\r\n/* \r\n** 포인터 주의 사항\r\n 1, 포인터 초기화를 안전하게 하자! ( 바로 변수 메모리 주소 || NULL (0) )\r\n 포인터를 초기화 시키지 않고, 바로 값을 넣으면 ( int *p; *p = 20; )\r\n 포인터는 랜덤 메모리 주소를 가짐 -> 중요 데이터를 갈아끼워버릴 수 있음.. -> 매우 위험;;\r\n\r\n 2, 포인터의 자료형은 데이터의 자료형과 일치시켜야 함!\r\n -> 해당 데이터를 잘라 가져오거나, 이웃 데이터를 포함해 긁어옴 || 이웃데이터에 수정/손상을 줄 수 있다!!\r\n\r\n** 배열과 포인터의 관계\r\n a[0] == *a, a[1] == *(a+1) ... a[n] == *(a+n) \r\n\r\n** 함수 포인터\r\n 함수가 시작되는 주소를 가리킴\r\n 포인터로 함수를 호출하려면, 반환형 & 함수 포인터라는 표시 (*pf) & 매개변수, 이렇게 3개를 알려주어야 함\r\n ex) int (*pf) (int x, int y);\r\n\r\n** 문자열 포인터\r\n char s[] = \"HelloWorld\"; ( 배열로 문자열 초기화해 저장 )\r\n char *ps = \"HelloWorld\"; ( 포인터를 정의하고 문자열의 주소로 포인터를 초기화 )\r\n -> 둘다 출력은 같음. 그러나,\r\n 1, 전자는 문자열 변경 불가능. 후자의 경우, ps = \"ByeWorld\"; 가능!\r\n cf. 데이터 세그먼트 (변수를 위한 메모리) & 텍스트 세그먼트 ( 상수, 코드 등 불변하는 값을 위한 메모리 ~ 읽기 전용 )\r\n ~ 전자(s[], HelloWorld) & 후자(HelloWorld)는 텍스트 세그먼트에 포함되는 데이터 (변경 불가)\r\n ~ 후자(*ps)는 데이터 세그먼트에 포함되는 데이터 (변경 가능) --> ps가 다른 문자열을 참조할 수는 있지만, 해당 문자열 원소 수정은 불가능 ( HelloWorld는 텍스트 세그먼트 )\r\n\r\n** 하나의 포인터 변수 (int* pnum)은 여러 주소를 필요에 따라 가리킬 수 있음\r\n** 배열도 똑같이 포인터를 가리키지만, 상수임.. (못 바꿈) -> 포인터 변수를 하나 만들어서 다룸! int* parr = arr; ( 포인터 이동 및 연산 등이 가능! 
)\r\n** 문자열의 포인터 char str[] \"happy\" ~ 상수형 포인터 // char* pstr = \"happy\" ~ 읽기 전용 메모리에 올린 포인터 변수\r\n\r\n*/" }, { "alpha_fraction": 0.4915158450603485, "alphanum_fraction": 0.5277149081230164, "avg_line_length": 20.69230842590332, "blob_id": "279ca95b7a13f7b0f1b497c17428fee4f29b27d4", "content_id": "dd0ed59b7a34771b65e95c9a0f61475a8d7c7e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 77, "num_lines": 78, "path": "/c/src/basic01/basic01/string.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "#include <stdio.h>\r\n#include <string.h>\r\n\r\nvoid characterOutput() {\r\n\t// 문자 인풋 출력\r\n\tchar input;\r\n\tprintf(\"문자 하나 입력!\\n\");\r\n\tscanf_s(\"%c\", &input);\r\n\tprintf(\"문자1: %c\\n\", input);\r\n\r\n\tchar enter = getchar();\r\n\r\n\t// 문자 전용 함수\r\n\tchar input2 = getchar();\r\n\tputchar(input2);\r\n}\r\n\r\nvoid string101() {\r\n\t// 문자열 저장\r\n\tchar s[] = \"hello\";\r\n\tprintf(\"%s\\n\", s);\r\n\tchar s1[6];\r\n\tint sizeOfS1 = sizeof(s1) / sizeof(char);\r\n\t//s1 = \"hello\"; ~ 불가: s1은 주소를 저장하고 있는 상수임\r\n\tfor (int i = 0; i < sizeOfS1 - 1; i++) {\r\n\t\t//s1[i] = s[i];\r\n\t\ts1[i] = 'a';\r\n\t\tprintf(\"%d, %c\\n\", i, s1[i]);\r\n\t}\r\n\ts1[sizeOfS1 - 1] = '\\0'; // 문자열의 끝\r\n\tprintf(\"%s\\n\", s1);\r\n\r\n\t//int s3[3] = { 1, 2, 3 };\r\n\t//s3[] = { 3, 4, 5 }; // 배열의 재선언 불가능!\r\n\t//printf(\"%s\\n\", s1);\r\n}\r\n\r\nvoid stringOutput() {\r\n\t// 사용자에게 문자열 입력받기\r\n\tchar name[20]; // 19byte 까지 입력 가능\r\n\tscanf_s(\"%s\", name, sizeof(name)); // 배열의 이름은 주소\r\n\t// 띄어쓰기 불가 ( scanf의 구분값으로 인식됨 )\r\n\tprintf(\"이름은 %s\\n\", name);\r\n\t\r\n\tchar enter = getchar();\r\n\r\n\t// 문자열 전용 함수\r\n\tchar name1[20];\r\n\tgets(name1); // 공백 입력 가능 ( scanf는 공백 입력 불가능했다! )\r\n\tputs(name1);\r\n\r\n\t// 문자열 복사\r\n\tchar source[] = \"hello\";\r\n\tchar dest[20];\r\n\tint sourceSize = sizeof(source) / sizeof(source[0]);\r\n\t// d = s; ~ 이미 만들어진 문자열에 강제로 다른 메모리 할당 불가능! ( 포인터 활용해야 함! 
)\r\n\t// 복사 라이브러리 활용\r\n\t// params = ( 복사내용을 받을 대상, 복사해오는 대상 사이즈 + 1, 복사해오는 대상)\r\n\tstrcpy_s(dest, sourceSize + 1, source); // source 사이즈 + '/0'\r\n\tprintf(\"%s\", dest);\r\n}\r\nvoid practice() {\r\n\t// call by reference ~ 실제 글자는 17자\r\n\tchar s[20]; // 넉넉하게~\r\n\tgets(s);\r\n\tputs(s);\r\n\tfor (int i = 0; i < 20; i++) {\r\n\t\tprintf(\"%d, %c\\n\", i, s[i]); // 18째는 지금 '\\0'을 가르키고 있다!\r\n\t}\r\n\tprintf(\"str length: %d\", strlen(s)); // 20 칸으로 잡았지만, gets가 자동 사이즈 조절까지 해주나 봄\r\n}\r\n//int main() {\r\n//\tcharacterOutput();\r\n//\tstring101();\r\n//\tstringOutput();\r\n//\tpractice();\r\n//\treturn 0;\r\n//}" }, { "alpha_fraction": 0.5297757387161255, "alphanum_fraction": 0.5576179623603821, "avg_line_length": 23.39622688293457, "blob_id": "89a8e14f730792aaafcf0ccbe71a09c52d516655", "content_id": "bfd351464afe313e95767ed7d80f6c2f38991948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1543, "license_type": "no_license", "max_line_length": 92, "num_lines": 53, "path": "/python/src/basic01/week3_class.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "# 내장함수\n# print(123, 456, sep='/', end='\\n\\n')\n# print('hello')\n# help(print)\n\n# def ope(a, b):\n# a /= 10\n# return a * b\n# q = 10\n# w = 5\n# print( ope(q, w), q, w )\n\n# def gugudan(dan):\n# for x in range(1, 10):\n# print(x * dan)\n# gugudan(3)\n\n# 텍스트 파일 입출력\n# help(open)\n\n# 파일 읽기 f.read() / f.readlines() / f.readline()\n# file = open('./week4_homework.txt', mode='rt', encoding='utf8')\n# # full_content = file.read()\n# # print(full_content)\n# part_content = file.readlines()\n# for x in part_content:\n# # print(x)\n# print(x[:-1]) # 파일 라인 읽기는 자체적으로 \\n을 내포함. 그래서 각 줄마다 마지막 글자를 빼버려서, enter 처리를 없애버린 것!\n# file.close()\n\n# 파일 쓰기 f.write()\n# file2 = open('./week4_homework.txt', mode='w', encoding='utf8') # 모드 w면 새파일로 아예 기입해 넣는다!\n# file2.write('write test\\n')\n# file2.close()\n\n# 파일 읽기 + 쓰기 (r+, w+)\n# file3 = open('./week4_homework.txt', mode='r+', encoding='utf8') # 파일이 없으면 에러 ( 읽기 중심 )\n# file4 = open('./week4_homework.txt', mode='w+', encoding='utf8') # 파일이 없어도 괜찮 ( 쓰기 중심 )\n\n# 실습 메뉴 리스트 텍스트 파일 만들기\nmenu = ''\nnum = 1\nf = open('./menu.txt', 'w', encoding='utf8')\nwhile menu != 'q':\n menu = input('판매메뉴?')\n f.write(f'{num} {menu} \\n')\n num += 1\nf.close()\n\nf1 = open('./menu.txt', 'r', encoding='utf8')\nfor x in f1.readlines():\n print(x, 'gg')\nf1.close()\n" }, { "alpha_fraction": 0.5402644276618958, "alphanum_fraction": 0.5727163553237915, "avg_line_length": 32.290000915527344, "blob_id": "ccd82bf1b9d13e83cfd472c79c6ab7ac0c192969", "content_id": "9683ab841681473b0c76a3952a0bc8ad09a3c540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4448, "license_type": "no_license", "max_line_length": 124, "num_lines": 100, "path": "/c/src/basic01/basic01/struct.c", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UHC", "text": "//#include <stdio.h>\n//#include <stdbool.h>\n//\n//// 구조체: 하나 이상의 변수를 묶어서 만든 새로운 자료형\n//struct vector {\n//\tdouble x;\n//\tdouble y;\n//\tbool isProcessed;\n//};\n//struct vector getVectorSum(struct vector* a, struct vector* b); // 구조체의 메모리주소를 받는다!\n//\n//int main( void ) {\n//\t\n//\tstruct student {\n//\t\tchar name[20];\n//\t\tint eng;\n//\t\tint math;\n//\t\tint phys;\n//\t};\n//\tstruct student studentA = { \"abc\", 1, 2, 3 };\n//\tstruct student* pStudentA;\n//\tpStudentA = &studentA;\n//\t// 구조체 멤버에 접근하는 방법\n//\t// 1, studentA.name\n//\t// 
2, (*pStudentA).name\n//\t// 3, pStudentA->name ( 간접 멤버 연산자 : -> )\n//\tprintf(\"%s, %s, %s\\n\", studentA.name, (*pStudentA).name, pStudentA->name);\n//\t// 참고 (구조체 주소)\n//\tprintf(\"%d, %d, %d\\n\", &studentA, pStudentA, &studentA.eng); // 배열과 마찬가지로, 구조체의 메모리 주소는 첫번째 멤버변수의 메모리주소\n//\t// ( 다음 멤버변수 찍으면, 메모리 주소 20 차이 남 )\n//\t// student 구조체 안에서 name 앞에 멤버 변수 추가하면, 구조체 메모리주소와 2번째 멤버변수의 주소 차이가 또 달라짐!! ( 순서가 정말 중요하다!! )\n//\n//\t// 구조체로 이루어진 배열\n//\tstruct student studentList[] = {\n//\t\t{ \"홍길동\", 82, 72, 58 },\n//\t\t{ \"강감찬\", 97, 82, 39 },\n//\t\t{ \"이순신\", 42, 62, 29 },\n//\t\t{ \"장보고\", 52, 12, 0 },\n//\t};\n//\n//\tfor (int i = 0; i < sizeof(studentList) / sizeof(studentList[0]); i++) {\n//\t\tprintf(\"%s: 영어=%3d 수학=%3d 물리=%3d\\n\", studentList[i].name, studentList[i].eng, studentList[i].math, studentList[i].phys);\n//\t}\n//\n//\t// 구조체와 함수\n//\t// 구조체 또한 call by value로 활용됨 -> call by reference를 위해서는 포인터를 활용해야 한다!\n//\tstruct vector vector1 = { 2.0, 3.0, false };\n//\tstruct vector vector2 = { 5.0, 6.0, false };\n//\tstruct vector vectorSum = getVectorSum( &vector1, &vector2 ); // 구조체의 주소를 넘겨준다\n//\tprintf(\"vector1: %.2f, %.2f, %d\\n\", vector1.x, vector1.y, vector1.isProcessed);\n//\tprintf(\"vector2: %.2f, %.2f, %d\\n\", vector2.x, vector2.y, vector2.isProcessed);\n//\tprintf(\"vectorSum: %.2f, %.2f, %d\\n\", vectorSum.x, vectorSum.y, vectorSum.isProcessed);\n//\n//\t// 공용체: 하나의 메모리 공간을 여러 개의 멤버 변수들이 공유할 수 있게 하는 기능 ~ 메모리 공간을 한정시켜 놔서, 보수적인 프로그래밍 가능\n//\t// 공용체의 사이즈는 가장 큰 멤버변수의 메모리 사이즈와 똑같아짐 -> 하나의 멤버변수 사이즈가 너무 크면, 다른 멤버 변수 데이터가 깨진다!\n//\tunion example { // 공용체 사이즈는 가장 큰 멤버변수 메모리 사이즈인 4 (int)\n//\t\tchar a;\n//\t\tshort b;\n//\t\tint c;\n//\t};\n//\tunion example v;\n//\tv.a = 'A';\n//\tprintf(\"v.a:%c v.b:%d v.c:%d\\n\", v.a, v.b, v.c);\n//\tv.c = 10000;\n//\tprintf(\"v.a:%c v.b:%d v.c:%d\\n\", v.a, v.b, v.c); // v.a 의 멤버값 파괴됨.. (사이즈 초과)\n//\t// 공용체의 또다른 용도\n//\t// 위에서 왜 b, c가 10000으로 나왔을까? -> 공용체는 각 멤버변수로 지렛대로 할당된 메모리 공간을 보는것과 같다!\n//\t// 즉 멤버변수를 다 다르게 설정하고. 공용체에 하나의 데이터를 넣으면, 그 데이터를 각 데이터 타입에 맞게 출력할 수 있다는 것이다!\n//\tunion ip_address {\n//\t\tunsigned long num;\n//\t\tunsigned char ch[4];\n//\t};\n//\tunion ip_address addr;\n//\taddr.ch[0] = 0x1;\n//\taddr.ch[1] = 0x0;\n//\taddr.ch[2] = 0x0;\n//\taddr.ch[3] = 0x7f;\n//\tprintf(\"%x\\n\", addr.num);\n//\n//\t// 열거형: 변수가 가질 수 있는 값을 열거해 둠 ( 변수는 열거형에 정의된 값만을 가짐! 
)\n//\tenum days { SUN = 0, MON, TUE = 10, WED = 11, THU =12, FRI = 13, SAT =14 }; // MON = 1 (자동 ++)\n//\n//\t// typedef: 새로운 자료형을 정의하는 툴\n//\ttypedef struct point { int x; int y; } POINT; // 보통 이렇게, 구조체를 하나의 데이터 타입으로 alias 하는데 자주 쓰임!\n//\tPOINT point1 = { 7, 9 };\n//\tprintf(\"point1: %d, %d\\n\", point1.x, point1.y);\n//\n//\treturn 0;\n//}\n//\n//struct vector getVectorSum(struct vector* a, struct vector* b) {\n//\ta->isProcessed = true; \n//\t(*b).isProcessed = true; // 이렇게 call by reference 가능!!\n//\tstruct vector result = {\n//\t\t(*a).x + (*b).x,\n//\t\ta->y + b->y,\n//\t\tfalse,\n//\t};\n//\treturn result; // 이렇게 구조체 리턴도 가능!!\n//};" }, { "alpha_fraction": 0.44361412525177, "alphanum_fraction": 0.48260870575904846, "avg_line_length": 22.2208194732666, "blob_id": "a0a3c44c5e77c412f05440c8b9553ecfe5484d27", "content_id": "bb16aff4eba5fecfee03bccf86a3b3a5e62155e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7552, "license_type": "no_license", "max_line_length": 86, "num_lines": 317, "path": "/python/src/basic01/week2_book.py", "repo_name": "Jsunmin/2020_C-Python_101", "src_encoding": "UTF-8", "text": "import turtle as t\nimport random\nimport time\n\n# 1부터 n까지 합 / 곱 구하는 함수\ndef sum_func(n, operator):\n sum = 1\n if operator == 'multiple':\n for x in range(2, n + 1):\n sum += x\n elif operator == 'plus':\n for x in range(2, n + 1):\n sum *= x\n return sum\n\n\n# print(sum_func(10, 'plus'))\n# print(sum_func(10, 'multiple'))\n\n# 다각형 그리기\ndef polygon(n, length):\n for x in range(0, n):\n t.fd(length)\n print(t.pos())\n t.lt(360 / n)\n\n\n# polygon(10, 50)\n\n# 시공의 폭풍 그리기\ndef make_outer_beauty():\n t.bgcolor('black')\n t.speed(7)\n for x in range(200):\n if x % 5 == 0:\n t.color('red')\n elif x % 5 == 1:\n t.color('yellow')\n elif x % 5 == 2:\n t.color('blue')\n elif x % 5 == 3:\n t.color('green')\n elif x % 5 == 4:\n t.color('white')\n t.forward(x * 2)\n t.left(70)\n\n\n# make_outer_beauty()\n\n# 자유로운 터틀 립\n# 깔끔하게 수납하고 싶어??! obj에 함수넣기? 그게 바로 class지!! ~ 그치만 미래의 너를 위해 남겨놓을게 ^^\ndef t_free_mode(mode):\n t.shape('turtle')\n t.pensize(2)\n t.speed(6)\n if mode == 'keyboard':\n distance = 30\n\n def t_r():\n t.setheading(0)\n t.forward(distance)\n\n def t_l():\n t.setheading(180)\n t.forward(distance)\n\n def t_u():\n t.setheading(90)\n t.forward(distance)\n\n def t_d():\n t.setheading(270)\n t.forward(distance)\n\n def t_blank():\n t.clear()\n\n t.onkeypress(t_r, 'Right')\n t.onkeypress(t_l, 'Left')\n t.onkeypress(t_u, 'Up')\n t.onkeypress(t_d, 'Down')\n t.onkeypress(t_blank, 'Escape')\n elif mode == 'mouse':\n t.hideturtle()\n t.onscreenclick(t.goto)\n t.listen()\n t.mainloop()\n\n\n# t_free_mode('keyboard')\n\n# calculator game\ndef calculator_game():\n oper = input('원하는 연산자는? (+, -, *, /)')\n ans_count = 0\n err_count = 0\n while ans_count + err_count < 5:\n num1 = random.randint(1, 100)\n num2 = random.randint(1, 100)\n trial = input(f'{num1} {oper} {num2} = ?')\n if int(trial) == eval(f'{num1} {oper} {num2}'):\n ans_count += 1\n print('구우욷')\n else:\n err_count += 1\n print(f'한번 또 한번.. [{err_count}]')\n print(f'결과: 정답횟수= {ans_count} / 틀린횟수= {err_count}')\n\n\n# calculator_game()\n\n# typing game\ndef typing_game():\n words = ['cat', 'dog', 'fox', 'monkey', 'mouse', 'panda', 'frog', 'snake', 'wolf']\n pass_count = 0\n word = random.choice(words)\n start_time = time.time()\n while pass_count < 5:\n if word == input(f'{word} '):\n pass_count += 1\n word = random.choice(words)\n else:\n print('wrong... 
try again!')\n end_time = time.time()\n sec = round( end_time - start_time, 2 )\n print(f'finish!! time: { sec }sec')\n# typing_game()\n\n# turtle cannon\ndef turtle_cannon( power ):\n def turn_up():\n t.left(4)\n def turn_down():\n t.right(4)\n def fire():\n ang = t.heading()\n while t.ycor() > 0:\n t.forward(power)\n t.right(5)\n d = t.distance(target, 0)\n t.sety(random.randint(10, 100))\n if d < 25:\n t.color('blue')\n t.write('Good!', False, 'center', ('', 15))\n else:\n t.color('red')\n t.write('Bad!', False, 'center', ('', 15))\n t.color('black')\n t.goto(-300, 10)\n t.setheading(ang)\n t.goto(-400, 0)\n t.down()\n t.goto(400, 0)\n target = random.randint(100, 350)\n t.pensize(3)\n t.color('green')\n t.up()\n t.goto(target - 25, 2)\n t.down()\n t.goto(target + 25, 2)\n\n t.color('black')\n t.up()\n t.goto(-300, 10)\n t.setheading(25)\n\n t.onkeypress(turn_up, 'Up')\n\n t.onkeypress(turn_down, 'Down')\n t.onkeypress(fire, 'space')\n t.listen()\n t.mainloop()\n# turtle_cannon( 25 )\n\n# turtle run\ndef turtle_run1():\n # 세팅\n te = t.Turtle()\n ts = t.Turtle()\n def setup():\n te.shape('turtle')\n te.color('red')\n te.speed(0)\n te.up()\n te.goto(0, 200)\n ts.shape('circle')\n ts.color('green')\n ts.speed(0)\n ts.up()\n ts.goto(0, -200)\n t.goto(0, 0)\n setup()\n def t_r():\n t.setheading(0)\n def t_l():\n t.setheading(180)\n def t_u():\n t.setheading(90)\n def t_d():\n t.setheading(270)\n def play():\n t.forward(10)\n ang = te.towards( t.pos() )\n te.setheading(ang)\n te.forward(10)\n if t.distance(ts) < 12:\n star_x = random.randint(-230, 230)\n star_y = random.randint(-230, 230)\n star_x1 = random.randint(-230, 230)\n star_y2 = random.randint(-230, 230)\n ts.goto(star_x, star_y)\n te.goto(star_x1, star_y2)\n\n if t.distance(te) >= 12:\n t.ontimer(play, 100)\n\n t.setup(500, 500)\n t.bgcolor('orange')\n t.shape('turtle')\n t.speed(0)\n t.up()\n t.color('white')\n t.onkeypress(t_r, 'Right')\n t.onkeypress(t_l, 'Left')\n t.onkeypress(t_u, 'Up')\n t.onkeypress(t_d, 'Down')\n t.onkeypress(setup, 'Escape')\n t.listen()\n play()\n t.mainloop()\n# turtle_run1()\nplaying = False\nscore = 0\ndef turtle_run2():\n # 세팅\n te = t.Turtle()\n te.shape('turtle')\n te.color('red')\n te.speed(0)\n te.up()\n te.goto(0, 200)\n ts = t.Turtle()\n ts.shape('circle')\n ts.color('green')\n ts.speed(0)\n ts.up()\n ts.goto(0, -200)\n\n def start():\n global playing\n if playing == False:\n playing = True\n t.clear()\n play()\n def play():\n global score, playing\n t.forward(10)\n ang = te.towards(t.pos())\n te.setheading(ang)\n te.forward(10)\n if random.randint(1,5) ==3:\n ang - te.towards(t.pos())\n te.setheading(ang)\n speed = score + 4\n if speed > 15:\n speed = 15\n te.forward(speed)\n if t.distance(te) < 12:\n text = f'Score : {score}'\n message('Game Over', text)\n playing = False\n score = 0\n\n if t.distance(ts) < 12:\n star_x = random.randint(-230, 230)\n star_y = random.randint(-230, 230)\n star_x1 = random.randint(-230, 230)\n star_y2 = random.randint(-230, 230)\n ts.goto(star_x, star_y)\n te.goto(star_x1, star_y2)\n score += 1\n t.write(score)\n if t.distance(te) >= 12:\n t.ontimer(play, 100)\n def message( m1, m2 ):\n t.clear()\n t.goto(0, 100)\n t.write(m1, False, 'center', ('', 20))\n t.goto(0, -100)\n t.write(m2, False, 'center', ('', 20))\n t.home()\n def t_r():\n t.setheading(0)\n def t_l():\n t.setheading(180)\n def t_u():\n t.setheading(90)\n def t_d():\n t.setheading(270)\n t.title('Turtle Run')\n t.setup(500, 500)\n t.bgcolor('orange')\n t.shape('turtle')\n t.speed(0)\n t.up()\n 
t.color('white')\n t.onkeypress(t_r, 'Right')\n t.onkeypress(t_l, 'Left')\n t.onkeypress(t_u, 'Up')\n t.onkeypress(t_d, 'Down')\n t.onkeypress(start, 'space')\n message('Turtle Run', '[Space]')\n t.listen()\n t.mainloop()\n\nturtle_run2()" } ]
20
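The tutorial record above makes one point repeatedly in function.c and pointer.c: changer(int a, int b) swaps only its local copies, while callByRef(int *pa, int *pb) mutates the caller's variables through pointers. A minimal Python analogue of that call-by-value lesson, assuming nothing beyond the standard library (both function names below are illustrative and appear in neither repo):

    def swap_by_value(a, b):
        # Rebinding parameters only changes local names; the caller's
        # variables are untouched, just like changer() in function.c.
        a, b = b, a

    def swap_in_place(pair):
        # Mutating a shared container is the closest Python analogue to
        # the pointer-based callByRef() in pointer.c.
        pair[0], pair[1] = pair[1], pair[0]

    a, b = 10, 20
    swap_by_value(a, b)
    print(a, b)          # 10 20 -- unchanged
    pair = [10, 20]
    swap_in_place(pair)
    print(pair)          # [20, 10] -- the caller sees the swap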
spacemanidol/CLMS473
https://github.com/spacemanidol/CLMS473
2b0252e846e3ca68338898e4aa52671d4f7d20ae
67ff650c905f7a8799fc3c97bd4cfbeea5801995
79f3470dcbe5e69b0b716223fa74177219bf56dd
refs/heads/master
2020-12-07T06:09:46.673469
2020-01-08T22:55:30
2020-01-08T22:55:30
232,655,104
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.717825174331665, "alphanum_fraction": 0.7904335856437683, "avg_line_length": 82.02857208251953, "blob_id": "271f2ee379a9cef09cd7205ef1f7deb13a4c2976", "content_id": "13d23b1263f595edd67c604eb337b5730cad1da9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2906, "license_type": "permissive", "max_line_length": 989, "num_lines": 35, "path": "/Projects/project5/readme.txt", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "Ling 473 Project 2 Unigrams Counting\nDaniel Campos 08/14/2018\n\n## Results table\ntgl\t Kasalukuyang nakararanas ng pag-unlad ang bansa sa mga remittances na ipinapadala pauwi ng mga OFW. Isa sa mga pinakaumuunlad na sektor ang teknolohiyang pang-impormasyon .\nswh\t -52.8260565482\nfra\t -49.8171184097\neng\t -57.9833349681\ndut\t -50.2549063072\ndan\t -54.6937460156\npor\t -48.2280049678\ndeu\t -57.9833349681\nnob\t -57.9833349681\nita\t -51.4756900907\nswe\t -55.0488365169\npol\t -48.0569686265\ngla\t -50.0597336897\nspa\t -57.9833349681\nfin\t -57.9833349681\ntgl\t -22.7613478411\nresult tgl\n\n## Approach\nFor my approach I tried to keep things simple and then scale. First I read through the corpus and when I encountered a new word I set the occourence of that word in all langauges to 0. As I came across actual numbers I updated. Once I was done reading the entire corpus I used additive smoothing(see smoothing approach) to update the counts in the corpus. Once this was done I started reading in the target file(train.text and test.txt) and remove all unwanted punctuation(using the translate library) and then proceeding to caclulate the log probability of the string given the langauge. If a word was not in my corpus lexicon than I assumed it was equaly likley to come from any langauge and applied addative smoothing(since it was unfound in any language total count would be 15 so odds are 1/15) and calculated an end weight. Then I loop though all the langauge probabilites and choose the langauge that has the highest number. This allowed me to get 14/15 examples in the train file. \n\n## Smoothing Approach\nBefore choosing a smoothing approach I researched various smoothing approaches in popular literature and found out that the most common in NLP style tasks with unknown words is Addative smoothing. The logic is we add 1 occourence to all different categories. This makes langauges that have count 0 be 1/total(very rare). This allows calculations to happen with ease with minimal core stat disruption.\n## Special Features\nThe Extra Credit\n\n## Extra credit.\nFor my extra credit approach I looked at both the extra-credit.train and regular train and looked at the difference between log probability of the predicted language vs the average log probability of all the langauges. In general I found that when the classifier was accurate, the difference was usually > 25. Based on this, when the diff is < 25 I output unk. If I wanted to be even more sure I would likley use a different smoothing function since my additive smoothing probabily was not the best for unknown langauges. 
Additionally, when I had an unknown word I just treated it as being equally likely to come from any of the 15 target languages; this biased my classifier toward predicting one of these languages, since it didn't penalize out-of-vocabulary strings that much.\n\n## Missing Features\nNot robust to errors\n" }, { "alpha_fraction": 0.5502008199691772, "alphanum_fraction": 0.5522088408470154, "avg_line_length": 23.450000762939453, "blob_id": "ee0e2362e6cb656765d1525dc2f45458c775f335", "content_id": "651949280d1b5e8c696cee41657f31f3fc669266", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/Projects/project3/main.py", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nclass FSM:\n    def __init__(self):\n        self.handlers = {}\n        self.startState = None\n        self.endStates = []\n\n    def add_state(self, name, handler, end_state=0):\n        name = name.upper()\n        self.handlers[name] = handler\n        if end_state:\n            self.endStates.append(name)\n\n    def set_start(self, name):\n        self.startState = name.upper()\n\n    def run(self, cargo):\n        try:\n            handler = self.handlers[self.startState]\n        except KeyError:\n            raise RuntimeError('must call set_start() before run()')\n        # Drive the machine: each handler returns (next_state, cargo) until an end state is reached.\n        while True:\n            new_state, cargo = handler(cargo)\n            if new_state.upper() in self.endStates:\n                break\n            handler = self.handlers[new_state.upper()]\n" }, { "alpha_fraction": 0.7446808218955994, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 23.5, "blob_id": "6b874815285fd6905155f1207968e4b22ca213d4", "content_id": "7b6bea67380a3854672b5d240a92a3df8d5899c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 47, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/Projects/project4/run.sh", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "#!/bin/sh\npython3 main.py targets target_files\n" }, { "alpha_fraction": 0.6511218547821045, "alphanum_fraction": 0.6634404063224792, "avg_line_length": 41.092594146728516, "blob_id": "d3a4f08b6153d5f74f037cc6bf67875fdaffce4a", "content_id": "d97265f8ccfb9043292c67521c06e8654cbb22b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2273, "license_type": "no_license", "max_line_length": 187, "num_lines": 54, "path": "/Projects/project2/main.py", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module computes unigram counts in a corpus\nTo execute please execute ./run.sh or python3 main.py\nCreation Date: 08/07/2018\nLast Modified: 08/14/2018\nAuthors: Daniel Campos <[email protected]>\n\"\"\"\nimport os\nimport sys\nimport re\n\ndef read_and_prep_file(filename):\n    \"\"\"Read contents of a file and remove html tags\n    Args: filename(str): a filename with full path to be loaded\n    Returns: text(str): a string containing all words\n    We read in all lines in a file, join them into one long string, remove all html, turn any character that isn't A-Z or ' into a ' ', then remove trailing and leading 's and convert to lower\n    \"\"\"\n    with open(filename, 'r') as f:\n        clean_text = ''.join(f.readlines())\n    clean_text = re.sub('<.*?>', '', clean_text) #remove html lines\n    clean_text = re.sub('[^A-Za-z\\']', ' ', clean_text) #replace illegal characters with space\n    clean_text = re.sub(' \\'*',' ', clean_text) #remove leading '\n    clean_text = re.sub('\\'* ', ' ', clean_text).lower() # remove trailing ' and convert to lower\n    return clean_text\n\ndef print_official_score(values):\n    \"\"\"Print output in desired format\n    Args: values(dict): a dict of words and their 
counts.\n    Returns: None\n    Print unique values sorted by count, high to low\n    \"\"\"\n    for v in sorted(values, key=values.get, reverse=True):\n        print('{}\\t{}'.format(v,values[v]))\n\ndef calculate_unigrams(directory_path):\n    \"\"\"Find files, read them, clean and count word occurrences\n    Args: directory_path(str): a path where prd files are located\n    Returns: None\n    Read all files, clean the text and turn it into a list before looping over it to get unique counts of each word\n    \"\"\"\n    target_files = os.listdir(directory_path)\n    formated_text = ''\n    unigrams = {}\n    for target_file in target_files:\n        formated_text += read_and_prep_file(os.path.join(directory_path, target_file))\n    all_clean_words = re.findall(r\"[\\w']+\",formated_text) #find all unique words. I found this was ~ 1 minute faster than splitting\n    for word in all_clean_words:\n        if word not in unigrams: #avoid key error\n            unigrams[word] = 0\n        unigrams[word] += 1\n    print_official_score(unigrams) \n\nif __name__ == '__main__':\n    calculate_unigrams('/corpora/LDC/LDC02T31/nyt/2000')\n" }, { "alpha_fraction": 0.6945454478263855, "alphanum_fraction": 0.7030302882194519, "avg_line_length": 53.27631759643555, "blob_id": "3014b3a189af4014fec4862d11545487d9cf2fa8", "content_id": "0dadd635d507d7fb1330dbfd7648f83703f5b818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4125, "license_type": "no_license", "max_line_length": 241, "num_lines": 76, "path": "/Projects/project1/main.py", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module computes the occurrences of various constituent types found in a folder of prd files.\n\nTo execute please execute ./run.sh or python3 main.py\n\nCreation Date: 07/30/2018\nLast Modified: 08/02/2018\nAuthors: Daniel Campos <[email protected]>\n\n\"\"\"\nimport os\nimport sys\nimport nltk_tgrep as tgrep\nfrom nltk import SExprTokenizer\nfrom nltk.tree import ParentedTree\n\ndef get_files_in_dir(directory_path):\n    \"\"\"Get all files in a path\n    Args: directory_path(str): path where target files are located\n    Returns: list_of_files (list): a list that contains all the file names in the directory\n    \"\"\"\n    return os.listdir(directory_path)\n\ndef read_and_prep_file(filename):\n    \"\"\"Read contents of a file, tokenize contents and turn into a tree\n    Args: filename(str): a filename with full path to be loaded\n    Returns: ptree (parented tree nltk object): a tree object native to NLTK that automatically maintains a parent pointer in every node. \n    First off we initialize SExprTokenizer, a tool in nltk used to find parenthesized expressions \n    \"\"\"\n    tokenizer = SExprTokenizer()\n    with open(filename, 'r') as f:\n        tmp_strings = tokenizer.tokenize(''.join(map(str.strip,f.readlines()))) #read all the lines (f.readlines() produces a list), strip all bad characters (spaces, newlines, etc) and join into one large sentence before using the SE tokenizer\n    return [ParentedTree.fromstring(tmp) for tmp in tmp_strings] # for each string in the file return a tree \n\ndef count_occurrences(tree,pattern,constituent_filter):\n    \"\"\"Take a tree, a desired search pattern and a filter and return a count\n    Args: tree (ptree): a ParentedTree of a sentence, pattern (str): a pattern to search for, constituent_filter (lambda): a filter condition based on desired properties\n    Returns: count_of_constituents\n    We take a tree and search for all matches in it using tgrep (tree grep), then we remove all matches that don't match our filter. 
For S, VP, and NP we set the filter to None; for IVP and DVP we set the appropriate conditions \n    \"\"\"\n    matches = tgrep.tgrep_nodes(tree, pattern) #find all items in tree that match our search pattern\n    constituents = list(filter(constituent_filter, matches)) #remove whatever doesn't match our filter\n    return len(constituents)\n\ndef print_official_score(values):\n    \"\"\"Print output in desired format\n    Args: values(dict): a dict of constituent types and counts.\n    Returns: None\n    \"\"\"\n    for v in values:\n        print('{}\\t{}'.format(v,values[v]))\n\ndef calculate_constituents(directory_path):\n    \"\"\"Find files, read them, and calculate counts of various constituents across a directory\n    Args: directory_path(str): a path where prd files are located\n    Returns: None\n    Simple process to get all constituents: 1. Find all file names in a directory, 2. Open them and turn them into ptrees, 3. use tgrep and filter using lambdas to get desired values, 4. output scores in desired format\n    \"\"\"\n    constituent_counts = {'Sentence':0, 'Verb Phrase':0, 'Noun Phrase':0,'Ditransitive Verb Phrase':0, 'Intransitive Verb Phrase':0}\n    if os.path.isdir(directory_path):\n        target_files = get_files_in_dir(directory_path)\n        for target_file in target_files:\n            trees = read_and_prep_file(directory_path+'/'+target_file)\n            for tree in trees:\n                constituent_counts['Sentence'] += count_occurrences(tree,'S', None)\n                constituent_counts['Noun Phrase'] += count_occurrences(tree,'NP', None)\n                constituent_counts['Verb Phrase'] += count_occurrences(tree,'VP', None)\n                constituent_counts['Ditransitive Verb Phrase'] += count_occurrences(tree,\"VP < (NP $ NP)\", lambda x: len(x) == 3)\n                constituent_counts['Intransitive Verb Phrase'] += count_occurrences(tree,\"VP\", lambda x: len(list(x.subtrees())) == 1)\n        print_official_score(constituent_counts)\n    else:\n        print('Error: Invalid directory Path \\\"{}\\\". 
please confirm correct directory exits'.format(directory_path))\n exit(-1)\nif __name__ == '__main__':\n calculate_constituents('/corpora/LDC/LDC99T42/RAW/parsed/prd/wsj/14')\n" }, { "alpha_fraction": 0.522473931312561, "alphanum_fraction": 0.5280474424362183, "avg_line_length": 38.30434799194336, "blob_id": "747f1db6335330188905c4bb54eea46493bda55f", "content_id": "5c208dc55f0fb4d72c2afd28a3032834b2828157", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5562, "license_type": "permissive", "max_line_length": 175, "num_lines": 138, "path": "/Projects/project4/main.py", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "import os\r\nimport sys\r\n\r\nclass node:\r\n def __init__(self, char, is_done):\r\n self.char = char\r\n self.children = [None] * 4\r\n self.is_done = is_done \r\n def add(self, to_add):\r\n \"\"\"Add a child node based on its character\r\n Args:to_add(node) a new node we want to be a child of current node\r\n Returns:None\r\n Only gets added if matches one of the 4 DNA letters\r\n \"\"\"\r\n if to_add.char == 'A':\r\n self.children[0] = to_add\r\n elif to_add.char == 'C':\r\n self.children[1] = to_add\r\n elif to_add.char == 'G':\r\n self.children[2] = to_add\r\n elif to_add.char == 'T':\r\n self.children[3] = to_add\r\n\r\nclass trie:\r\n def __init__(self):\r\n self.head = node(\"head\",False)\r\n\r\n def get(self):\r\n return self.head\r\n\r\n def add(self, search_word):\r\n \"\"\"Adds target sequence to trie\r\n Args:search_word(str) a word to be added to the trie\r\n Returns:None\r\n Starting at the trie head, we add loop over the word letter by letter adding children if they dont exist. When done set final state to is_done(meaning final character)\r\n \"\"\"\r\n current = self.head\r\n for i in search_word:\r\n if i == 'A':\r\n if current.children[0] == None:\r\n current.add(node(i,False))\r\n current = current.children[0]\r\n elif i == 'C':\r\n if current.children[1] == None:\r\n current.add(node(i,False))\r\n current = current.children[1]\r\n elif i == 'G':\r\n if current.children[2] == None:\r\n current.add(node(i,False))\r\n current = current.children[2]\r\n elif i == 'T':\r\n if current.children[3] == None:\r\n current.add(node(i,False))\r\n current = current.children[3]\r\n current.is_done = True \r\n\r\ndef get_files_in_dir(dir_path):\r\n \"\"\"Finds all files in a directory and returns as a sorted list\r\n Args:dir_path(str) a folder where there are files\r\n Returns:alist(list) a list of all files in the directory sorted alphabetically by filename\r\n \"\"\"\r\n alist = os.listdir(dir_path)\r\n alist.sort()\r\n return alist\r\n\r\ndef populate_trie_with_target(a_trie, target_sequences):\r\n \"\"\"Read from a file with target sequences and load them into a trie structure\r\n Args:a_trie(trie) , target_sequences(str) a filename with target DNA sequences\r\n Returns:a_trie(trie) populated with all target strings.\r\n This function reads the file and then line by line loads them into the trie for future search\r\n \"\"\"\r\n with open(target_sequences, 'r') as f:\r\n for line in f:\r\n a_trie.add(line.strip().upper())\r\n return a_trie\r\n\r\ndef find_matches(a_trie, dna_corpus):\r\n \"\"\"Finds target DNA sequences in list of chromosomes\r\n Args:target_strings(str) a file location of target strings, dna_corpus(str) a folder location of chromosones\r\n Returns:None\r\n \"\"\"\r\n files = get_files_in_dir(dna_corpus)\r\n extra_credit = {}\r\n for file in files:\r\n filepath = 
os.path.join(dna_corpus, file)\r\n print(filepath)\r\n with open(filepath, 'r') as f:\r\n input_text = f.read().upper() \r\n input_text_len = len(input_text)\r\n i = 0\r\n while i < input_text_len:\r\n j = i\r\n current = a_trie.get()\r\n while j < input_text_len:\r\n current_char = input_text[j]\r\n if current_char == 'A' and current.children[0] != None:\r\n current = current.children[0]\r\n elif current_char == 'C' and current.children[1] != None:\r\n current = current.children[1]\r\n elif current_char == 'G' and current.children[2] != None:\r\n current = current.children[2]\r\n elif current_char == 'T' and current.children[3] != None:\r\n current = current.children[3]\r\n elif current.is_done == True:\r\n target_string = input_text[i:j]\r\n offset = str(format(i, '08X'))\r\n print(\"\\t\"+ offset +\"\\t\" + target_string)\r\n if target_string not in extra_credit:\r\n extra_credit[target_string] = []\r\n extra_credit[target_string].append('\\t'+offset+'\\t'+filepath)\r\n break\r\n else:\r\n break\r\n j += 1\r\n i += 1\r\n with open('extra-credit','w') as w:\r\n for target_string in extra_credit:\r\n w.write(target_string+'\\n')\r\n for key in extra_credit[target_string]:\r\n w.write(key+'\\n')\r\n\r\ndef main(target_strings, dna_corpus):\r\n \"\"\"Finds target DNA sequences in list of chromosomes\r\n Args:target_strings(str) a file location of target strings, dna_corpus(str) a folder location of chromosones\r\n Returns:None\r\n \"\"\"\r\n a_trie = trie()\r\n a_trie = populate_trie_with_target(a_trie, target_strings)\r\n find_matches(a_trie, dna_corpus)\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) != 3:\r\n print(\"Usage: main.py <target dna strings> <location of dna corpus>\")\r\n exit(-1)\r\n else:\r\n target_strings = sys.argv[1]\r\n dna_corpus = sys.argv[2]\r\n main(target_strings,dna_corpus)\r\n" }, { "alpha_fraction": 0.7460992932319641, "alphanum_fraction": 0.7560283541679382, "avg_line_length": 66.14286041259766, "blob_id": "4dbc98a36956b22b1dd6ee2a71c7cd5846db8af0", "content_id": "9a665f785a66c1e7e196cb0f09a17b57a3aabb45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1574, "license_type": "permissive", "max_line_length": 249, "num_lines": 21, "path": "/Projects/project3/readme.txt", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "Ling 473 Project 3 Thai Word Breaking\nDaniel Campos 08/21/2018\n\n## Results Just a few lines\n<html>\n<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/>\n<body>\nคู่ แข่ง ขัน ต่าง ก็ คุม เชิง กัน<br />\nเขา เงียบ ไป ครู่ หนึ่ง แล้ว พูด ขึ้น<br />\nเธอ หัน มา คุ้ย ทราย ขึ้น มา ใหม่<br />\n\n## Approach\nMy approach was simple: create a FSM class, add the states based on the rules provided in the assignment and loop over each line. \nMy FSM class at each step follows the accept conditions and if the next node will be an end state inserts a ' ' either before or after the character(depending on the specific state). \nAt each step the correct result is added to an output string and the last character is removed from the input string. When the input string is len == 0 then our output is ready and we write to a file. \nThis process is doen for each line in the file. 
It would have been easier to turn this all into one large for loop and an if statement, but by implementing an FSM class our program can be easily modified to deal with any changes in our formal grammar.\nThe program opens a write file with the proper encoding, writes the opening html tags, and then opens the input file with the proper encoding. It then runs the FSM class over each line in the file, outputting proper spacing and the final closing html tags\n## Special Features\nNo Special Features\n## Missing Features\nNot robust to errors in input.\n" }, { "alpha_fraction": 0.6149859428405762, "alphanum_fraction": 0.6283739805221558, "avg_line_length": 40.72072219848633, "blob_id": "e4764d22c37463f4f2f63f4345198e3655c62ba0", "content_id": "705de983d0da514bfb0f01d54db05374956338af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4631, "license_type": "no_license", "max_line_length": 216, "num_lines": 111, "path": "/Projects/project5/main.py", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "import os\nimport string\nimport sys\nimport math\nlanguages = ['dan','deu','dut','eng','fin','fra','gla','ita','nob','pol','por','spa','swe','swh','tgl']\n\ndef smoothing(corpus):\n    '''\n    Additive Smoothing\n    args: corpus(dict): word counts per language \n    returns: a modified corpus\n    Add one to every count so that zero-count words get a count of 1 in every language. \n    '''\n    for word in corpus:\n        for lang in languages: # iterate the language keys only, so 'total' is incremented exactly once per language\n            corpus[word]['total'] += 1\n            corpus[word][lang] += 1\n    return corpus\n\ndef print_odds(odds, identifier, sentence, extra_credit):\n    '''\n    Print odds in correct format\n    args: odds(dict) a dict of odds of a sentence coming from each language, identifier(str) unique identifier coming from the input file, sentence(str) an input string, extra_credit(bool) 1 if applying extra credit\n    returns: Nothing\n    Take in the calculated information and print it in the correct format\n    '''\n    max_value = -1 * float('inf')\n    max_lang = 'unk'\n    print('{}\\t{}'.format(identifier, sentence[:-1]))\n    average_odds = 0\n    for p in odds:\n        average_odds += odds[p]\n        if odds[p] > max_value:\n            max_value = odds[p]\n            max_lang = p\n        print('{}\\t{}'.format(p,odds[p]))\n    difference = max_value - average_odds/15\n    if extra_credit == 1 and difference < 25:\n        print(\"result unk\")\n    else:\n        print(\"result {}\".format(max_lang))\n\ndef calculate_odds(corpus, sentence):\n    '''\n    Calculate the odds of each language for an input sentence\n    args: corpus(dict) a dict of per-language word counts, sentence(list) a cleaned sentence whose probabilities across languages we want to calculate\n    returns: probabilities(dict) a dict with language as the key and the log probability of it occurring as the value. 
Higher numbers = more likely\n    Take a sentence and use Bayesian statistics to calculate the likelihood of each language\n    ''' \n    probabilities = {}\n    for lang in languages:\n        probability = float(0)\n        for word in sentence:\n            if word in corpus:\n                probability += math.log10(float(corpus[word][lang])/float(corpus[word]['total']))\n            else:\n                probability += math.log10(0.06666666666666667) # 1/15: an unseen word is treated as equally likely in all 15 languages\n        probabilities[lang] = probability\n    return probabilities\n\ndef load_corpus(corpus_directory):\n    '''\n    Read a corpus and normalize\n    args: corpus_directory(str) a location where the expected corpus files exist\n    returns: a corpus(dict) structure with the smoothed occurrences of words across languages\n    Reads all the corpora in a directory and then normalizes their occurrences using additive smoothing\n    '''\n    corpus = {}\n    target_files = os.listdir(corpus_directory)\n    for target_file in target_files:\n        file_path = os.path.join(corpus_directory, target_file)\n        with open(file_path,'r') as f:\n            lang = target_file[:3]\n            for l in f:\n                l = l.split('\\t')\n                target_word = l[0]\n                target_value = int(l[1])\n                if target_word not in corpus:\n                    corpus[target_word] = {'dan':0,'deu':0,'dut':0,'eng':0,'fin':0,'fra':0,'gla':0,'ita':0,'nob':0,'pol':0,'por':0,'spa':0,'swe':0,'swh':0,'tgl':0}\n                corpus[target_word][lang] = target_value\n    for word in corpus:\n        unique_total = 0\n        for lang in languages:\n            unique_total += corpus[word][lang]\n        corpus[word]['total'] = unique_total\n    return smoothing(corpus)\n\ndef main(corpus_location, target_file, extra_credit):\n    '''\n    Read a corpus and calculate the most probable language for the given sentences\n    args: corpus_location(str) the location of a directory of corpora, target_file(str) a file with sentences whose most likely languages we want to calculate\n    Load a corpus, smooth it, and use it to calculate the most likely language for each sentence\n    '''\n    corpus = load_corpus(corpus_location)\n    with open(target_file,'r') as f:\n        for l in f:\n            l = l.split('\\t')\n            sentence = l[1].strip()\n            sentence = sentence.translate(None, string.punctuation) \n            print_odds(calculate_odds(corpus,sentence.split(' ')), l[0], l[1], extra_credit)\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        print(\"Usage: main.py <location of language-model corpus> <target file> <extra credit flag>\")\n        exit(-1)\n    else:\n        corpus_location = sys.argv[1]\n        target_file = sys.argv[2]\n        extra_credit = int(sys.argv[3])\n        main(corpus_location, target_file, extra_credit)\n" }, { "alpha_fraction": 0.5956054329872131, "alphanum_fraction": 0.7129499912261963, "avg_line_length": 33.9523811340332, "blob_id": "1f48d7e1fad69f4fe9d4779c4eef8a7ca61dcb57", "content_id": "86022e42ea4bafd42434f972a6caddfbd55b5d6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 108, "num_lines": 63, "path": "/Projects/project3/skeleton3.py~", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "#!/opt/python-2.7/bin/python2.7 -S\n# -*- coding: utf-8 -*-\n\n# Script to copy standard input to standard output, one line at a time.\n\n# This gets various items to interface with the OS, including the\n# standard input stream.\nimport 
site\n\nV1 = u\"\\u0E40\\u0E41\\u0E42\\u0E43\\u0E44\"\nC1 = u\"\\u0E01\\u0E02\\u0E03\\u0E04\\u0E05\\u0E06\\u0E07\\u0E08\\u0E09\\u0E0A\\u0E0B\\u0E0C\\u0E0D\\u0E0E\\u0E0F\" \\\n + u\"\\u0E10\\u0E11\\u0E12\\u0E13\\u0E14\\u0E15\\u0E16\\u0E17\\u0E18\\u0E19\\u0E1A\\u0E1B\\u0E1C\\u0E1D\\u0E1E\\u0E1F\" \\\n + u\"\\u0E20\\u0E21\\u0E22\\u0E23\\u0E24\\u0E25\\u0E26\\u0E27\\u0E28\\u0E29\\u0E2A\\u0E2B\\u0E2C\\u0E2D\\u0E2E\"\nC2 = u\"\\u0E23\\u0E25\\u0E27\\u0E19\\u0E21\"\nV2 = u\"\\u0E34\\u0E35\\u0E36\\u0E37\\u0E38\\u0E39\\u0E31\\u0E47\"\nT = u\"\\u0E48\\u0E49\\u0E4A\\u0E4B\"\nV3 = u\"\\u0E32\\u0E2D\\u0E22\\u0E27\"\nC3 = u\"\\u0E07\\u0E19\\u0E21\\u0E14\\u0E1A\\u0E01\\u0E22\\u0E27\"\n\n# print V1\n# print C1\n# print C2\n# print V2\n# print T\n# print V3\n# print C3\n\n# print V1 + C1 + C2\n\ndef fst(I):\n # Transform the input to the result\n # A dummy line to prove stringy things work with Unicode.\n # return u\":\"+ I\n return I\n\nprint u\"<html><meta http-equiv='Content-Type' content='text/html; charset=UTF-8' /><body>\"\n\n# Readline is a method of stdin, which is in the standard object sys.\n# It returns the empty string on EOF.\nline = sys.stdin.readline()\n\n# The string line works as the while test. As several other scripting\n# languages, the empty string is treated as false, other strings are treated\n# as true.\nwhile line:\n # Transform the line read. Since readline leaves the terminating newline,\n # a slice is used to print all characters in the string but the last.\n # Otherwise, each input line would be output with two line terminators.\n result = fst(line[:-1])\n \n print result + u\"<br/>\"\n\n # Next line.\n line = sys.stdin.readline()\n\nprint u\"</body></html>\"\n" }, { "alpha_fraction": 0.5765765905380249, "alphanum_fraction": 0.7657657861709595, "avg_line_length": 54.5, "blob_id": "c964f545a88cef5355b07361a4bb1c80f08a6e73", "content_id": "9db92d2072833a089b812fe0164b9fa815a298da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 111, "license_type": "permissive", "max_line_length": 100, "num_lines": 2, "path": "/Projects/project4/run-patas.sh", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "#!/bin/sh\npython3 main.py /opt/dropbox/18-19/473/project4/targets /opt/dropbox/18-19/473/project4/hg19-GRCh37/\n" }, { "alpha_fraction": 0.631147563457489, "alphanum_fraction": 0.7704917788505554, "avg_line_length": 60, "blob_id": "e7a43bbc87bdec24901ac460c70ab8a244ba3529", "content_id": "22293dd72d3c7833e1f1fa988929a8df93395286", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 122, "license_type": "permissive", "max_line_length": 111, "num_lines": 2, "path": "/Projects/project5/run-extra.sh", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "#!/bin/sh\npython main.py /opt/dropbox/18-19/473/project5/language-models /opt/dropbox/18-19/473/project5/extra-test.txt 1\n" }, { "alpha_fraction": 0.7781690359115601, "alphanum_fraction": 0.7980153560638428, "avg_line_length": 122.72000122070312, "blob_id": "ad95323c5a7ef251681d9ab538819b65a183cb9f", "content_id": "1fdaaf6aaef2881bbf58910769fb9807c202d120", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3124, "license_type": "permissive", "max_line_length": 1051, "num_lines": 25, "path": "/Projects/project4/Readme.txt", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "Ling 473 Project 4 DNA 
Search\r\nDaniel Campos 08/30/2018\r\n\r\n## Results Just a few lines\r\ntarget_files/chr1.dna\r\n\t0000C312\tAAACTAACTGAATGTTAGAACCAACTCCTGATAAGTCTTGAACAAAAG\r\n\t00022723\tGGGGCTGGAGACTGACTTAATCACCAACAGCCAAAGGTTTTATCAATCATGCTTGCATAATAAAGCCTC\r\n\t00071235\tCATATATAAAAAATGAAACTGTGACCGATTTTAAGGACAGTATTGGCAAATATTTCTGTGCTCTTGGAGGAGAAGACCCTTATTGG\r\n\t000A53CA\tGGGGCTGGAGACTGACTTAATCACCAACAGCCAAAGGTTTTATCAATCATGCTTGCATAATAAAGCCTC\r\n\t0014CFE3\tAGCTCTGGAAATCCCTCAACAATTGTGTCCAGTTTCACCACGAA\r\n\r\n## Approach\r\nMy approach to this project was to make a simple program first and then iterate on how to optimize and speed it up. \r\nI started by implementing a node and a trie class in the simplest way I could imagine: a node holds pointers to its children nodes and a flag for whether it is a final node, and the trie class has a method that adds a word to the trie. Using these classes I read all the target strings in the target sequences file and load them into the trie. Next, for each file in our target corpus folder we open the file and read all the contents into a string. Then at each step in the string (represented by variable i) we try to traverse our tree to find one of our target sequences. If we ever break from a target sequence, we break from our trie search, move to i+1, and start our search again. If we find a target sequence we output the result and move i to the end of that target sequence, which is safe since no target sequence contains another as a substring. During the entire process we convert all characters to upper case and ignore any character that is not one of ACGT. \r\n\r\nFor the extra credit I just created a dict where the key is the target sequence and the value is a list of the offset plus the filename. When a sequence is found we add its location (offset and filename) to that list. Once we have finished finding all sequences we loop over our dict and print out the results. \r\n\r\nDuring the entire process my approach was always to start dirty and then optimize. I used tools like cProfile, which helped me identify inefficiencies in my code. One of the biggest examples: at one point I was comparing my i and j values to len(input_text) at every step, which meant that at each step the code had to first calculate the length of the text and then see if i or j was larger. By saving len(input_text) to a variable and comparing against that each time, my program ran 30% faster. I applied this sort of effort to the entire program and gradually optimized my code to its current state. That being said, for this program it seems that Python was a terrible choice and I should have just bitten the bullet and written it in C, which would have made my runtime seconds or minutes instead of the ~ 70 minutes it currently runs in. Worth noting: on my personal rig chr19 runs in ~110 seconds but on condor it runs in ~ 220 seconds. Same for the entire assignment. Personal device runs ~ 70 minutes and ~3 hours on condor. 
\r\n \r\n\r\n## Special Features\r\nNo Special Features\r\n\r\n## Missing Features\r\nSpeed Optimizations, robust error handling\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7666157484054565, "alphanum_fraction": 0.7807486653327942, "avg_line_length": 95.9259262084961, "blob_id": "c7605ebbaabcee948872a3826446364a7c28f895", "content_id": "8d7c57bbdc69fea70f211a3c0c37978ca04a45b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2618, "license_type": "permissive", "max_line_length": 422, "num_lines": 27, "path": "/Projects/project1/readme.txt", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "Ling 473 Project 1 Constituent Counting\nDaniel Campos 08/02/2018\n\n## Results table\nNoun Phrase 13221\nDitransitive Verb Phrase 33\nIntransitive Verb Phrase 123\nSentence 4670\nVerb Phrase 7920\n\n## Approach\nFind All Files -> Read, Normalize and turn into long sentences -> Turn into Trees -> Search Trees for things that match what we are searching for -> reduce lists using lambdas to match desired conditions -> sum over all possible trees\nAs luck would have it, I regularly work with PTB parse trees (search query rewriting) at work, which means I was already familiar with various tools that would help me.\nMy first step was to find a way to get all the files and consume the text. I used Python's os library to get all files in a directory and then read through them. \nNext, I read all the lines in each file with f.readlines(), which produces a list of all the lines. I then use map along with the str.strip tool to remove all trailing spaces and characters, and then I join everything in the list into one long string.\nNext, I use NLTK's SExpression parser, since it is designed to parse sentences with parentheses, and turn my long string into a parsed string. \nAfter that, since the goal of this project was to count occurrences of specific types of constituents, which in some cases depends on the preceding context, I thought it would be best to use a ParentedTree.\nI used NLTK's ParentedTree since every item has a direct way of searching for its parent node. Then for each tree I use tgrep (a tool built into NLTK deliberately meant to find POS tags in trees) to find all parts of a tree that match my desired condition. Finally, I remove all items that don't match my lambda function. The lambda function is None for S, VP, and NP, not having any children for IVP, and a length of 3 for DVP.\nThese operations are performed on every file and subsequently every generated tree to produce our output. \n\n## Special Features\nIt's super short and concise! Without comments it's 36 lines! Python FTW. \n\n## Missing Features\nOne thing I did not spend a lot of time on was preparing to deal with any errors in the input. I assume all files have proper encoding, are readable, etc. I also struggled with condor, so I just hardcoded the path of the files within the main.py script. \nAdditionally, I spent some time debugging what my classmates wrote online, since some people got 34 Ditransitive Verbs while I could only find 33. Despite my best efforts, I couldn't get my system to find this hidden DVP :( \nFinally, I have not really done any kind of robust testing (see above), so the software is provided with no guarantees. 
\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.8653846383094788, "avg_line_length": 25, "blob_id": "12cec81764ba5ccea78db9bd2d9939fb4bae64b8", "content_id": "d456646caeb42a98e8518d1f7fbfd59c8b6eea42", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "permissive", "max_line_length": 41, "num_lines": 2, "path": "/README.md", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "# CLSM473\nIntroduction to Computational Linguistics\n" }, { "alpha_fraction": 0.70652836561203, "alphanum_fraction": 0.7596095204353333, "avg_line_length": 64.5199966430664, "blob_id": "41ac6d89210b0fa09a41561b7a77b254249bf518", "content_id": "e5c6dd02d2329634e59038b1d7cd2f23c6996568", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1639, "license_type": "permissive", "max_line_length": 502, "num_lines": 25, "path": "/Projects/project2/readme.txt", "repo_name": "spacemanidol/CLMS473", "src_encoding": "UTF-8", "text": "Ling 473 Project 2 Unigrams Counting\nDaniel Campos 08/14/2018\n\n## Results table(Just a few) out of the 269225 unique words\nthe\t 4398064\na\t 2032214\nto\t 1893205\nof\t 1888409\nand\t 1759680\nin\t 1486132\nthat\t 814646\nfor\t 793617\nis\t 712518\non\t 564762\n\n## Approach\nFind All Files -> Read each file and in each file use regex to->Remove any html tag->Turn anything not an A-Z or ' into a ' '(to help split words)-> Remove trailing and leading ' -> Convert to lower case->Join as text with the same process on all other files->Use Regex to create list of all words-> Loop over said list to get overall count of each unigram-> Output sorted unigrams.\n\nMy first step was find a way to get all the files and consume the text. I used pythons os library to get all files in a directory and then read through them. \nNext, I read all the lines in file with f.readlines() which I turn into a long string. After that I use various regex statements to remove all html files, turn illegal characters into spaces, and remove leader and ending '. When this is done convert all text to lower and join with the result of all the other files. Finally I use another regex to get all unique words as a list which I then loop over to get final counts of word occourences. I take this unigram dict and output it in a sorted fashion.\n## Special Features\nIts super short and concise(again)! Also most of my runs were little over 2 minutes so seems to be a faster solution than my peers found(based on their online comments. Even not minimized code is 28 lines!\n\n## Missing Features\nNot robust to errors in input and could probably be optimized much further. \n" } ]
15
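A minimal, self-contained sketch of the trie-based multi-pattern scan the project4 Readme above describes (build a trie of target sequences, walk the text once, and on a hit jump past the match, which is safe only because no target contains another). The patterns and text below are invented examples, not the course data:

```python
def build_trie(patterns):
    """Nested-dict trie; the "$" key marks the end of a complete pattern."""
    root = {}
    for p in patterns:
        node = root
        for ch in p:
            node = node.setdefault(ch, {})
        node["$"] = p
    return root

def scan(text, root):
    hits, n, i = [], len(text), 0  # cache len(text) once, as the Readme suggests
    while i < n:
        node, j = root, i
        while j < n and text[j] in node:
            node = node[text[j]]
            j += 1
            if "$" in node:            # full target matched at offset i
                hits.append((i, node["$"]))
                i = j - 1              # jump past the match; no target nests in another
                break
        i += 1
    return hits

print(scan("GGAACTTACGTCC", build_trie(["AAC", "ACGT"])))
# -> [(2, 'AAC'), (7, 'ACGT')]
```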
ushrstha/Min-Hash
https://github.com/ushrstha/Min-Hash
447a0dacb62d4b666ebdc13f33f8bf9d2abfe072
ba7c841a9ca5f669ca16bf89fece30efeea3d8c2
1efba3157f9cb6472583b05b8cb6506b7d4af278
refs/heads/master
2020-04-12T20:25:03.019741
2018-12-21T16:46:16
2018-12-21T16:46:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6365236639976501, "alphanum_fraction": 0.6442244052886963, "avg_line_length": 42.067962646484375, "blob_id": "616327a42c9234ffb35e6623494cac4783d80789", "content_id": "b2f78654ada024cb56d82235b41dbf2ac2db660e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4545, "license_type": "no_license", "max_line_length": 196, "num_lines": 103, "path": "/MinHash.py", "repo_name": "ushrstha/Min-Hash", "src_encoding": "UTF-8", "text": "\"\"\"Finds the minhash value of the dna for a k-shingle using random permutation\"\"\"\r\n\r\nimport numpy\r\n\r\n\"\"\"\r\nparams: dna string, k int\r\nreturns: set of k-shingle created from the dna\r\n\"\"\"\r\ndef dna_shingle(dna, k):\r\n count, lastIndex = 0, k\r\n shingle_set = set()\r\n \r\n # checks to procceed if and only if value of k > 0 and k is an integer\r\n if(k <= 0 or type(k) != int):\r\n raise Exception(\"The value of k(%f) should be positive integer\" %(k))\r\n # checks to procceed if and only if dna is non-empty and length of dna > k\r\n elif len(dna) == 0 or k > len(dna):\r\n raise Exception(\"The dna(%s) should have length greater than the value of k(%d)\" %(dna, k))\r\n \r\n #loops through all the characters in a dna string to form k-shingle\r\n while(lastIndex <= len(dna)):\r\n # froms a k-shingle \r\n shingle = dna[count:lastIndex]\r\n # creates a list of bits indicating if the k-shingle contains character other than A, C, G or T; if it contains character other than 'A', 'C', 'G' and 'T' then True is inserted else False\r\n flagUnwantedChar = list(x not in [\"A\", \"C\", \"G\", \"T\"] for x in shingle.upper())\r\n # checks if the 'flagUnwantedChar' list contains any True bit (i.e. the shingle contains character other than 'A', 'C', 'G' and 'T') and warns the user accordingly\r\n if any(flagUnwantedChar):\r\n raise Exception(\"The provided DNA sample contains unwanted character: '%s' at Position %d\" %(shingle[flagUnwantedChar.index(True)], count+flagUnwantedChar.index(True)+1))\r\n # adds the k-shingle to the set of k-shingle\r\n else:\r\n shingle_set.add(dna[count:lastIndex])\r\n count += 1\r\n lastIndex = count+k\r\n \r\n # returns the set containing all possible k-shingle of the dna\r\n return shingle_set\r\n\r\n\r\n\"\"\"\r\nparams: dna string\r\nreturns: the decimal equivalent of the k-shingle of a dna\r\n\"\"\"\r\ndef shingle2decimal(dna):\r\n try:\r\n # Turns all the characters of the dna string to upper-case and replaces all 'A' as 0, 'C' as 1, 'G' as 2 and 'T' as 3 for base 4 representation of the dna string\r\n base4Shingle = dna.upper().replace(\"A\", \"0\").replace(\"C\", \"1\").replace(\"G\", \"2\").replace(\"T\", \"3\")\r\n # converts the base 4 shingle to its equivalent decimal representation\r\n return(int(base4Shingle, 4))\r\n except:\r\n # throws an exception if the dna shingle contains character other than 'A', 'C', 'G' or 'T'\r\n raise Exception(\"The dna contains unwanted character. 
Hence it cannot be converted to decimal.\")\r\n\r\n\r\n\"\"\"\r\nparams: dna string, k int\r\nreturns: list for the vector representation of each k-shingle of the dna\r\n\"\"\"\r\ndef dna_vector(dna, k):\r\n try:\r\n # get the set containing all possible k-shingle of the dna\r\n shingle=dna_shingle(dna, k)\r\n # creates a zero vector with 4^k dimensions\r\n dnaVector = [0]*(4**k)\r\n # replaces 0 with 1 in dna vector at index = (decimal value of the shingle)\r\n for s in shingle:\r\n dnaVector[shingle2decimal(s)]=1\r\n return dnaVector\r\n except Exception as ex:\r\n # throws exception to the calling function if generated while executing dna_shingle(dna, k) or shingle2decimal(s)\r\n raise Exception(ex)\r\n\r\n\r\n\"\"\"\r\nparams: k int\r\nreturns: list for the permutation vector of size 4^k\r\n\"\"\"\r\ndef permute(k):\r\n # performs permutation using 'numpy' module to shuffle a list containing numbers 0 to 4^k\r\n permutation = list(range(4**k))\r\n numpy.random.shuffle(permutation)\r\n return permutation\r\n\r\n\"\"\"\r\nparams: permutation function, dna string, k int\r\nreturns: int minhash value of the dna string for k-shingle using permutation\r\n\"\"\"\r\ndef minhash_dna(permutation, dna, k):\r\n # return a number that is a minhash of dna\r\n # if all dimensions of the dna vector is zero or some error occurs then it returns None\r\n try:\r\n # gets the vector representation of the input dna string\r\n dnaVector = dna_vector(dna, k)\r\n # gets the permuted vector to perform hash operation\r\n permutedVector = permutation(k)\r\n # returns the value of permuted vector for which the first value of dna vector at index = (value of the permuted vector) is 1\r\n for index in permutedVector:\r\n if dnaVector[index] == 1:\r\n return index\r\n except Exception as ex:\r\n print(ex)\r\n\r\n# to print the minhash value \r\nprint(\"The minhash value is %s\" %(minhash_dna(permute, \"AAAAACGTACCATGCAGTACGATCAGTTGCA\", 4)))\r\n " }, { "alpha_fraction": 0.8066298365592957, "alphanum_fraction": 0.8066298365592957, "avg_line_length": 180, "blob_id": "06f4e89a885a9a6b67644dcba3d815450a17a609", "content_id": "18a9b8d20f02a16f8442f94a513d81197398bcf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 180, "num_lines": 1, "path": "/README.md", "repo_name": "ushrstha/Min-Hash", "src_encoding": "UTF-8", "text": "Minhash is used to detect the similarity of two sets. This program helps in finding the similarity of DNA strings by finding the minhash value of a DNA string provided by the user.\n" } ]
2
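A short sketch of what the minhash value buys you, per the Min-Hash README above: across many independent hash functions, the fraction of positions where two sets' signatures agree approximates their Jaccard similarity. Seeded built-in hashes stand in here for the random-permutation scheme used in MinHash.py, and the shingle sets are invented:

```python
def minhash_signature(shingles, num_hashes=256):
    # One min value per seeded hash function; a stand-in for permuting 0..4^k-1.
    return [min(hash((seed, s)) for s in shingles) for seed in range(num_hashes)]

def estimated_jaccard(a, b, num_hashes=256):
    sa = minhash_signature(a, num_hashes)
    sb = minhash_signature(b, num_hashes)
    return sum(x == y for x, y in zip(sa, sb)) / num_hashes

a = {"ACG", "CGT", "GTA"}
b = {"ACG", "CGT", "TAC"}
print(estimated_jaccard(a, b))  # hovers near len(a & b) / len(a | b) = 0.5
```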
fantashi099/CS336
https://github.com/fantashi099/CS336
f15405625d13e170685dc85ee586826a74c3b07d
9d93c1a8225914500a1368c813765dca8ce2b22d
59589fda42452bcb9e4e214f61f4ec26d3e4ee9a
refs/heads/main
2023-01-27T11:06:07.085745
2020-12-10T07:54:06
2020-12-10T07:54:06
307,687,986
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5759493708610535, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 25.33333396911621, "blob_id": "944d4cea52909ab3566a26c5e6edf95657082c33", "content_id": "a5da7f69a7c5c8e0b00ab2c99f404d18fd2db436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 66, "num_lines": 6, "path": "/README.md", "repo_name": "fantashi099/CS336", "src_encoding": "UTF-8", "text": "# 💻 Truy vấn thông tin đa phương tiện - CS336.L12.KHCL\n\n![](https://portal.uit.edu.vn/Styles/profi/images/logo186x150.png)\n\n+ MSSV: 18521492\n+ Họ tên: Trần Minh Tiến\n" }, { "alpha_fraction": 0.5549354553222656, "alphanum_fraction": 0.5706313252449036, "avg_line_length": 23.092437744140625, "blob_id": "c8fcb04680e354f306720dcdd344107b718bc0f6", "content_id": "aa66f7d1bf6e7f20f22c80129b5998857bc5a0ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2867, "license_type": "no_license", "max_line_length": 82, "num_lines": 119, "path": "/Deep Feature/deep_features.py", "repo_name": "fantashi099/CS336", "src_encoding": "UTF-8", "text": "from tensorflow.keras import Model\nfrom tensorflow.keras.applications import VGG16\nfrom tensorflow.keras.preprocessing import image\nimport os\nimport numpy as np\nfrom urllib import request\nfrom io import BytesIO\nfrom PIL import Image\n\nvgg_model = VGG16(include_top = \"True\", weights = \"imagenet\")\nvgg_model = Model(inputs = vgg_model.input, outputs = vgg_model.layers[-2].output)\n\ndef load_image(path):\n data = []\n label = []\n for file_name in os.listdir(path):\n if file_name.endswith(\".jpg\"):\n label.append(file_name)\n img_name = os.path.join(path,file_name)\n img = image.load_img(img_name, target_size = (224,224))\n img_data = image.img_to_array(img)\n data.append(img_data)\n data = np.asarray(data)\n deep_feature = vgg_model.predict(data)\n print(\"Loading Image Completed!\")\n return deep_feature, label\n\ndata, label = load_image('data')\nquery, anh = load_image('query')\nprint(data.shape)\n\n\ndef cosine_similarity(x,y):\n return np.dot(x,y) / (np.sqrt(np.dot(x,x)) * np.sqrt(np.dot(y,y)))\n\ndef L2_norm(x,y):\n return np.sqrt(np.sum((x-y) ** 2))\n\ndef L1_norm(x,y):\n return np.sum(np.abs(x-y))\n\ndef Metrics(q,data,count):\n\n fname = \"Ket qua anh\" + str(count) + \".txt\"\n\n f = open('./output/' + fname, 'a')\n\n L2 = np.asarray([L2_norm(x,q) for x in data])\n metric = sorted(L2)[:5]\n f.write('L2 Metrics:')\n f.write(str(metric))\n f.write('\\n')\n\n L2 = L2.argsort()[:5]\n\n print('L2 Metrics:')\n for i in L2:\n f.write(label[i])\n f.write('\\t')\n print(label[i], end='; ')\n f.write('\\n')\n\n print('\\n')\n\n L1 = [L1_norm(x,q) for x in data]\n metric = sorted(L1)[:5]\n f.write('L1 Metrics:')\n f.write(str(metric))\n f.write('\\n')\n\n L1 = np.argsort(L1)[:5]\n print('L1 Metrics:')\n for i in L1:\n f.write(label[i])\n f.write('\\t')\n print(label[i], end='; ')\n f.write('\\n')\n\n print('\\n')\n\n cosine = [cosine_similarity(x,q) for x in data]\n metric = sorted(cosine,reverse=True)[:5]\n f.write('Cosine Similarity Metrics:')\n f.write(str(metric))\n f.write('\\n')\n\n cosine = np.argsort(cosine)[::-1][:5]\n print('Cosine Similarity Metrics:')\n for i in cosine:\n f.write(label[i])\n f.write('\\t')\n print(label[i], end='; ')\n f.write('\\n')\n\n print('\\n')\n\n dot = [np.dot(x,q) for x in data]\n metric = sorted(dot,reverse=True)[:5]\n f.write('Dot Product Metrics:')\n 
f.write(str(metric))\n    f.write('\\n')\n\n    dot = np.argsort(dot)[::-1][:5]\n    print('Dot Product Metrics:')\n    for i in dot:\n        f.write(label[i])\n        f.write('\\t')\n        print(label[i], end='; ')\n\n\n    f.close()\n    print('\\n' + '----'*30)\n\ncount = 0\nfor q in query:\n    print(\"Thong tin truy van: \" + anh[count])\n    print(q)\n    Metrics(q,data,count+1)\n    count += 1\n" }, { "alpha_fraction": 0.48624926805496216, "alphanum_fraction": 0.5082894563674927, "avg_line_length": 25.417112350463867, "blob_id": "91586383b5a85aaf7d7297eda13dac4be19f8b9f", "content_id": "9a6613683f12ca916d36b662a8f0454e5f7a5469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5137, "license_type": "no_license", "max_line_length": 82, "num_lines": 187, "path": "/MAP/deep_features.py", "repo_name": "fantashi099/CS336", "src_encoding": "UTF-8", "text": "from tensorflow.keras import Model\r\nfrom tensorflow.keras.applications import VGG16\r\nfrom tensorflow.keras.preprocessing import image\r\nimport os\r\nimport numpy as np\r\nfrom urllib import request\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\n\r\nvgg_model = VGG16(include_top = True, weights = \"imagenet\")\r\nvgg_model = Model(inputs = vgg_model.input, outputs = vgg_model.layers[-2].output)\r\n\r\ndef load_image(path):\r\n    data = []\r\n    label = []\r\n    for file_name in os.listdir(path):\r\n        if file_name.endswith(\".jpg\"):\r\n            label.append(file_name)\r\n            img_name = os.path.join(path,file_name)\r\n            img = image.load_img(img_name, target_size = (224,224))\r\n            img_data = image.img_to_array(img)\r\n            data.append(img_data)\r\n    data = np.asarray(data)\r\n    deep_feature = vgg_model.predict(data)\r\n    print(\"Loading Image Completed!\")\r\n    return deep_feature, label\r\n\r\ndata, label = load_image('data')\r\nquery, anh = load_image('query')\r\nprint(data.shape)\r\n\r\n\r\ndef cosine_similarity(x,y):\r\n    return np.dot(x,y) / (np.sqrt(np.dot(x,x)) * np.sqrt(np.dot(y,y)))\r\n\r\ndef L2_norm(x,y):\r\n    return np.sqrt(np.sum((x-y) ** 2))\r\n\r\ndef L1_norm(x,y):\r\n    return np.sum(np.abs(x-y))\r\n\r\ndef mAP(data, classes):\r\n    AP = []\r\n    count_Overall_AP = 0\r\n    if classes == 0:\r\n        classes = 'human'\r\n        for i in range(len(data),0,-1):\r\n            TP = 0\r\n            if data[i-1][:5] == \"human\":\r\n                count_Overall_AP += 1\r\n            for j in range(len(data[:i])):\r\n                if data[j][:5] == 'human':\r\n                    TP += 1\r\n            Precision = TP / len(data)\r\n            AP.append(TP)\r\n\r\n    elif classes == 1:\r\n        classes = 'cat'\r\n        for i in range(len(data),0,-1):\r\n            TP = 0\r\n            if data[i-1][:3] == \"cat\":\r\n                count_Overall_AP += 1\r\n            for j in range(len(data[:i])):\r\n                if data[j][:3] == 'cat':\r\n                    TP += 1\r\n            Precision = TP / len(data)\r\n            AP.append(TP)\r\n\r\n    elif classes == 2:\r\n        classes = 'dog'\r\n        for i in range(len(data),0,-1):\r\n            TP = 0\r\n            if data[i-1][:3] == \"dog\":\r\n                count_Overall_AP += 1\r\n            for j in range(len(data[:i])):\r\n                if data[j][:3] == 'dog':\r\n                    TP += 1\r\n            Precision = TP / len(data)\r\n            AP.append(TP)\r\n\r\n    elif classes == 3:\r\n        classes = 'panda'\r\n        for i in range(len(data),0,-1):\r\n            TP = 0\r\n            if data[i-1][:5] == \"panda\":\r\n                count_Overall_AP += 1\r\n            for j in range(len(data[:i])):\r\n                if data[j][:5] == 'panda':\r\n                    TP += 1\r\n            Precision = TP / len(data)\r\n            AP.append(TP)\r\n    else:\r\n        classes = 'tiger'\r\n        for i in range(len(data),0,-1):\r\n            TP = 0\r\n            if data[i-1][:5] == \"tiger\":\r\n                count_Overall_AP += 1\r\n            for j in range(len(data[:i])):\r\n                if data[j][:5] == 'tiger':\r\n                    TP += 1\r\n            Precision = TP / len(data)\r\n            AP.append(TP)\r\n\r\n    
AP = np.asarray(AP)\r\n    AP = AP/count_Overall_AP\r\n    MAP = np.mean(AP)\r\n    return MAP\r\n\r\n\r\ndef Metrics(q,data, k, classes):\r\n\r\n    L2 = np.asarray([L2_norm(x,q) for x in data])\r\n    # metric = sorted(L2)[:k]\r\n\r\n    L2 = L2.argsort()[:k]\r\n\r\n    print('L2 Metrics:')\r\n    predict = []\r\n    for i in L2:\r\n        predict.append(label[i])\r\n    MAP = mAP(predict, classes)\r\n    print(MAP)\r\n\r\n    L1 = [L1_norm(x,q) for x in data]\r\n    # metric = sorted(L1)[:k]\r\n\r\n    L1 = np.argsort(L1)[:k]\r\n    print('L1 Metrics:')\r\n    predict = []\r\n    for i in L1:\r\n        predict.append(label[i])\r\n    MAP = mAP(predict, classes)\r\n    print(MAP)\r\n\r\n    cosine = [cosine_similarity(x,q) for x in data]\r\n    # metric = sorted(cosine,reverse=True)[:k]\r\n\r\n    cosine = np.argsort(cosine)[::-1][:k]\r\n    print('Cosine Similarity Metrics:')\r\n    predict = []\r\n    for i in cosine:\r\n        predict.append(label[i])\r\n    MAP = mAP(predict, classes)\r\n    print(MAP)\r\n\r\n    dot = [np.dot(x,q) for x in data]\r\n    # metric = sorted(dot,reverse=True)[:k]\r\n\r\n    dot = np.argsort(dot)[::-1][:k]\r\n    print('Dot Product Metrics:')\r\n    predict = []\r\n    for i in dot:\r\n        predict.append(label[i])\r\n    MAP = mAP(predict, classes)\r\n    print(MAP)\r\n\r\n    print('----'*30 + '\\n' )\r\n\r\n\r\n# Human class images\r\nprint(\"Thong tin truy van: anh1.jpg\")\r\nprint(query[0])\r\nMetrics(query[0],data, k = 20, classes = 0)\r\n\r\nprint(\"Thong tin truy van: anh2.jpg\")\r\nprint(query[1])\r\nMetrics(query[1],data, k = 20, classes = 0)\r\n\r\n# Cat class image\r\nprint(\"Thong tin truy van: anh3.jpg\")\r\nprint(query[2])\r\nMetrics(query[2],data, k = 23, classes = 1)\r\n\r\n# Dog class image\r\nprint(\"Thong tin truy van: anh4.jpg\")\r\nprint(query[3])\r\nMetrics(query[3],data, k = 22, classes = 2)\r\n\r\n# Panda class image\r\nprint(\"Thong tin truy van: anh5.jpg\")\r\nprint(query[4])\r\nMetrics(query[4],data, k = 22, classes = 3)\r\n\r\n# Tiger class image\r\nprint(\"Thong tin truy van: anh6.jpg\")\r\nprint(query[5])\r\nMetrics(query[5],data, k = 18, classes = 4)\r\n" }, { "alpha_fraction": 0.6046082973480225, "alphanum_fraction": 0.7216590046882629, "avg_line_length": 46.173912048339844, "blob_id": "56aebe57096f8fd7123e1d52b8ba1a9c7e3ba5ac", "content_id": "1f6f25b7a65c14f8f92e1439908ce57ce1737211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 175, "num_lines": 23, "path": "/Deep Feature/README.md", "repo_name": "fantashi099/CS336", "src_encoding": "UTF-8", "text": "# 💻 Multimedia Information Retrieval - CS336.L12.KHCL\n\n![](https://portal.uit.edu.vn/Styles/profi/images/logo186x150.png)\n\n+ Student ID: 18521492\n+ Full name: Trần Minh Tiến\n\n# Assignment 5 report:\n1) The image data is in the data folder: 105 images, labeled cat, dog, human, panda, tiger\n2)\tQuery images are in the query folder; they cover 5 different classes and are distinct from the data set. To query other images, save them into this query folder\n3)\tResults are saved to the output folder as text files whose names match the query image names <br>\nA sample result covers the 4 measures L2, L1, Cosine, and dot product:\n\n <img src=\"https://64.media.tumblr.com/11e3f629571d333f57d595e55b8c9348/1095e290b6668101-26/s1280x1920/b69d3f5731b3f9d5d5ca1994402121951eb936ae.jpg\" width=\"150\" height=\"200\">\n\"Ket qua anh1.jpg\":\n<img src=\"https://i.imgur.com/QKACJat.png\" width=\"50%\">\n<ul>\n    <li>The list holds the 5 best values for each measure</li>\n    <li>Followed by the 5 corresponding related image files</li>\n</ul>\n4) Terminal output when running the 
code:\n<img src=\"https://i.imgur.com/5Og0ms4.png\" width=\"60%\">\n<img src=\"https://i.imgur.com/fWtUwP2.png\" width=\"60%\">\n" } ]
4
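For reference against the mAP() function in MAP/deep_features.py above: the quantity it approximates is standard average precision over a ranked result list. A compact sketch with hypothetical labels:

```python
def average_precision(ranked_labels, positive):
    """Mean of precision@rank taken at each rank where a relevant item appears."""
    hits, precisions = 0, []
    for rank, label in enumerate(ranked_labels, start=1):
        if label == positive:
            hits += 1
            precisions.append(hits / rank)
    return sum(precisions) / max(hits, 1)

print(average_precision(["cat", "dog", "cat", "cat"], "cat"))  # ~0.806
```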
anshpratap013/monitorPrice
https://github.com/anshpratap013/monitorPrice
bc2ab91876455def60d7c7ec40f327567401f65e
45e0ec580ead7ef6b95c88e3d88c0430a114f5c8
8eb03f2ff1713a20c929bc570b5ac3719ffd32e5
refs/heads/main
2023-08-05T18:05:33.331033
2021-10-06T03:59:51
2021-10-06T03:59:51
414,061,793
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8057553768157959, "alphanum_fraction": 0.8057553768157959, "avg_line_length": 68.5, "blob_id": "998f83720083db46723e9e7d38926476808c1f2a", "content_id": "8e516dca8c27274a948fba7f2e70ffc5315424a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 139, "license_type": "no_license", "max_line_length": 123, "num_lines": 2, "path": "/README.md", "repo_name": "anshpratap013/monitorPrice", "src_encoding": "UTF-8", "text": "# monitorPrice\nIn this we can monitor the price of our desired object and get notified via email when the price drops to our desired value\n" }, { "alpha_fraction": 0.591791033744812, "alphanum_fraction": 0.6470149159431458, "avg_line_length": 29.454545974731445, "blob_id": "e81e25b60a40b6e89d17a5cefa27aba74dbebdd4", "content_id": "0092f8d0c0ddee8771385fcfc872ad84e1f35fc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 233, "num_lines": 44, "path": "/price.py", "repo_name": "anshpratap013/monitorPrice", "src_encoding": "UTF-8", "text": "import bs4\nimport urllib.request\nimport smtplib\nimport time\nprices_list=[]\ndef check_price():\n \n\n url = 'https://www.amazon.in/dp/B082MDMW3X/ref=s9_acsd_al_bw_c2_x_0_i?pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-5&pf_rd_r=CF9JY0WX1GAAPBD3S9KW&pf_rd_t=101&pf_rd_p=8398f427-fbf5-4310-a31e-29a4be7a59bc&pf_rd_i=26297682031'\n sauce = urllib.request.urlopen(url).read()\n soup = bs4.BeautifulSoup(sauce,\"html.parser\")\n price = soup.find(id=\"priceblock_dealprice\").get_text()\n p1 = float(price.replace(\",\",\"\").replace(\"₹\",\"\"))\n print(price)\n print(p1)\n prices_list.append(p1)\n return p1\ncheck_price()\n\ndef send_email(message):\n s=smtplib.SMTP('smtp.gmail.com',587)\n s.starttls()\n s.login(\"[email protected]\",\"anshpratap013@\")\n s.sendmail(\"[email protected]\",\"[email protected]\",message)\n s.quit()\n\nsend_email(\"Hi Ansh\")\ndef price_decrease_check(prices_list):\n if prices_list[-1]<prices_list[-2]:\n return True\n else:\n return False\ncount = 1\nwhile True:\n \n current_price = check_price()\n if count>1:\n flag = price_decrease_check(prices_list)\n if flag>1:\n decrease = prices_list[-1]-prices_list[-2]\n message = \"The price has decreased please check the item and it has decreased by {decrese} rupees\" \n send_email(message)\n time.sleep(1)\n count+=1\n" } ]
2
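The drop check in price.py above is easier to test as a pure function over the recorded prices; a hedged refactor sketch (illustrative only, not code from the repo):

```python
def price_drop(prices):
    """Return the size of the latest drop, or 0.0 if the price did not fall."""
    if len(prices) < 2 or prices[-1] >= prices[-2]:
        return 0.0
    return prices[-2] - prices[-1]

assert price_drop([100.0, 90.0]) == 10.0   # dropped by 10, would trigger an alert
assert price_drop([90.0, 100.0]) == 0.0    # price rose, no alert
assert price_drop([100.0]) == 0.0          # not enough history yet
```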
razieldrake/Abaddon
https://github.com/razieldrake/Abaddon
04574069aac1784c3c9aec910da294366889d810
b08fb70dbb31941f9e7aa404b8615df2260ba1d7
76c0671c4dcabfe58bd3577ce024143f07490f7b
refs/heads/master
2020-04-12T18:52:56.995592
2018-12-21T09:45:32
2018-12-21T09:45:32
162,693,094
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8292682766914368, "alphanum_fraction": 0.8292682766914368, "avg_line_length": 19.5, "blob_id": "86105bc40648b30740279abbf98129763caeeb0d", "content_id": "b4188f14cce6f16fd48df8b0eb4def4f689e58f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "razieldrake/Abaddon", "src_encoding": "UTF-8", "text": "# Abaddon\nComlete offensive secuity tool\n" }, { "alpha_fraction": 0.6811594367027283, "alphanum_fraction": 0.695652186870575, "avg_line_length": 9, "blob_id": "d367019b4679add419abecb71a31de5db031c7c0", "content_id": "d385170a4c1605dd0aeb102245903eae2501e257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 26, "num_lines": 7, "path": "/main.py", "repo_name": "razieldrake/Abaddon", "src_encoding": "UTF-8", "text": "#coding UTF-8\n\nfrom kamene.all import *\n\n\n\nprint (\"Welcome to Chaos\")" } ]
2
cursoweb/ejemplo-django
https://github.com/cursoweb/ejemplo-django
a273ceb521f080fbbd7bf01cb44ab3ac4aaf815e
f627fa37675a5e2a288fa784565786120d6f83be
d36a9ad5ad719b82b2d4307cb33541c070226add
refs/heads/master
2021-01-25T10:07:21.724068
2014-11-18T21:39:02
2014-11-18T21:39:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6517128944396973, "alphanum_fraction": 0.6533442139625549, "avg_line_length": 35.05882263183594, "blob_id": "44bffc6cc50312d872e9769608d799aed2ad3d4f", "content_id": "280574931e8956f2ae848878553afc877021bbda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "permissive", "max_line_length": 85, "num_lines": 34, "path": "/encuestas/admin.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "#!env/bin/python\n# -*- coding: utf-8 -*-\n\nfrom django.contrib import admin\nfrom encuestas.models import Pregunta, Eleccion, Noticia\nfrom imagekit.admin import AdminThumbnail\n\nclass EleccionInline(admin.TabularInline):\n model = Eleccion\n extra = 3 # Aqui ndicamos la cantidad de \"slots\" que hay de elecciones, el \n # usuario puede agregar más si lo necesita\n\n\nclass PreguntaAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['texto_pregunta']}),\n (u'Información de fecha', {'fields': ['pub_date'], 'classes': ['collapse']}),\n ]\n inlines = [EleccionInline] # Le indicamos a Django que las elecciones se \n # cargan desde el admin de Pregunta\n list_display = ('texto_pregunta', 'pub_date', \n 'fue_publicada_recientemente')\n list_filter = ['pub_date']\n search_fields = ['texto_pregunta']\n date_hierarchy = 'pub_date'\n\nclass NoticiaAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'admin_thumbnail')\n admin_thumbnail = AdminThumbnail(image_field='imagen_thumbnail')\n\n\nadmin.site.register(Pregunta, PreguntaAdmin)\nadmin.site.register(Eleccion)\nadmin.site.register(Noticia, NoticiaAdmin)\n" }, { "alpha_fraction": 0.5976645350456238, "alphanum_fraction": 0.6114649772644043, "avg_line_length": 33.88888931274414, "blob_id": "534d1e8594bcceb6ad783893bbedfea13e859359", "content_id": "90699ce5c2622a4800e2b910e4ceb69ad36950fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1884, "license_type": "permissive", "max_line_length": 78, "num_lines": 54, "path": "/encuestas/models.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom ckeditor.fields import RichTextField\nfrom imagekit.models import ImageSpecField\nfrom imagekit.processors import ResizeToFill, ResizeToFit\n\nclass Pregunta(models.Model):\n texto_pregunta = models.CharField(max_length=200)\n pub_date = models.DateTimeField('fecha publicada')\n\n def __unicode__(self): # __str__ en Python 3\n return self.texto_pregunta\n\n def fue_publicada_recientemente(self):\n return self.pub_date >= timezone.now() - datetime.timedelta(days=1)\n\n fue_publicada_recientemente.admin_order_field = 'pub_date'\n fue_publicada_recientemente.boolean = True\n fue_publicada_recientemente.short_description = 'Publicada recientemente?'\n\n\nclass Eleccion(models.Model):\n pregunta = models.ForeignKey(Pregunta)\n texto_opcion = models.CharField(max_length=200)\n votos = models.IntegerField(default=0)\n\n def __unicode__(self): # __str__ en Python 3\n return self.texto_opcion\n\n class Meta:\n verbose_name_plural = \"elecciones\"\n\n\nclass Noticia(models.Model):\n titulo = models.CharField(max_length=200)\n contenidos = RichTextField()\n\n imagen = models.ImageField(upload_to='noticias', null=True, blank=True)\n\n imagen_thumbnail = ImageSpecField(source='imagen',\n processors=[ResizeToFill(50, 50)],\n format='JPEG',\n 
options={'quality': 60})\n    imagen_principal = ImageSpecField(source='imagen',\n                                      processors=[ResizeToFit(800)],\n                                      format='JPEG',\n                                      options={'quality': 80})\n\n    def __unicode__(self): # __str__ in Python 3\n        return self.titulo\n" }, { "alpha_fraction": 0.680477499961853, "alphanum_fraction": 0.6882022619247437, "avg_line_length": 34.625, "blob_id": "83ba5f9babc5efce56f8218fa0304e0e43186c70", "content_id": "2c405876c0d807116322c53e5a8dbd5870bf37b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "permissive", "max_line_length": 82, "num_lines": 40, "path": "/encuestas/views.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\n\nfrom encuestas.models import Eleccion, Pregunta\n\n\nclass IndexView(generic.ListView):\n    template_name = 'encuestas/index.html'\n    context_object_name = 'lista_ultimas_preguntas'\n\n    def get_queryset(self):\n        \"\"\"Returns the last 5 questions.\"\"\"\n        return Pregunta.objects.order_by('-pub_date')[:5]\n\nclass ResultadosView(generic.DetailView):\n    model = Pregunta\n    template_name = 'encuestas/resultados.html'\n\n\n\ndef votar(request, pregunta_id):\n    p = get_object_or_404(Pregunta, pk=pregunta_id)\n    try:\n        opcion_elegida = p.eleccion_set.get(pk=request.POST['eleccion'])\n    except (KeyError, Eleccion.DoesNotExist):\n        # Redisplay the voting form.\n        return render(request, 'encuestas/detalle.html', {\n            'pregunta': p,\n            'error_message': u\"No seleccionaste una opción.\",\n        })\n    else:\n        opcion_elegida.votos += 1\n        opcion_elegida.save()\n        # always return an HttpResponseRedirect after successfully\n        # dealing with POST data. 
This prevents the data from being\n        # posted twice if the user presses the Back button.\n        return HttpResponseRedirect(reverse('encuestas:resultados', args=(p.id,)))" }, { "alpha_fraction": 0.6452932953834534, "alphanum_fraction": 0.6603001356124878, "avg_line_length": 27.230770111083984, "blob_id": "7be7f5c8a8e61003292ea7390b13433faf8ca965", "content_id": "5272fc221242bd687e260b6abd23a36900500c11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "permissive", "max_line_length": 75, "num_lines": 26, "path": "/curso/encuestas/models.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.db import models\nfrom django.utils import timezone\n\nclass Pregunta(models.Model):\n    texto_pregunta = models.CharField(max_length=200)\n    pub_date = models.DateTimeField('fecha publicada') \n\n    def __unicode__(self): # __str__ in Python 3\n        return self.texto_pregunta\n\n    def fue_publicada_recientemente(self):\n        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)\n\n\n\nclass Eleccion(models.Model):\n    pregunta = models.ForeignKey(Pregunta)\n    texto_opcion = models.CharField(max_length=200)\n    votos = models.IntegerField(default=0)\n\n    def __unicode__(self): # __str__ in Python 3\n        return self.texto_opcion" }, { "alpha_fraction": 0.6255850195884705, "alphanum_fraction": 0.6271451115608215, "avg_line_length": 31, "blob_id": "bf843abb73ff37ee3631c2b24465f2f7a8bbc3c5", "content_id": "5e5571be144805b78f492b2024085642bb32eb68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "permissive", "max_line_length": 74, "num_lines": 20, "path": "/encuestas/urls.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.views.generic import DetailView, ListView\nfrom encuestas.models import Pregunta, Noticia\nfrom django.conf.urls import patterns, url\n\nfrom encuestas import views\n\nurlpatterns = patterns('',\n    url(r'^$', views.IndexView.as_view(), name='index'),\n    url(r'^noticias/$', ListView.as_view(model=Noticia), name='noticias'),\n    url(r'^(?P<pk>\\d+)/$',\n        DetailView.as_view(\n            model=Pregunta,),\n        name='detalle'),\n\n\n    url(r'^(?P<pk>\\d+)/resultados/$', views.ResultadosView.as_view(), \n        name='resultados'),\n    url(r'^(?P<pregunta_id>\\d+)/votar/$', views.votar, name='votar'),\n)\n\n" }, { "alpha_fraction": 0.524193525314331, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 16.714284896850586, "blob_id": "395ebe8b57d73ea4aab567e28930fe3104c9ace7", "content_id": "35e1a2b3c93ceb143dcd84405b299e05c884ae11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 124, "license_type": "permissive", "max_line_length": 27, "num_lines": 7, "path": "/requirements.txt", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "Django==1.7\nargparse==1.2.1\ndjango-ckeditor==4.4.6\ndjango-debug-toolbar==1.2.1\npytz==2014.7\nsqlparse==0.1.13\nwsgiref==0.1.2\n" }, { "alpha_fraction": 0.6760563254356384, "alphanum_fraction": 0.678223192691803, "avg_line_length": 32, "blob_id": "13b31b3e1832382f04583fd8ac5910f52f7b765c", "content_id": "d814554b513fbf24a52dffe08eadc467766a1cfa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "permissive", 
"max_line_length": 74, "num_lines": 28, "path": "/curso/urls.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "#!env/bin/python\n# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom django.conf import settings\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'curso.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^encuestas/', include('encuestas.urls', namespace=\"encuestas\")),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^ckeditor/', include('ckeditor.urls')),\n\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n)\n\n# Texto para poner al final del <title> de cada página.\nadmin.site.site_title = u'Administración del sitio cursoweb'\n\n# Texto a poner en los <h1> de todas las páginas.\nadmin.site.site_header = u'Administrador de Curso Web'\n\n# Texto a poner arriba de la página de index del admin\nadmin.site.index_title = u'Panel de control de Curso Web'" }, { "alpha_fraction": 0.5172684192657471, "alphanum_fraction": 0.523547887802124, "avg_line_length": 30.073171615600586, "blob_id": "76c65a8957b803b2b3ab81c8f40a63ddd2e25284", "content_id": "732ed21a6ae3a7b7e4b4c69fd86693e7df6b0417", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1274, "license_type": "permissive", "max_line_length": 114, "num_lines": 41, "path": "/encuestas/migrations/0001_initial.py", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Eleccion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('texto_opcion', models.CharField(max_length=200)),\n ('votos', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Pregunta',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('texto_pregunta', models.CharField(max_length=200)),\n ('pub_date', models.DateTimeField(verbose_name=b'fecha publicada')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='eleccion',\n name='pregunta',\n field=models.ForeignKey(to='encuestas.Pregunta'),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.4333333373069763, "avg_line_length": 14, "blob_id": "9c8d09b4ac09ae2b90b74f612f3c2059f60aaebe", "content_id": "d52595ffb3f0e44aa78ee80c423e3d4ffe609eb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "permissive", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "cursoweb/ejemplo-django", "src_encoding": "UTF-8", "text": "ejemplo-django\n==============\n" } ]
9
liangyuwei/Topology-1
https://github.com/liangyuwei/Topology-1
6369dc4ccec6ff9c876526f6184f547e3c336ed9
00e882f28041d8bd2c18f8b12aedccc1c8f127cc
8f68adc5a029154671fabad2f2fe2919b0cdc4e6
refs/heads/master
2020-04-28T10:41:33.993075
2019-03-12T01:13:54
2019-03-12T01:13:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6105263233184814, "alphanum_fraction": 0.6105263233184814, "avg_line_length": 22.75, "blob_id": "6bebdcdcbad145678c47c969bab121a19400c9c8", "content_id": "e5b7beaa7b39ac6eaa816093269b2466b7a72478", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 95, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/clean.sh", "repo_name": "liangyuwei/Topology-1", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm -f *.aux *.log *.pdf *.out\nrm -f lemmas_content.tex\nrm -f figs/*.bak figs/*.pdf\n" }, { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.7314285635948181, "avg_line_length": 42.75, "blob_id": "a59ba508751b6cbb562fd1effb04bc1544732d1a", "content_id": "a2799df39dd35c83cde607a630f157ddef716084", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1050, "license_type": "no_license", "max_line_length": 143, "num_lines": 24, "path": "/README.md", "repo_name": "liangyuwei/Topology-1", "src_encoding": "UTF-8", "text": "Topology by James Munkres, 2nd Edition\n======================================\n\nSolutions Manual\n----------------\n\nThe main solutions manual is `solutions.tex`.\n\nWhen doing exercises it can be useful to see a list of lemmas that have been written as part of the solutions.\nRunning the `lemmas.py` Python script will build `lemmas_content.tex` that contains these lemmas.\nYou can then build a lemma list document by compiling `lemmas.tex`, which includes `lemmas_content.tex`.\n\nAn up-to-date [PDF of this solutions manual](http://kyp4.dyndns-home.com/Topology.pdf) is built and published hourly as necessary.\n\nIf you are interested in contributing, send me a message.\n\nOther Solutions Manuals\n-----------------------\n\nThere are other solutions manuals floating around the Internet, and when studying mathematics it is always good to have different perspectives.\n\n[A nice online solutions manual](https://dbfin.com/topology/munkres/)\n\n[Another GitHub solutions manual project (includes both HTML and PDF)](https://github.com/9beach/munkres-topology-solutions)\n" }, { "alpha_fraction": 0.49072355031967163, "alphanum_fraction": 0.4944341480731964, "avg_line_length": 34.344261169433594, "blob_id": "51a3bfa76600068c615870810fc8f4ee43915bb0", "content_id": "99b2bc272f28655c57ae3e033cad083277844e54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6468, "license_type": "no_license", "max_line_length": 101, "num_lines": 183, "path": "/lemmas.py", "repo_name": "liangyuwei/Topology-1", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\"\"\"\nBuild lemmas document contents from other tex files.\n\nRun with -h for more information.\n\"\"\"\nfrom collections import namedtuple\nimport argparse\nimport os\n\n# Input directory\nidir = \"sections\" + os.sep\n\n# Output file name\nofname = \"lemmas_content.tex\"\n\n# Argument parsing\nparser = argparse.ArgumentParser(description=\"Builds lemmas document content from\\\nsection tex files as well as the theorems tex file. Output is written to \" + ofname + \".\")\nargs = parser.parse_args()\n\ndef get_arg(line, cmd) :\n \"\"\"\n Retuns the argument if the Latex cmd\n is found. 
Otherwise None is returned.\n    \"\"\"\n    lcmd = \"\\\\\" + cmd + \"{\"\n    if line.find(lcmd) != 0 :\n        return None\n    j = line.find(\"}\")\n    if j < 0 :\n        return None\n    return line[len(lcmd):j]\n\nsargs = (\"lem\", \"cor\", \"defin\", \"thrm\")\ncounter_map = {\"section\" : \"chapter\",\n               \"subsection\" : \"section\"}\ndef process_file(fpath, ofile, headers=False, shared=None) :\n    \"\"\"\n    Processes latex file to extract statements\n    \n    fpath   - Path to .tex file\n    ofile   - Output file object\n    headers - Increment counters for header lines\n    shared  - Name of shared statement file we are\n              reading (None indicates we are not in\n              a shared statement)\n    \"\"\"\n    # Open up input file for reading\n    with open(fpath, \"r\") as ifile :\n        # Whether or not we are in a statement or comment block\n        ins = False\n        inc = False\n        mdef = 0\n\n        # Go through each line in the file\n        for line in ifile :\n            # We don't care about whitespace at the beginning or end of the line\n            sl = line.strip()\n\n            # Are we in a commented out block?\n            if inc :\n                if sl.find(r\"\\fi\") == 0 :\n                    inc = False\n                continue\n\n            # Are we in a statement block?\n            if ins :\n                # Pass the line through\n                print(sl, file=ofile)\n\n                # Are we ending the statement?\n                if get_arg(sl, \"end\") in sargs :\n                    # Print a blank line to leave some spaces\n                    print(\"\", file=ofile)\n                    ins = False\n            else :\n                # No, only pass through certain lines pertaining to the statements\n                earg = get_arg(sl, \"exercise\")\n                iarg = get_arg(sl, \"input\")\n                scarg = get_arg(sl, \"setcounter\")\n                if (sl.find(r\"\\def\") == 0 and sl.find(\"{\") >= 0) or mdef > 0 :\n                    # Include any macro definitions\n                    print(sl, file=ofile)\n                    if sl[-1] == \"{\" :\n                        # Multiline def\n                        mdef += 1\n                    elif sl[0] == \"}\" :\n                        # End multiline def\n                        mdef -= 1\n                elif sl.find(r\"\\iffalse\") == 0:\n                    # Start of comment block\n                    inc = True\n                    continue\n                elif earg :\n                    # Exercise line, set counters\n                    e = int(earg)\n                    print(r\"\\setcounter{section}{\" + str(e-1) + \"}\\stepcounter{section}\", file=ofile)\n                elif iarg and iarg.find(\"shared\") == 0 :\n                    # Input line for shared statement\n                    process_file(iarg + \".tex\", ofile, shared=iarg.split(\"/\")[-1])\n                elif get_arg(sl, \"begin\") in sargs :\n                    # We are starting a statement, verify that no content is also on this line\n                    if sl[-1] != \"}\" :\n                        raise ValueError(\"Statement has content on the same line!\")\n\n                    add = \"\"\n\n                    # Add shared tag if applicable\n                    if shared :\n                        add += \"[Shared: \" + shared.replace(\"_\", r\"\\textunderscore \") + \"] \"\n\n                    # Determine the label tag (if there is one)\n                    lbl = \"label\"\n                    i = sl.find(\"\\\\\" + lbl)\n                    if i >= 0 :\n                        larg = get_arg(sl[i:], lbl)\n                        if larg :\n                            add += r\"\\{\" + larg + r\"\\}\"\n                    print(sl + add, file=ofile)\n\n                    ins = True\n                elif scarg :\n                    # Set counter line\n                    print(sl.replace(scarg, counter_map[scarg]), file=ofile)\n                elif headers :\n                    # Search for counter lines to increment counters\n                    for hdr in counter_map.keys() :\n                        if sl.find(\"\\\\\" + hdr) == 0 :\n                            print(r\"\\stepcounter{\" + counter_map[hdr] + \"}\", file=ofile)\n\n# Section file record\nSection = namedtuple(\"Section\", (\"sec\", \"fname\"))\n    \n# Open up output file for writing\nwith open(ofname, \"w\") as ofile :\n    # Go through files in the current directory\n    secs = []\n    for fname in sorted(os.listdir(idir)) :\n        # We only care about certain tex files\n        (bname, ext) = os.path.splitext(fname)\n        if ext != \".tex\" :\n            continue\n        if bname.find(\"sec_\") != 0 :\n            continue\n        secs.append(Section(\"_\".join(bname.split(\"_\")[1:]), idir + fname))\n\n    # Sort sections and go through 
them\n    print(r\"\\section{Solutions}\", file=ofile)\n    for sec in sorted(secs, key=lambda s : s.sec) :\n        print(\"Processing \" + sec.fname + \"...\")\n\n        # Is this a special section with text?\n        tsec = False\n        try :\n            slab = int(sec.sec)\n        except :\n            slab = sec.sec.split(\"_\")[1]\n            tsec = True\n        \n        # Set section counter\n        if not tsec :\n            # Normal numbered section\n            print(r\"\\setcounter{chapter}{\" + str(slab) + \"}\", file=ofile)\n        else :\n            # Special text section\n            print(r\"\\renewcommand\\thechapter{\" + slab + \"}\", file=ofile)\n\n        # Process section file\n        process_file(sec.fname, ofile)\n\n        # If it was a text section, restore numbered sections\n        if tsec :\n            print(r\"\\renewcommand\\thechapter{\\arabic{chapter}}\", file=ofile)\n        \n    # Go through theorems\n    \"\"\"\n    print(\"Processing theorems...\")\n    print(r\"\\setcounter{chapter}{0}\", file=ofile)\n    print(r\"\\setcounter{section}{1}\", file=ofile)\n    print(r\"\\section{Theorems}\", file=ofile)\n    process_file(\"theorems.tex\", ofile, headers=True)\n    \"\"\"\n" } ]
3
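For orientation on lemmas.py above: get_arg() only matches a LaTeX command at the very start of a stripped line and returns its first brace argument. A quick check, with the function copied out of lemmas.py (importing the module directly would run its top-level file loop):

```python
def get_arg(line, cmd):  # copied from lemmas.py above for a stand-alone check
    lcmd = "\\" + cmd + "{"
    if line.find(lcmd) != 0:
        return None
    j = line.find("}")
    if j < 0:
        return None
    return line[len(lcmd):j]

print(get_arg(r"\begin{lem} extra", "begin"))  # -> lem
print(get_arg(r"text \begin{lem}", "begin"))   # -> None (not at line start)
print(get_arg(r"\label{lem:foo}", "label"))    # -> lem:foo
```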
masenmatthews/Airship-takehome
https://github.com/masenmatthews/Airship-takehome
f260f34184b552eb997070ede9b5e7c20f9d8f14
9df43daf01d102c8df828dc872d5f6088a508ff6
503feb72e380090b9e58c11b1cd317d6338dab1f
refs/heads/master
2020-05-27T18:22:51.689282
2019-05-26T23:19:42
2019-05-26T23:19:42
188,740,848
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7835926413536072, "alphanum_fraction": 0.7861385941505432, "avg_line_length": 71.14286041259766, "blob_id": "239b18c297ac778163dd1e9258a355ee1bf0fc28", "content_id": "9cf8b7e0d3efe08145658eb347eb4b78d4d0783d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3535, "license_type": "no_license", "max_line_length": 558, "num_lines": 49, "path": "/README.md", "repo_name": "masenmatthews/Airship-takehome", "src_encoding": "UTF-8", "text": "# Airship Take Home Exercise\n\n## Position: Technical Implementation Consultant\n## Applicant: Masen Matthews\n\n## Technologies and Overview\n\nThis project was built using Python 3.7. It utilizes JSON, CSV, and OS functionalities that are built into Python. It also utilizes the Pandas library for data presentation.\n\nThe first piece of the project (convert.py) takes the JSON information that was provided at the start of the exercise (see \"reports.txt\") and converts it into a readable CSV file. By doing this, the script renames the header information and removes the excess JSON syntax. The new CSV information is then stored in the reports_new.csv file. \n\nThe second piece of the project (read.py) uses the Pandas library to read the information from the newly saved CSV file. I chose to go with this because the library presents the information in a simple, readable way in the terminal. There are a lot of customization options available with this library, but I chose to keep things simple for this exercise.\n\n## Setup instructions\n\n1. Copy the repository to the desktop or another directory\n2. Open the terminal and navigate to the airship_takehome directory\n3. Run the following command in the terminal to install Pandas:\n\n<pre><code>pip install pandas</code></pre>\n\nIf you have multiple versions of Python installed, you may need to run this command instead:\n\n<pre><code>pip3 install pandas</code></pre>\n\n4. Run the following command to convert the information from reports.txt. This will convert the JSON data in reports.txt to CSV. It will then populate the reports_new.csv file with the information. \n\n<pre><code>python convert.py</code></pre>\n\n5. Run the following command to view the contents of the newly created reports_new.csv file in the terminal.\n\n<pre><code>python read.py</code></pre>\n\n6. If you would prefer to view the data in a spreadsheet program, simply open the newly populated reports_new.csv file in Excel, Numbers, Google Sheets, or another application of your choice. \n\n## Sample email response to client\n\nHi Reilly,\n\nI received your inquiry regarding the reports that you're having issues with. I went ahead and put together a small Python program in hopes of making the data more readable and presentable for you. After running the scripts in the program, you'll be able to view your information in the terminal or in a formatted spraedsheet. You can download the program here: (insert link to program). There isn't much that needs to be done in terms of setup and there are detailed instructions in the README file. \n\nIn terms of web notifications, there are a few resources that I would suggest starting with. You can start by getting a demo of web notifications by signing up here: https://www.airship.com/platform/channels/web-notifications/#. If you're interested in skipping the demo and want to do a self-driven test of the service, we have a tutorial available here: https://docs.airship.com/tutorials/messages/web-push-notification/. 
Outside of that, we have a more complete set of documentation available here: https://docs.airship.com/platform/web/getting-started/. \n\nIf you have any questions or problems regarding the Python program or our Web Notify service, please let me know. I would be happy to jump on a call with you and do a walkthrough of the program or the Web Notify tutorial. Have a great afternoon!\n\nBest,\nMasen Matthews\n\n* Note - if I was writing this in an email client rather than a README file, I would do direct hyperlinks to the resources outlined above rather than pasting the links in. " }, { "alpha_fraction": 0.4992295801639557, "alphanum_fraction": 0.4992295801639557, "avg_line_length": 28.5, "blob_id": "7f2c228c7cc3c33be2b08ffbd461fdbc19adf121", "content_id": "7749b635f4f76960623c8c137f1d27db3ca30e36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 99, "num_lines": 22, "path": "/convert.py", "repo_name": "masenmatthews/Airship-takehome", "src_encoding": "UTF-8", "text": "import json\nimport csv\nimport os\n\ndef main():\n with open('reports.txt') as k:\n reports_parsed = json.loads(k.read())\n f = csv.writer(open(\"reports_new.csv\", \"w\"))\n \n f.writerow([\"Timestamp\", \"User Information\", \"Event Name\", \"Number of Events\", \"Event ID\"])\n\n for x in reports_parsed:\n f.writerow([x[\"timestamp\"],\n x[\"user_info\"],\n x[\"event_name\"],\n x[\"num_of_events\"],\n x[\"event_id\"]])\n\n print(\"JSON to CSV conversion successful! Check your reports_new.csv file.\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5703703761100769, "alphanum_fraction": 0.5703703761100769, "avg_line_length": 14, "blob_id": "744a4cc24ff334ee2bad9c2851a236b4821cb6cb", "content_id": "03ed025b466135c7e1ff3b78d869ae85a1aec37a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/read.py", "repo_name": "masenmatthews/Airship-takehome", "src_encoding": "UTF-8", "text": "import pandas\nimport csv\n\ndef main():\n df = pandas.read_csv('reports_new.csv')\n print(df)\n\nif __name__ == \"__main__\":\n main()\n" } ]
3
ohsean93/final-study-back
https://github.com/ohsean93/final-study-back
effb8e4a3ab0daaade8791689faf87adf1fd0168
6d48168d6f15bccb26f592dbc6aefc4edfdfa193
ab10ada518a0963c6e0f36f1afc8d86dc0305315
refs/heads/master
2020-09-13T00:47:51.169578
2019-11-20T08:04:50
2019-11-20T08:04:50
222,609,598
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.690405547618866, "alphanum_fraction": 0.7052423357963562, "avg_line_length": 28.735294342041016, "blob_id": "7ab436ce3715732f6641f3d6abb2a030fd37da9c", "content_id": "8746fb3bd134b72762884cff4088acaa2f17a35e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 57, "num_lines": 34, "path": "/todos/views.py", "repo_name": "ohsean93/final-study-back", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom .serializers import TodoSerializers, UserSerializers\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom django.contrib.auth import get_user_model\n\n# Create your views here.\n@api_view(['POST'])\ndef todo_create(request):\n serializer = TodoSerializers(data=request.POST)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(status=400)\n\n\n@api_view(['GET'])\ndef user_detail(request, pk):\n User = get_user_model()\n user = get_object_or_404(User, pk=pk)\n if request.user != user:\n return Response(status=404)\n\n serializer = UserSerializers(user)\n return Response(serializer.data)\n\n\n# @api_view(['get'])\n# def todo_create(request):\n# serializer = TodoSerializers(data=request.POST)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data)\n# return Response(status=400)\n" } ]
1
sand-ci/Analytics
https://github.com/sand-ci/Analytics
45eb0800b90aaa0a08edd3c941ac2b45d29840c4
c2e589992f97658aa7d5aee4f807f16912007312
d4fe02078657479be24f2a5ad43aac45692cea6c
refs/heads/master
2021-07-17T22:53:00.035776
2020-08-11T19:20:41
2020-08-11T19:20:41
201,978,279
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.29519280791282654, "alphanum_fraction": 0.2986105978488922, "avg_line_length": 26.69341468811035, "blob_id": "c1d99d7a02bd579086dc1b2df53e7278d3d24918", "content_id": "8f13210804a2872943f0d28b594f4f0b7596a598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13459, "license_type": "no_license", "max_line_length": 92, "num_lines": 486, "path": "/Utility_Modules/elasticqueries.py", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "import Utility_Modules.r_utils as ut\n\n\ndef getUniqueCount(es, index, field, time_from, time_to):\n '''\n Get Unique Count returns the distinct count of the field in the index.\n\n es: ElasticSearch Connection Object\n Field : Attribute In the Index (String)\n Index: Index in which we want ot search the field.\n '''\n\n query = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"timestamp\": {\n \"gte\": time_from,\n \"lte\": time_to,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n \"src_production\": {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"dest_production\": {\n \"value\": \"true\"\n }\n }\n }\n ]\n }\n },\n 'aggs': {\n 'uniq_val': {\n 'cardinality': {\n 'field': field,\n }\n }\n }\n }\n\n try:\n result = es.search(index='ps_trace', body=query)\n val = result['aggregations']['uniq_val']['value']\n return val\n except Exception as e:\n print(e)\n return None\n\n\ndef getUniqueCountBy(es, index, field, time_from, time_to):\n '''\n Get Unique Count returns the distinct count of the for each value of field in the index.\n Field : Attribute In the Index (String)\n Index: Index in which we want ot search the field.\n\n Prints the number of buckets that will be returned.\n '''\n sz = getUniqueCount(es, index, field, time_from, time_to)\n print(\"Size : {}\".format(sz))\n\n query = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"timestamp\": {\n \"gte\": time_from,\n \"lte\": time_to,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n 'src_production': {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"dest_production\": {\n \"value\": \"true\"\n }\n }\n }\n ]\n }\n },\n \"aggs\": {\n \"FieldCounts\": {\n \"terms\": {\n \"field\": field,\n \"size\": sz\n }\n }\n }\n }\n\n try:\n result = es.search(index=index, body=query)\n val = result['aggregations']['FieldCounts']['buckets']\n return val\n except Exception as e:\n print(e)\n return None\n\n\ndef getNumHashesBetweenHostsInTimeRange(es, index, time_from, time_to):\n '''\n es: Elastic Search connection object\n index: Index to be searched/scanned within\n Time Range\n Time Format: epoch_milliseconds\n '''\n\n pre_query = {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"timestamp\": {\n \"gte\": time_from,\n \"lte\": time_to,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n \"src_production\": {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"dest_production\": {\n \"value\": \"true\"\n }\n }\n }\n ]\n }\n },\n \"size\": 0,\n \"aggs\": {\n\n \"uniq_val\": {\n \"cardinality\": {\n \"script\": {\n \"source\": \"doc['src_host'].value + ',' + doc['dest_host'].value\",\n \"lang\": \"painless\"\n }\n }\n }\n }\n }\n\n pre_result = es.search(index, body=pre_query)\n sz = pre_result['aggregations']['uniq_val']['value']\n print(\"Number of Source-Destination Pairs: \", sz)\n\n query = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n 
\"timestamp\": {\n \"gte\": time_from,\n \"lte\": time_to,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n 'src_production': {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"dest_production\": {\n \"value\": \"true\"\n }\n }\n }\n ]\n }\n },\n \"aggs\": {\n \"uniq_val\": {\n \"terms\": {\n \"script\": {\n \"source\": \"doc['src_host'].value + ',' + doc['dest_host'].value\",\n \"lang\": \"painless\"\n },\n \"size\": sz,\n },\n \"aggs\": {\n \"uniq_hash\": {\n \"cardinality\": {\n \"field\": \"hash\"\n }\n }\n }\n }\n }\n }\n\n X = es.search(index, body=query, request_timeout=60)\n\n return X\n\n\ndef getDailyUniquePaths(es, index, src, dest, since):\n \"\"\"\n Get number of unique paths from \n src : Source (String)\n dest: Destination (String) \n since: how many past days\n \"\"\"\n toDate = ut.getDateFormat(delta=1)\n fromDate = ut.getDateFormat(delta=since)\n\n query = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"timestamp\": {\n \"gte\": fromDate,\n \"lte\": toDate,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n \"src_host\": {\n \"value\": src\n }\n }\n },\n {\n \"term\": {\n \"dest_host\": {\n \"value\": dest\n }\n }\n },\n {\n \"term\": {\n \"src_production\": \"true\"\n }\n },\n {\n \"term\": {\n \"dest_production\": \"true\"\n }\n }\n ]\n }\n },\n \"aggs\": {\n \"time_hist\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"interval\": \"day\"\n },\n \"aggs\": {\n \"uniq_hash\": {\n \"cardinality\": {\n \"field\": \"hash\"\n }\n }\n }\n }\n }\n }\n\n return es.search(index, body=query)\n\n\ndef getSourceDestinationPairs(es, index):\n \"\"\"\n Get all source and destination pairs\n present in the given time range \n\n Args:\n es: Elasticsearch \n index: INdex in Elastic Search\n\n Returns:\n Datafame of all source destination pairs\n \"\"\"\n\n query = {\n \"size\": 0,\n \"aggs\": {\n \"sources\": {\n \"terms\": {\n \"field\": \"src\",\n \"size\": 9999\n },\n \"aggs\": {\n \"destinations\": {\n \"terms\": {\n \"field\": \"dest\",\n \"size\": 9999\n }\n }\n }\n }\n }\n }\n print(index, query)\n data = es.search(index, body=query)\n\n sources = []\n destinations = []\n\n for source in data['aggregations']['sources']['buckets']:\n src = source['key']\n for destination in source['destinations']['buckets']:\n sources.append(src)\n destinations.append(destination['key'])\n\n return {\"Source\": sources, \"Destinations\": destinations}\n\n\ndef getPathCounts(es, src_ip, dest_ip):\n \"\"\"\n Returns a list of Counts of Paths taken from given source and destination\n\n Args:\n src_ip: Source IP, String [ex: \"192.168.1.1\"]\n dest_ip: Destination IP, String [ex: \"192.168.1.5\"]\n\n Returns:\n A list of dictionaries. 
The dictionary looks as follows:\n {\n 'key':HASH VALUE,\n 'doc_count': # of times path taken\n }\n \"\"\"\n\n query = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"src\": {\n \"value\": src_ip\n }\n }\n },\n {\n \"term\": {\n \"dest\": {\n \"value\": dest_ip\n }\n }\n },\n ]\n }\n },\n \"aggs\": {\n \"HashCounts\": {\n \"terms\": {\n \"field\": \"hash\",\n \"size\": 9999\n }\n }\n }\n }\n\n data_flag = 0\n while data_flag == 0:\n try:\n data = es.search('ps_derived_complete_traces', body=query)\n data_flag = 1\n except Exception:\n print(\"ERROR in getPathCounts\", src_ip, dest_ip, \"\\n\")\n\n paths = data[\"aggregations\"][\"HashCounts\"][\"buckets\"]\n\n if len(paths) == 0:\n return -1\n else:\n return paths\n\n\ndef getPathReadTime(es, path, time_to, time_from, size):\n \"\"\"\n Gets the timestamps for the hash provided in the given time range\n\n Args:\n es: Elastic Search Object\n path: Hashed value of the path\n time_to: Time range start in epoch millisecond\n time_from: Time range end in epoch millisecond\n size: Number of readings for the path\n Returns:\n A list of time-stamps (epoch_millis) on which the path was recorded \n \"\"\"\n\n query = {\n \"_source\": ['timestamp'],\n \"size\": size,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"timestamp\": {\n \"gte\": time_from,\n \"lte\": time_to,\n \"format\": \"epoch_millis\"\n }\n }\n },\n {\n \"term\": {\n \"src_production\": {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"dest_production\": {\n \"value\": \"true\"\n }\n }\n },\n {\n \"term\": {\n \"hash\": {\n \"value\": path\n }\n }\n }\n ]\n }\n }\n }\n\n data_flag = 0\n while data_flag == 0:\n try:\n data = es.search('ps_trace', body=query, filter_path=[\n 'hits.hits._source.timestamp'])\n data_flag = 1\n except Exception:\n print(\"Error in PairPaths | src:{} | dest:{}\".format(src, dest))\n\n results = []\n\n for hit in data['hits']['hits']:\n results.append(hit['_source']['timestamp'])\n\n return results\n" }, { "alpha_fraction": 0.7658227682113647, "alphanum_fraction": 0.7658227682113647, "avg_line_length": 51.66666793823242, "blob_id": "b5751d7aedd24c8934ffcd9a69bbfdae1f489a0a", "content_id": "22abda473df94a4df1ccb05ffb097f4ea9dda652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 316, "license_type": "no_license", "max_line_length": 154, "num_lines": 6, "path": "/README.md", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "# SAND-Analytics\n--------\n\nWe're trying to detect the nodes in the network which cause packet loss. For that we're gonna start by analyzing the ps-trace data. \n\nIn this pursuit of this we try to detect and collapse equal cost multi-paths. 
Impute/Remove the missing data and then train some ML models for prediction.\n" }, { "alpha_fraction": 0.5380533933639526, "alphanum_fraction": 0.5506768822669983, "avg_line_length": 29.193370819091797, "blob_id": "94a5a0da8ce5e9dcc02b18f153277ab64d1d881a", "content_id": "b567a39309c70ab5028d90e53ba76f6abf3942fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5466, "license_type": "no_license", "max_line_length": 110, "num_lines": 181, "path": "/markStable.py", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "from elasticsearch import Elasticsearch\nfrom time import time, strftime, localtime\nfrom datetime import datetime, timedelta\nfrom itertools import islice\nimport numpy as np\nimport pandas as pd\nimport json\nimport multiprocessing as mp\n\nimport Utility_Modules.r_utils as ut\nimport Utility_Modules.elasticqueries as qrs\n\ndef consumeIter(iterator, n = None):\n \"Advance the iterator n-steps ahead. If n is None, consume entirely.\"\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n collections.deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)\n\n \ndef markStablePairPaths(es, src, dest, path_dict, threshold):\n \"\"\"\n Marks a single pair paths stable/unstable\n \n Args:\n es: ElasticSearch object\n src: Source IP\n dest: dest IP\n path_dict: Dictionary of the paths between the pair. [Results will be stored in this dict only]\n threshold: Amount of Consequetive reading to consider a path stable\n \n Returns:\n None\n \n \"\"\"\n query = {\n \"_source\":['hash', 'timestamp'],\n \"size\":9999,\n \"query\":{\n \"bool\":{\n \"must\":[\n {\"term\":{\"complete\":{\"value\":1}}},\n {\"term\":{\"src\":{\"value\":src}}},\n {\"term\":{\"dest\":{\"value\":dest}}}\n ]\n }\n }\n }\n \n \n times = []\n paths = []\n\n is_page = 0\n while is_page == 0:\n try:\n page = es.search(index = 'ps_derived_complete_traces', body = query, scroll='2m', size=1000)\n is_page = 1\n except:\n print(\"Error in retreiving timestamp data for the pair, retrying !:\")\n sleep(0.1)\n \n sid = page['_scroll_id']\n scroll_size = page['hits']['total']['value']\n \n while scroll_size > 0:\n for res in page['hits']['hits']:\n times.append(res['_source']['timestamp'])\n paths.append(res['_source']['hash'])\n is_page = 0\n while is_page == 0:\n try:\n page = es.scroll(scroll_id = sid, scroll='2m')\n is_page = 1 \n except:\n print(\"Error in retreiving timestamp data for the pair, retrying !:\")\n sleep(0.1)\n sid = page['_scroll_id']\n scroll_size = len(page['hits']['hits'])\n \n data_frame = pd.DataFrame({\"Time\":times, \"Path\":paths}).sort_values(by=['Time'])\n data_iterator = iter(range(data_frame.shape[0]-threshold))\n for indx in data_iterator:\n flag = 0\n if path_dict.get(data_frame.iloc[indx,1]) != 1:\n for i in range(indx,indx+threshold-1):\n if data_frame.iloc[i,1] != data_frame.iloc[i+1,1]:\n flag = 1\n break\n if flag == 0:\n path_dict[data_frame.iloc[indx,1]] = 1\n consumeIter(data_iterator, threshold)\n\ndef markStable(args): \n \"\"\"\n Marks paths between pairs as stable or unstable\n \n Args:\n pairs : Pandas df contaning columns containing src and dest\n threshold : Amount of readings per hour\n \n Returns:\n List of Dictionaries of type:\n {\n \"source\":<SRC>,\n \"destination\":<DEST>,\n \"paths\":{P1:1, P2:0 .... 
Pn:1}\n }\n \"\"\"\n \n pair, threshold, thread_id = args[0], args[1], args[2]\n print(\"Thread : {} , Processing: {} Pairs\".format(thread_id, pair.shape[0]))\n paths_stability = []\n \n start_time = time()\n for indx in range(pair.shape[0]):\n temp_res = {\n \"source\":pair.iloc[indx,0],\n \"destination\":pair.iloc[indx,1],\n \"path_dict\":{}\n }\n p_dict_t = qrs.getPathCounts(es, pair.iloc[indx,0], pair.iloc[indx,1])\n p_dict= {path['key']:0 for path in p_dict_t}\n \n markStablePairPaths(es, pair.iloc[indx,0], pair.iloc[indx,1], path_dict=p_dict, threshold=threshold)\n \n temp_res['path_dict'] = p_dict\n paths_stability.append(temp_res)\n \n if indx % 25 == 0:\n mins, secs = divmod(time()-start_time, 60)\n print(\"Thread : {} | Processed : {} pairs | Elapsed: {}m {}s\".format(thread_id, indx, mins, secs))\n \n return paths_stability\n\n\nif __name__ == \"__main__\":\n\n user = None\n passwd = None\n \n if user is None and passwd is None:\n with open(\"creds.key\") as f:\n user = f.readline().strip()\n passwd = f.readline().strip()\n\n credentials = (user, passwd)\n es = Elasticsearch(['atlas-kibana.mwt2.org:9200'], timeout = 180, http_auth=credentials)\n\n if es.ping() == True:\n print(\"Connection Successful\") \n else: \n print(\"Connection Unsuccessful\")\n\n #Getting the Pairs:\n\n pairs = qrs.getSourceDestinationPairs(es, 'ps_derived_complete_traces')\n pairs = pd.DataFrame(pairs)\n\n print(\"pairs Retreived\")\n\n THRESHOLD = 5\n n_threads = 16\n pair_pieces = np.array_split(pairs, n_threads)\n\n pool = mp.Pool(n_threads)\n results = pool.map(markStable, [[pair_pieces[i], THRESHOLD, i+1] for i in range(n_threads)])\n\n pool.join()\n pool.close()\n\n result = []\n\n for i in results:\n result += i\n\n with open(\"Results.json\") as f:\n f.write(json.dumps({\"PathStability\":result}))\n\n" }, { "alpha_fraction": 0.4868665337562561, "alphanum_fraction": 0.5054391026496887, "avg_line_length": 27.34586524963379, "blob_id": "ce8ffb5b9fac69176bf8eba7afe32a546aa11b80", "content_id": "9473ec455bcadb3de4ce6395e3caf733b86c0803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3769, "license_type": "no_license", "max_line_length": 124, "num_lines": 133, "path": "/reindex.py", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "from elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\nfrom time import time, sleep\nimport multiprocessing as mp\n\n\ndef process_row(row):\n looping = 0\n hops_complete = 0 \n if len(set(row['hops'])) < row['n_hops']:\n looping = 1\n if len(row['hops']) and row['hops'][-1] == row['dest']:\n hops_complete = 1\n \n row['looping'] = looping\n row['complete'] = hops_complete\n \n return row\n\n\ndef process_data(params):\n \n thread_id = params[0]\n time_from = params[1]\n time_to = params[2]\n \n query = {\n \"_source\":['timestamp','src','dest','traceroute','hops','n_hops','rtts','hops'],\n \"query\":{\n \"bool\":{\n \"must\":[\n {\"term\":{\"src_production\":{\"value\":'true'}}},\n {\"term\":{\"dest_production\":{\"value\":'true'}}},\n {\"range\":{\"timestamp\":{\n \"lte\":str(time_to),\n \"gte\":str(time_from),\n \"format\":\"epoch_millis\"\n }}}\n ]\n }\n }\n }\n \n start_time = time()\n is_page = 0\n while is_page == 0:\n try:\n page = es.search(index = 'ps_trace', scroll = '2m', size = 1000, body = query)\n is_page = 1\n except Exception:\n print(\"Error !, getting page. 
Retrying\")\n sleep(0.01)\n \n sid = page['_scroll_id']\n scroll_size = page['hits']['total']['value']\n print(\"Batch ID: {} Processing : {} documents\".format(thread_id, scroll_size))\n i = 0\n while (scroll_size > 0):\n actions = [process_row(result['_source']) for result in page['hits']['hits']]\n bulk_push = 0\n while bulk_push != 1:\n try:\n bulk(es, actions=actions, index='ps_derived_trace', doc_type='doc')\n bulk_push = 1\n except Exception:\n print(\"Bulk Push Error, Retrying !\")\n sleep(0.10)\n is_page = 0\n while is_page == 0:\n try:\n page = es.scroll(scroll_id = sid, scroll = '2m')\n is_page = 1\n except Exception:\n print(\"Error! Getting Page, Retrying\")\n sleep(0.10)\n \n sid = page['_scroll_id']\n scroll_size = len(page['hits']['hits'])\n if i % 25 == 0:\n total_time = time() - start_time\n mins, secs = divmod(total_time, 60)\n print(\"Thread Id: {:3d} | Iteration: {:3d} |Time Elapsed: {:4.0f}m {:4.4f}s\".format(thread_id, i+1, mins, secs))\n \n i += 1\n\n\nif __name__ == \"__main__\":\n\n user = None\n passwd = None\n \n if user is None and passwd is None:\n with open(\"creds.key\") as f:\n user = f.readline().strip()\n passwd = f.readline().strip()\n\n credentials = (user, passwd)\n es = Elasticsearch(['atlas-kibana.mwt2.org:9200'], timeout = 180, http_auth=credentials)\n\n if es.ping() == True:\n print(\"Connection Successful\") \n else: \n print(\"Connection Unsuccessful\")\n\n \n # Saved Time Range\n with open(\"times.txt\") as f:\n time_to = float(f.readline().strip())\n time_from = float(f.readline().strip())\n\n # Window of size 4 days to process by a processor\n window_millis = 4*24*60*60*1000\n\n # Creating Batches of 4 days each in the time range\n batches = []\n i = 1\n while time_from < time_to:\n batches.append((i, time_from, time_from+window_millis))\n time_from += window_millis\n i += 1\n \n for batch in batches:\n process_data(batch)\n \n \"\"\"\n n_threads = len(batches)\n\n pool = mp.Pool(n_threads)\n results = pool.map(process_data, batches)\n\n pool.close()\n pool.join()\n \"\"\"" }, { "alpha_fraction": 0.501520574092865, "alphanum_fraction": 0.5053550004959106, "avg_line_length": 31.88260841369629, "blob_id": "d6073e7446327bd6ddf4615517682c5b2ecc0f27", "content_id": "0357746911f3adaaefd80b8cc26d455e69c2250f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7563, "license_type": "no_license", "max_line_length": 165, "num_lines": 230, "path": "/Utility_Modules/kibana_to_neo_pipe.py", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "from itertools import chain\nimport certifi\nimport sys\n\nfrom elasticsearch import Elasticsearch\nimport neo4j\nfrom neo4j import GraphDatabase\n\nimport pandas as pd\nimport numpy as np\n\nimport Utility_Modules.r_utils as ut\n\nclass KibanaExtractor:\n \"\"\"\n This class helps get data from Kibana (Elastic Search) to be sent Neo4j.\n \"\"\"\n \n def __init__(self, host_addresss,usrname, passwd, timeout = 90):\n \"\"\"\n Initialize the Connection to Neo4j\n\n Args:\n host_address: Address for the ElasticSearch Instance\n usrname: Username for Authentication\n passwd : Password for Authentication\n timeout: Connection Timeout in Seconds, Default: 90s\n Returns:\n None\n \n Raises:\n None\n \"\"\"\n self.es = Elasticsearch([host_addresss], http_auth = (usrname, passwd) ,timeout = 90, scheme = 'ssl')\n self.data = None\n self.nodes = None\n self.paths = None\n\n def getdata(self, src_ip, dest_ip, since = 90, toDate=None, fromDate=None):\n \"\"\"\n 
\"\"\"\n self.src = src_ip\n self.dest = dest_ip\n if toDate is None and fromDate is None:\n toDate = ut.getDateFormat()\n fromDate = ut.getDateFormat(delta=since)\n elif toDate is not None or fromDate is not None:\n print(\"Please provide both to and from date in epoch miliseconds. Or don't provide either\")\n\n query = {\n \"size\":9999,\n \"_source\":[\"hops\"],\n \"query\":{\n \"bool\":{\n \"must\":[\n {\n \"range\":{\n \"timestamp\":{\n \"gte\":fromDate,\n \"lte\":toDate,\n \"format\":\"epoch_millis\"\n }\n }\n },\n {\n \"term\":{\n \"src\":{\n \"value\":src_ip\n }\n }\n },\n {\n \"term\":{\n \"dest\":{\n \"value\":dest_ip\n }\n }\n }\n ]\n }\n }\n } \n\n data = self.es.search('ps_trace', body=query)['hits']['hits']\n\n path_dict = {}\n\n for result in data:\n tmp = \",\".join(result['_source']['hops'])\n try:\n path_dict[tmp] += 1\n except:\n path_dict[tmp] = 1\n \n self.data = path_dict\n\n def getuniquenodes(self):\n if self.nodes is not None:\n return self.nodes\n else:\n if self.get_paths() is None:\n return\n self.nodes = set([i for i in chain.from_iterable(self.paths) if len(i)>0])\n\n return self.nodes\n\n def get_paths(self, k = 0):\n if self.data is None:\n print(\"Please call getData() before using this function\")\n return None\n paths = []\n for key in self.data.keys():\n if self.data[key] > k:\n paths.append([\"SOURCE : \"+self.src]+key.split(\",\")+[\"DESTINATION : \"+self.dest])\n self.paths = paths\n return paths\n\nclass NeoInjector:\n '''\n '''\n\n def __init__(self, host_address, usrname, passwd): \n \"\"\"\n Initialize the Connection to Neo4j\n\n Args:\n host_address: Bolt address for the Neo4j Instance\n usrname: Username for Authentication\n passwd : Password for Authentication\n\n Returns:\n None\n \n Raises:\n ServiceUnavailableError: When the neo4j service isn't available on the host specified\n Exception: All other address errors\n \"\"\"\n try:\n self.neo = GraphDatabase.driver(uri=host_address, auth=(usrname, passwd))\n except neo4j.ServiceUnavailable as err:\n print(err,file=sys.stderr)\n except Exception as err:\n print(err,file=sys.stderr)\n\n def send_nodes_to_neo(\n self, \n values, # Unique Node Values (Such as IP's)\n attribute_name = 'IP'): # Attribute Name (Can be anything) Default: IP\n \n \"\"\"\n Create Nodes in the Neo4j Database\n\n Args:\n values: Unique Node Values (Such as IP's)\n attribute_name: Attribute Name (Can be anything) Default: IP\n \n Raises:\n Client Errors: The Client sent a bad request - changing the request might yield a successful outcome,\n TransientErrors: The database cannot service the request right now, retrying later might yield a successful outcome.\n Database Error: The database failed to service the request.\n \"\"\"\n try:\n with self.neo.session() as session:\n for val in values:\n session.run(\"CREATE (n:Node{{{}:'{}'}})\".format(attribute_name ,str(val)))\n except Exception as err:\n print(err, file=sys.stderr)\n\n def send_relations_to_neo(\n self, \n data, # List of Two Items [Src_Attribute_Value, Dest_Attribute_Value]\n attr_one_name = 'IP', # Source Attribute Name\n attr_two_name = 'IP', # Destination Attribute Name\n relation_name = 'TO'): # Name of the Relation\n\n \"\"\"\n Adds Relationships between two specified nodes\n\n Args:\n data: List of Two Items [Src_Attribute_Value, Dest_Attribute_Value]\n attr_one_name : Source Attribute Name, Default: 'IP'\n attr_two_name : Destination Attribute Name, Default: 'IP'\n relation_name : Name of the Relation, Default: 'TO'\n\n Returns:\n None\n\n Raises:\n Client 
Errors: The Client sent a bad request - changing the request might yield a successful outcome,\n TransientErrors: The database cannot service the request right now, retrying later might yield a successful outcome.\n Database Error: The database failed to service the request.\n \"\"\"\n\n try:\n with self.neo.session() as session:\n for relation in data:\n session.run(\"MATCH (s:Node{{{}:'{}'}}), (d:Node{{{}:'{}'}}) MERGE (s)-[:TO]->(d)\".format(attr_one_name, relation[0], attr_two_name, relation[1]))\n except Exception as err:\n print(err, file=sys.stderr)\n\n def delete_all(self):\n \"\"\"\n Clears all the data in the connected Neo4j instance. \n \n Args:\n None\n Returns:\n None\n Raises:\n Client Errors: The Client sent a bad request - changing the request might yield a successful outcome,\n TransientErrors: The database cannot service the request right now, retrying later might yield a successful outcome.\n Database Error: The database failed to service the request.\n \"\"\"\n try:\n with self.neo.session() as session:\n session.run(\"MATCH (n) DETACH DELETE n\")\n except Exception as err:\n print(err, file=sys.stderr)\n\n def close(self):\n \"\"\"\n Close Connecion to Neo4j\n\n Returns:\n None\n \n Raises:\n None\n \"\"\"\n self.neo.close()\n" }, { "alpha_fraction": 0.5875118374824524, "alphanum_fraction": 0.6017029285430908, "avg_line_length": 31.030303955078125, "blob_id": "b08ba6274122a058a8d58635e32d0454183ea7c2", "content_id": "9298a408ed417ee22693b217f7ff360035367604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 80, "num_lines": 33, "path": "/Utility_Modules/r_utils.py", "repo_name": "sand-ci/Analytics", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\n\ndef getDateFormat(dte = None, delta = 0):\n '''\n Returns the current date and time in the format required for Datetime. \n delta : the days before the current date, you want the date\n '''\n \n if dte is not None:\n return str(dte.timestamp()*1000)\n else:\n return str((datetime.now() - timedelta(days = delta )).timestamp()*1000)\n \n \ndef plotByHist(data, x_label, plt):\n cdata = {}\n for result in data:\n cdata[result['key']] = result['doc_count']\n \n labels = list(cdata.keys())\n values = list(cdata.values())\n plt.figure(figsize=(8,4.5), dpi=128)\n plt.bar(labels, values)\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False\n )\n plt.xlabel(x_label)\n plt.ylabel('Frequency')\n plt.yscale('log')\n" } ]
6
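The `markStable.py` and `reindex.py` payloads in the record above both page through Elasticsearch with the scroll API: one initial `search` call with `scroll='2m'`, then repeated `scroll` calls until a batch comes back empty. A minimal sketch of that loop, assuming the elasticsearch-py client; the host, index name, and query below are placeholders, not the repo's real values:

```python
from elasticsearch import Elasticsearch

# Placeholder host, index, and query; the files above target their own cluster.
es = Elasticsearch(['localhost:9200'], timeout=180)
query = {"query": {"match_all": {}}}

docs = []
page = es.search(index='ps_trace', body=query, scroll='2m', size=1000)
sid = page['_scroll_id']
while page['hits']['hits']:
    docs.extend(hit['_source'] for hit in page['hits']['hits'])
    page = es.scroll(scroll_id=sid, scroll='2m')  # fetch the next batch
    sid = page['_scroll_id']
es.clear_scroll(scroll_id=sid)  # release the server-side scroll context
```

The repo's versions add retry loops around each call because long scrolls occasionally time out; the sketch omits that for brevity.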
mertugur/Instagram_Search_v1.0
https://github.com/mertugur/Instagram_Search_v1.0
3e757140f21b63c3d4a67b331e2c0f8a844c7242
00d62157818941fa4784528012c920c05c8ee21b
5bce1662ef5360335102d90500aa748d9a761f31
refs/heads/master
2016-09-05T09:13:35.278806
2015-02-13T22:51:04
2015-02-13T22:51:04
30,778,144
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8383620977401733, "alphanum_fraction": 0.8426724076271057, "avg_line_length": 53.588233947753906, "blob_id": "56502423b1b7cdf2d65333e7cab166fd81597a80", "content_id": "1b8eaa4cdd14475fc5472c226eb711666a6ee6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 989, "license_type": "no_license", "max_line_length": 168, "num_lines": 17, "path": "/README.md", "repo_name": "mertugur/Instagram_Search_v1.0", "src_encoding": "UTF-8", "text": "# Instagram_Search_v1.0\n\nInstagram'da son 24 saat içerisinde belirli kelimeleri tag olarak bulunduran fotoğrafları arar.\n\nArama sonucunda bulunan gönderileri istenilen adrese e - posta ile bildirir.\n\nScript'in çalışması için /instagram/ klasörünün oluşturulması, script'in burada oluşturulması ve keywords.txt dosyasının aynı dizin altında oluşturulması gerekmektedir.\n\nkeywords.txt dosyasının her satırında bir adet aranması istenilen tag olmalıdır.\n\nScript url isteği bulunarak sorgulama yapar. Sorgulamanın yapılabilmesi için url tanımının içine client_id değeri Instagram Developer üzerinden alınarak girilmelidir.\n\nE - postanın gönderilmesi için to, from ve smtp ayarları kodun içerisine girilmelidir.\n\nScript çalışması sonrasında html tablo içerisinde bulunan sonuçlar ile birlikte, tablonun altında arama kelimelerini listeleyerek e - posta gönderir.\n\nKod üzerinde iyileştirme ve geliştirme çalışmaları devam etmektedir.\n" }, { "alpha_fraction": 0.5252180695533752, "alphanum_fraction": 0.534319281578064, "avg_line_length": 33.24675369262695, "blob_id": "51209655eca1d9bd1f13c1ed6f5d2ce83c04d771", "content_id": "ba4fc360b70d23688bd20f65b4193e38c8fdd0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2641, "license_type": "no_license", "max_line_length": 172, "num_lines": 77, "path": "/instagram.py", "repo_name": "mertugur/Instagram_Search_v1.0", "src_encoding": "UTF-8", "text": "import urllib2\nimport simplejson\nimport json\nfrom datetime import datetime, timedelta\nimport time\nfrom pprint import pprint\nimport calendar\nfrom datetime import datetime\nimport sys\nimport os\nimport subprocess\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n \nreload(sys)\n \nsys.setdefaultencoding('utf-8')\n \nd = datetime.now()\ny = d - timedelta(days=1)\ntimestamp=calendar.timegm(d.utctimetuple())\ntimestampy=calendar.timegm(y.utctimetuple())\n \nhtml_1 = \"<html><head></head><body><table border='1'><tr><th>Sosyal Ag</th><th>Tarih</th><th>Kullanici</th><th>Photo</th>\"\nhtml_2 = \"</tr></table><br><br><br><br>\"\nhtml_3 = \"</body></html>\"\nhtml_body = \"\"\n\nwith open ('/instagram/keywords.txt') as f:\n for line in f:\n inst_url = 'https://api.instagram.com/v1/tags/'\n tag = line.replace(\"\\n\",\"\")\n tag = tag.replace(\"\\t\",\"\")\n tag = tag.rstrip()\n inst_url = inst_url + tag + '/media/recent?min_timestamp=%s' %timestampy\n inst_url = inst_url + '&client_id= '\n\n req = urllib2.urlopen(inst_url)\n\n data = json.load(req)\n\n for i in data[\"data\"]:\n try:\n photodate = datetime.strptime(time.ctime(float(i[\"caption\"][\"created_time\"])), '%a %b %d %H:%M:%S %Y')\n if (y <= photodate):\n html_body1 = \"<tr><td>Instagram<td>%s</td>\" %time.ctime(float(i[\"caption\"][\"created_time\"])) + \"<td>%s</td>\" %i[\"user\"][\"username\"\"]\n photourl = i[\"images\"][\"low_resolution\"][\"url\"]\n html_body2 =  '<td><img src=\\\"%s\\\"></td> ' %photourl\n html_body3 =  
\"</tr>\"\n html_body = html_body +  html_body1 + \" \" + html_body2 + \" \" + html_body3\n except:\n continue\n\nme = ' '\nyou = ' '\nmsg = MIMEMultipart('alternative')\nmsg['Subject'] = ' '\nmsg['From'] = me\nmsg['To'] = you\ntext = \" \"\n\nreport_info = \"<b>Bu rapor da yer alan gonderiler asagidaki kelimeler kullanilarak yapilmistir:</b><br>\"\nhtml = html_1 + html_body + html_2 + report_info.encode('utf-8')\nwith open('/instagram/keywords.txt') as f3:\n for line_keyword in f3:\n html = html + line_keyword + \"<br>\"\n\nhtml = html + html_3\n\nhtml = html.encode('utf-8')\npart1 = MIMEText(html, 'html')\n\nmsg.attach(part1)\ns = smtplib.SMTP(' ')\ns.sendmail(me, [you], msg.as_string())\ns.quit()\n" } ]
2
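The `instagram.py` payload above assembles an HTML report and mails it through `smtplib` with a `MIMEMultipart` message. A minimal Python 3 sketch of that mailing step; the addresses and SMTP host here are hypothetical, since the repo deliberately leaves those fields blank:

```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

# Hypothetical addresses and SMTP host; instagram.py leaves these blank on purpose.
sender, recipient = 'reports@example.com', 'me@example.com'

msg = MIMEMultipart('alternative')
msg['Subject'] = 'Instagram tag report'
msg['From'] = sender
msg['To'] = recipient
msg.attach(MIMEText('<html><body><p>report body</p></body></html>', 'html'))

with smtplib.SMTP('smtp.example.com') as server:
    server.sendmail(sender, [recipient], msg.as_string())
```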
Dongzhixiao/tutorial
https://github.com/Dongzhixiao/tutorial
fbd22afcf84ef50b01e5fbc038ac62a1c27afa98
145a062c7f63a205fe08d992fce3d87179fb195f
cb17926f3882f1509499822987319371a7ac4502
refs/heads/master
2021-10-23T05:14:44.646719
2019-03-15T02:38:05
2019-03-15T02:38:05
43,938,769
4
1
null
null
null
null
null
[ { "alpha_fraction": 0.6726272106170654, "alphanum_fraction": 0.6795048117637634, "avg_line_length": 29.29166603088379, "blob_id": "05dbfdbfadc559d9e3da994ad7861289e927cee9", "content_id": "4b5bc008c03b2afba4001e8454e30dc0f8c95f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 777, "license_type": "no_license", "max_line_length": 114, "num_lines": 24, "path": "/README.md", "repo_name": "Dongzhixiao/tutorial", "src_encoding": "UTF-8", "text": "Tutorial\n================\n            Author:冬之晓:blush:\n----------------\n           E-mail:[email protected]\n----------------\n\n[![Dongzhixiao](https://img.shields.io/badge/Written%20by-Dongzhixiao-ff69b4.svg)](https://github.com/Dongzhixiao)\n[![Language](https://img.shields.io/badge/Language-Python-yellow.svg)](https://www.python.org/)\n[![Framework](https://img.shields.io/badge/Framework-Scrapy-brightgreen.svg)](https://scrapy.org/)\n\nA simple sofeware to crawl the [Nature](https://www.nature.com/search?journal=sdata&subject=) article messages,\nthe contents include:\n\n- Article Title\n- Article Tag\n- Received Time\n- Accept Time\n- Published Time\n- Reference Number\n- Affiliation\n- Authors\n- The Country of Affiliation\n- Article URL\n" }, { "alpha_fraction": 0.48056280612945557, "alphanum_fraction": 0.49506816267967224, "avg_line_length": 44.30263137817383, "blob_id": "bd91fef78424225aeea6da04d2923201121a01bc", "content_id": "0f5b87213063316479abe5b29f4f85c12c5f65bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7164, "license_type": "no_license", "max_line_length": 152, "num_lines": 152, "path": "/tutorial/spiders/quotes_spider.py", "repo_name": "Dongzhixiao/tutorial", "src_encoding": "UTF-8", "text": "import scrapy\nimport tutorial.items\nimport re\n\nclass QuotesSpider(scrapy.Spider):\n name = \"quotes\"\n # headers = {\n # \"Accept\":\"*/*\",\n # \"Accept-Encoding\":\"gzip, deflate, sdch\",\n # \"Accept-Language\":\"zh-CN,zh;q=0.8\",\n # \"Cache-Control\":\"max-age=0\",\n # \"Connection\":\"keep-alive\",\n # \"Host\": \"www.xxxxxx.com\",\n # \"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"\n # }\n raw_url = 'https://www.nature.com/search?journal=sdata&subject='\n base_url = 'https://www.nature.com/search?journal=sdata&page='\n\n def start_requests(self):\n#==============================================================================\n# urls = [\n# 'https://www.nature.com/search?journal=sdata&subject=',\n# 'https://www.nature.com/search?journal=sdata&page=2',\n# 'https://www.nature.com/search?journal=sdata&page=3',\n# 'https://www.nature.com/search?journal=sdata&page=4',\n# 'https://www.nature.com/search?journal=sdata&page=5',\n# 'https://www.nature.com/search?journal=sdata&page=6',\n# 'https://www.nature.com/search?journal=sdata&page=7',\n# 'https://www.nature.com/search?journal=sdata&page=8',\n# 'https://www.nature.com/search?journal=sdata&page=9',\n# 'https://www.nature.com/search?journal=sdata&page=10',\n# 'https://www.nature.com/search?journal=sdata&page=11',\n# 'https://www.nature.com/search?journal=sdata&page=12',\n# 'https://www.nature.com/search?journal=sdata&page=13',\n# 'https://www.nature.com/search?journal=sdata&page=14',\n# 'https://www.nature.com/search?journal=sdata&page=15',\n# 'https://www.nature.com/search?journal=sdata&page=16',\n# 'https://www.nature.com/search?journal=sdata&page=17',\n# 
'https://www.nature.com/search?journal=sdata&page=18',\n# 'https://www.nature.com/search?journal=sdata&page=19',\n# \n# ]\n#==============================================================================\n yield scrapy.Request(url=self.raw_url, callback=self.parse,dont_filter = True)\n#==============================================================================\n for i in range(2,20):\n url = self.base_url + str(i)\n yield scrapy.Request(url=url, callback=self.parse,dont_filter = True)\n#==============================================================================\n\n def parse(self, response):\n # page = response.url.split(\"/\")[-2]\n # filename = 'quotes-%s.html' % page\n # self.log(\"*************************************************\")\n # self.log(\"the fileName is: %s\" % filename)\n \n netware = response.selector.xpath('//h2/a[contains(@itemprop,\"url\")]/@href').extract()\n #self.log('文章网址:%s' % netware)\n #self.log('搜索到了: %s' % len(netware))\n \n #netware = ['https://www.nature.com/articles/sdata2017176']\n \n for s in netware:\n yield scrapy.Request(url=s, callback=self.parseChild,dont_filter = True,meta={'url':s})\n \n \n \n def parseChild(self, response):\n #self.log(\"***********************\")\n articleTitle = response.selector.xpath('//header/div/h1')\n articleTitle = articleTitle.xpath('string(.)').extract()[0]\n #self.log('文章标题:%s' % articleTitle)\n articleTag = response.selector.xpath('//a[contains(@data-track-source,\"subject-name\")]/text()').extract()\n #self.log('文章类别:%s' % articleTag)\n ReceivedTime = response.selector.xpath('//*[@id=\"content\"]/div/div/article/div[1]/header/div/div/div[2]/div/dl/dd[1]/time/@datetime').extract()\n #self.log('文章接收时间:%s' % ReceivedTime)\n AcceptTime = response.selector.xpath('//*[@id=\"content\"]/div/div/article/div[1]/header/div/div/div[2]/div/dl/dd[2]/time/@datetime').extract()\n #self.log('文章接受录用时间:%s' % AcceptTime)\n PublishedTime = response.selector.xpath('//*[@id=\"content\"]/div/div/article/div[1]/header/div/div/div[2]/div/dl/dd[3]/time/@datetime').extract()\n #self.log('文章发表时间:%s' % PublishedTime)\n ReferencesNumber = len(response.selector.xpath('//*[@id=\"references-content\"]/div/ol/li').extract())\n #self.log('文章引用数目:%s' % ReferencesNumber)\n articleURL = response.meta['url']\n #self.log('文章地址:%s' % articleURL)\n Affiliations = response.selector.xpath('//*[@id=\"author-information-content\"]/ol/li/h3/text()').extract()\n Authors = response.selector.xpath('//*[@id=\"author-information-content\"]/ol/li/ul/li/span[2]/text()').extract()\n #Country = Affiliations[0].split(',')[-1]\n fileName = \"test.md\"\n with open(fileName, 'a',encoding='utf-8') as f:\n # f.write('文章标题:%s' % articleTitle)\n # f.write('\\n')\n # f.write('文章类别:%s' % articleTag)\n # f.write('\\n')\n #写文章标题\n articleTitle = re.sub(r\"\\n\", r\"\", articleTitle)\n articleTitle = re.sub(r\"\\s+\",r\" \",articleTitle)\n f.write(articleTitle)\n f.write(' ; ')\n #写文章类别\n f.write('/')\n for content in articleTag:\n f.write(content)\n f.write('/')\n f.write(' ; ')\n #写文章接收时间\n if len(ReceivedTime) > 0 :\n f.write(ReceivedTime[0])\n else:\n f.write('empty')\n f.write(' ; ')\n #写文章接受录用时间\n if len(AcceptTime) > 0 :\n f.write(AcceptTime[0])\n else:\n f.write('empty')\n f.write(' ; ')\n #文章在线发表时间\n if len(PublishedTime) > 0:\n f.write(PublishedTime[0])\n else:\n f.write('empty')\n f.write(' ; ')\n #写文章引用文章数目\n f.write(str(ReferencesNumber))\n f.write(' ; ')\n #文章作者\n if len(Authors) > 0 :\n f.write('/')\n for content in Authors:\n f.write(content)\n f.write('/')\n else:\n 
f.write(\"empty\")\n f.write(' ; ')\n #文章机构信息\n if len(Affiliations) > 0:\n affiliation = re.sub(r\"\\n\", r\"\", Affiliations[0].split(',')[0])\n f.write(affiliation)\n else:\n f.write(\"empty\")\n f.write(\" ; \")\n #文章作者国家\n if len(Affiliations) > 0:\n f.write(Affiliations[0].split(',')[-1].strip())\n else:\n f.write(\"empty\")\n f.write(' ; ')\n # 文章所在网址\n f.write(articleURL)\n #输出一个空行\n f.write('\\n')\n self.log('Saved file %s' % fileName)\n " } ]
2
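The `quotes_spider.py` payload above crawls in two stages: `start_requests` yields one request per search-results page, `parse` extracts article links, and a second callback scrapes each article page. A skeleton of that pattern, assuming Scrapy; the XPath expressions here are simplified placeholders, not the repo's exact selectors:

```python
import scrapy

class SketchSpider(scrapy.Spider):
    # Skeleton of the two-stage crawl used in quotes_spider.py above.
    name = 'sketch'
    base_url = 'https://www.nature.com/search?journal=sdata&page='

    def start_requests(self):
        # One request per results page, mirroring the spider's pagination loop.
        for page in range(1, 20):
            yield scrapy.Request(self.base_url + str(page),
                                 callback=self.parse, dont_filter=True)

    def parse(self, response):
        # Follow each article link found on the results page.
        for href in response.xpath('//h2/a[@itemprop="url"]/@href').getall():
            yield response.follow(href, callback=self.parse_article)

    def parse_article(self, response):
        # Placeholder extraction; the repo writes many more fields to a file.
        yield {'title': response.xpath('normalize-space(//h1)').get(),
               'url': response.url}
```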
arpansadhu10/python__dSA_project
https://github.com/arpansadhu10/python__dSA_project
404802dbb6667df4641a6f0d71a83eb01b152020
8bfccbff0d9e96c765052387ccc39a82710a217d
116ad1138b212bce9345524ec62f07a4b27b9c9e
refs/heads/master
2023-07-23T23:39:28.039851
2021-09-02T17:30:50
2021-09-02T17:30:50
402,506,432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4453781545162201, "alphanum_fraction": 0.48739495873451233, "avg_line_length": 15.571428298950195, "blob_id": "aea75b8f36ae6d4da36af5d9a35d51e6f7e6b9f6", "content_id": "3832fadda9eed5008a1ee7075c403ae499a645e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/testarray.py", "repo_name": "arpansadhu10/python__dSA_project", "src_encoding": "UTF-8", "text": "arr={1,2,3,4,5}\ndef array():\n for i in arr:\n print(i,end=\"\")\n print()\ndef array_add(n):\n arr.add(n)\n\n\n\n" }, { "alpha_fraction": 0.6262295246124268, "alphanum_fraction": 0.631147563457489, "avg_line_length": 31.105262756347656, "blob_id": "70d4194119ea50eda6c5f4e88ef79757c76f57d8", "content_id": "834ed57494f91c6e6606d400079393a49a168d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 99, "num_lines": 19, "path": "/imageinput.py", "repo_name": "arpansadhu10/python__dSA_project", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom PyQt5.QtWidgets import QApplication, QWidget, QComboBox, QPushButton, QFileDialog, QVBoxLayout\n\n# TODO:how to open file upload dialog box in python qt5\n\ndef getSaveFileName(self):\n file_filter = 'Data File (*.xlsx *.csv *.dat);; Excel File (*.xlsx *.xls)'\n response = QFileDialog.getSaveFileName(\n parent=self,\n caption='Select a data file',\n directory= 'Data File.dat',\n filter=file_filter,\n initialFilter='Excel File (*.xlsx *.xls)'\n )\n print(response)\n return response[0]\n\ngetSaveFileName()\n" }, { "alpha_fraction": 0.5998021364212036, "alphanum_fraction": 0.6119218468666077, "avg_line_length": 26.317567825317383, "blob_id": "32f48804f825b5481a06e832f771cd81ae0133ac", "content_id": "0befb896aee175252de2f734b47a86f8f4b9361f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4043, "license_type": "no_license", "max_line_length": 128, "num_lines": 148, "path": "/WelcomeScreen.py", "repo_name": "arpansadhu10/python__dSA_project", "src_encoding": "UTF-8", "text": "\n# importing libraries\nfrom PyQt5.QtWidgets import QDialog,QApplication, QInputDialog, QStackedWidget, QTableWidgetItem,QWidget,QPushButton,QMainWindow\nfrom PyQt5.uic import loadUi\nfrom PyQt5 import QtWidgets\n# from PyQt5.QtCore import * \nimport sys\nfrom test_database import db\n\n# from testarray import *\n\narray_test=[1,2,3]\ncount=0\n\nclass WelcomeScreen(QDialog):\n \n def __init__(self):\n super(WelcomeScreen,self).__init__()\n loadUi(\"WelcomeScreen.ui\",self)\n self.StartButton.clicked.connect(self.online_offline_function)\n \n\n def online_offline_function(self):\n # print(\"clicked\")\n array_test.append(200)\n online_offline_pagechanger = online_offline_class()\n widget.addWidget(online_offline_pagechanger)\n widget.setCurrentIndex(widget.currentIndex()+1)\n\n\n\nclass online_offline_class(QDialog):\n def __init__(self):\n super(online_offline_class, self).__init__()\n loadUi(\"online_offline.ui\",self)\n array_test.append(100)\n self.homepage.clicked.connect(self.homepage_function)\n self.offline.clicked.connect(self.dataDisplay_function)\n \n def dataDisplay_function(self):\n # print(\"clicked\")\n dataDisplay_page = dataDisplay_class()\n widget.addWidget(dataDisplay_page)\n widget.setCurrentIndex(widget.currentIndex()+1)\n\n\n\n\n def homepage_function(self):\n # 
print(\"homepage\")\n homepage = WelcomeScreen()\n widget.addWidget(homepage)\n widget.setCurrentIndex(widget.currentIndex()-1)\n\n\nclass dataDisplay_class(QDialog):\n def __init__(self):\n super(dataDisplay_class, self).__init__()\n loadUi(\"Datadisplay.ui\",self)\n self.loaddata()\n self.createData.clicked.connect(self.createData_function)\n self.loaddata()\n self.deleteData.clicked.connect(self.deleteData_function)\n\n # self.homepage.clicked.connect(self.exit_func)\n\n def deleteData_function(self):\n print(\"deleteclicked\")\n id_to_delete,ok=QInputDialog.getText(self,\"input dialogue\",\"Enter id which is to be deleted:\")\n # print(id_to_delete)\n # print()\n print(type(id_to_delete),\"this is received\")\n \n index=-1\n for i in range(len(db)):\n # print(type(db[i]['id']))\n print(i,\"wanted\")\n if db[i][\"id\"] == int(id_to_delete):\n del db[i]\n index=i\n #!have to change ids after that \n self.loaddata() \n print(db)\n \n # def exit_func(self):\n # print(\"exiting\")\n # sys.exit(app.exec())\n\n\n def createData_function(self):\n print(\"createddata\")\n name,ok=QInputDialog.getText(self,\"input dialogue\",\"Enter Name:\")\n phone,ok=QInputDialog.getText(self,\"input dialogue\",\"Enter phone Number:\")\n email,ok=QInputDialog.getText(self,\"input dialogue\",\"Enter Email id:\")\n db.append({\n \"id\":len(db)+1,\n \"name\":name,\n \"email\":email,\n \"phone\":phone\n\n })\n print(db)\n \n # count=count+1\n self.loaddata()\n # print(name)\n # print(ok)\n \n\n\n\n \n def loaddata(self):\n # db2=[{\"name\":\"arpan\",\"email\":\"[email protected]\",\"phone\":\"123456\"},\n # {\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"}]\n row=0\n # print(\"here\")\n self.tableWidget.setRowCount(len(db))\n\n for person in db:\n # print(person[\"name\"])\n\n self.tableWidget.setItem(row,0,QtWidgets.QTableWidgetItem(person[\"name\"]))\n self.tableWidget.setItem(row,1,QtWidgets.QTableWidgetItem(str(person[\"phone\"])))\n self.tableWidget.setItem(row,2,QtWidgets.QTableWidgetItem(person[\"email\"]))\n row=row+1\n\n\n\n\n \n\n\n#main\napp=QApplication(sys.argv)\nwelcome=WelcomeScreen()\n\nwidget=QStackedWidget()\n\nwidget.addWidget(welcome)\nwidget.setFixedHeight(800)\nwidget.setFixedWidth(1200)\nwidget.show()\n\n\ntry:\n sys.exit(app.exec())\nexcept:\n print(\"exiting\")" }, { "alpha_fraction": 0.4517374634742737, "alphanum_fraction": 0.5250965356826782, "avg_line_length": 14.29411792755127, "blob_id": "20ae4689d3edd9fbb7761903a18412b0af9541d9", "content_id": "f78bd4b8f27c0e3a9217508def30256db22a4da7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/test_database.py", "repo_name": "arpansadhu10/python__dSA_project", "src_encoding": "UTF-8", "text": "db=[{\"id\":1,\"name\":\"arpan\",\"email\":\"[email protected]\",\"phone\":\"123456\"},\n {\"id\":2,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"}]\n\n\na=\"Arrrr\"\nbb=\"fhfs\"\ncc=\"erhturehh\"\ndb.append({\n \"id\":3,\n \"name\":a,\n \"email\":bb,\n \"phone\":cc\n\n})\n\n\nprint(db)" }, { "alpha_fraction": 0.42599278688430786, "alphanum_fraction": 0.5691937208175659, "avg_line_length": 54.46666717529297, "blob_id": "da8b7ec457f92b74d11b4e597070e84dcdccbc40", "content_id": "788afbe60f6e1713ff392532b801adeb0c70d630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": 
"no_license", "max_line_length": 73, "num_lines": 15, "path": "/__concept__testing.py", "repo_name": "arpansadhu10/python__dSA_project", "src_encoding": "UTF-8", "text": "db=[{\"id\":1,\"name\":\"arpan\",\"email\":\"[email protected]\",\"phone\":\"123456\"},\n {\"id\":2,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":3,\"name\":\"newwwwwwww\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":4,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":5,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":6,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":7,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":8,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":9,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":10,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n {\"id\":11,\"name\":\"arpanAgain\",\"email\":\"[email protected]\",\"phone\":\"1234566666\"},\n ]\n\nfor i in db:\n print(db[i])" } ]
5
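The `WelcomeScreen.py` payload above navigates between dialogs by adding each page to a `QStackedWidget` and bumping `currentIndex`. A minimal sketch of that navigation, assuming PyQt5, with plain widgets standing in for the repo's `.ui`-loaded dialogs:

```python
import sys
from PyQt5.QtWidgets import (QApplication, QLabel, QPushButton,
                             QStackedWidget, QVBoxLayout, QWidget)

app = QApplication(sys.argv)
stack = QStackedWidget()

# Two plain widgets stand in for the loadUi-built dialogs in the repo.
page1, page2 = QWidget(), QWidget()
button = QPushButton('Next')
QVBoxLayout(page1).addWidget(button)
QVBoxLayout(page2).addWidget(QLabel('Second page'))

stack.addWidget(page1)
stack.addWidget(page2)
# Same navigation idiom as the repo: advance the stacked widget's index.
button.clicked.connect(lambda: stack.setCurrentIndex(stack.currentIndex() + 1))

stack.show()
sys.exit(app.exec_())
```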
parkerwray/Materials
https://github.com/parkerwray/Materials
66f4d23e042350efbd90363e665f498efdd152bc
8d13def50546f2968086f211b50bc5a1c5b0541e
96eca06e0bc3376770bb526a36328bc24e162090
refs/heads/master
2022-12-27T01:26:01.074701
2020-09-22T19:29:18
2020-09-22T19:29:18
277,351,990
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.525585412979126, "alphanum_fraction": 0.5537727475166321, "avg_line_length": 30.46575355529785, "blob_id": "377d1cec31683b7d7c9f6ce6c3fdb7e8522be88c", "content_id": "22dcf0fbb0a807fe7642f9cb39d8e8d614076cfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2306, "license_type": "no_license", "max_line_length": 117, "num_lines": 73, "path": "/materials.py", "repo_name": "parkerwray/Materials", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 00:03:25 2020\n@author: parkerwray\n\n\nThis code defines functions that load and manipulate material data.\n\n\"\"\"\n\nimport pandas as pd\nimport scipy as sci\nimport tkinter as tk\nfrom tkinter import filedialog\n\n\n\nclass material():\n def __init__(self):\n \n self.n = []\n self.k = []\n self.data = []\n\n def refinfo(self, filename = None, unit = 'nm'):\n \n \"\"\"\n Read .csv files from refractive index info and save into Pandas data frame \n with collumns [lda, n, k]\n \n Inputs:\n filename: location of .csv file. If blank a gui will be used to select \n the data file\n \n unit: unit of the wavelength data\n \n Outputs:\n self.n = interpolation function that interpolates n data based on \n wavelength given a wavelength argument. I.e., mat1.n(400) gives n \n at 400 unit\n\n self.k = interpolation function that interpolates k data based on \n wavelength given a wavelength argument. I.e., mat1.k(400) gives k\n at 400 unit\n \n self.data = panda DataFrame of the raw [lda, n, k] data extracted.\n \"\"\"\n if filename == None:\n filename = grab_file()\n \n test = pd.read_csv(filename)\n mask = pd.to_numeric(test.iloc[:,1], errors='coerce').isnull().cumsum();\n #test2 = [g[1:].rename(columns={0:g.iloc[0].values[0]}) for i, g in test.groupby(mask)]\n test2 = [g[1:].rename(columns={0:g.iloc[0].values[0]}).reset_index(drop=True) for i, g in test.groupby(mask)]\n test2[0] = test2[0].rename(columns={'wl':'lda'})\n test2[1] = test2[1].rename(columns={\"wl\":\"lda\", \"n\":\"k\"})\n test3 = pd.merge(test2[0], test2[1], on = 'lda').astype('float')\n \n test3.lda = {\n 'nm': lambda x:x*1E3,\n 'um': lambda x:x,\n 'mm': lambda x:x*1E-3}[unit](test3.lda)\n \n self.n = sci.interpolate.interp1d(test3.lda, test3.n)\n self.k = sci.interpolate.interp1d(test3.lda, test3.k) \n self.data = test3\n\n\ndef grab_file():\n root = tk.Tk()\n root.withdraw()\n return filedialog.askopenfilename()\n\n\n\n\n\n\n\n\n\n" } ]
1
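The `materials.py` payload above wraps refractiveindex.info CSV columns in `scipy.interpolate.interp1d` so that n and k can be queried at arbitrary wavelengths. A toy sketch of just that interpolation step; the dispersion values below are made up for illustration, not real material data:

```python
import numpy as np
from scipy.interpolate import interp1d

# Toy dispersion table (wavelength in nm vs. refractive index n), a stand-in
# for the refractiveindex.info CSV that materials.py parses.
lda = np.array([400.0, 500.0, 600.0, 700.0])
n = np.array([1.470, 1.462, 1.458, 1.455])

n_of_lda = interp1d(lda, n)
print(float(n_of_lda(550.0)))  # interpolated index at 550 nm
```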
erenkulaksiz/atmXS
https://github.com/erenkulaksiz/atmXS
267b720a997cfc5855a539a89715294fb234cfb6
113b16bf9983f2c4ab4c6cac8e92429b33db3027
4d64cc4da11ed302645c426516241698e14a11ac
refs/heads/master
2022-02-18T14:38:49.818704
2019-09-21T16:19:36
2019-09-21T16:19:36
209,836,480
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5690154433250427, "alphanum_fraction": 0.5855855941772461, "avg_line_length": 32.42473220825195, "blob_id": "4f164e461732f4455372f86fe8d661ce559b0870", "content_id": "71d98f2806cb8b83862ff72f9fc222705ef99dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6216, "license_type": "no_license", "max_line_length": 134, "num_lines": 186, "path": "/atmController.py", "repo_name": "erenkulaksiz/atmXS", "src_encoding": "UTF-8", "text": "import os.path\nimport hashlib\n\n\nprogramSignature = \"7d1c1c8c79b0d0fb619ad978f15f8050695d2a2b2e5611430fde03ca995eaf8b\"\n\n\ndef getUsername():\n sys_username = input(\" > Username: \")\n return sys_username\n\n\ndef getPassword():\n sys_password = input(\" > Password: \")\n return sys_password\n\n\ndef check_file_exist(filename):\n isFileExist = os.path.exists(filename)\n return isFileExist\n\n\ndef erase_file_details(filename):\n fileToErase = open(filename, \"w\").close()\n\n\ndef read_from_file(filename):\n fileToRead = open(filename, \"r\")\n return fileToRead.read()\n\n\ndef confirm_file_signature(filename):\n signatureTryString = \"\"\n signatureTryString = signatureTryString + str(read_from_file(filename)[0:64])\n if (signatureTryString == programSignature):\n return True\n else:\n return False\n\ndef get_part_from_file(filename, partmin, partmax):\n thingToGet = \"\"\n thingToGet = thingToGet + str(read_from_file(filename)[partmin:partmax])\n return thingToGet\n\n# username 66:130\n# password 132:196\n# money 198:260\n\ndef write_money_to_file(filename, value):\n fileToRead = open(filename, \"r\")\n data = fileToRead.readlines()\n data[3] = str(value)\n fileToWrite = open(filename, \"w\")\n fileToWrite.writelines(data)\n\n\ndef encrypt_string(hash_string, input_salt):\n hash_string = hash_string + input_salt\n sha_signature = \\\n hashlib.sha256(hash_string.encode()).hexdigest()\n return sha_signature\n\n\ndef place_value(number):\n return (\"{:,}\".format(number))\n\n\nisLogined = False\nisReadedMoney = False\nselectedMenuItem = 0\nmoneyCurrent = 0\npeopleList = ['YusuFcuk', 'MAli', 'Taloc', 'gullu']\n\n# This is main loop\nwhile(True):\n\n\n if check_file_exist('pswd.txt'):\n\n isFileConfirmed = False\n\n if confirm_file_signature(\"pswd.txt\"):\n isFileConfirmed = True\n else:\n print(\" > Cannot confirm file integrity \\n > File erasing...\")\n erase_file_details(\"pswd.txt\")\n isFileConfirmed = False\n\n if not isFileConfirmed:\n\n print(\" > Cannot found user details. 
\\n > Please create a new account.\")\n tempUsername, tempPassword = \"\", \"\"\n tempUsername, tempPassword = getUsername(), getPassword()\n detailsFile = open(\"pswd.txt\", \"a\")\n detailsFile.write(programSignature + \" \\n\")\n detailsFile.write(encrypt_string(tempUsername, programSignature) + \" \\n\")\n detailsFile.write(encrypt_string(tempPassword, programSignature) + \" \\n\")\n detailsFile.write(\"10000\") # This is default starting money.\n detailsFile.close()\n\n else:\n\n if not isLogined:\n\n print(\" > Please sign in\")\n\n tempUsername, tempPassword = \"\", \"\"\n tempUsername = getUsername()\n tempPassword = getPassword()\n\n usernameCorrect = False\n passwordCorrect = False\n\n if encrypt_string(tempUsername, programSignature) == get_part_from_file(\"pswd.txt\", 66, 130):\n usernameCorrect = True\n else:\n usernameCorrect = False\n\n if encrypt_string(tempPassword, programSignature) == get_part_from_file(\"pswd.txt\", 132, 196):\n passwordCorrect = True\n else:\n passwordCorrect = False\n\n if usernameCorrect:\n if passwordCorrect:\n isLogined = True\n print(\"\\n > Welcome!\\n\")\n else:\n isLogined = False\n print(\" > Username or password is incorrect.\")\n else:\n isLogined = False\n print(\" > Username or password is incorrect.\")\n\n else:\n #print(\" > Cannot detect a file to write\")\n erase_file_details(\"pswd.txt\")\n\n if isLogined:\n\n if not isReadedMoney:\n moneyCurrent = get_part_from_file(\"pswd.txt\", 198, 260)\n isReadedMoney = True\n\n if selectedMenuItem == 0:\n\n print(\" > Money in the bank: \"+place_value(int(moneyCurrent))+\"$\\n\")\n print(\" > 1) Withdraw money\\n\")\n print(\" > 2) Deposit money\\n\")\n print(\" > 3) Send money\\n\")\n tempSelectedItemMenu = input(\" > Select an option (1-3): \")\n if int(tempSelectedItemMenu) < 1:\n print(\" > The entered option must be between 1 and 3!\")\n elif int(tempSelectedItemMenu) > 3:\n print(\" > The entered option must be between 1 and 3!\")\n else:\n selectedMenuItem = int(tempSelectedItemMenu)\n\n elif selectedMenuItem == 1:\n\n print(\"\\n > Money in the bank: \"+place_value(int(moneyCurrent))+\"$\")\n tempCekilecekMiktar = input(\"\\n > Enter a value to withdraw: \")\n moneyCurrent = int(moneyCurrent) - int(tempCekilecekMiktar)\n write_money_to_file(\"pswd.txt\", int(moneyCurrent))\n print(\"\\n > Withdraw success!\\n\\n\\n\")\n selectedMenuItem = 0\n\n elif selectedMenuItem == 2:\n\n print(\"\\n > Money in the bank: \"+place_value(int(moneyCurrent))+\"$\")\n tempYatirilacakMiktar = input(\"\\n > Enter a value to deposit: \")\n moneyCurrent = int(moneyCurrent) + int(tempYatirilacakMiktar)\n write_money_to_file(\"pswd.txt\", int(moneyCurrent))\n print(\"\\n > Deposit success!\\n\\n\\n\")\n selectedMenuItem = 0\n\n elif selectedMenuItem == 3:\n\n for x in range(len(peopleList)):\n print(\" > \"+str(x+1)+\") \"+str(peopleList[x]))\n tempSelectedSendMoney = input(\" > Please select someone to send money (1-\"+str(len(peopleList))+\"): \")\n tempSendMoneyMiktar = input(\" \\n > Enter a value to send money to \"+peopleList[int(tempSelectedSendMoney)-1]+\":\")\n print(\"\\n > You sent \"+str(place_value(int(tempSendMoneyMiktar)))+\"$ to \"+peopleList[int(tempSelectedSendMoney)-1]+\".\\n\")\n moneyCurrent = int(moneyCurrent) - int(tempSendMoneyMiktar)\n write_money_to_file(\"pswd.txt\", int(moneyCurrent))\n selectedMenuItem = 0" } ]
1
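The `atmController.py` payload above never stores credentials in plain text: it writes salted SHA-256 digests to `pswd.txt` and compares digests at login. A minimal sketch of that check; the salt and password here are placeholders, not the repo's program signature:

```python
import hashlib

SALT = 'example-salt'  # placeholder; the repo salts with its program signature

def digest(value: str) -> str:
    # Same shape as atmController.py's encrypt_string: sha256(value + salt).
    return hashlib.sha256((value + SALT).encode()).hexdigest()

stored = digest('hunter2')          # what would be written to the details file
print(digest('hunter2') == stored)  # True  -> password accepted
print(digest('wrong') == stored)    # False -> rejected
```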
SayeedChowdhury/dct-snn
https://github.com/SayeedChowdhury/dct-snn
54329709550b932a0261f3f9d0ec54fd4f7d6873
c0ef166a8e7df4185c7774900f381d23e87e54ca
5cd7e4452b6ff3cb644a487623298fcb363213be
refs/heads/main
2023-08-02T16:39:30.781061
2021-09-30T16:30:41
2021-09-30T16:30:41
300,631,897
17
5
null
null
null
null
null
[ { "alpha_fraction": 0.7720075845718384, "alphanum_fraction": 0.8005066514015198, "avg_line_length": 46.84848403930664, "blob_id": "681bab68eac0d4ff0be0009d8aa09e9d611f65f4", "content_id": "eb4f1cf10e086a90ac3c5aae1dfe38f37a73d6da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1579, "license_type": "no_license", "max_line_length": 177, "num_lines": 33, "path": "/README.md", "repo_name": "SayeedChowdhury/dct-snn", "src_encoding": "UTF-8", "text": "# DCT-SNN ICCV 2021\n\n#### Code for the paper titled DCT-SNN: Using DCT to Distribute Spatial Information over Time for Learning Low-Latency Spiking Neural Networks ###\n\nhttps://arxiv.org/abs/2010.01795\n\nThe paper has been accepted to ICCV 2021.\n\nIn this projet, a new encoding scheme for SNNs is proposed, where the analog pixel values are represented over time through DCT based modulated by the correspoding coefficients.\n\nWe first train an ANN, if ANN training is intended, that can be done using\nvgg_ann_submit.py file which loads the models from vgg_ann_models_submit\n\nAfter training an ANN, subsequent SNN training can be done using main_cifar10_submit \n(for cifar10), main_cifar100_submit (for cifar100) and main_ti_submit (for tinyImagenet)\nfiles which load their corresponding model files. The snn model files include the encoding\npart.\n\nSNN training loads a pretrained ANN, we include a sample ANN for cifar10,\nwe also include DCT-SNN trained models for cifar10, cifar100 and tinyImagenet.\nThese models are available at-\n\nhttps://www.dropbox.com/sh/aroe6p16gcb2iwj/AACJkMZtwF0w6s9hZ6XyKQ5Wa?dl=0\n\nBefore SNN training, we compute the layerwise thresholds using find_threshold function,\nbut once computed, we can save them and use for later/testing purposes. 
If the user wants\nto compute the thresholds, the pre-computed ones must be commented and the following needs\nto uncommented-\nif pretrained and find_thesholds:\n find_threshold(ann_thresholds, train_loader1)\n\nNote, to train/test, the corresponding directories of datasets/pre-trained models need to\nbe changed.\n" }, { "alpha_fraction": 0.5219746828079224, "alphanum_fraction": 0.545045018196106, "avg_line_length": 35.82062911987305, "blob_id": "c252f11ddc082cb6bc3ad1266ee062675f243f10", "content_id": "16a8703564d3b40b030be66c9190b16791922da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16428, "license_type": "no_license", "max_line_length": 186, "num_lines": 446, "path": "/main_cifar10_submit.py", "repo_name": "SayeedChowdhury/dct-snn", "src_encoding": "UTF-8", "text": "\n\n\n\n#---------------------------------------------------\n# Imports\n#---------------------------------------------------\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms, models\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.autograd import Variable\n#from torchviz import make_dot\nfrom tensorboard_logger import configure, log_value\nfrom matplotlib import pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\nimport datetime\n#import pdb\nfrom spike_model_cifar import *\n\n\nimport sys\nimport os\nimport shutil\nimport math\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n\nuse_cuda = True\n\n\n\n\ntorch.manual_seed(0)\nif torch.cuda.is_available() and use_cuda:\n print (\"\\n \\t ------- Running on GPU -------\")\n #torch.cuda.set_device(int(sys.argv[1]))\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\ndef find_threshold(ann_thresholds, loader):\n \n pos=0\n \n def find(layer, pos):\n max_act=0\n \n if architecture.lower().startswith('vgg'):\n if layer == (len(model.module.features) + len(model.module.classifier) -1):\n return None\n\n for batch_idx, (data, target) in enumerate(loader):\n \n if torch.cuda.is_available() and use_cuda:\n data, target = data.cuda(), target.cuda()\n #data=m(data)\n\n with torch.no_grad():\n model.eval()\n model.module.network_init(2000)\n output = model(data, 0, find_max_mem=True, max_mem_layer=layer)\n \n if output.max()>max_act:\n max_act = output.max().item()\n f.write('\\nBatch:{} Current:{:.4f} Max:{:.4f}'.format(batch_idx+1,output.max().item(),max_act))\n if batch_idx==0:\n ann_thresholds[pos] = max_act\n pos = pos+1\n \n model.module.threshold_init(scaling_threshold=scaling_threshold, reset_threshold=reset_threshold, thresholds = ann_thresholds[:], default_threshold=default_threshold)\n break\n return pos\n\n if architecture.lower().startswith('vgg'): \n for l in model.module.features.named_children():\n if isinstance(l[1], nn.Conv2d):\n pos = find(int(l[0]), pos)\n \n for c in model.module.classifier.named_children():\n if isinstance(c[1], nn.Linear):\n pos = find(int(l[0])+int(c[0])+1, pos)\n\n if architecture.lower().startswith('res'):\n for l in model.module.pre_process.named_children():\n if isinstance(l[1], 
nn.Conv2d):\n pos = find(int(l[0]), pos)\n\ndef train(epoch, loader):\n\n global learning_rate, start_time, batch_size\n learning_rate_use = learning_rate * (lr_decay_factor**((epoch)//lr_adjust_interval))\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate_use\n \n f.write('Epoch: {} Learning Rate: {:.2e}'.format(epoch,learning_rate_use))\n \n total_loss = 0.0\n total_correct = 0\n num_train = 50000\n train_loss = AverageMeter()\n model.train()\n \n current_time = start_time\n model.module.network_init(update_interval)\n\n for batch_idx, (data, target) in enumerate(loader):\n \n if torch.cuda.is_available() and use_cuda:\n data, target = data.cuda(), target.cuda()\n #data=m(data)\n \n #print(\"Epoch: {}/{};\".format(epoch, 20), \"Training batch:{}/{};\".format(batch_idx+1, math.ceil(num_train/batch_size)))\n t=0\n mem = 0\n spike =0\n mask = 0\n spike_count = 0\n \n \n optimizer.zero_grad()\n while t<timesteps:\n \n output, mem, spike, mask, spike_count = model(data, t, mem, spike, mask, spike_count) \n output = output/(t+update_interval)\n #loss = criterion(output, target)\n loss = F.cross_entropy(output,target)\n train_loss.update(loss.item(), target.size(0))\n loss.backward()\n t = t + update_interval\n total_loss += loss.item()\n \n optimizer.step() \n pred = output.max(1,keepdim=True)[1]\n correct = pred.eq(target.data.view_as(pred)).cpu().sum()\n total_correct += correct.item()\n \n if (batch_idx+1) % 10 == 0:\n \n f.write('\\nEpoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Current:[{}/{} ({:.2f}%)] Total:[{}/{} ({:.2f}%)] Time: {}({})'.format(\n epoch,\n (batch_idx+1) * len(data),\n len(loader.dataset),\n 100. * (batch_idx+1) / len(loader),\n total_loss/(batch_idx+1),\n correct.item(),\n data.size(0),\n 100. * correct.item()/data.size(0),\n total_correct,\n data.size(0)*(batch_idx+1),\n 100. 
* total_correct/(data.size(0)*(batch_idx+1)),\n datetime.timedelta(seconds=(datetime.datetime.now() - start_time).seconds),\n datetime.timedelta(seconds=(datetime.datetime.now() - current_time).seconds)\n )\n )\n current_time = datetime.datetime.now()\n \n train_loss_per_epoch = train_loss.avg\n print(\"Epoch: {}/{};\".format(epoch, 20), \"########## Training loss: {}\".format(train_loss_per_epoch))\n log_value('train_loss', train_loss_per_epoch, epoch) \ndef test(epoch, loader):\n\n global learning_rate, start_time, batch_size_test, leak_mem\n with torch.no_grad():\n model.eval()\n total_loss = 0\n correct = 0\n is_best = False\n print_accuracy_every_batch = True\n global max_correct, batch_size, update_interval\n test_loss = AverageMeter()\n num_test = 10000\n for batch_idx, (data, target) in enumerate(loader):\n \n #print(\"Epoch: {}/{};\".format(epoch, 20), \"Test batch: {}/{}\".format(batch_idx+1, math.ceil(num_test/batch_size_test))) \n if torch.cuda.is_available() and use_cuda:\n data, target = data.cuda(), target.cuda()\n #data=m(data)\n \n model.module.network_init(timesteps)\n output, _, _, _, spike_count = model(data, 0)\n output = output/update_interval\n #for key in spike_count.keys():\n # print('Key: {}, Average: {:.3f}'.format(key, (spike_count[key].sum()/spike_count[key].numel())))\n \n loss = F.cross_entropy(output,target)\n test_loss.update(loss.item(), target.size(0))\n total_loss += loss.item()\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n q=(batch_idx+1)*data.size(0)\n if((batch_idx+1)==math.ceil(num_test/batch_size_test)):\n q=num_test \n \n if print_accuracy_every_batch:\n \n f.write('\\nAccuracy: {}/{}({:.2f}%)'.format(\n correct.item(),\n q,\n 100. * correct.item() / (q)\n )\n ) \n \n \n \n\n test_loss_per_epoch = test_loss.avg \n print(\"Epoch: {}/{};\".format(epoch, 20), \"########## Test loss: {}\".format(test_loss_per_epoch))\n log_value('test_loss', test_loss_per_epoch, epoch)\n if correct>max_correct:\n max_correct = correct\n is_best = True \n \n state = {\n 'accuracy' : max_correct.item()/len(test_loader.dataset),\n 'epoch' : epoch,\n 'model_state_dict' : model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'thresholds' : ann_thresholds,\n 'timesteps' : timesteps,\n 'leak_mem' : leak_mem,\n 'scaling_threshold' : scaling_threshold,\n 'activation' : activation\n }\n filename = 'snn_'+architecture.lower()+'_'+dataset.lower()+'_'+str(timesteps)+'_lr'+str(learning_rate)+'_'+str(batch_size)+'_cf16_28'+'.pth'\n torch.save(state,filename) \n \n if is_best:\n shutil.copyfile(filename, 'best_'+filename)\n\n f.write('\\nTest set: Loss: {:.6f}, Current: {:.2f}%, Best: {:.2f}%\\n'. format(\n total_loss/(batch_idx+1), \n 100. * correct.item() / len(test_loader.dataset),\n 100. 
* max_correct.item() / len(test_loader.dataset)\n )\n )\n\n \ndataset = 'CIFAR10' # {'CIFAR10', 'CIFAR100'}\nbatch_size = 16\nbatch_size1 = 512\nbatch_size_test = 64\ntimesteps = 48 #64\nupdate_interval = 48 #64\nnum_workers = 4\nleak_mem = .9901\nscaling_threshold = 1.0\nreset_threshold = 0.0\ndefault_threshold = 1.0\nactivation = 'Linear' # {'Linear', 'STDB'}\narchitecture = 'VGG9'#{'VGG9','VGG11'}\nprint_to_file = True\nlog_file = 'snn_'+architecture.lower()+'_'+str(update_interval)+'_'+str(batch_size)+'_4avgpool_cf16_28'+'.log'\npretrained = True\n\n# load pre-trained ANN if intend to train the SNN, change directory\npretrained_state = './vgg9_cifar10_ann_lr.1_.1by100_bs128_pixel_submit_ckpt.pth'\n\n\n# uncomment to load pre-trained SNN if intend to resume or just test\n#pretrained_state = './best_snn_vgg9_cifar10_48_lr0.0001_16_expnotbig_4*4_99.9_wd5e-4_acc89.94.pth'\n\n\nfind_thesholds = True\n\nfreeze_conv = False\nresume = False\n#resume = './snn_vgg5_cifar10_128_lr0.0002_32_samdct2_1e-4.pth'\nlearning_rate = 1e-4\nlr_adjust_interval = 5\nlr_decay_factor = 0.5 # {0.1, 0.5, 1.0}\nSTDP_alpha = 0.3\nSTDP_beta = 0.01\n\nif print_to_file:\n f = open(log_file, 'w', buffering=1)\nelse:\n f = sys.stdout\n\nconfigure('RUNS/'+log_file)\n\nnormalize = transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\ntransform_test = transforms.Compose([transforms.ToTensor(), normalize])\n\nif dataset == 'CIFAR10':\n trainset = datasets.CIFAR10(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR10(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 10\n\nelif dataset == 'CIFAR100':\n trainset = datasets.CIFAR100(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR100(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 100\n\nelif dataset == 'IMAGENET':\n labels = 1000\n traindir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'train')\n valdir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'val')\n trainset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n testset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])) \n\n\ntrain_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)\ntrain_loader1 = DataLoader(trainset, batch_size=batch_size1, shuffle=True)\ntest_loader = DataLoader(testset, batch_size=batch_size_test, shuffle=False)\n\nif architecture[0:3].lower() == 'vgg':\n model = VGG_SNN_STDB_lin(vgg_name = architecture, activation = activation, labels=labels, timesteps=timesteps, leak_mem=leak_mem)\n \n\nif freeze_conv:\n for param in model.features.parameters():\n param.requires_grad = False\n\nmodel = nn.DataParallel(model) \n\n#copying weights from a pre-trained ann/snn\nif pretrained:\n \n if architecture[0:3].lower() == 'vgg':\n state = torch.load(pretrained_state, map_location='cpu')\n f.write('\\n Variables loaded from pretrained model:')\n \n for key, value in state.items():\n if isinstance(value, (int, float)):\n f.write('\\n {} : {}'.format(key, value))\n else:\n f.write('\\n {}: 
'.format(key))\n \n model.load_state_dict(state['model_state_dict'])\n\n \n \n \n\nif torch.cuda.is_available() and use_cuda:\n model.cuda()\n\noptimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4, amsgrad=False)\n#optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=5e-4, momentum=.9)\n\ncriterion = nn.CrossEntropyLoss()\nmax_correct = 0\nstart_epoch = 1\n\nf.write('\\nDataset :{} '.format(dataset))\nf.write('\\nBatch Size :{} '.format(batch_size))\nf.write('\\nTimesteps :{} '.format(timesteps))\nf.write('\\nUpdate Interval (time) :{} '.format(update_interval))\nf.write('\\nMembrane Leak :{} '.format(leak_mem))\nf.write('\\nScaling Threshold :{} '.format(scaling_threshold))\nf.write('\\nActivation :{} '.format(activation))\nf.write('\\nArchitecture :{} '.format(architecture))\nif pretrained:\n f.write('\\nPretrained Weight File :{} '.format(pretrained_state))\nelif resume:\n f.write('\\nResumed from state :{} '.format(resume))\nf.write('\\nStarting Learning Rate :{} '.format(learning_rate))\nf.write('\\nLR Adjust Interval :{} '.format(lr_adjust_interval))\nf.write('\\nLR Decay Factor :{} '.format(lr_decay_factor))\nf.write('\\nSTDP_alpha :{} '.format(STDP_alpha))\nf.write('\\nSTDP_beta :{} '.format(STDP_beta))\nf.write('\\nOptimizer :{} '.format(optimizer))\nf.write('\\nCriterion :{} '.format(criterion))\nf.write('\\n{}'.format(model))\n\nstart_time = datetime.datetime.now()\n\nann_thresholds = []\n\nif architecture.lower().startswith('vgg'):\n for l in model.module.features.named_children():\n \n if isinstance(l[1], nn.Conv2d):\n ann_thresholds.append(default_threshold)\n \n for l in model.module.classifier.named_children():\n \n if isinstance(l[1], nn.Linear):\n ann_thresholds.append(default_threshold)\n \n\n\n\n\n\n#VGG11 CIFAR100 4*4 stride2 small from pix 99.9 thresholds\n#ann_thresholds = [2.93, 1.72, 2.25, 0.85, 1.46, 1.39, 0.61, .94, 0.21, .51]\n\n\n#VGG9 CIFAR100 4*4 stride2 99.9 percentile thresholds\nann_thresholds = [2.72, 1.98, 1.98, .77, 1.56, 0.43, .71, .23, .71]\n\n\nthresholds_set = model.module.threshold_init(scaling_threshold=1.0, reset_threshold=reset_threshold, thresholds = ann_thresholds[:], default_threshold=default_threshold)\n\nf.write('\\n Threshold: {}'.format(thresholds_set))\n\n\n##Uncomment to find firing thresholds, else use pre-computed thresholds\n#if pretrained and find_thesholds:\n# find_threshold(ann_thresholds, train_loader1)\n# \n\nfor epoch in range(start_epoch, 25):\n \n train(epoch, train_loader)\n test(epoch, test_loader) \n\n#f.write('\\nHighest accuracy: {:.2f}%'.format(100*max_correct.item()/len(test_loader.dataset)))\n\n\n" }, { "alpha_fraction": 0.39439722895622253, "alphanum_fraction": 0.4683663845062256, "avg_line_length": 34.17777633666992, "blob_id": "d1b97bf8d3051de8d0d664347ebfe1a6229bc9d2", "content_id": "24be0e60a808d637f3b68f932744e4adc5bc7f4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3177, "license_type": "no_license", "max_line_length": 141, "num_lines": 90, "path": "/vgg_ann_models.py", "repo_name": "SayeedChowdhury/dct-snn", "src_encoding": "UTF-8", "text": "\nimport torch\nimport torch.nn as nn\nimport math\ntorch.manual_seed(0)\n\ncfg = {\n\t'VGG5' : [64, 'A', 128, 'D', 128, 'A'],\n\t'VGG9': [64, 'A', 128, 'D', 128, 'A', 256, 'D', 256, 'A', 512, 'D', 512, 'A'],\n\t'VGG11': [64, 'A', 128, 'D', 256, 'A', 512, 'D', 512, 'D', 512, 'A', 512, 'D', 512, 'A'],\n\t'VGG13': [64, 'D', 64, 'A', 128, 'D', 128, 'A', 256, 'D', 256, 
'A', 512, 'D', 512, 'A', 512, 'D', 512, 'A'],\n 'VGG16': [64, 'D', 64, 'A', 128, 'D', 128, 'A', 256, 'D', 256, 'D', 256, 'A', 512, 'D', 512, 'D', 512, 'A', 512, 'D', 512, 'D', 512, 'D']\n}\n\n\n\n\nclass VGG(nn.Module):\n def __init__(self, vgg_name, labels=10):\n super(VGG, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.labels \t\t= labels\n self.vgg_name = vgg_name\n self.classifier = self._make_fc_layers()\n \n for m in self.modules():\n if(isinstance(m, nn.Conv2d)):\n #m.threshold = 0.999#0.75 #1.0\n n1 = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n variance1 = math.sqrt(1. / (n1)) # math.sqrt(6. / (n + n1))\n m.weight.data.normal_(0, variance1)\n #m.bias.data.zero_()\n \n \n elif(isinstance(m, nn.Linear)):\n #m.threshold = 0.999 \n size = m.weight.size()\n fan_in = size[1] # number of columns\n variance2 = math.sqrt(1.0 / (fan_in)) # + fan_out)) #math.sqrt(6.0 / (fan_in + fan_out))\n m.weight.data.normal_(0.0, variance2)\n \n \n \n \n \n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n stride = 1\n if x == 'A':\n layers += [nn.AvgPool2d(kernel_size=2, stride=2)]\n elif x == 'D':\n layers += [nn.Dropout(0.2)]\n elif x=='M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, stride=stride, bias=False),\n\t\t\t\t\t\t\tnn.ReLU(inplace=True)\n\t\t\t\t\t\t\t]\n in_channels = x\n \n return nn.Sequential(*layers)\n\n \n def _make_fc_layers(self):\n layers = []\n# if self.vgg_name=='VGG16' & self.labels==1000:\n if self.vgg_name=='VGG9':\n layers += [nn.Linear(512*9, 4096, bias=False)]\n elif self.vgg_name=='VGG11':\n layers += [nn.Linear(512*9, 4096, bias=False)]\n elif self.vgg_name=='VGG13':\n layers += [nn.Linear(512*4, 4096, bias=False)]\n else:\n layers += [nn.Linear(128*8*8, 4096, bias=False)]\n layers += [nn.ReLU(inplace=True)]\n layers += [nn.Dropout(0.5)]\n layers += [nn.Linear(4096, 4096, bias=False)]\n layers += [nn.ReLU(inplace=True)]\n layers += [nn.Dropout(0.5)]\n layers += [nn.Linear(4096, self.labels, bias=False)]\n \n return nn.Sequential(*layers)\n\n \n" }, { "alpha_fraction": 0.4918321967124939, "alphanum_fraction": 0.5274075865745544, "avg_line_length": 42.56621170043945, "blob_id": "88df1d69c28c6b2923da33838ac5b748cd04f14c", "content_id": "71592d00509e7f429b397f3c4b57a0a2ff54c985", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19283, "license_type": "no_license", "max_line_length": 207, "num_lines": 438, "path": "/spike_model_cifar.py", "repo_name": "SayeedChowdhury/dct-snn", "src_encoding": "UTF-8", "text": "\n\n#---------------------------------------------------\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport pdb\nimport math\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\nimport copy\n\ntorch.manual_seed(2)\n\ncfg = {\n\t'VGG5' : [64, 'A', 128, 'D', 128, 'A'],\n\t'VGG9': [64, 'A', 128, 'D', 128, 'A', 256, 'D', 256, 'A', 512, 'D', 512, 'A'],\n\t'VGG11': [64, 'A', 128, 'D', 256, 'A', 512, 'D', 512, 'D', 512, 'A', 512, 'D', 512, 'A'],\n\t'VGG16': [64, 'D', 64, 'A', 128, 'D', 128, 'A', 256, 'D', 256, 'D', 256, 'A', 512, 'D', 512, 'D', 512, 'A', 512, 'D', 512, 'D', 512, 'D']\n}\n\n\nfrom typing import Union\n\n\ndef percentile(t: torch.tensor, q: float) -> Union[int, 
float]:\n \"\"\"\n Return the ``q``-th percentile of the flattened input tensor's data.\n \n CAUTION:\n * Needs PyTorch >= 1.1.0, as ``torch.kthvalue()`` is used.\n * Values are not interpolated, which corresponds to\n ``numpy.percentile(..., interpolation=\"nearest\")``.\n \n :param t: Input tensor.\n :param q: Percentile to compute, which must be between 0 and 100 inclusive.\n :return: Resulting value (scalar).\n \"\"\"\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k)[0]\n return result\n\nclass LinearSpike(torch.autograd.Function):\n \"\"\"\n Here we implement our spiking nonlinearity which also implements\n the surrogate gradient. By subclassing torch.autograd.Function,\n we will be able to use all of PyTorch's autograd functionality.\n Here we use the piecewise-linear surrogate gradient as was done\n in Bellec et al. (2018).\n \"\"\"\n gamma = 0.3 # Controls the dampening of the piecewise-linear surrogate gradient\n\n @staticmethod\n def forward(ctx, input):\n \"\"\"\n In the forward pass, we compute a step function of the input Tensor and\n return it. ctx is a context object that we use to stash information which\n we need to later backpropagate our error signals. To achieve this we use\n the ctx.save_for_backward method.\n \"\"\"\n ctx.save_for_backward(input)\n out = torch.zeros_like(input).cuda()\n out[input > 0] = 1.0\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass, we receive a Tensor we need to compute\n the surrogate gradient of the loss with respect to the input.\n Here we use the piecewise-linear surrogate gradient as was\n done in Bellec et al. 
(2018).\n \"\"\"\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad = grad_input*LinearSpike.gamma*F.threshold(1.0-torch.abs(input), 0, 0)\n return grad\n\nfrom torch.autograd import Variable\nclass Sampled_DCT2ov(nn.Module):\n def __init__(self, block_size=8, stride=None, p=0, mode = 'random', mean = None, std=None, device = 'cpu'):\n\n super(Sampled_DCT2ov, self).__init__()\n ### forming the cosine transform matrix\n self.block_size = block_size\n self.device = device\n self.mean =mean\n self.std =std\n if stride==None:\n self.stride=block_size\n else:\n self.stride=stride\n self.Q = torch.zeros((self.block_size,self.block_size)).cuda()\n self.bases=torch.zeros(self.block_size,self.block_size,self.block_size,self.block_size).cuda()\n self.Q[0] = math.sqrt( 1.0/float(self.block_size) )\n for i in range (1,self.block_size,1):\n for j in range(self.block_size):\n self.Q[i,j] = math.sqrt( 2.0/float(self.block_size) ) * math.cos( float((2*j+1)*math.pi*i) /float(2.0*self.block_size) )\n\n \n ### forming the 2d DCT bases\n for i in range (self.block_size):\n for j in range(self.block_size):\n c = torch.zeros((self.block_size,self.block_size)).cuda()\n c[i,j]=1.0\n self.bases[i,j] = torch.matmul(torch.matmul(self.Q.permute(1,0).contiguous(), c), self.Q )\n\n self.tst=self.block_size*self.block_size\n self.loc=torch.zeros(self.tst, 2).cuda()\n if self.block_size==4:\n self.loc[0]=torch.tensor([0,0])\n self.loc[1]=torch.tensor([0,1])\n self.loc[2]=torch.tensor([1,0])\n self.loc[3]=torch.tensor([2,0])\n self.loc[4]=torch.tensor([1,1])\n self.loc[5]=torch.tensor([0,2])\n self.loc[6]=torch.tensor([0,3])\n self.loc[7]=torch.tensor([1,2])\n self.loc[8]=torch.tensor([2,1])\n self.loc[9]=torch.tensor([3,0])\n self.loc[10]=torch.tensor([3,1])\n self.loc[11]=torch.tensor([2,2])\n self.loc[12]=torch.tensor([1,3])\n self.loc[13]=torch.tensor([2,3])\n self.loc[14]=torch.tensor([3,2])\n self.loc[15]=torch.tensor([3,3])\n\n if self.block_size==8:\n self.loc[0]=torch.tensor([0,0])\n self.loc[1]=torch.tensor([0,1])\n self.loc[2]=torch.tensor([1,0])\n self.loc[3]=torch.tensor([2,0])\n self.loc[4]=torch.tensor([1,1])\n self.loc[5]=torch.tensor([0,2])\n self.loc[6]=torch.tensor([0,3])\n self.loc[7]=torch.tensor([1,2])\n self.loc[8]=torch.tensor([2,1])\n self.loc[9]=torch.tensor([3,0])\n self.loc[10]=torch.tensor([4,0])\n self.loc[11]=torch.tensor([3,1])\n self.loc[12]=torch.tensor([2,2])\n self.loc[13]=torch.tensor([1,3])\n self.loc[14]=torch.tensor([0,4])\n self.loc[15]=torch.tensor([0,5])\n self.loc[16]=torch.tensor([1,4])\n self.loc[17]=torch.tensor([2,3])\n self.loc[18]=torch.tensor([3,2])\n self.loc[19]=torch.tensor([4,1])\n self.loc[20]=torch.tensor([5,0])\n self.loc[21]=torch.tensor([6,0])\n self.loc[22]=torch.tensor([5,1])\n self.loc[23]=torch.tensor([4,2])\n self.loc[24]=torch.tensor([3,3])\n self.loc[25]=torch.tensor([2,4])\n self.loc[26]=torch.tensor([1,5])\n self.loc[27]=torch.tensor([0,6])\n self.loc[28]=torch.tensor([0,7])\n self.loc[29]=torch.tensor([1,6])\n self.loc[30]=torch.tensor([2,5])\n self.loc[31]=torch.tensor([3,4])\n self.loc[32]=torch.tensor([4,3])\n self.loc[33]=torch.tensor([5,2])\n self.loc[34]=torch.tensor([6,1])\n self.loc[35]=torch.tensor([7,0])\n self.loc[36]=torch.tensor([7,1])\n self.loc[37]=torch.tensor([6,2])\n self.loc[38]=torch.tensor([5,3])\n self.loc[39]=torch.tensor([4,4])\n self.loc[40]=torch.tensor([3,5])\n self.loc[41]=torch.tensor([2,6])\n self.loc[42]=torch.tensor([1,7])\n self.loc[43]=torch.tensor([2,7])\n self.loc[44]=torch.tensor([3,6])\n 
self.loc[45]=torch.tensor([4,5])\n self.loc[46]=torch.tensor([5,4])\n self.loc[47]=torch.tensor([6,3])\n self.loc[48]=torch.tensor([7,2])\n self.loc[49]=torch.tensor([7,3])\n self.loc[50]=torch.tensor([6,4])\n self.loc[51]=torch.tensor([5,5])\n self.loc[52]=torch.tensor([4,6])\n self.loc[53]=torch.tensor([3,7])\n self.loc[54]=torch.tensor([4,7])\n self.loc[55]=torch.tensor([5,6])\n self.loc[56]=torch.tensor([6,5])\n self.loc[57]=torch.tensor([7,4])\n self.loc[58]=torch.tensor([7,5])\n self.loc[59]=torch.tensor([6,6])\n self.loc[60]=torch.tensor([5,7])\n self.loc[61]=torch.tensor([6,7])\n self.loc[62]=torch.tensor([7,6])\n self.loc[63]=torch.tensor([7,7])\n\n def rgb_to_ycbcr(self,input):\n \n # input is mini-batch N x 3 x H x W of an RGB image\n #output = Variable(input.data.new(*input.size())).to(self.device)\n output = Variable(torch.zeros_like(input)).cuda()\n input = (input * 255.0)\n output[:, 0, :, :] = input[:, 0, :, :] * 0.299+ input[:, 1, :, :] * 0.587 + input[:, 2, :, :] * 0.114 \n output[:, 1, :, :] = input[:, 0, :, :] * -0.168736 - input[:, 1, :, :] *0.331264+ input[:, 2, :, :] * 0.5 + 128\n output[:, 2, :, :] = input[:, 0, :, :] * 0.5 - input[:, 1, :, :] * 0.418688- input[:, 2, :, :] * 0.081312+ 128\n return output/255.0\n\n def ycbcr_to_freq(self,input): \n \n x=int(((input.shape[2]-self.block_size)/self.stride)+1)*self.block_size\n y=int(((input.shape[3]-self.block_size)/self.stride)+1)*self.block_size\n output = Variable(torch.zeros(self.tst, input.shape[0],input.shape[1],x,y)).cuda()\n dctcoeff= torch.zeros(input.shape[0],input.shape[1],self.block_size,self.block_size).cuda()\n #a=int(input.shape[2]/self.block_size)\n #b=int(input.shape[3]/self.block_size)\n \n# print(input.device)\n# print(self.Q.device)\n self.Q=self.Q.to(input.device)\n self.bases=self.bases.to(dctcoeff.device)\n m1=-1\n # Compute DCT in block_size x block_size blocks \n for i in range(0, input.shape[2] - self.block_size + 1, self.stride):\n m1=m1+1\n n1=-1\n for j in range(0, input.shape[3] - self.block_size + 1, self.stride):\n n1=n1+1\n dctcoeff = torch.matmul(torch.matmul(self.Q, input[:, :, i:i+self.block_size, j:j+self.block_size]), self.Q.permute(1,0).contiguous() )\n \n for k in range(self.tst):\n m,n=self.loc[k]\n output[k,:,:,m1*self.block_size : (m1+1)*self.block_size ,n1*self.block_size : (n1+1)*self.block_size ]=torch.einsum('ij,kl->ijkl', dctcoeff[:,:,int(m),int(n)], self.bases[int(m),int(n)])\n\n\n\n\n #return dctcoeff \n return output\n def forward(self, x):\n #return self.ycbcr_to_freq( x )\n if (x.shape[1]==3):\n return self.ycbcr_to_freq( self.rgb_to_ycbcr(x) )\n else:\n return self.ycbcr_to_freq(x ) \n\n\nclass VGG_SNN_STDB_lin(nn.Module):\n def __init__(self, vgg_name, activation='STDB', labels=1000, timesteps=75, leak_mem=0.99, drop=0.2):\n super().__init__()\n \n self.timesteps= timesteps\n self.vgg_name= vgg_name\n self.labels= labels\n self.leak_mem=leak_mem\n self.act_func\t= LinearSpike.apply\n use_cuda =torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n \n self.process \t= Sampled_DCT2ov(block_size=4, stride=2,device = device)\n \n self.features, self.classifier = self._make_layers(cfg[self.vgg_name])\n \n def threshold_init(self, scaling_threshold=1.0, reset_threshold=0.0, thresholds=[], default_threshold=1.0):\n self.scaling_threshold \t= scaling_threshold\n self.reset_threshold \t= reset_threshold\n self.threshold \t\t\t= {}\n print('\\nThresholds:')\n \n for pos in range(len(self.features)):\n if isinstance(self.features[pos], 
nn.Conv2d):\n self.threshold[pos] = round(thresholds.pop(0) * self.scaling_threshold + self.reset_threshold * default_threshold, 2)\n print('\\t Layer{} : {:.2f}'.format(pos, self.threshold[pos]))\n \n prev = len(self.features)\n \n for pos in range(len(self.classifier)-1):\n if isinstance(self.classifier[pos], nn.Linear):\n self.threshold[prev+pos] = round(thresholds.pop(0) * self.scaling_threshold + self.reset_threshold * default_threshold, 2)\n print('\\t Layer{} : {:.2f}'.format(prev+pos, self.threshold[prev+pos]))\n \n return self.threshold\n \n def counting_spikes(cur_time, layer, spikes):\n self.spike_count\n \n def _make_layers(self, cfg):\n layers \t\t= []\n in_channels = 3\n \n for x in (cfg):\n stride = 1\n if x == 'A':\n layers += [nn.AvgPool2d(kernel_size=2, stride=2)]\n elif x == 'D':\n layers += [nn.Dropout(0.2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, stride=stride, bias=False),nn.ReLU(inplace=True)]\n in_channels = x\n \n features = nn.Sequential(*layers)\n \n layers = []\n layers += [nn.Linear(512*9, 4096, bias=False)]\n layers += [nn.ReLU(inplace=True)]\n layers += [nn.Dropout(0.2)]\n layers += [nn.Linear(4096,4096, bias=False)]\n layers += [nn.ReLU(inplace=True)]\n layers += [nn.Dropout(0.2)]\n layers += [nn.Linear(4096, self.labels, bias=False)]\n \n classifer = nn.Sequential(*layers)\n return (features, classifer)\n \n def network_init(self, update_interval):\n self.update_interval = update_interval\n \n def neuron_init(self, x):\n self.batch_size = x.size(0)\n self.width \t\t= 60\n self.height \t= 60\n \n self.mem \t\t= {}\n self.spike \t\t= {}\n self.mask \t\t= {}\n self.spike_count= {}\n \n for l in range(len(self.features)):\n if isinstance(self.features[l], nn.Conv2d):\n self.mem[l] \t\t= torch.zeros(self.batch_size, self.features[l].out_channels, self.width, self.height)\n self.spike_count[l] = torch.zeros(self.mem[l].size())\n elif isinstance(self.features[l], nn.Dropout):\n self.mask[l] = self.features[l](torch.ones(self.mem[l-2].shape))\n elif isinstance(self.features[l], nn.AvgPool2d):\n self.width = self.width//2\n self.height = self.height//2\n \n prev = len(self.features)\n for l in range(len(self.classifier)):\n if isinstance(self.classifier[l], nn.Linear):\n self.mem[prev+l] \t\t\t= torch.zeros(self.batch_size, self.classifier[l].out_features)\n self.spike_count[prev+l] \t= torch.zeros(self.mem[prev+l].size())\n \n elif isinstance(self.classifier[l], nn.Dropout):\n self.mask[prev+l] = self.classifier[l](torch.ones(self.mem[prev+l-2].shape))\n self.spike = copy.deepcopy(self.mem)\n for key, values in self.spike.items():\n for value in values:\n value.fill_(-1000)\n \n def forward(self, x, cur_time, mem=[], spike=[], mask=[], spike_count=[], find_max_mem=False, max_mem_layer=0):\n if cur_time == 0:\n self.neuron_init(x)\n else:\n self.batch_size = x.size(0)\n self.mem \t\t= {}\n self.spike \t\t= {}\n self.mask \t\t= {}\n for key, values in mem.items():\n self.mem[key] = values.detach()\n for key, values in spike.items():\n self.spike[key] = values.detach()\n for key, values in mask.items():\n self.mask[key] = values.detach()\n for key,values in spike_count.items():\n self.spike_count[key] = values.detach()\n \n #dct-encoding and threshold selection for input layer\n g=self.process(x)\n th_n=np.percentile(g.cpu(), 6.5)\n th_p=np.percentile(g.cpu(), 93.5)\n mem=torch.zeros(g.shape[1],g.shape[2],g.shape[3],g.shape[4]) \n features_max_layer \t= len(self.features)\n max_mem \t\t\t= 0.0\n for t in range(cur_time, 
cur_time+self.update_interval):\n #spike-generator encoding part\n mem=mem+g[t%16]\n spike_inp = torch.zeros_like(mem).cuda()\n \n spike_inp[mem >th_p] = 1.0\n spike_inp[mem < th_n] = -1.0\n rst = torch.zeros_like(mem).cuda()\n c = (mem >th_p)\n rst[c] = torch.ones_like(mem)[c]*th_p\n e = (mem < th_n)\n rst[e] = torch.ones_like(mem)[e]*th_n\n mem=mem-rst\n out_prev = spike_inp\n for l in range(len(self.features)):\n if isinstance(self.features[l], (nn.Conv2d)):\n mem_thr \t\t\t\t\t= (self.mem[l]/self.threshold[l]) - 1.0\n out \t\t\t\t\t\t= self.act_func(mem_thr)\n rst \t\t\t\t\t\t= self.threshold[l] * (mem_thr>0).float()\n self.spike[l] \t\t\t\t= self.spike[l].masked_fill(out.bool(),t-1)\n self.spike_count[l][out.bool()] \t= self.spike_count[l][out.bool()] + 1\n \n if find_max_mem and l==max_mem_layer:\n if (self.features[l](out_prev)).max()>max_mem:\n #max_mem = (self.features[l](out_prev)).max()\n max_mem=percentile((self.features[l](out_prev)), 99.9) \n #max_mem = np.percentile((self.features[l](out_prev)).cpu(), 99.9)\n break\n self.mem[l] \t= self.leak_mem*self.mem[l] + self.features[l](out_prev) - rst\n out_prev \t\t= out.clone()\n \n elif isinstance(self.features[l], nn.AvgPool2d):\n out_prev \t\t= self.features[l](out_prev)\n elif isinstance(self.features[l], nn.Dropout):\n out_prev \t\t= out_prev * self.mask[l]\n if find_max_mem and max_mem_layer<features_max_layer:\n continue\n out_prev \t= out_prev.reshape(self.batch_size, -1)\n prev = len(self.features)\n \n for l in range(len(self.classifier)-1):\n if isinstance(self.classifier[l], (nn.Linear)):\n mem_thr \t\t\t\t\t= (self.mem[prev+l]/self.threshold[prev+l]) - 1.0\n out \t\t\t\t\t\t= self.act_func(mem_thr)\n rst \t\t\t\t\t\t= self.threshold[prev+l] * (mem_thr>0).float()\n self.spike[prev+l] \t\t\t= self.spike[prev+l].masked_fill(out.bool(),t-1)\n self.spike_count[prev+l][out.bool()] \t= self.spike_count[prev+l][out.bool()] + 1\n \n if find_max_mem and (prev+l)==max_mem_layer:\n if (self.classifier[l](out_prev)).max()>max_mem:\n #max_mem = (self.classifier[l](out_prev)).max()\n max_mem=percentile((self.classifier[l](out_prev)), 99.9) \n #max_mem = np.percentile((self.classifier[l](out_prev)).cpu(), 99.9)\n \n break\n self.mem[prev+l] \t= self.leak_mem*self.mem[prev+l] + self.classifier[l](out_prev) - rst\n out_prev \t\t= out.clone()\n elif isinstance(self.classifier[l], nn.Dropout):\n out_prev \t\t= out_prev * self.mask[prev+l]\n \n if not find_max_mem:\n self.mem[prev+l+1] \t\t= self.mem[prev+l+1] + self.classifier[l+1](out_prev)\n if find_max_mem:\n return max_mem\n \n return self.mem[prev+l+1], self.mem, self.spike, self.mask, self.spike_count\n \n \n \n \n \n \n\t\t\n\n \n \n \n \n\n\n \n \n \n \n \n \n \n " }, { "alpha_fraction": 0.5483385324478149, "alphanum_fraction": 0.5763636231422424, "avg_line_length": 39.26262664794922, "blob_id": "89d0c4797e89eb18802ad202635fe298874ce38e", "content_id": "806cf4044e408b361ee903a2c38a7738bccb04f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15950, "license_type": "no_license", "max_line_length": 282, "num_lines": 396, "path": "/ann.py", "repo_name": "SayeedChowdhury/dct-snn", "src_encoding": "UTF-8", "text": "\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom torch.utils.data.dataloader import DataLoader\nimport torch.backends.cudnn as cudnn\nfrom tensorboard_logger import configure, log_value\nimport 
torchvision\n#import torchvision.transforms as transforms\nimport numpy as np\n\n\nimport os\nimport argparse\nimport math\nfrom vgg_ann_models import *\n#from utils import progress_bar\nimport time\n\n\ndef rin(input,b=4,s=2):\n x=int(((input.shape[2]-b)/s)+1)*b\n y=int(((input.shape[3]-b)/s)+1)*b\n output = torch.zeros(input.shape[0],input.shape[1],x,y)\n m=-1\n \n for i in range(0, input.shape[2] - b + 1, s):\n m=m+1\n n=-1\n for j in range(0, input.shape[3] - b + 1, s):\n n=n+1\n output[:,:,m*b : (m+1)*b,n*b : (n+1)*b]=input[:, :, i:i+b, j:j+b]\n \n return output\n\n\nclass DCT2(nn.Module):\n def __init__(self, block_size=4, p=0, mode = 'random', mean = None, std=None, device = 'cpu'):\n\n super(DCT2, self).__init__()\n ### forming the cosine transform matrix\n self.block_size = block_size\n self.device = device\n self.mean =mean\n self.std =std\n self.Q = torch.zeros((self.block_size,self.block_size)).to(self.device)\n \n self.Q[0] = math.sqrt( 1.0/float(self.block_size) )\n for i in range (1,self.block_size,1):\n for j in range(self.block_size):\n self.Q[i,j] = math.sqrt( 2.0/float(self.block_size) ) * math.cos( float((2*j+1)*math.pi*i) /float(2.0*self.block_size) )\n\n \n\n def rgb_to_ycbcr(self,input):\n \n # input is mini-batch N x 3 x H x W of an RGB image\n #output = Variable(input.data.new(*input.size())).to(self.device)\n output = torch.zeros_like(input).to(self.device)\n input = (input * 255.0)\n output[:, 0, :, :] = input[:, 0, :, :] * 0.299+ input[:, 1, :, :] * 0.587 + input[:, 2, :, :] * 0.114 \n output[:, 1, :, :] = input[:, 0, :, :] * -0.168736 - input[:, 1, :, :] *0.331264+ input[:, 2, :, :] * 0.5 + 128\n output[:, 2, :, :] = input[:, 0, :, :] * 0.5 - input[:, 1, :, :] * 0.418688- input[:, 2, :, :] * 0.081312+ 128\n return output/255.0\n\n def ycbcr_to_freq(self,input): \n \n \n output = torch.zeros_like(input).to(self.device)\n a=int(input.shape[2]/self.block_size)\n b=int(input.shape[3]/self.block_size)\n \n # Compute DCT in block_size x block_size blocks \n for i in range(a):\n for j in range(b):\n output[:,:,i*self.block_size : (i+1)*self.block_size,j*self.block_size : (j+1)*self.block_size] = torch.matmul(torch.matmul(self.Q, input[:, :, i*self.block_size : (i+1)*self.block_size, j*self.block_size : (j+1)*self.block_size]), self.Q.permute(1,0).contiguous() )\n \n return output \n\n def forward(self, x):\n #return self.ycbcr_to_freq( self.rgb_to_ycbcr(x) )\n if (x.shape[1]==3):\n return self.ycbcr_to_freq( self.rgb_to_ycbcr(x) )\n else:\n return self.ycbcr_to_freq(x ) \n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 2 every 30 epochs\"\"\"\n lr = args.lr * (0.001 ** (epoch // 100))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr \n\n\nparser = argparse.ArgumentParser(description='PyTorch tinyimagenet Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('-b', '--batch_size', default=128, type=int,\n metavar='N', help='mini-batch size (default: 128)')\n#parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--seed', default=0, type=int, 
help='Random seed')\n\nparser.add_argument('--ckpt_dir', default=None, type=str, help='Checkpoint dir. If set to none, default dir is used')\nparser.add_argument('--ckpt_intrvl', default=1, type=int, help='Number of epochs between successive checkpoints')\nparser.add_argument('--num_epochs', default=312, type=int, help='Number of epochs for backpropagation')\nparser.add_argument('--resume_from_ckpt', default=0, type=int, help='Resume from checkpoint?')\nparser.add_argument('--tensorboard', default=0, type=int, help='Log progress to TensorBoard')\nglobal args\nargs = parser.parse_args()\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\n# Initialize seed\n#--------------------------------------------------\nseed = args.seed\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\nnum_train = 50000\nnum_test = 10000\nimg_size = 32\ninp_maps = 3\nnum_cls = 10\ntest_error_best = 100 \nstart_epoch = 0\nnum_epochs = args.num_epochs\nend_epoch = start_epoch+num_epochs\nbatch_size = args.batch_size\nckpt_dir = args.ckpt_dir\nckpt_intrvl = args.ckpt_intrvl\nresume_from_ckpt = True if args.resume_from_ckpt else False\n#model_str_use = 'vgg11_cifar100_ann'+'_bs'+str(batch_size)+'_new_'+str(args.lr)+'lrby5_every30epoch'\nmodel_str_use = 'vgg9_cifar10_ann_lr.1_.1by100'+'_bs'+str(batch_size)+'_pixelexpanded_4avgpool'\n#model_str_use = 'vgg13_tinyimgnet_4*4dctbnmaxpool_ann_lr.01_.1by100'+'_bs'+str(batch_size)+'_wd1e-4'\nif(ckpt_dir is None):\n ckpt_dir = '/home/vgg9_snn_surrgrad_backprop/CHECKPOINTS/'+model_str_use\n ckpt_dir = os.path.expanduser(ckpt_dir)\n if(ckpt_intrvl > 0):\n if(not os.path.exists(ckpt_dir)):\n os.mkdir(ckpt_dir)\nckpt_fname = ckpt_dir+'/ckpt.pth'\n# Use TensorBoard?\ntensorboard = True if args.tensorboard else False\n\n# Data\nprint('==> Preparing data..')\n\n#dataset = 'tinyIMAGENET' # {'CIFAR10', 'CIFAR100', 'IMAGENET'}\ndataset = 'CIFAR10'\n#usual\nnormalize = transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\n\n# usual imgnet stat from repos\n#normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])\n\n# calculated itiny-mgnet stat \n#normalize = transforms.Normalize(mean = [0.48, 0.448, 0.3975], std = [0.277, 0.269, 0.282])\n\n\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\ntransform_test = transforms.Compose([transforms.ToTensor(), normalize])\n\nif dataset == 'CIFAR10':\n trainset = datasets.CIFAR10(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR10(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 10\n\nelif dataset == 'CIFAR100':\n trainset = datasets.CIFAR100(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR100(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 100\n\nelif dataset == 'IMAGENET':\n labels = 1000\n traindir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'train')\n valdir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'val')\n trainset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n testset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(256),\n 
transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])) \nelif dataset == 'tinyIMAGENET':\n labels = 200\n # adding the tinyimagenet directory\n traindir = os.path.join('/home/nano01/a/banerj11/srinivg_BackProp_CIFAR10/sayeed/tiny-imagenet-200/', 'train')\n valdir = os.path.join('/home/nano01/a/banerj11/srinivg_BackProp_CIFAR10/sayeed/tiny-imagenet-200/', 'val')\n \n# traindir = os.path.join('/local/scratch/a/chowdh23/data/tiny-imagenet-200/', 'train')\n# valdir = os.path.join('/local/scratch/a/chowdh23/data/tiny-imagenet-200/', 'val')\n trainset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(64),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n testset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n #transforms.Resize(256),\n #transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]))\n\ntrainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)\ntestloader = DataLoader(testset, batch_size=batch_size, shuffle=False)\n\n\n\n# Model\nprint('==> Building model..')\nmodel = VGG('VGG9', labels=labels)\n# net = ResNet18()\n# net = PreActResNet18()\n# net = GoogLeNet()\n# net = DenseNet121()\n# net = ResNeXt29_2x64d()\n# net = MobileNet()\n# net = MobileNetV2()\n# net = DPN92()\n# net = ShuffleNetG2()\n# net = SENet18()\n# net = ShuffleNetV2(1)\n#net = EfficientNetB0()\nmodel = model.cuda()\nmodel = torch.nn.DataParallel(model).cuda()\n\nuse_cuda =torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nm=DCT2(block_size=4, device = device).to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)\n#optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4, amsgrad=False)\nif(resume_from_ckpt):\n ckpt = torch.load(ckpt_fname)\n start_epoch = ckpt['start_epoch']\n end_epoch = start_epoch+num_epochs\n test_error_best = ckpt['test_error_best']\n epoch_best = ckpt['epoch_best']\n# train_time = ckpt['train_time']\n model.load_state_dict(ckpt['model_state_dict'])\n optimizer.load_state_dict(ckpt['optim_state_dict'])\n print('##### Loaded ANN_VGG from {}\\n'.format(ckpt_fname))\n\n\nprint('********** ANN training and evaluation **********')\nfor epoch in range(start_epoch, end_epoch):\n train_loss = AverageMeter()\n test_loss = AverageMeter()\n# model.use_max_out_over_time = use_max_out_over_time\n# model.module.updt_tend(t_end_updt)\n model.train()\n adjust_learning_rate(optimizer, epoch)\n \n\n\n for i, data in enumerate(trainloader):\n# print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"Training batch:{}/{};\".format(i+1, math.ceil(num_train/batch_size)))\n\n# start_time = time.time()\n # Load the inputs and targets\n inputs, targets = data\n #targets=torch.from_numpy(np.eye(num_cls)[targets])\n \n inputs, targets = inputs.cuda(), targets.cuda()\n if dataset=='CIFAR10' or dataset=='CIFAR100':\n inputs =rin(inputs)\n #inputs =m(inputs)\n \n \n\n # Reset the gradients\n optimizer.zero_grad()\n\n # Perform forward pass and compute the target loss\n output = model(inputs)\n #output= F.softmax(output,dim=1)\n \n #b=targets.float()\n loss = criterion(output, targets)\n train_loss.update(loss.item(), targets.size(0))\n\n # Perform backward pass and update the weights\n loss.backward()\n optimizer.step()\n# end_time = time.time()\n# train_time += (end_time-start_time)/3600\n \n \n # Print error measures and log progress to 
TensorBoard\n train_loss_per_epoch = train_loss.avg\n# print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"########## Training loss: {}\".format(train_loss_per_epoch))\n# if(tensorboard):\n# log_value('train_loss', train_loss_per_epoch, epoch)\n\n # Evaluate classification accuracy on the test set\n# model.use_max_out_over_time = False\n# model.module.updt_tend(t_end)\n correct_pred_top1 = 0\n correct_pred_topk = 0\n model.eval()\n with torch.no_grad():\n for j, data in enumerate(testloader, 0):\n# print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"Test batch: {}/{}\".format(j+1, math.ceil(num_test/batch_size)))\n images, labels = data\n\n images, labels = images.cuda(), labels.cuda()\n if dataset=='CIFAR10' or dataset=='CIFAR100':\n images =rin(images)\n \n \n #images =m(images)\n \n \n out = model(images)\n loss1 = criterion(out, labels)\n test_loss.update(loss1.item(), labels.size(0))\n _, predicted = out.max(1)\n# total += targets.size(0)\n correct_pred_top1 += predicted.eq(labels).sum().item()\n #print(correct_pred_top1)\n# _, pred = out.topk(topk, 1, True, True)\n# pred = pred.t()\n# correct = pred.eq(labels.view(1, -1).expand_as(pred))\n# correct_pred_top1 += correct[:1].view(-1).float().sum(0, keepdim=True)\n# correct_pred_topk += correct[:topk].view(-1).float().sum(0, keepdim=True)\n\n test_loss_per_epoch = test_loss.avg \n# print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"########## Test loss: {}\".format(test_loss_per_epoch))\n if(tensorboard):\n log_value('test_loss', test_loss_per_epoch, epoch)\n # Print error measures and log progress to TensorBoard\n test_error_top1 = (1-(correct_pred_top1/num_test))*100\n# test_error_topk = (1-(correct_pred_topk/num_test))*100\n test_error_chgd = False\n if(test_error_top1 < test_error_best):\n test_error_best = test_error_top1\n epoch_best = epoch\n test_error_chgd = True\n print(\"Epoch: {}/{};\".format(epoch_best+1, end_epoch), \"########## Test error (top1-best): {:.2f}%\".format(test_error_best))\n print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"########## Test error (top1-cur) : {:.2f}%\".format(test_error_top1))\n# print(\"Epoch: {}/{};\".format(epoch+1, end_epoch), \"########## Test error (top\"+str(topk)+\"-cur) : {:.2f}%\".format(test_error_topk[0]))\n if(tensorboard):\n log_value('test_error (top1-best)', test_error_best, epoch)\n log_value('test_error (top1)', test_error_top1, epoch)\n# log_value('test_error (top'+str(topk)+')', test_error_topk, epoch)\n\n # Checkpoint SNN training and evaluation states\n if((ckpt_intrvl > 0) and ((epoch == end_epoch-1) or test_error_chgd)):\n print('=========== Checkpointing ANN training and evaluation states')\n ckpt = {'model_state_dict': model.state_dict(),\n 'optim_state_dict': optimizer.state_dict(),\n 'start_epoch' : epoch+1,\n 'test_error_best' : test_error_best,\n 'epoch_best' : epoch_best}\n# 'train_time' : train_time}\n if(test_error_chgd):\n torch.save(ckpt, ckpt_fname)\n\n\n\n\n\n" } ]
5
Kristaspahilari/dogchat
https://github.com/Kristaspahilari/dogchat
7ad45ed1246f834184510688e901d8e0cc8d6725
8658d0acbb9d85ad7bcecc0a5a71d9e91e888ef5
041c6dc0c7a8ceb3acb1d149e2fe2bea42e8f0b2
refs/heads/main
2023-07-12T10:15:40.694771
2021-08-12T11:15:01
2021-08-12T11:15:01
395,291,219
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46148717403411865, "alphanum_fraction": 0.5108369588851929, "avg_line_length": 20.908397674560547, "blob_id": "d190a27a8c857a5f4eab7e74a9b3fb6d455693f5", "content_id": "cf290d533ba158526f5d8479960c1a18749e75a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2999, "license_type": "no_license", "max_line_length": 137, "num_lines": 131, "path": "/dogchat/data.py", "repo_name": "Kristaspahilari/dogchat", "src_encoding": "UTF-8", "text": "from datetime import datetime\r\n\r\n\r\n\r\ncomment1 = {\r\n \"Text\" : \"What time are you going?\",\r\n \"Name\": \"Charlie\",\r\n \"Username\" : \"chucky\",\r\n \"Picture\" : \"charlie_profile.png\",\r\n \"DateTime\" : datetime(2021, 7, 1, 18, 0, 0),\r\n}\r\n\r\ncomment2 = {\r\n \"Text\" : \"I'm going to go at 7 tonight!\",\r\n \"Name\": \"Melba\",\r\n \"Username\" : \"melba\",\r\n \"Picture\" : \"melba_profile.png\",\r\n \"DateTime\" : datetime(2021, 7, 1, 18, 30, 0),\r\n}\r\n\r\ncomment3 = {\r\n \"Text\" : \"You bet! I love naps.\",\r\n \"Name\": \"Melba\",\r\n \"Username\" : \"melba\",\r\n \"Picture\" : \"melba_profile.png\",\r\n \"DateTime\" : datetime(2021, 6, 29, 9, 30, 0),\r\n}\r\n\r\ncomment4 = {\r\n \"Text\" : \"What treats do you like the best?\",\r\n \"Name\": \"bubi\",\r\n \"Username\" : \"bubi12\",\r\n \"Picture\" : \"bub.jpg\",\r\n \"DateTime\" : datetime(2021, 6, 30, 14, 30, 0),\r\n}\r\n\r\npost1 = {\r\n \"PostId\" : 1,\r\n \"Text\": \"I can't wait to go to the park today\",\r\n \"Name\": \"Melba\",\r\n \"Username\" : \"melba\",\r\n \"Likes\": [\"charlie\"],\r\n \"Comments\" : [comment1, comment2],\r\n \"DateTime\" : datetime(2021, 7, 1, 17, 0, 0),\r\n \"Picture\" : \"melba_profile.png\"\r\n}\r\n\r\npost2 = {\r\n \"PostId\" : 2,\r\n \"Text\": \"I could really use a treat right now\",\r\n \"Name\": \"Melba\",\r\n \"Username\" : \"melba\",\r\n \"Likes\": [\"charlie\"],\r\n \"Comments\" : [comment4],\r\n \"DateTime\" : datetime(2021, 6, 30, 12, 30, 0),\r\n \"Picture\" : \"melba_profile.png\"\r\n}\r\n\r\npost3 = {\r\n \"PostId\" : 3,\r\n \"Text\": \"Arent' naps the best?\",\r\n \"Name\": \"Charlie\",\r\n \"Username\" : \"chucky\",\r\n \"Likes\": [\"melba\"],\r\n \"Comments\" : [comment3],\r\n \"DateTime\" : datetime(2021, 6, 29, 9, 0, 0),\r\n \"Picture\" : \"charlie_profile.png\"\r\n\r\n}\r\npost4 = {\r\n \"PostId\" : 4,\r\n \"Text\": \"I love this app!\",\r\n \"Name\": \"bubi\",\r\n \"Username\" : \"bubi12\",\r\n \"Likes\": [\"charlie\"],\r\n \"Comments\" : [],\r\n \"DateTime\" : datetime(2021, 6, 30, 12, 30, 0),\r\n \"Picture\" : \"bub.jpg\"\r\n}\r\n\r\ntest_posts = {\r\n 1 : post1,\r\n 2 : post2,\r\n 3 : post3,\r\n 4 : post4\r\n}\r\n\r\nmelba_posts = {\r\n 1 : post1,\r\n 2 : post2\r\n}\r\n\r\ncharlie_posts = {\r\n 3 : post3\r\n}\r\nbubi_posts = {\r\n 4 : post4\r\n}\r\n\r\ncharlie = {\r\n \"Name\": \"Charlie\",\r\n \"Bio\" : \"Hi, I'm Charlie! I'm a standard poodle and I love to nap.\",\r\n \"Username\" : \"chucky\",\r\n \"Picture\" : \"charlie_profile.png\",\r\n \"Birthday\" : datetime(2018, 1, 2),\r\n \"Posts\" : charlie_posts\r\n}\r\n\r\nmelba = {\r\n \"Name\": \"Melba\",\r\n \"Bio\" : \"My name is Melba. I'm a miniature golden-doodle. My favorite place in the world is Discovery Park in Seattle, Washington.\",\r\n \"Username\" : \"melba\",\r\n \"Picture\" : \"melba_profile.png\",\r\n \"Birthday\" : datetime(2013, 2, 14),\r\n \"Posts\": melba_posts\r\n}\r\n\r\nbubi = {\r\n \"Name\": \"Bubi\",\r\n \"Bio\" : \"Hi, I'm Bubi! 
I like treats and long walks!\",\r\n \"Username\" : \"bubi12\",\r\n \"Picture\" : \"bub.jpg\",\r\n \"Birthday\" : datetime(2019, 3, 5),\r\n \"Posts\" : bubi_posts\r\n}\r\n\r\ndogs = {\r\n \"melba\" : melba,\r\n \"chucky\" : charlie,\r\n \"bubi12\" : bubi\r\n}" } ]
1
kerrylily/myblog
https://github.com/kerrylily/myblog
9d1c3fc778864ad03314d8d4783888c83a7a02d9
82459934d92782698db3f85a4f314bb3a858b14b
556134929f6135ffbd93d7e8535eacabe6a72681
refs/heads/master
2021-01-17T01:09:18.419369
2016-06-24T10:35:29
2016-06-24T10:35:29
60,164,599
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5870485901832581, "alphanum_fraction": 0.5914868116378784, "avg_line_length": 28.861446380615234, "blob_id": "afa247d83cda14992af6e822adb7f663d91c0dd5", "content_id": "45694e71d54c0c1d75b697a0621925b8d72352c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4957, "license_type": "permissive", "max_line_length": 164, "num_lines": 166, "path": "/myBlog.py", "repo_name": "kerrylily/myblog", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport os\nimport random, datetime\n\nfrom hashlib import md5\nimport MySQLdb.cursors\n\nfrom flask import Flask, request, session, redirect, url_for, render_template, make_response\n\napp = Flask(__name__)\n\n\ndef sql_connection():\n conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='malele', db='blog',\n charset=\"utf8\", cursorclass=MySQLdb.cursors.DictCursor)\n conn.autocommit(True)\n return conn.cursor()\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n\n db = sql_connection()\n\n if request.method == 'POST':\n db.execute('SELECT password FROM blog.user WHERE username=%s', (request.form['username'],))\n password = db.fetchone()\n\n if password == None:\n error = \"User Error!\"\n elif password['password'] == request.form['password']:\n session['username'] = request.form['username']\n return redirect(url_for('blog_index'))\n elif password:\n error = \"Password Error!\"\n else:\n error = 'No Found Error!'\n\n return render_template('login.html', error=error)\n\n\[email protected]('/logout')\ndef logout():\n session.pop('username', None)\n return redirect(url_for('login'))\n\n\[email protected]('/')\ndef index():\n data = None\n\n db = sql_connection()\n try:\n db.execute('SELECT b.id, b.title, b.content, b.username, b.time, u.email FROM blog.user AS u, blog.blogs AS b WHERE u.username=b.username ORDER BY id DESC')\n data = gravatar(db.fetchall())\n except Exception as e:\n print e.message\n\n return render_template('blogs_list.html', entries=data)\n\n\[email protected]('/blog_index')\ndef blog_index():\n data = None\n\n db = sql_connection()\n try:\n db.execute('SELECT id, title, content, time FROM blog.blogs WHERE username=%s ORDER BY id DESC ', (\n session['username'],))\n data = db.fetchall()\n except Exception as e:\n print e.message\n\n return render_template('blog_index.html', entries=data)\n\n\[email protected]('/create_blog', methods=['POST'])\ndef create_blog():\n db = sql_connection()\n try:\n db.execute('INSERT INTO blog.blogs (`username`, `title`, `content`) VALUES (%s, %s, %s)', (\n session['username'], request.form['title'], request.form['content']))\n except Exception as e:\n print e.message\n\n return redirect(url_for('blog_index'))\n\n\[email protected]('/ckupload/', methods=['POST', 'OPTIONS'])\ndef ckupload():\n \"\"\"CKEditor file upload\"\"\"\n error = ''\n url = ''\n callback = request.args.get(\"CKEditorFuncNum\")\n\n def gen_rnd_filename():\n filename_prefix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n return '%s%s' % (filename_prefix, str(random.randrange(1000, 10000)))\n\n if request.method == 'POST' and 'upload' in request.files:\n fileobj = request.files['upload']\n fname, fext = os.path.splitext(fileobj.filename)\n rnd_name = '%s%s' % (gen_rnd_filename(), fext)\n\n filepath = os.path.join(app.static_folder, 'upload', rnd_name)\n\n dirname = os.path.dirname(filepath)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except:\n 
error = 'ERROR_CREATE_DIR'\n        elif not os.access(dirname, os.W_OK):\n            error = 'ERROR_DIR_NOT_WRITEABLE'\n\n        if not error:\n            fileobj.save(filepath)\n            url = url_for('static', filename='%s/%s' % ('upload', rnd_name))\n    else:\n        error = 'post error'\n\n    res = \"\"\"<script type=\"text/javascript\">\nwindow.parent.CKEDITOR.tools.callFunction(%s, '%s', '%s');\n</script>\"\"\" % (callback, url, error)\n\n    response = make_response(res)\n    response.headers[\"Content-Type\"] = \"text/html\"\n    return response\n\n\[email protected]('/update_blog/<int:blog_id>', methods=['POST', 'GET'])\ndef update_blog(blog_id):\n    db = sql_connection()\n\n    if request.form['action'] == 'Update':\n        db.execute('SELECT * FROM blog.blogs where id=%s', (blog_id,))\n        blog = db.fetchone()\n        return render_template('change.html', blog=blog)\n    elif request.form['action'] == 'Delete':\n        db.execute('DELETE FROM blog.blogs WHERE id=%s', (blog_id,))\n\n    return redirect(url_for('blog_index'))\n\n\[email protected]('/commit/<int:blog_id>', methods=['POST'])\ndef commit(blog_id):\n    db = sql_connection()\n    db.execute('UPDATE blog.blogs SET title=%s, content=%s WHERE id=%s', (request.form['title'],\n                                                                          request.form['content'], blog_id))\n    return redirect(url_for('blog_index'))\n\n\ndef gravatar(data):\n    for info in data:\n        info['email'] = 'http://www.gravatar.com/avatar/' + md5(info['email']).hexdigest() + '?d=mm&s=' + '128'\n    return data\n\n\nif __name__ == '__main__':\n    app.secret_key = 'super secret key'\n    app.config['SESSION_TYPE'] = 'filesystem'\n\n    app.run()\n" }, { "alpha_fraction": 0.3571428656578064, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 12, "blob_id": "766a94ac403bc83132a0482433cca2e0e355ea46", "content_id": "ad75012435dc443aef668029b416de6a827df9ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 14, "license_type": "permissive", "max_line_length": 12, "num_lines": 1, "path": "/requirements.txt", "repo_name": "kerrylily/myblog", "src_encoding": "UTF-8", "text": "Flask==0.11.1\n\n" } ]
2
aundrelab/NextDoor
https://github.com/aundrelab/NextDoor
d6c2d57d5b6137ddfddee3cd973f9f06e91cfe85
96e56e1384fbf8fb94ceb5e2b48caa0ade4b4ea1
cd010a01e3e82d3a92d24100a042daedefab65b7
refs/heads/master
2022-12-18T20:51:28.294275
2020-09-26T23:39:19
2020-09-26T23:39:19
298,703,833
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6033898591995239, "alphanum_fraction": 0.6058111190795898, "avg_line_length": 30.287878036499023, "blob_id": "7fb60a98d7cabbbe8b69202a130c194a91269196", "content_id": "412bb7ce35ad14caae2d32a987df123e4b138f27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2065, "license_type": "no_license", "max_line_length": 80, "num_lines": 66, "path": "/wifi.py", "repo_name": "aundrelab/NextDoor", "src_encoding": "UTF-8", "text": "'''\n Using webscraing on the wifispc website to get the location of free wifi\n in different cities. (Currently only for Salinas, CA)\n'''\n\n# All neccessary imports=\nfrom urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\n\nclass Wifi:\n def find_wifi(self):\n # Fixes certification issue\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\n else:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\n # Use the web page you chose here\n my_site = \"https://wifispc.com/united-states/california/salinas.html\"\n html = urlopen(my_site)\n\n # Array of wifi addresses\n wifi_addresses = []\n\n # Getting the wifi addresses\n soup = BeautifulSoup(html, 'html.parser')\n tablelist = soup.find('table', id='tablelist')\n getTd = soup.findAll('td', class_=\"td_near\")\n\n for address in getTd:\n wifi_addresses.append(address.get_text())\n\n # Array of wifi locations\n wifi_locations = []\n\n getTd = soup.findAll('td', class_=\"td_poisk\")\n\n # For loop to append the wifi locations\n for location in getTd:\n wifi_locations.append(location.get_text())\n\n\n # Removing the / none from the location names\n for n in range(len(wifi_locations)):\n wifi_locations[n] = wifi_locations[n].split(\"/\")[0]\n wifi_locations[n] = wifi_locations[n][:-1]\n wifi_addresses[n] += \" Salinas, CA\"\n\n del wifi_addresses[-1]\n\n del wifi_locations[-1]\n\n wifi = {}\n # Converting the two lists into a dictionary\n for key in wifi_locations:\n for value in wifi_addresses:\n wifi[key] = value\n wifi_addresses.remove(value)\n break\n\n return wifi\n" }, { "alpha_fraction": 0.7115027904510498, "alphanum_fraction": 0.7161409854888916, "avg_line_length": 22.955554962158203, "blob_id": "c9fa731fc735e62531dc1a0058ee4b27cbea20ca", "content_id": "b83266504a50a7505fae20e7cf918e1c00031287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/app.py", "repo_name": "aundrelab/NextDoor", "src_encoding": "UTF-8", "text": "# Developers: Aundre Labrador, Edward Cluster, Prince Rios, Eric Chavez, Alex Espinoza-Fuentes\nfrom flask import Flask, render_template\nfrom wifi import Wifi\nfrom food_drive import FoodDrive\nfrom geopy.geocoders import Nominatim\n\napp = Flask(__name__)\n# How to Run Server in Terminal\n#\n# export FLASK_APP=\"app.py\"\n# export FLASK_DEBUG=1\n# flask run\n#\n\nfood_drive_dic = (FoodDrive().find_food_drive())\nwifi_dic = (Wifi().find_wifi())\ngeolocator = Nominatim(user_agent=\"myGeocoder\")\n \n\nwifi_coordinates = []\nfor key, value in wifi_dic.items():\n\tloc1 = geolocator.geocode(value)\n\tif(loc1):\n\t\tlat = loc1.latitude\n\t\tlng = loc1.longitude\n\t\twifi_coordinates.append((key,value,lat,lng))\n# 
print(wifi_coordinates)\n\n\nfood_drive_coordinates = []\ndel food_drive_dic['Henry F. Kammann Elementary']\nfor key, value in food_drive_dic.items():\n\tloc = geolocator.geocode(value)\n\tlat = loc.latitude\n\tlng = loc.longitude\n\n\tfood_drive_coordinates.append((key,value,lat,lng))\n\n# print(food_drive_coordinates)\n\ntotal_resources = food_drive_coordinates + wifi_coordinates\n\[email protected]('/')\ndef index():\n return render_template(\"map.html\", places=total_resources)\n" }, { "alpha_fraction": 0.5853852033615112, "alphanum_fraction": 0.5917394757270813, "avg_line_length": 36.77000045776367, "blob_id": "373a9ee981e5cd233082e80e6a02c5d3a1840dbf", "content_id": "3e4df78cc2325b3ecea2694f8dabb274ec6f3c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3777, "license_type": "no_license", "max_line_length": 121, "num_lines": 100, "path": "/food_drive.py", "repo_name": "aundrelab/NextDoor", "src_encoding": "UTF-8", "text": "'''\n This code is scraping urls for each elementary school in the Salinas City\n Elementary School District. This data comes from California Department of\n Education. Each school url gives info of that school.\n For example: name, address, district, CDS Code, ect.....\n The list of schools can be found at:\n https://www.cde.ca.gov/SchoolDirectory/results?districts=626&status=1&search=1\n'''\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport ssl\n\nclass FoodDrive:\n def find_food_drive(self):\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\n else:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\n\n # url for district page\n district_url = \"https://www.cde.ca.gov/SchoolDirectory/results?districts=626&status=1&search=1&order=-4&items=25\"\n # opening url in bytes\n district_html = urlopen(district_url)\n # creating soup object\n soup = BeautifulSoup(district_html, 'html.parser')\n # access table class\n table_tag = soup.find(\"table\", class_=\"table table-bordered small\")\n # access tr tag\n tr_tag = table_tag.findAll(\"tr\")\n\n # deleting junk from tr_tag\n del tr_tag[0]\n\n # holding list of school's url (only half of the url)\n school_list = []\n\n # adding second half rout to school list\n for i in tr_tag:\n # find all <a> html tags\n links = i.findAll('a')\n # look into all href\n for link in links:\n # add href url to school list\n school_list.append(link['href'])\n\n # holding list of school's FULL url for location\n school_address_url = []\n # looping through school_list\n for i in school_list:\n # adding https url with school list url. 
Then we append it to the final address url list\n            school_address_url.append(\"https://www.cde.ca.gov\"+i)\n\n        #################################################################################\n        list_of_school_names = []\n        list_of_school_addresses = []\n        # Putting values of school names and addresses into lists (Still not formatted correctly)\n        for i in school_address_url:\n            school_html = urlopen(i)\n\n            soup = BeautifulSoup(school_html, 'html.parser')\n\n            table_tag = soup.find(\"table\", class_=\"table table-bordered small\")\n\n            t_body = table_tag.findAll(\"tr\")\n\n\n            list_of_school_names.append(t_body[2].getText())\n\n            list_of_school_addresses.append(t_body[4].getText())\n\n        # Fixing formatting\n        for i in range(len(list_of_school_names)):\n\n            list_of_school_names[i] = list_of_school_names[i][34:]\n            # Cutting off the end of the string\n            list_of_school_names[i] = list_of_school_names[i].split(\"  \")[0]\n\n            list_of_school_names[i] = list_of_school_names[i][:-2]\n\n\n        for i in range(len(list_of_school_addresses)):\n            list_of_school_addresses[i] = list_of_school_addresses[i][52:]\n            list_of_school_addresses[i] = list_of_school_addresses[i].split(\". \")[0]\n            list_of_school_addresses[i] += \" Salinas, CA\"\n\n        # Converting the two lists into a dictionary\n        food_drive = {}\n        for key in list_of_school_names:\n            for value in list_of_school_addresses:\n                food_drive[key] = value\n                list_of_school_addresses.remove(value)\n                break\n\n        return(food_drive)\n" }, { "alpha_fraction": 0.7477148175239563, "alphanum_fraction": 0.7575868368148804, "avg_line_length": 43.1129035949707, "blob_id": "b2be073a5265398bbeb35053bfbe0cf13fa5dc0b", "content_id": "b4ee68a746936ce06f9ceec6373b4799607a78db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2755, "license_type": "no_license", "max_line_length": 779, "num_lines": 62, "path": "/README.md", "repo_name": "aundrelab/NextDoor", "src_encoding": "UTF-8", "text": "# NextDoor\n> NextDoor is a web app that helps find food drives and free wifi locations for those who lack the resources.\n\n<img src=\"https://github.com/aundrelab/NextDoor/blob/master/walkthorugh_part1.gif?raw=true\" width=550><br>\n\nThis web application allows users to find the resources offered to them to improve remote learning. The site is targeted towards kids who lack the basic resources to simulate a learning environment at home. Students face two barriers while learning remotely: they may not have the proper technology to work and learn, and some rely on school lunches because their families face economic and financial hardship. Our web application displays a map of different locations that offer these resources. For example, some schools offer one free meal every day to students, and other locations offer free wifi for students who do not have a good connection at home.\n\n## Installation\n\nOS X & Linux:\n\n```sh\ngit clone https://github.com/aundrelab/NextDoor.git\n```\n\n## Development Setup \nMake sure to have a Python environment set up before installing the required libraries below. 
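A minimal virtual-environment setup (a sketch, assuming Python 3 is installed and available as `python3`):\n\n```sh\n# create and activate an isolated environment for the project\npython3 -m venv venv\nsource venv/bin/activate\n```\n\nWith the environment active, install each library below.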
\n\nFlask/render_template:\n```sh\npip install Flask\n```\n\nNominatim/geocoders/geopy:\n```sh\npip install geopy\n```\n\nBeautifulSoup:\n```sh\npip install bs4\n```\n\nurlopen:\n\n`urllib.request` ships with Python's standard library, so `urlopen` needs no separate installation.\n\n## Usage example\n\nDuring the current times of Covid-19 and online learning, many people lack the basic necessities to be able to live and get an education. That is why we created a map that shows locations of both free wifi spots and food in Salinas, CA. Users can scroll around the map to find pins for the different wifi spots and food drives happening. The names and addresses of the places are also available if needed. \n\n## Release History\n\n* 0.0.1\n    * Created: displayed pins on maps of scraped data\n    * Work in progress\n\n## Contributors\n\n* Aundre Labrador – [Github](https://github.com/AlexFue) – [Linkedin](https://www.linkedin.com/in/aundrelabrador/)\n* Alex Espinoza-Fuentes – [Github](https://github.com/aundrelab) – [Linkedin](https://www.linkedin.com/in/alex-espinoza-fuentes/)\n* Edward Cluster – [Github](https://github.com/ecluster) – [Linkedin](https://www.linkedin.com/in/edward-cluster/)\n* Prince Rios – [Github](https://github.com/princeriostheprodigy) – [Linkedin](https://www.linkedin.com/in/prince-rios-511639194/)\n* Eric Chavez – [Github](https://github.com/ericchavez831) – [Linkedin](https://www.linkedin.com/in/echavezvelez/)\n\n## Contributing\n\n1. Fork it at (https://github.com/aundrelab/NextDoor.git)\n2. Commit your changes (`git commit -m 'Add comment'`)\n3. Push to the branch (`git push origin master`)\n4. Create a new Pull Request\n" } ]
4
rizkiialfarizi/IOT-vacant-space-detection
https://github.com/rizkiialfarizi/IOT-vacant-space-detection
8529621a41a0aba9c86080792f76efe6a47fa557
d67733e4630773477dbb0552d622c7ee1d954cf2
f69defdc977ff38582dbd9eb1aa9327443a92d5f
refs/heads/master
2022-12-05T18:42:24.406728
2020-09-03T11:58:53
2020-09-03T11:58:53
290,502,098
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6564582586288452, "alphanum_fraction": 0.6797107458114624, "avg_line_length": 28.759492874145508, "blob_id": "066528bd9bd945fc82a418d39b5ffda0b459fce8", "content_id": "b887e7699a0e60048a2764a0a69a1c6d425befca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7053, "license_type": "no_license", "max_line_length": 84, "num_lines": 237, "path": "/main.py", "repo_name": "rizkiialfarizi/IOT-vacant-space-detection", "src_encoding": "UTF-8", "text": "from imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom imutils.object_detection import non_max_suppression\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nfrom tkinter import *\nfrom PIL import Image , ImageTk\nfrom tkinter import Tk , BOTH\nfrom tkinter . ttk import Frame , Button\nimport tkinter . messagebox as mb\n\nclass SplashScreen:\n def __init__(self, parent):\n self.parent = parent\n \n self.aturSplash()\n self.aturWindow()\n \n def aturSplash(self):\n # import image menggunakan Pillow\n self.gambar = Image.open('carpark.png')\n self.imgSplash = ImageTk.PhotoImage(self.gambar)\n \n def aturWindow(self):\n # ambil ukuran dari file image\n lebar, tinggi = self.gambar.size\n \n setengahLebar = (self.parent.winfo_screenwidth()-lebar)//2\n setengahTinggi = (self.parent.winfo_screenheight()-tinggi)//2\n \n # atur posisi window di tengah-tengah layar\n self.parent.geometry(\"%ix%i+%i+%i\" %(lebar, tinggi,\n setengahLebar,setengahTinggi))\n \n # atur Image via Komponen Label\n Label(self.parent, image=self.imgSplash).pack()\n \nif __name__ == '__main__':\n root = Tk()\n \n # menghilangkan judul dan batas frame Window\n root.overrideredirect(True)\n \n app = SplashScreen(root)\n \n # menutup window setelah 3 detik\n root.after(3000, root.destroy)\n \n root.mainloop()\n \ndef decode_predictions(scores, geometry):\n\t# grab the number of rows and columns from the scores volume, then\n\t# initialize our set of bounding box rectangles and corresponding\n\t# confidence scores\n\t(numRows, numCols) = scores.shape[2:4]\n\trects = []\n\tconfidences = []\n\n\t# loop over the number of rows\n\tfor y in range(0, numRows):\n\t\t# extract the scores (probabilities), followed by the\n\t\t# geometrical data used to derive potential bounding box\n\t\t# coordinates that surround text\n\t\tscoresData = scores[0, 0, y]\n\t\txData0 = geometry[0, 0, y]\n\t\txData1 = geometry[0, 1, y]\n\t\txData2 = geometry[0, 2, y]\n\t\txData3 = geometry[0, 3, y]\n\t\tanglesData = geometry[0, 4, y]\n\n\t\t# loop over the number of columns\n\t\tfor x in range(0, numCols):\n\t\t\t# if our score does not have sufficient probability,\n\t\t\t# ignore it\n\t\t\tif scoresData[x] < 0.5:\n\t\t\t\tcontinue\n\n\t\t\t# compute the offset factor as our resulting feature\n\t\t\t# maps will be 4x smaller than the input image\n\t\t\t(offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n\t\t\t# extract the rotation angle for the prediction and\n\t\t\t# then compute the sin and cosine\n\t\t\tangle = anglesData[x]\n\t\t\tcos = np.cos(angle)\n\t\t\tsin = np.sin(angle)\n\n\t\t\t# use the geometry volume to derive the width and height\n\t\t\t# of the bounding box\n\t\t\th = xData0[x] + xData2[x]\n\t\t\tw = xData1[x] + xData3[x]\n\n\t\t\t# compute both the starting and ending (x, y)-coordinates\n\t\t\t# for the text prediction bounding box\n\t\t\tendX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n\t\t\tendY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n\t\t\tstartX = 
int(endX - w)\n\t\t\tstartY = int(endY - h)\n\n\t\t\t# add the bounding box coordinates and probability score\n\t\t\t# to our respective lists\n\t\t\trects.append((startX, startY, endX, endY))\n\t\t\tconfidences.append(scoresData[x])\n\n\t# return a tuple of the bounding boxes and associated confidences\n\treturn (rects, confidences)\n\n\n\n# initialize the original frame dimensions, new frame dimensions,\n# and ratio between the dimensions\n(W, H) = (None, None)\n(newW, newH) = (320, 320)\n(rW, rH) = (None, None)\n\n# define the two output layer names for the EAST detector model that\n# we are interested -- the first is the output probabilities and the\n# second can be used to derive the bounding box coordinates of text\nlayerNames = [\n\t\"feature_fusion/Conv_7/Sigmoid\",\n\t\"feature_fusion/concat_3\"]\n\n# load the pre-trained EAST text detector\n#print(\"[INFO] loading EAST text detector...\")\nnet = cv2.dnn.readNet(\"frozen_east_text_detection.pb\")\n\n# car detection cascade\n#print(\"[INFO] loading car cascade detector...\")\ncascade = cv2.CascadeClassifier('cars.xml')\n\n\n#print(\"[INFO] starting video stream...\")\nvs = VideoStream(\"media/test.mp4\").start()\n\n\n# start the FPS throughput estimator\nfps = FPS().start()\n\n\n# loop over frames from the video stream\nwhile True:\n\t# grab the current frame, then handle if we are using a\n\t# VideoStream or VideoCapture object\n\tframe = vs.read()\n\t#frame = frame[1] if args.get(\"video\", False) else frame\n\n\t# check to see if we have reached the end of the stream\n\tif frame is None:\n\t\tbreak\n\n\t# resize the frame, maintaining the aspect ratio\n\tframe = imutils.resize(frame, width=1000)\n\torig = frame.copy()\n\n\t# car detection\n\tgray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)\n\t# haar detection.\n\tcars = cascade.detectMultiScale(gray, 1.2, 3)\n\n\tfor (a, b, c, d) in cars:\n\t cv2.rectangle(orig, (a, b), (a+c, b+d), (0, 0, 255), 2)\n\t\n\t# end of car detection\n\n\t# if our frame dimensions are None, we still need to compute the\n\t# ratio of old frame dimensions to new frame dimensions\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\t\trW = W / float(newW)\n\t\trH = H / float(newH)\n\n\t# resize the frame, this time ignoring aspect ratio\n\tframe = cv2.resize(frame, (newW, newH))\n\n\t# construct a blob from the frame and then perform a forward pass\n\t# of the model to obtain the two output layer sets\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (newW, newH),\n\t\t(123.68, 116.78, 103.94), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t(scores, geometry) = net.forward(layerNames)\n\n\t# decode the predictions, then apply non-maxima suppression to\n\t# suppress weak, overlapping bounding boxes\n\t(rects, confidences) = decode_predictions(scores, geometry)\n\tboxes = non_max_suppression(np.array(rects), probs=confidences)\n\t\n\n\t# loop over the bounding boxes\n\tfor (startX, startY, endX, endY) in boxes:\n\t\t# scale the bounding box coordinates based on the respective\n\t\t# ratios\n\t\tstartX = int(startX * rW)\n\t\tstartY = int(startY * rH)\n\t\tendX = int(endX * rW)\n\t\tendY = int(endY * rH)\n\n\t\t# draw the bounding box on the frame\n\t\tcv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\t\n\t# set parking data\n\tparkingLot = 14\n\tavailable = len(boxes)\n\tused = parkingLot - available\n\tcarCount = len(cars)\n\n\t# add label for parking lot information\n\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\tcv2.putText(orig,\"Parking Lot: \" + str(parkingLot), (30,50),font, 1.1, 
(0,255,0),2)\n\tcv2.putText(orig,\"Available: \" + str(available), (30,100),font, 1.1, (0,255,0),2)\n\tcv2.putText(orig,\"Used: \" + str(used), (30,150),font, 1.1, (0,0,255),2)\n\tcv2.putText(orig,\"Cars: \" + str(carCount), (30,200),font, 1.1, (0,0,255),2)\n\n\t# update the FPS counter\n\tfps.update() \n\n\t# show the output frame\n\tcv2.imshow(\"Parking Lot\", orig)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# stop the timer and display FPS information\nfps.stop()\n#print(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\n#print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n# release the pointer\nvs.stop()\n\n\n# close all windows\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 28, "blob_id": "3e954a802d9e37042f522e3a6a4f9b96c4237775", "content_id": "15e538830ef7aa2808393fbf6d88150d700664a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/README.md", "repo_name": "rizkiialfarizi/IOT-vacant-space-detection", "src_encoding": "UTF-8", "text": "# IOT-vacant-space-detection" } ]
2
Anshe-inc/SimpleMLserver
https://github.com/Anshe-inc/SimpleMLserver
7120711ed9f86a14c2f56e028fc726260e2a3379
14d3b41788ea268d1828818302ad5bbbcbf181ba
b8bc90b22b744b43574a3b19f139662e77445f14
refs/heads/master
2022-04-25T21:51:31.189394
2020-05-06T07:34:34
2020-05-06T07:34:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6845070719718933, "alphanum_fraction": 0.6845070719718933, "avg_line_length": 26.30769157409668, "blob_id": "97ef6853d020515d22e999b67f3d4f3d6b8db7fa", "content_id": "aec2171dd2669819a288d926042170a244f5de46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "permissive", "max_line_length": 66, "num_lines": 13, "path": "/server/router/router.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "from aiohttp import web\n\nfrom server.handlers import predict\nfrom server.handlers import root\nfrom server import config\n\n\ndef assign_routes(router: web.UrlDispatcher) -> None:\n router.add_routes([\n web.post('/predict', predict.handle),\n web.get('/', root.handle),\n web.static('/templates', config.CONFIG['templates_path']),\n ])\n" }, { "alpha_fraction": 0.5575221180915833, "alphanum_fraction": 0.5929203629493713, "avg_line_length": 21.600000381469727, "blob_id": "2d5f076e608b3f1defab5ab3c9250f8d3fe5b863", "content_id": "f4bd259c77fa63fca899a6a979e5fd29a693ffc5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "permissive", "max_line_length": 41, "num_lines": 5, "path": "/server/config.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "CONFIG = {\n 'port': 5000,\n 'templates_path': 'server/templates',\n 'model_path': 'model/model.pickle',\n}\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 16.75, "blob_id": "b2fc635cc1868d26f045ddf5ef1c0eb537b3996b", "content_id": "2ffc768ee510d4fe0a6bbddad6ce6c372b687f02", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 141, "license_type": "permissive", "max_line_length": 45, "num_lines": 8, "path": "/build.sh", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "#! 
/bin/bash\n\nsudo apt-get install python3.8 python3.8-venv\n\npython3.8 -m venv venv\nsource venv/bin/activate\n\npip install -r requirements.txt" }, { "alpha_fraction": 0.5565217137336731, "alphanum_fraction": 0.7217391133308411, "avg_line_length": 15.571428298950195, "blob_id": "2c2fb83616d2ae755cf741911d2e0acf84f24387", "content_id": "4a0db89a8388f8f855583d5d37694f5e02cb1303", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 115, "license_type": "permissive", "max_line_length": 22, "num_lines": 7, "path": "/requirements.txt", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "aiohttp>=3.6.2\nsklearn>=0.0\npandas>=1.0.3\naiohttp_jinja2\npytest>=5.4.1\npytest-asyncio>=0.12.0\npytest-aiohttp>=0.3.0" }, { "alpha_fraction": 0.6972624659538269, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 19.700000762939453, "blob_id": "5afa254b4bcf45ca1741dbe1bfbf4525076ed8b6", "content_id": "ce9371979b04f8985c83dfb555d00148ef7644a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "permissive", "max_line_length": 94, "num_lines": 30, "path": "/server/main.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "import logging\nimport sys\n\nfrom aiohttp import web\nimport aiohttp_jinja2\nimport jinja2\n\nfrom server import config\nfrom server.router import router\nfrom server.views import model\n\n\ndef set_logger_settings():\n logging.basicConfig(level='INFO', stream=sys.stdout)\n\n\ndef main():\n app = web.Application()\n app['model'] = model.load()\n app['config'] = config.CONFIG\n\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(config.CONFIG['templates_path']))\n router.assign_routes(app.router)\n set_logger_settings()\n\n web.run_app(app, port=config.CONFIG['port'])\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7113401889801025, "alphanum_fraction": 0.7216494679450989, "avg_line_length": 23.25, "blob_id": "87ee20663e5582ef2c7c61f43a2cf0cb1724d5f6", "content_id": "68fd815ca41d697609668a92e9cac4b055e93970", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "permissive", "max_line_length": 60, "num_lines": 8, "path": "/server/handlers/root.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "import aiohttp_jinja2\nimport typing as tp\nfrom aiohttp import web\n\n\n@aiohttp_jinja2.template('index.html')\nasync def handle(request: web.Request) -> tp.Dict[str, str]:\n return {'result': ''}\n" }, { "alpha_fraction": 0.7092511057853699, "alphanum_fraction": 0.7092511057853699, "avg_line_length": 21.700000762939453, "blob_id": "47356e6c9e3fc9342246e7c8bb3110195aefb5ee", "content_id": "d9911aecb1d704147c28902a40f291982948a7bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "permissive", "max_line_length": 63, "num_lines": 10, "path": "/server/views/model.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "import pickle\n\nfrom sklearn.pipeline import Pipeline\n\nfrom server import config\n\n\ndef load() -> Pipeline:\n with open(config.CONFIG['model_path'], 'rb') as model_file:\n return pickle.load(model_file) # type: Pipeline\n" }, { "alpha_fraction": 0.7201257944107056, "alphanum_fraction": 0.7311320900917053, "avg_line_length": 24.440000534057617, 
"blob_id": "9b86c5fccd7260b2149524ea82e8684ce026684a", "content_id": "ae436c80cc5162590a8a60d9b5d649f8fdc2a894", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "permissive", "max_line_length": 85, "num_lines": 25, "path": "/server/tests/test_server.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "import pytest\nfrom aiohttp import web\nimport aiohttp_jinja2\nimport jinja2\n\nfrom server.router import router\nfrom server.views import model\n\n\[email protected]\ndef cli(loop, aiohttp_client, aiohttp_unused_port):\n port = aiohttp_unused_port()\n\n app = web.Application()\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('templates'))\n router.assign_routes(app.router)\n\n app['model'] = model.load()\n return loop.run_until_complete(aiohttp_client(app, server_kwargs={'port': port}))\n\n\[email protected]\nasync def test_predict_invalid_method(cli):\n resp = await cli.get('/predict')\n assert resp.status == 405\n" }, { "alpha_fraction": 0.7986577153205872, "alphanum_fraction": 0.7986577153205872, "avg_line_length": 23.83333396911621, "blob_id": "2635400f70c924a18f31ec2f65be665bba5db20d", "content_id": "4f19b148161e368411f8ef6ad71b28714e534b8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "permissive", "max_line_length": 58, "num_lines": 6, "path": "/README.md", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "# SimpleMLserver\nServer for predicting flat cost based of required features\n\nRun `build.sh` to install project dependencies\n\n`run.sh` to execute app\n" }, { "alpha_fraction": 0.6870967745780945, "alphanum_fraction": 0.6951612830162048, "avg_line_length": 28.5238094329834, "blob_id": "cd76f43321ddc2878b5ed6a5a30073f732e72798", "content_id": "368d1db3e5a0802b596179411e3e192c1f40f740", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "permissive", "max_line_length": 74, "num_lines": 21, "path": "/server/handlers/predict.py", "repo_name": "Anshe-inc/SimpleMLserver", "src_encoding": "UTF-8", "text": "import aiohttp_jinja2\nfrom aiohttp import web\nimport pandas\nimport typing as tp\n\nFLAT_COLUMNS = ['totsp', 'livesp', 'kitsp', 'dist', 'code']\n\n\n@aiohttp_jinja2.template('index.html')\nasync def handle(request: web.Request) -> tp.Dict[str, str]:\n request.app.logger.info(f'got request')\n\n data = await request.post()\n data_list = [float(data[column]) for column in FLAT_COLUMNS]\n\n features_df = pandas.DataFrame(data=[data_list], columns=FLAT_COLUMNS)\n\n flat_model = request.app['model']\n prediction = flat_model.predict(features_df)[0][0]\n\n return {'result': f'{prediction:.2f} thousands of dollars'}\n" } ]
10
tatesnow/python-small-examples
https://github.com/tatesnow/python-small-examples
9fe1b3912f9caf224ae2a0a839ce36e9b1962b17
62864264a60ae98e67aae418a584828d13badb67
977c43a34e9f298dc12448fc60f6914b611dcba4
refs/heads/master
2020-09-24T17:29:06.875413
2019-12-04T07:47:29
2019-12-04T07:47:29
225,808,002
2
0
null
2019-12-04T07:40:39
2019-12-04T06:39:07
2019-12-03T15:16:02
null
[ { "alpha_fraction": 0.5315315127372742, "alphanum_fraction": 0.5630630850791931, "avg_line_length": 22.785715103149414, "blob_id": "08db3ba872d2e95b0caca7578294076575a834bf", "content_id": "3171f96d86c07b2cce966b640497e89a8ac4d7b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 105, "num_lines": 28, "path": "/src/test2.py", "repo_name": "tatesnow/python-small-examples", "src_encoding": "UTF-8", "text": "\n#请编写程序,将记录中标记有\"age\"和\"name\"的内容分别保存到两个列表中。\nrecords = [(\"age\", 30, \"10\"), (\"age\", 29, \"52\"),(\"name\", \"Newton\"),(\"name\", \"Maxwell\"),(\"age\", 20, \"90\")]\nage_lst=[]\nname_lst=[]\n#方式1:\nfor i in range(len(records)):\n if records[i][0]=='age':\n age_lst.append(list(records[i][1:]))\n else:\n name_lst.append(list(records[i][1:]))\nage_lst\nname_lst\n#方式2:\nfor i in records:\n if i[0]=='age':\n age_lst.append(list(i[1:]))\n else:\n name_lst.append(list(i[1:]))\nprint(age_lst)\nprint(name_lst)\n#方式3:\nfor tag, *tags in records: \n if tag == \"age\":\n age_lst.append(tags)\n elif tag==\"name\":\n name_lst.append(tags)\nage_lst\nname_lst" } ]
1
scoding123/Logistic_regression
https://github.com/scoding123/Logistic_regression
e985ed4e84aa135908d37797844c76bcc4c52b9a
84bf48d665b8e62543ba9f7ec6bd5259b792283e
63db9c791c6db2e240700ee6273aa2958d778b5a
refs/heads/master
2020-12-13T16:21:40.429580
2020-01-17T04:23:44
2020-01-17T04:23:44
234,469,770
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.42977526783943176, "alphanum_fraction": 0.4387640357017517, "avg_line_length": 27.951139450073242, "blob_id": "4883b44eded60111d23fe36b5769ac10facffbe7", "content_id": "6782a1330ed16f97852dfa16fefcf9f341cc6bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8900, "license_type": "no_license", "max_line_length": 100, "num_lines": 307, "path": "/Logistic_regression.py", "repo_name": "scoding123/Logistic_regression", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef binary_train(X, y, loss=\"perceptron\", w0=None, b0=None, step_size=0.5, max_iterations=1000):\n \"\"\"\n Inputs:\n - X: training features, a N-by-D numpy array, where N is the \n number of training points and D is the dimensionality of features\n - y: binary training labels, a N dimensional numpy array where \n N is the number of training points, indicating the labels of \n training data\n - loss: loss type, either perceptron or logistic\n - step_size: step size (learning rate)\n\t- max_iterations: number of iterations to perform gradient descent\n\n Returns:\n - w: D-dimensional vector, a numpy array which is the weight \n vector of logistic or perceptron regression\n - b: scalar, which is the bias of logistic or perceptron regression\n \"\"\"\n N, D = X.shape\n assert len(np.unique(y)) == 2\n\n\n w = np.zeros(D)\n if w0 is not None:\n w = w0\n \n b = 0\n if b0 is not None:\n b = b0\n y[y==0] =-1\n\n if loss == \"perceptron\":\n ############################################\n # TODO 1 : Edit this if part #\n # Compute w and b here #\n #w = np.zeros(D)\n #b = 0\n i=0\n \n while i<max_iterations:\n X_T_w=np.dot(w,X.T)\n with_bias=(X_T_w+b) * y\n delta = (np.where( with_bias <= 0,1,0))*y\n delta_X = np.dot(delta,X)\n delta_X_N = delta_X/N\n w_val_add = step_size * delta_X_N\n w = w+ w_val_add\n b_val_add = step_size*np.sum(delta)/N\n b = b+ b_val_add\n i = i+1\n \n \n \n ############################################\n \n\n elif loss == \"logistic\":\n ############################################\n # TODO 2 : Edit this if part #\n # Compute w and b here #\n w = np.zeros(D)\n b = 0\n i = 0\n \n while i < max_iterations:\n X_T= X.T\n step_size_val = (step_size/N)\n prod_W_X = np.dot(w,X_T)\n prod_bias= prod_W_X + b\n sig_val = sigmoid(-y*(prod_bias))\n delta =sig_val *y\n delta_X = np.dot(delta,X)\n \n w_val_add =step_size_val *delta_X\n w += w_val_add\n sum_delta =np.sum(delta)\n b_val_add = step_size_val*sum_delta\n b += b_val_add\n i = i+1\n \n ############################################\n \n\n else:\n raise \"Loss Function is undefined.\"\n\n assert w.shape == (D,)\n return w, b\n\ndef sigmoid(z):\n \n \"\"\"\n Inputs:\n - z: a numpy array or a float number\n \n Returns:\n - value: a numpy array or a float number after computing sigmoid function value = 1/(1+exp(-z)).\n \"\"\"\n\n ############################################\n # TODO 3 : Edit this part to #\n # Compute value #\n \n exp_val = 1 + np.exp(-z)\n val = np.power(exp_val, -1)\n value = val\n ############################################\n \n \n return value\n\n\n\ndef binary_predict(X, w, b, loss=\"perceptron\"):\n \"\"\"\n Inputs:\n - X: testing features, a N-by-D numpy array, where N is the \n number of training points and D is the dimensionality of features\n - w: D-dimensional vector, a numpy array which is the weight \n vector of your learned model\n - b: scalar, which is the bias of your model\n - loss: loss type, either perceptron or logistic\n \n Returns:\n - preds: N 
dimensional vector of binary predictions: {0, 1}\n \"\"\"\n N, D = X.shape\n \n if loss == \"perceptron\":\n ############################################\n # TODO 4 : Edit this if part #\n # Compute preds #\n preds = np.zeros(N)\n preds = np.zeros(N)\n for i in range(N):\n prod_W_X = np.dot(w,X.T)+b\n if (prod_W_X[i]) <= 0:\n preds[i] = 0\n else:\n preds[i] = 1\n \n #preds = np.where((np.dot(w, X.T)+b) <= 0, 0, 1)\n ############################################\n \n\n elif loss == \"logistic\":\n ############################################\n # TODO 5 : Edit this if part #\n # Compute preds #\n preds = np.zeros(N)\n #preds = np.zeros(N)\n #preds = np.where(sigmoid(np.dot(w, X.T) + b) >= 0.5, 1, 0)\n for i in range(N):\n prod_W_X = sigmoid(np.dot(w,X.T)+b)\n if (prod_W_X[i]) < 0.5:\n preds[i] = 0\n else:\n preds[i] = 1\n \n ############################################\n \n\n else:\n raise \"Loss Function is undefined.\"\n \n\n assert preds.shape == (N,) \n return preds\n\n\n\n\ndef multiclass_train(X, y, C,\n w0=None, \n b0=None,\n gd_type=\"sgd\",\n step_size=0.5, \n max_iterations=1000):\n \"\"\"\n Inputs:\n - X: training features, a N-by-D numpy array, where N is the \n number of training points and D is the dimensionality of features\n - y: multiclass training labels, a N dimensional numpy array where\n N is the number of training points, indicating the labels of \n training data\n - C: number of classes in the data\n - gd_type: gradient descent type, either GD or SGD\n - step_size: step size (learning rate)\n - max_iterations: number of iterations to perform gradient descent\n\n Returns:\n - w: C-by-D weight matrix of multinomial logistic regression, where \n C is the number of classes and D is the dimensionality of features.\n - b: bias vector of length C, where C is the number of classes\n \"\"\"\n\n N, D = X.shape\n\n w = np.zeros((C, D))\n if w0 is not None:\n w = w0\n \n b = np.zeros(C)\n if b0 is not None:\n b = b0\n\n np.random.seed(42)\n if gd_type == \"sgd\":\n ############################################\n # TODO 6 : Edit this if part #\n # Compute w and b #\n w = np.zeros((C, D))\n b = np.zeros(C)\n \n\n w = np.column_stack((w, b))\n X = np.column_stack((X,np.ones(N)))\n i=0\n \n while i < max_iterations:\n \n random_index = np.random.randint(N, size=1)\n x,yn = X[random_index,:],y[random_index]\n \n #c*1 matrix --- x_w_T\n x_w_T = np.dot(w, x.T) \n \n e = np.exp(x_w_T - np.max(x_w_T))\n softmax_val= e / np.sum(e, axis=0, keepdims=True)\n softmax_val[yn] =softmax_val[yn]- 1\n #prod is C*D+1 matrix\n prod = np.dot(softmax_val,x)\n w -= (step_size*prod)\n i = i+1\n b,w = w[:,D], w[:,:D]\n ############################################\n \n\n elif gd_type == \"gd\":\n ############################################\n # TODO 7 : Edit this if part #\n # Compute w and b #\n w = np.zeros((C, D))\n b = np.zeros(C)\n \n \n w = np.column_stack((w, b))\n \n x = np.column_stack((X,np.ones(N)))\n\n \n\n i = 0\n while i < max_iterations:\n step_val = (step_size/N)\n x_w_T = np.dot(w, x.T) \n e = np.exp(x_w_T - np.max(x_w_T))\n softmax_val= e / np.sum(e, axis=0, keepdims=True)\n val = softmax_val - np.eye(C)[y].T\n x_val_dot = np.dot(val,x)\n w -= (step_val*x_val_dot)\n i += 1\n \n\n b,w = w[:,D], w[:,:D]\n \n ############################################\n \n\n else:\n raise \"Type of Gradient Descent is undefined.\"\n \n\n assert w.shape == (C, D)\n assert b.shape == (C,)\n\n return w, b\n\n\ndef multiclass_predict(X, w, b):\n \"\"\"\n Inputs:\n - X: testing features, a N-by-D numpy array, 
where N is the \n number of training points and D is the dimensionality of features\n - w: weights of the trained multinomial classifier, C-by-D \n - b: bias terms of the trained multinomial classifier, length of C\n \n Returns:\n - preds: N dimensional vector of multiclass predictions.\n Outputted predictions should be from {0, C - 1}, where\n C is the number of classes\n \"\"\"\n N, D = X.shape\n \n ############################################\n # TODO 8 : Edit this part to #\n # Compute preds #\n preds = np.zeros(N)\n ############################################\n \n X_W_dot_prod = X.dot(w.T) \n preds = np.argmax(X_W_dot_prod + b, axis=1)\n\n assert preds.shape == (N,)\n return preds\n\n\n\n\n " } ]
1
TK-DS-DS/Crawler-BiliBili
https://github.com/TK-DS-DS/Crawler-BiliBili
23ec8eae56adc8416d360f38a0c94905b371ebd0
e4f020b8597099eb3532c41c9381c3319381a1c9
355582e46eee66e1610e4678c38affd9f7075403
refs/heads/main
2023-03-13T05:34:11.740585
2021-03-07T06:50:44
2021-03-07T06:50:44
345,274,466
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5434966087341309, "alphanum_fraction": 0.5599662065505981, "avg_line_length": 32.764705657958984, "blob_id": "540183a40d39e2b151204bccec18632e6b3a19ca", "content_id": "46af3bfb0ff3cc66043a1bf2dbbea99e25c30eb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2892, "license_type": "no_license", "max_line_length": 110, "num_lines": 68, "path": "/article/article.py", "repo_name": "TK-DS-DS/Crawler-BiliBili", "src_encoding": "UTF-8", "text": "# -*-coding:utf-8-*- #编码,防乱码\r\n#代码仅供学习,不能用于商业等违法用途,否则后果由使用者承担\r\n\r\n#from lxml import etree #xpath分析image's url使用,\r\n#获取cv号,以cv号形式保存图片\r\n\r\nimport requests #模拟hppts请求\r\nimport re #正则表达式,用于提取image、cv号\r\nimport os #提供文件读写操作\r\nimport time #提供延时函数\r\nimport UserAgent\r\n\r\ndef findCvNumber(list,mid): #根据mid查找cvhao\r\n page = 1 #预定义page,即从第1页开始查找\r\n while (1):\r\n localurl = 'https://api.bilibili.com/x/space/article?mid=' + mid + '&pn=' + str(page) #合成具有cv号信息的网页A\r\n headers = UserAgent.get_headers() # 随机获取一个headers\r\n res = requests.get(url=localurl, headers=headers) #请求A\r\n html = res.text #转换A为文本\r\n length = len(res.content) #计算A的文本的长度\r\n if(length<100): #小于100算作无新的cv号了\r\n break\r\n #print('len=' + str(length)) #输出A的文本的长度\r\n rst = re.findall('{\"id\":(.*?),\".*?\":{\".*?,\".*?\",\".*?\"', html) #更具正则表达式提取具有cv号的文本\r\n # list.append(re.findall('[0-9]{7}', str(rst))) #提取cv号 0后面继续添加\r\n # rstforprint = re.findall('[0-9]{7}', str(rst)) #用于打印的cv号列表\r\n print('page=' + str(page)) #打印这是第几页\r\n # print(rstforprint,\"No use\") #打印cv号列表\r\n print(rst)\r\n\r\n #每个cv号,调用 cv下载img函数\r\n for i in range(len(rst)):\r\n CvGetImg(rst[i],PATH)\r\n time.sleep(1)\r\n page+=1\r\n time.sleep(0.05) #休眠1秒\r\n return list #返回二维cv号列表\r\n\r\ndef CvGetImg(cv,path):#输入cv号 创建cv号文件夹,其下 下载图片\r\n headers = UserAgent.get_headers() # 随机获取一个headers\r\n html = requests.get(url='https://www.bilibili.com/read/cv'+str(cv), headers=headers)\r\n html = html.text\r\n #解析html文本\r\n title=re.findall('<meta name=\"name\" content=\"(.*?)\">',html)\r\n print(title)\r\n if(len(title)==0):\r\n return\r\n Imgs=re.findall('<img data-src=\"(//i0.*?)\" .*?>',html,re.S)\r\n print(Imgs)\r\n #判断文件夹,存在就跳过这次函数\r\n if os.path.exists(path+str(title[0])):\r\n return\r\n if not os.path.exists(path+str(title[0])):\r\n os.mkdir(path+str(title[0]))\r\n for j in range(len(Imgs)):\r\n pic = requests.get('https:' + Imgs[j], timeout=10)\r\n fp = open(path+str(title[0])+'/'+ str(j) + '.jpg', 'wb')\r\n fp.write(pic.content)\r\n fp.close()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n cv_list =[] #初始化一个cv_list\r\n MID='00000000'#修改MID号\r\n PATH=\"P:\\images/\"#修改根目录\r\n findCvNumber(cv_list,MID)\r\n print(cv_list)\r\n\r\n\r\n" }, { "alpha_fraction": 0.7627118825912476, "alphanum_fraction": 0.7683615684509277, "avg_line_length": 16.700000762939453, "blob_id": "ad6b5ed6c7c42a8e30c9dd95315120a9a7f55605", "content_id": "4ae389d386b191310ea82f03c85a32d482b8c2f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 381, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/README.md", "repo_name": "TK-DS-DS/Crawler-BiliBili", "src_encoding": "UTF-8", "text": "# Crawler-BiliBili\nB站 bilibili 爬虫 专栏 图片(仅供学习) \n禁止商业引用与非法行为\n\n爬虫收集信息,为人工智能收集素材。如果你觉得好用,不要忘记给个star。\n\n# 当前整理:\n## 1.专栏图片下载\n思路:up的mid->专栏的cv->图片下载。 \n通过up的mid账号获得专栏的cv号,从而下载每个专栏下的图片\n" } ]
2
Parth-AI/Graph-Ploter
https://github.com/Parth-AI/Graph-Ploter
8af547770723a1a68553b55793aff9702003b9ba
e8cb431247701c50a7ea299f15849d72e65b6fb8
a93bf5fbcfe750504e755911f6fa018b4261f61c
refs/heads/main
2023-04-23T19:22:29.064086
2021-05-14T07:21:58
2021-05-14T07:21:58
367,282,396
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.640816330909729, "alphanum_fraction": 0.6693877577781677, "avg_line_length": 28.875, "blob_id": "c0784257c0f56e40719cd8dd1abade50592e3d68", "content_id": "791cda8b5145b7c704d2d2c0c6e11d505365097e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 131, "num_lines": 8, "path": "/main.py", "repo_name": "Parth-AI/Graph-Ploter", "src_encoding": "UTF-8", "text": "import pandas as pd\r\n\r\nimport plotly.express as px\r\n\r\n#data = [123,15]\r\ndf = pd.read_csv(\"Data.csv\")\r\nfig = px.scatter(df, x='Population', y='InternetUsers', title=\"Internet User\", color='Country', size = 'Percentage', size_max = 60)\r\nfig.show()" } ]
1
enzperuzzetto/pdp
https://github.com/enzperuzzetto/pdp
fde3b8509a46b7a196729a6edcf1749a6d94a548
bf6251774ae0c84b174baef3ce9a9132b0fd5c75
2fc96c654242009fd61bde53f961bfa97de3c506
refs/heads/master
2021-09-06T18:11:50.306970
2018-02-09T14:00:47
2018-02-09T14:00:47
115,807,063
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6995645761489868, "alphanum_fraction": 0.7039186954498291, "avg_line_length": 18.685714721679688, "blob_id": "907b9432bb0d6bc1f9705686650fea9681d4fe57", "content_id": "fa7591285cc6f4cd5f63a2511054f4802fd020cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 689, "license_type": "no_license", "max_line_length": 89, "num_lines": 35, "path": "/README.md", "repo_name": "enzperuzzetto/pdp", "src_encoding": "UTF-8", "text": "# pdp: REMOTELY TRIGGERED BLACK HOLE FILTERING (RTBH)\n\n## Meteor.js ##\n```sh\n>curl https://install.meteor.com/ | sh\n```\n\n**require packages:**\n\n\t- twbs:bootstrap\n\t- ian:accounts-ui-boostrap-3\n\t- iron:router\n```sh\n>meteor npm install --save babel-runtime\n```\n\n## NEmu ##\nhttp://nemu.valab.net/index.php?static27/tuto-setup-nemu-linux\n\n## Dynagene ##\n\n\n## Dynamips ##\n```sh\n>sudo apt install dynamips\n```\n\nhttps://www.inetdoc.net/pdf/dynamips-dynagen.pdf\n\n## ExaBGP ##\nhttps://thepacketgeek.com/influence-routing-decisions-with-python-and-exabgp/\nhttps://github.com/Exa-Networks/exabgp/wiki/Controlling-ExaBGP-:-interacting-from-the-API\n\n## Qemu ##\nhttp://dept-info.labri.fr/~magoni/infres/\n" }, { "alpha_fraction": 0.3571428656578064, "alphanum_fraction": 0.6520737409591675, "avg_line_length": 38.272727966308594, "blob_id": "f405543403550c528194e8aa682f8042779d7490", "content_id": "3c50c8ec8e979314b2346d49192a88ae0705cf42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 434, "license_type": "no_license", "max_line_length": 126, "num_lines": 11, "path": "/dyna.sh", "repo_name": "enzperuzzetto/pdp", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"lauching router 1\"\n\ndynamips -P 7200 --idle-pc 0x6077b2bc -i 1 -X -T 2001 \\\n -p 1:PA-FE-TX -s 1:0:udp:10003:127.0.0.1:10002 -p 2:PA-FE-TX -s 2:0:udp:10007:127.0.0.1:10006 ./c7200-jk9s-mz.124-13b.image &\n\necho \"lauching router 2\"\n\ndynamips -P 7200 --idle-pc 0x6077b2bc -i 2 -X -T 2002 \\\n -p 1:PA-FE-TX -s 1:0:udp:10005:127.0.0.1:10004 -p 2:PA-FE-TX -s 2:0:udp:10009:127.0.0.1:10008 ./c7200-jk9s-mz.124-13b.image &\n\n\n" }, { "alpha_fraction": 0.589090883731842, "alphanum_fraction": 0.589090883731842, "avg_line_length": 17.133333206176758, "blob_id": "3cc096e558f5bfd4bb4ef4189b60f9d0cda76036", "content_id": "3774a2a956e7f2cb5f2e6c0af91a14a527395ffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 275, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/firstApp/client/main.js", "repo_name": "enzperuzzetto/pdp", "src_encoding": "UTF-8", "text": "import { Template } from 'meteor/templating';\nimport { ReactiveVar } from 'meteor/reactive-var';\n\nimport './main.html';\n//import './api.html';\n\nRouter.route('/', {\n name: 'Home',\n template: 'Home'\n});\n\nRouter.route('/api', {\n name: 'api',\n template: 'api'\n});\n\n\n\n" }, { "alpha_fraction": 0.5155541300773621, "alphanum_fraction": 0.6500324010848999, "avg_line_length": 33.66292190551758, "blob_id": "79d63b6609ad6fb155785094e88279f204597dfe", "content_id": "7aba2b0dec4960f7ea11c9f4e11d58548aac6254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3086, "license_type": "no_license", "max_line_length": 127, "num_lines": 89, "path": "/bhre.py", "repo_name": "enzperuzzetto/pdp", "src_encoding": "UTF-8", "text": "'''\nBlack Hole Routing 
Experiment\n'''\n\nInitNemu(session='blackholerouting', workspace='/home/magoni/bhre/', hdcopy=True)\n\nVHostConf('debian', display='sdl', vga='std', enable_kvm=None, localtime=None, k='fr', m='4G', cpu='kvm64')\n\nVHost('attacker', conf='debian', hds=[VFs('/home/magoni/debian8.img', 'cow', tag='attacker.img')], \n\tnics=[\n\tVNic(hw='0a:0a:0a:00:01:01'),\n\tVNic(hw='0a:0a:0a:00:01:02'), \n\tVNic(hw='0c:0c:0c:00:01:01')])\n\nVHost('border-router', conf='debian', hds=[VFs('/home/magoni/debian8.img', 'cow', tag='ce-bgp.img')], \n\tnics=[\n\tVNic(hw='0a:0a:0a:00:02:01'), \n\tVNic(hw='0a:0a:0a:00:02:02'), \n\tVNic(hw='0c:0c:0c:00:02:02')])\n\nVHost('route-server', conf='debian', hds=[VFs('/home/magoni/debian8.img', 'cow', tag='route-server.img')], \n\tnics=[\n\tVNic(hw='0a:0a:0a:00:03:01'), \n\tVNic(hw='0c:0c:0c:00:03:03')])\n\nVHost('target', conf='debian', hds=[VFs('/home/magoni/debian8.img', 'cow', tag='web-server.img')], \n\tnics=[\n\tVNic(hw='0a:0a:0a:00:04:01'), \n\tVNic(hw='0c:0c:0c:00:04:04')])\n\nVHost('client', conf='debian', hds=[VFs('/home/magoni/debian8.img', 'cow', tag='client.img')], \n\tnics=[\n\tVNic(hw='0a:0a:0a:00:05:01'), \n\tVNic(hw='0a:0a:0a:00:05:02'), \n\tVNic(hw='0c:0c:0c:00:05:05')])\n\n\n'''\nVRouter('box', nics=[VNic(), VNic()], services=[Service('ipforward'), Service('ifup', '1:192.168.0.1'), Service('gateway', 0), \n\tService('masquerade', ipsrc='192.168.0.0/24'), \n\tService('dnsmasq', domain='local', net='192.168.0.0/24', start='192.168.0.10', end='192.168.0.20', ifaces=[1])], \n\tenable_kvm=None, localtime=None, k='fr', display='sdl', vga='std')\n'''\n\nVSwitch('sw1', niface=3)\nSetIface(\"sw1:0\", proto='udp', port=11001, lport=11002)\nSetIface(\"sw1:1\", proto='udp', port=11003, lport=11004)\nSetIface(\"sw1:2\", proto='udp', port=10002, lport=10003)\n\nVSwitch('sw2', niface=3)\nSetIface(\"sw2:0\", proto='udp', port=11005, lport=11006)\nSetIface(\"sw2:1\", proto='udp', port=11007, lport=11008)\nSetIface(\"sw2:2\", proto='udp', port=10004, lport=10005)\n\nVSwitch('sw3', niface=4)\nSetIface(\"sw3:0\", proto='udp', port=11009, lport=11010)\nSetIface(\"sw3:1\", proto='udp', port=11011, lport=11012)\n\nSetIface(\"sw3:2\", proto='udp', port=10006, lport=10007)\nSetIface(\"sw3:3\", proto='udp', port=10008, lport=10009)\n\nVSwitch('sw4', niface=2)\nSetIface(\"sw4:0\", proto='udp', port=11013, lport=11014)\nSetIface(\"sw4:1\", proto='udp', port=11015, lport=11016)\n \nLink(client='attacker:0', core='sw1:0')\nLink(client='attacker:1', core='sw2:1')\nLink(client='client:0', core='sw2:0')\nLink(client='client:1', core='sw1:1')\n\nLink(client='border-router:0', core='sw3:0')\nLink(client='route-server:0', core='sw3:1')\nLink(client='target:0', core='sw4:0')\nLink(client='border-router:1', core='sw4:1')\n\nVSlirp('slirp1', net='192.168.1.0/24')\nLink(client='attacker:2', core='slirp1')\n\nVSlirp('slirp2', net='192.168.2.0/24')\nLink(client='border-router:2', core='slirp2')\n\nVSlirp('slirp3', net='192.168.3.0/24')\nLink(client='route-server:1', core='slirp3')\n\nVSlirp('slirp4', net='192.168.4.0/24')\nLink(client='target:1', core='slirp4')\n\nVSlirp('slirp5', net='192.168.5.0/24')\nLink(client='client:2', core='slirp5')\n\n" } ]
4
dknochen/dotfiles
https://github.com/dknochen/dotfiles
081745f80ef4a2c3a62fedf27329cff94aab6317
d36060505d97543a7fc5d176a3f24db545517058
ef6f097381914d83746026df97f2463db54a0bea
refs/heads/master
2021-01-02T23:08:05.204712
2012-06-07T22:27:27
2012-06-07T22:27:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5975672602653503, "alphanum_fraction": 0.6090093851089478, "avg_line_length": 31.884746551513672, "blob_id": "0a4df7ec51d68488380116398b77cd12c594ef4f", "content_id": "40009cf9154610a82e6e3963f173980f239e54a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9701, "license_type": "no_license", "max_line_length": 114, "num_lines": 295, "path": "/bin/s3up.py", "repo_name": "dknochen/dotfiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\ns3up.py\n2010-2011, Mike Tigas\nhttps://mike.tig.as/\n\nRequires boto: http://boto.cloudhackers.com/\n\nUsage:\n s3up filename\n Uploads the given file to the DEFAULT_BUCKET (see below)\n with the following key:\n files/YYYYMMDD/(filename)\n\n s3up filename [remote_directory]\n As above, except to the given directory:\n (remote_directory)/(filename)\n\n s3up filename [bucket] [remote_filename] [cache_time]\n s3up filename [bucket] [remote_filename] [cache_time] [policy]\n\n\nPlease double-check and set the following options below before using:\n AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY\n DEFAULT_BUCKET\n UPLOAD_PARALLELIZATION\n CHUNKING_MIN_SIZE\n CHUNK_RETRIES\n\"\"\"\nimport sys\nimport traceback\nfrom mimetypes import guess_type\nfrom datetime import datetime\nfrom time import sleep\nfrom boto.s3.connection import S3Connection\nimport os\nfrom cStringIO import StringIO\nfrom threading import Thread\nfrom math import floor\n\nAWS_ACCESS_KEY_ID = ''\nAWS_SECRET_ACCESS_KEY = ''\n\n# When only giving one or two args, the following bucket is used:\nDEFAULT_BUCKET = 'my-awesome-bucket'\n\n# If you have a CNAME for this bucket (or, even better, a CNAME for a CloudFront for this bucket)\n# you can throw that in here, with the protocol you want to use. If None,\n# defaults to \"https://s3.amazonaws.com/{BUCKET}/\". Defaults to None.\n#BUCKET_CNAME = \"http://s3_media.example.com\"\n#BUCKET_CNAME = \"https://d312sd4f87pfh0.cloudfront.net\"\nBUCKET_CNAME = None\n\n# Number of simultaneous upload threads to execute.\nUPLOAD_PARALLELIZATION = 4\n\n# Minimum size for a file chunk (except final chunk). Minimum is 5242880. (5MB)\nCHUNKING_MIN_SIZE = 5242880\n\n# For robustness, we can retry uploading any chunk up to this many times. (Set to\n# 1 or less to only attempt one upload per chunk.) Because we chunk large uploads,\n# an error in a single chunk doesn't necessarily mean we need to re-upload the\n# entire thing.\nCHUNK_RETRIES = 10\n\n# ========== \"MultiPart\" (chunked) upload utility methods ==========\n\ndef mem_chunk_file(local_file):\n \"\"\"\n Given the file at `local_file`, returns a generator of CHUNKING_MIN_SIZE\n (default 5MB) StringIO file-like chunks for that file.\n \"\"\"\n fstat = os.stat(local_file)\n fsize = fstat.st_size\n \n num_chunks = max(int(floor(float(fsize) / 5242880.0)), 1)\n \n fp = open(local_file, 'rb')\n for i in range(num_chunks):\n if i == (num_chunks-1):\n size_hint = 0\n else:\n size_hint = fsize / num_chunks\n \n tfp = StringIO()\n tfp.writelines(fp.readlines(size_hint))\n tfp.seek(0)\n yield tfp\n fp.close()\n\ndef upload_worker(multipart_key, fp, index, headers=None):\n \"\"\"\n Uploads a file chunk in a MultiPart S3 upload. 
If an error occurs uploading\n this chunk, retry up to `CHUNK_RETRIES` times.\n \"\"\"\n success = False\n attempts = 0\n while not success:\n try:\n fp.seek(0)\n multipart_key.upload_part_from_file(fp, index, headers=headers)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n success = False\n \n attempts += 1\n if attempts >= CHUNK_RETRIES:\n break\n \n sleep(0.5)\n else:\n success = True\n \n if not success:\n raise Exception(\"Upload of chunk %d failed after 5 retries.\" % index)\n \n fp.close()\n\ndef upload_chunk(arg_list):\n thread = Thread(\n target=upload_worker,\n args=arg_list\n )\n thread.daemon = False\n thread.start()\n return thread\n\n# ========== Uploader methods ==========\n\ndef easy_up(local_file,rdir=None):\n if os.path.isfile(local_file):\n print \"File:\"\n print os.path.abspath(local_file)\n print\n\n if not rdir:\n rpath = \"files/\"+datetime.now().strftime(\"%Y%m%d\")\n else:\n rpath = rdir\n remote_path = rpath+\"/\"+os.path.basename(local_file)\n\n upload_file(os.path.abspath(local_file), DEFAULT_BUCKET, remote_path,0)\n\n print \"File uploaded to:\"\n if BUCKET_CNAME:\n print \"%s/%s\" % (BUCKET_CNAME, remote_path)\n else:\n print \"https://s3.amazonaws.com/%s/%s\" % (DEFAULT_BUCKET, remote_path)\n \n print\n else:\n print \"Path given is not a file.\"\n\ndef upload_file(local_file, bucket, remote_path, cache_time=0, policy=\"public-read\", force_download=False):\n # Expiration time:\n cache_time = int(cache_time)\n \n # Metadata that we need to pass in before attempting an upload.\n content_type = guess_type(local_file, False)[0] or \"application/octet-stream\"\n basic_headers = {\n \"Content-Type\" : content_type,\n }\n encrypt_key = False\n #if (policy != \"public-read\"):\n # print \"encryption on\"\n # encrypt_key = True\n if force_download:\n basic_headers[\"Content-Disposition\"] = \"attachment; filename=%s\"% os.path.basename(local_file)\n\n if \"spokesman\" in bucket:\n key_id = r\"AKIAIX4TYGMLSSXMW6EA\"\n secret = r\"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\"\n else:\n key_id = AWS_ACCESS_KEY_ID\n secret = AWS_SECRET_ACCESS_KEY\n \n # Set up a connection to S3\n if \"spokesman\" in bucket:\n s3 = S3Connection(aws_access_key_id=key_id,aws_secret_access_key=secret,host='s3-us-west-2.amazonaws.com')\n print \"https://s3-us-west-2.amazonaws.com/media-s3.spokesman.com/%s\" % remote_path\n else:\n s3 = S3Connection(aws_access_key_id=key_id,aws_secret_access_key=secret)\n bucket = s3.get_bucket(bucket)\n \n # Get info on the local file to determine whether it's large enough that we can perform\n # upload parallelization.\n fstat = os.stat(local_file)\n \n mp_key = bucket.initiate_multipart_upload(remote_path, headers=basic_headers,\n encrypt_key=encrypt_key\n )\n \n active_threads = []\n try:\n # Chunk the given file into `CHUNKING_MIN_SIZE` (default: 5MB) chunks that can\n # be uploaded in parallel.\n chunk_generator = mem_chunk_file(local_file)\n \n # Use `UPLOAD_PARALLELIZATION` (default: 4) threads at a time to churn through\n # the `chunk_generator` queue.\n for i, chunk in enumerate(chunk_generator):\n args = (mp_key, chunk, i+1, basic_headers)\n \n # If we don't have enough concurrent threads yet, spawn an upload thread to\n # handle this chunk.\n if len(active_threads) < UPLOAD_PARALLELIZATION:\n # Upload this chunk in a background thread and hold on to the thread for polling.\n t = upload_chunk(args)\n active_threads.append(t)\n \n # Poll until an upload thread finishes before allowing more upload threads to spawn.\n while 
len(active_threads) >= UPLOAD_PARALLELIZATION:\n for thread in active_threads:\n # Kill threads that have been completed.\n if not thread.isAlive():\n thread.join()\n active_threads.remove(thread)\n \n # a polling delay since there's no point in constantly waiting and taxing CPU\n sleep(0.1)\n \n # We've exhausted the queue, so join all of our threads so that we wait on the last pieces\n # to complete uploading.\n for thread in active_threads:\n thread.join()\n except:\n # Since we have threads running around and possibly partial data up on the server,\n # we need to clean up before propogating an exception.\n sys.stderr.write(\"Exception! Waiting for existing child threads to stop.\\n\\n\")\n for thread in active_threads:\n thread.join()\n \n # Remove any already-uploaded chunks from the server.\n mp_key.cancel_upload()\n for mp in bucket.list_multipart_uploads():\n if mp.key_name == remote_path:\n mp.cancel_upload()\n \n # Propogate the error.\n raise\n else:\n # We finished the upload successfully. \n mp_key.complete_upload()\n key = bucket.get_key(mp_key.key_name)\n \n # ===== / chunked upload =====\n \n if cache_time != 0:\n key.set_metadata('Cache-Control','max-age=%d, must-revalidate' % int(cache_time))\n else:\n key.set_metadata('Cache-Control','no-cache, no-store')\n \n if policy == \"public-read\":\n key.make_public()\n else:\n key.set_canned_acl(policy)\n\n\ndef main(args):\n if len(args) == 5:\n upload_file(args[0],args[1],args[2],args[3],args[4])\n elif len(args) == 4:\n upload_file(args[0],args[1],args[2],args[3])\n elif len(args) == 3:\n upload_file(args[0],args[1],args[2])\n elif len(args) == 2:\n easy_up(args[0],args[1])\n elif len(args) == 1:\n easy_up(args[0],None)\n else:\n print \"Usage:\"\n print \"s3up filename\"\n print \" Uploads the given file to DEFAULT_BUCKET (%s) at the following path:\" % DEFAULT_BUCKET\n print \" files/YYYYMMDD/(filename)\"\n print\n print \"s3up filename [remote_directory]\"\n print \" As above, except the file is uploaded to the given directory:\"\n print \" (remote_directory)/(filename)\"\n print\n print \"s3up filename [bucket] [remote_filename] [cache_time]\"\n print\n print \"s3up filename [bucket] [remote_filename] [cache_time] [policy]\"\n print\n\nif __name__ == '__main__':\n try:\n main(sys.argv[1:])\n except Exception, e:\n sys.stderr.write('\\n')\n traceback.print_exc(file=sys.stderr)\n sys.stderr.write('\\n')\n sys.exit(1)\n" } ]
1
ambujbpl/pythonBasics
https://github.com/ambujbpl/pythonBasics
47510e049746e7fbd9d1d41bd2a183616c3f1f4d
d869fc314e2ce88a66c7badafafb2ab2a73093f2
803c18293ca58d778381608fdfb30d5767c97b93
refs/heads/master
2020-12-21T15:16:49.919146
2020-02-03T04:36:48
2020-02-03T04:36:48
236,471,653
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6781326532363892, "alphanum_fraction": 0.7592137455940247, "avg_line_length": 15.319999694824219, "blob_id": "8b124a971be28eb100a71de9edb6d66103042678", "content_id": "dd7e58d168ad75d2f3d7cfef295a77fd0dd3fc60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 49, "num_lines": 25, "path": "/calender_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import calendar;\nimport datetime;\nimport time;\n\nprint(calendar.weekheader(1))\n\nprint(calendar.firstweekday())\n\nprint(calendar.month(2020, 1))\n\nprint(calendar.monthcalendar(2020, 1))\n\nprint(calendar.calendar(2020))\n\ndow = calendar.weekday(2020, 3,3)\nprint(dow)\n\nisLeap = calendar.isleap(2020)\nprint(isLeap)\n\n\nhow_many_leap_days = calendar.leapdays(2000,2005)\nprint(how_many_leap_days)\n\nprint(datetime.date())" }, { "alpha_fraction": 0.5433186292648315, "alphanum_fraction": 0.5844346284866333, "avg_line_length": 22.517240524291992, "blob_id": "3238a4adf74d5a1771ee2e57838931aed3989f09", "content_id": "55d5d5abce05c0883532007630d55287cdfe35bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 84, "num_lines": 29, "path": "/zipFunction_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "list1 = [1,2,3,4,5,6]\nlist2 = [\"amb\",\"harshi\",\"ami\",\"sid\",\"ma\",\"pa\"]\nzipped = list(zip(list1,list2))\nprint(zipped)\nunzipped = list(zip(*zipped))\nprint(unzipped)\nprint(unzipped[0])\nprint(unzipped[1])\n\nfor (l1,l2) in zip(list1,list2):\n print(l1)\n print(l2)\n print('\\n')\n\n\nproducts = ['Apple','Mango','Banana']\nquantity = [1,5,12]\nprice = [50,12,30]\nsentances = []\nfor (p,q,pri) in zip(products,quantity,price):\n # p, r, pri = str(p), str(q), str(pri) // group typcasting\n sentance = 'I bought ' + str(q) + ' ' + p + ' in ' + str(pri) + ' rupees only.';\n sentances.append(sentance)\n # print(p)\n # print(q)\n # print(pri)\n # print('\\n')\n\nprint(sentances)" }, { "alpha_fraction": 0.6728395223617554, "alphanum_fraction": 0.6975308656692505, "avg_line_length": 26, "blob_id": "c03260df0f3fc49148aa57d0d654e0bd29479c63", "content_id": "3f31aaa95f328057c08f749dac9d3366035c4da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/if_else_conditional.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import turtle_square_using_function;\nyour_input = 300;\nif(your_input>0):\n turtle_square_using_function.square();\nelse:\n print(\"number : \"+ str(your_input))\n" }, { "alpha_fraction": 0.5608247518539429, "alphanum_fraction": 0.6082473993301392, "avg_line_length": 16.321428298950195, "blob_id": "5c419fde0097d10b010c70b94d67b9e84dcc1806", "content_id": "f709f376e0c6a12d4ed245cdfbde3788b268f636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 27, "num_lines": 28, "path": "/turtle_square_using_function.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "\nimport turtle;\n\ntur = turtle.Turtle();\ntur.speed(10)\nforward = 200;\ndef square():\n tur.forward(forward);\n tur.right(90);\n tur.forward(forward);\n tur.right(90);\n 
tur.forward(forward);\n tur.right(90);\n tur.forward(forward);\n # tur.right(90);\n\n\n\ndef side6():\n # tur.forward(forward);\n tur.right(45);\n tur.forward(forward);\n tur.right(45);\n tur.forward(forward);\n tur.right(45);\n tur.forward(forward);\n# square()\n# tur.forward(200);\n# square()" }, { "alpha_fraction": 0.41025641560554504, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 20.794116973876953, "blob_id": "c2472fce43a3ef5cc7de909c1c02d777a4eef681", "content_id": "1c83f81de2cd25984dde53d4ba96bf0f818f4ccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 62, "num_lines": 34, "path": "/list_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "arr = [\"ambu\",12,[\"new1\",\"new2\"],\"test\"];\nprint(arr);\narr.append(\"newTest\");\narr.insert(0,\"oldTest\");\nprint(len(arr));\nfor item in arr:\n print(item);\n\n\n# print(arr[:2])\nprint(arr[:-1])\n\n\nprint(\"-----------------------------------------------------\")\narr1 = [0,1,2,3,4,5,6,7,8,9,10]\nfor i in range(len(arr1)):\n print(arr1[0:i]);\n\n\n\nprint(\"-----------------------------------------------------\")\nshow_length_size = 7;\narr1 = [0,1,2,3,4,5,6,7,8,9,10]\nfor i in range(len(arr1)-(show_length_size-1)):\n print(arr1[i:i+show_length_size]);\n\n\n\nprint(\"-----------------------------------------------------\")\narr2 = \"test1,test2,test3,test4,test5\";\nsplited = arr2.split(',');\nprint(splited);\nnewStr = ' Start : '.join(splited);\nprint(newStr);\n" }, { "alpha_fraction": 0.4835355281829834, "alphanum_fraction": 0.5133448839187622, "avg_line_length": 27.574256896972656, "blob_id": "41e52e7e031c453428311fdb68a68b79aaa59c12", "content_id": "dd55dceeb2909847f90c11ce8359760578f15a77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2885, "license_type": "no_license", "max_line_length": 138, "num_lines": 101, "path": "/project/tictoc.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "# All Global Variable is here:-)\nboard = [\"-\",\"-\",\"-\",\n \"-\",\"-\",\"-\",\n \"-\",\"-\",\"-\"]\n\ngame_still_in_progress = True\n\nwinner = None;\n\ncurrent_player = \"X\";\n\ndef display_board():\n print(board[0] + ' | ' + board[1] + ' | ' + board[2])\n print(board[3] + ' | ' + board[4] + ' | ' + board[5])\n print(board[6] + ' | ' + board[7] + ' | ' + board[8])\n\ndef play_game():\n # display 1\n # initial board\n # display_board()\n while game_still_in_progress:\n take_user_input()\n flip_players()\n winner = check_if_game_over()\n if winner == \"X\" or winner == \"O\":\n print(winner + ' won the match.');\n else:\n print('Match Tie');\n return\ndef take_user_input():\n print(current_player + \" 's turn now\")\n position = int(input(\"chose any position from 1 to 9 :- \")) -1;\n while position not in [0,1,2,3,4,5,6,7,8]:\n position = int(input(\"Position : \" + str(position + 1) + \" is not valid input. So please chose any position from 1 to 9 :- \")) -1;\n while board[position] != '-':\n position = int(input(\"Position : \" + str(\n position + 1) + \" is already filled. 
So please chose new position from 1 to 9 :- \")) - 1;\n if(current_player == \"X\"):\n board[position] = \"X\"\n else:\n board[position] = \"O\"\n display_board()\n return\n\ndef check_if_game_over():\n win = check_if_win()\n check_if_tie()\n return win\ndef check_if_win():\n global game_still_in_progress\n global current_player\n # check rows\n row_1 = board[0] == board[1] == board[2] != \"-\"\n row_2 = board[3] == board[4] == board[5] != \"-\"\n row_3 = board[6] == board[7] == board[8] != \"-\"\n if(row_1 or row_2 or row_3):\n game_still_in_progress = False\n if row_3:\n return board[6]\n elif row_2:\n return board[3]\n elif row_1:\n return board[0]\n # check columns\n col_1 = board[0] == board[3] == board[6] != \"-\"\n col_2 = board[1] == board[4] == board[7] != \"-\"\n col_3 = board[2] == board[5] == board[8] != \"-\"\n if(col_1 or col_2 or col_3):\n game_still_in_progress = False\n if col_3:\n return board[2]\n elif col_2:\n return board[1]\n elif col_1:\n return board[0]\n # check Diagonals\n dia_1 = board[0] == board[4] == board[8] != \"-\"\n dia_2 = board[6] == board[4] == board[2] != \"-\"\n if(dia_1 or dia_2):\n game_still_in_progress = False\n if dia_1:\n return board[0]\n elif dia_2:\n return board[2]\n return\n\ndef check_if_tie():\n global game_still_in_progress\n if '-' not in board:\n game_still_in_progress = False\n return\n\ndef flip_players():\n global current_player\n if(current_player == \"X\"):\n current_player = \"O\";\n else:\n current_player = \"X\";\n return\n#display_board()\nplay_game()" }, { "alpha_fraction": 0.8245614171028137, "alphanum_fraction": 0.8245614171028137, "avg_line_length": 27.5, "blob_id": "68446d27bfecb7ed04a81ae06fbec26d9de7e4e1", "content_id": "fd5e72abc5593aa6c1e3a89c177f7f9707a4f9cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/main.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "#import zipFunction_example;\nfrom project import tictoc;\n" }, { "alpha_fraction": 0.5633803009986877, "alphanum_fraction": 0.6173709034919739, "avg_line_length": 22.66666603088379, "blob_id": "4f66dd42ee87b79b31941d49ee439c68a72143d0", "content_id": "7a635c8af4ed5026f045f1701bf84ea72a3dc2b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 87, "num_lines": 18, "path": "/dictionary_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\nobj = {\"name\":\"ambuj\",\"surname\":\"dubey\",\"org\":\"AVR\"};\nprint(obj[\"name\"]);\nprint(obj.get(\"mobile\"));\nprint(obj.get(\"test\"));\n\nprint( sorted(list(obj.values())))\n\nobj1 = {\"ambuj_age\":30,\"skd_age\":56,\"kal_age\":51,\"ami_age\":31,\"har_age\":25,\"sid_age\":8}\narr =[];\nfor key in obj1:\n if obj1[key] >= 30:\n arr.append(key)\n\nprint(arr)\n\n\nprint([arr2 for arr2 in obj1 if obj1[arr2] >= 30] );\n" }, { "alpha_fraction": 0.516539454460144, "alphanum_fraction": 0.669211208820343, "avg_line_length": 34.818180084228516, "blob_id": "751e19913185ea382530d830f65633978ae97db2", "content_id": "6a612741d637e670c30fa7fe5fb334927330389d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/tupel_example.py", "repo_name": 
"ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "tup1 = (\"ambuj\",\"dubey\",\"bhopal\",\"9753750955\")\ntup2 = (\"harshi\",\"dubey\",\"bhopal\",\"9669809091\")\ntup3 = (\"shrawan\",\"dubey\",\"bhopal\",\"9926740459\")\ntup4 = (\"kalpana\",\"dubey\",\"bhopal\",\"9691953737\")\ntup5 = (\"amidha\",\"dubey\",\"bhopal\",\"7869596292\")\ntupArr = [tup1,tup2,tup3,tup4,tup5]\nfor name,surname,location,mobile in tupArr:\n print(name)\n print(surname)\n print(location)\n print(mobile)" }, { "alpha_fraction": 0.4644412100315094, "alphanum_fraction": 0.49492016434669495, "avg_line_length": 39.588233947753906, "blob_id": "91b2e79e6f6618d0e355569ee7d9b67f65e2b35c", "content_id": "5760e0508f567b47c09f7d8bc42f82f8bb1e0543", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/regax_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import re;\ntext = \"qqqqqq [email protected] 8 A my name is ambuj dubey [email protected]\";\nregax_pattern = re.compile(\"ambuj\");\nresult = regax_pattern.search(text);\nprint(result)\nprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\nregax_pattern1 = re.compile(\"[ad]\");\nresult1 = regax_pattern1.search(text);\nprint(result1)\nprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\nregax_pattern2 = re.compile(\"[a-jA-D0-9]+\");\nresult3 = regax_pattern2.search(text);\nprint(result3)\nprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\nregax_pattern3 = re.compile(\"[a-zA-z0-9]+@[a-zA-z0-9]+\\.[a-zA-z0-9]+\");\nresult4 = regax_pattern2.findall(text);\nprint(result4)" }, { "alpha_fraction": 0.6244131326675415, "alphanum_fraction": 0.6384976506233215, "avg_line_length": 22.77777862548828, "blob_id": "6384cb684685401c372799c818e8352efe05ecb4", "content_id": "ab5619a8f9ef5f154a79fd87144db8bf095b4077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/for_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import turtle_square_using_function;\nfor i in range(3):\n print(\"i : \",str(i));\n turtle_square_using_function.side6();\n\n\nfor i in range(4):\n print(\"i : \",str(i));\n turtle_square_using_function.square();" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.6900584697723389, "avg_line_length": 27.66666603088379, "blob_id": "14221b69aba42aeb96614d20f39407d0ceec30eb", "content_id": "5a52d1af6852752ec5b20b0187600e050669675e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/set_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "old_list = [1,2,2,1,3,4,3,4,6,5,7,6,7];\nprint(old_list)\nreduced_list_by_set = set(old_list)\nprint(reduced_list_by_set)\nnew_list = list(reduced_list_by_set)\nprint(new_list)" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6708074808120728, "avg_line_length": 25.66666603088379, "blob_id": "bf334a4d5262eb140a6da7dd9b68d89bf9338b74", "content_id": "494ecdfda6352bc0e4f45e50947e67f7ca2ef0cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": 
"no_license", "max_line_length": 42, "num_lines": 6, "path": "/while_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import turtle_square_using_function\nrepeat = 5;\nwhile(repeat>0):\n print(\"repeat : \",str(repeat));\n turtle_square_using_function.square();\n repeat -= 1\n\n" }, { "alpha_fraction": 0.6763540506362915, "alphanum_fraction": 0.7186261415481567, "avg_line_length": 22.30769157409668, "blob_id": "b6c13a1f776dc8e7f083a0d7bba1a480643f4759", "content_id": "d294952d0f704d78a05bf0f4935ec2e56fc7114a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "no_license", "max_line_length": 84, "num_lines": 65, "path": "/datetime_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "import datetime;\nad = \"ambuj dubey\"\ntoday = datetime.date.today();\ntodayDT = datetime.datetime.today();\ndob = datetime.date(1989,8,6);\nprint(today);\nprint(dob);\nprint(repr(dob))# like type of in js\n\nday_since_birthday = (today - dob).days;\nprint(day_since_birthday);#1647 9469 11131\n\n# add some(Exa :- 10 more day) day in current day (today)\naddTimeDelta = datetime.timedelta(days=10);\nprint(today + addTimeDelta)\n\n# today month\nprint(today.month)\n\n# today day\nprint(today.day)\n\n\n# today weekdy -> Mon=0 - Sun=6\nprint(today.weekday())\n\n\n# today set time\nprint(datetime.time(9,43,50,10))\n#datetime.date(Y-M-D)\n#datetime.time(H-M-S-MS)\n#atetime.datetime(Y-M-D H-M-S-MS)\nprint(todayDT.time())\n\n# timedelta\n# add some(Exa :- 10 more hour) hour in current time (today)\nhour_delta = datetime.timedelta(hours=10);\nprint(todayDT + hour_delta)\n\n\n#pip install --user pytz set Time zone in python\nimport pytz;\ndatetime_today_utc = datetime.datetime.now(tz=pytz.UTC);\nprint(datetime_today_utc);\n\ndatetime_today_pacific = datetime_today_utc.astimezone(pytz.timezone('US/Pacific'));\nprint(datetime_today_pacific);\n\n#print all time zone in python\nfor tz in pytz.all_timezones:\n print(tz)\n\n\n\n#string formatting with dates in python\n#1 January 27, 2020\nprint(datetime_today_pacific.strftime('%B %d, %Y'));\n\n#2 Take input as 'January 27, 2020' and output as '2020-01-27'\ndatetime_today_str = datetime.datetime.strptime('January 27, 2020','%B %d, %Y')\nprint(datetime_today_str);\nprint(repr(datetime_today_str));\n\n\n# pip install maya" }, { "alpha_fraction": 0.6684092879295349, "alphanum_fraction": 0.6960418224334717, "avg_line_length": 30.904762268066406, "blob_id": "bc126963bd3bdc6686fe02e4b14d91e8f02b2950", "content_id": "17d2bb16fe23ccf27b2c34d4da98058887f739f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1339, "license_type": "no_license", "max_line_length": 130, "num_lines": 42, "path": "/scraping_example.py", "repo_name": "ambujbpl/pythonBasics", "src_encoding": "UTF-8", "text": "# pip install beautifulsoup4\n# pip install requests // --user\n# pip install pandas\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n#page = requests.get(\"https://forecast.weather.gov/MapClick.php?lat=44.9055&lon=-122.8107&lg=english&FcstType=text#.XjUM82gzZPY\");\npage = requests.get(\"https://forecast.weather.gov/MapClick.php?lat=36.3741&lon=-119.2702#.XjUcwWgzZPY\");\nsoup = BeautifulSoup(page.content,'html.parser')\n#print(soup.find_all('a'))\nday7 = 
soup.find(id=\"seven-day-forecast-container\");\n#print(day7)\n#print(day7.find_all('li'))\n#print(day7.find_all(class_=\"forecast-tombstone\"))\nitems = day7.find_all(class_=\"tombstone-container\");\n#print(items[0])\n# for item in items:\n# print(\"\\n\\n\")\n# print(item.find(class_=\"period-name\").getText())\n# print(item.find(class_=\"short-desc\").getText())\n# print(item.find(class_=\"temp\").getText())\n# print(\"\\n\\n\")\n\npName = [item.find(class_=\"period-name\").getText() for item in items]\nsDesc = [item.find(class_=\"short-desc\").getText() for item in items]\ntemp = [item.find(class_=\"temp\").getText() for item in items]\n\n\n#\n# print(pName)\n# print(sDesc)\n# print(temp)\n\nweather_stuff = pd.DataFrame({\n \"period\":pName,\n \"short description\":sDesc,\n \"temprature\":temp\n})\n\nprint(weather_stuff)\nweather_stuff.to_csv(\"weather_stuff1.csv\") # make csv for you" } ]
15
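The regax_example.py record in the files list above builds up to extracting email addresses with Python's re module. The sketch below is a minimal, self-contained version of that technique; it assumes only the standard library, and the sample text, the email_pattern name, and the exact pattern are illustrative rather than taken from the original file. The character classes are written [a-zA-Z0-9] deliberately: the range [A-z] would also admit the ASCII punctuation characters that sit between 'Z' and 'a'.

import re

# Illustrative sample text; any string with embedded addresses behaves the same.
text = "reach me at [email protected] or [email protected]"

# findall returns every non-overlapping match as a list of strings.
email_pattern = re.compile(r"[a-zA-Z0-9.]+@[a-zA-Z0-9]+\.[a-zA-Z0-9.]+")
print(email_pattern.findall(text))  # ['[email protected]', '[email protected]']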
austin-schick/puzzlehunt-website
https://github.com/austin-schick/puzzlehunt-website
0c21968bcdafa4826c654f234a82d922b365e6c0
839536d87c1e17c44c506abbdf38f1c5d5a5ce08
6beae834dfd6696feb45756b1c5bec2024e3fa95
refs/heads/master
2021-09-07T12:17:53.388227
2018-02-22T19:38:24
2018-02-22T19:38:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47050240635871887, "alphanum_fraction": 0.5028142333030701, "avg_line_length": 31.632652282714844, "blob_id": "44d98968bc63dc4a310dc46dd3cb3870599cef2f", "content_id": "7be23619055007f7eea9e884413c0f409f157241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4797, "license_type": "no_license", "max_line_length": 80, "num_lines": 147, "path": "/notes/hw4.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "#################################################\n# Hw4\n#################################################\n\nimport cs112_s18_week4_linter\nimport math\nimport string\nimport copy\n\n#################################################\n# Helper functions\n#################################################\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n#################################################\n# Hw4 problems\n#################################################\n\n\"\"\"\nFill in your answers to the List Function Table problem here.\n\na = a + b\na += b\na.append(x)\na.insert(0, x)\na.extend(b)\na.remove(x)\na.pop(0)\ndel a[0]\na.reverse()\nreversed(a)\na.sort()\nsorted(a)\ncopy.copy(a)\n\"\"\"\n\ndef inverseLookAndSay(a):\n return 42\n\ndef bestScrabbleScore(dictionary, letterScores, hand):\n return 42 \n\n######################################################################\n# ignore_rest: The autograder will ignore all code below here\n######################################################################\n\nfrom tkinter import *\n\ndef drawChessboard(winWidth=640, winHeight=640):\n root = Tk()\n canvas = Canvas(root, width=winWidth, height=winHeight)\n canvas.pack()\n\n canvas.create_text(winWidth/2, winHeight/2,\n text=\"return 42\", font=\"Arial 20 bold\")\n\n root.mainloop()\n\n##### Bonus #####\n\n\n#################################################\n# Hw4 Test Functions\n#################################################\n\ndef _verifyInverseLookAndSayIsNondestructive():\n a = [(1,2), (2,3)]\n b = copy.copy(a)\n inverseLookAndSay(a) # ignore result, just checking for destructiveness here\n return (a == b)\n\ndef testInverseLookAndSay():\n print(\"Testing inverseLookAndSay()...\", end=\"\")\n assert(_verifyInverseLookAndSayIsNondestructive() == True)\n assert(inverseLookAndSay([]) == [])\n assert(inverseLookAndSay([(3,1)]) == [1,1,1])\n assert(inverseLookAndSay([(1,-1),(1,2),(1,7)]) == [-1,2,7])\n assert(inverseLookAndSay([(2,3),(1,8),(3,-10)]) == [3,3,8,-10,-10,-10])\n assert(inverseLookAndSay([(5,2), (2,5)]) == [2]*5 + [5]*2)\n assert(inverseLookAndSay([(2,5), (5,2)]) == [5]*2 + [2]*5)\n print(\"Passed!\")\n\ndef testBestScrabbleScore():\n print(\"Testing bestScrabbleScore()...\", end=\"\")\n def dictionary1(): return [\"a\", \"b\", \"c\"]\n def letterScores1(): return [1] * 26\n def dictionary2(): return [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"] \n def letterScores2(): return [1+(i%5) for i in range(26)]\n assert(bestScrabbleScore(dictionary1(), letterScores1(), list(\"b\")) ==\n (\"b\", 1))\n assert(bestScrabbleScore(dictionary1(), 
letterScores1(), list(\"ace\")) ==\n ([\"a\", \"c\"], 1))\n assert(bestScrabbleScore(dictionary1(), letterScores1(), list(\"b\")) ==\n (\"b\", 1))\n assert(bestScrabbleScore(dictionary1(), letterScores1(), list(\"z\")) ==\n None)\n # x = 4, y = 5, z = 1\n # [\"xyz\", \"zxy\", \"zzy\", \"yy\", \"yx\", \"wow\"]\n # 10 10 7 10 9 -\n assert(bestScrabbleScore(dictionary2(), letterScores2(), list(\"xyz\")) ==\n ([\"xyz\", \"zxy\"], 10))\n assert(bestScrabbleScore(dictionary2(), letterScores2(), list(\"xyzy\")) ==\n ([\"xyz\", \"zxy\", \"yy\"], 10))\n assert(bestScrabbleScore(dictionary2(), letterScores2(), list(\"xyq\")) ==\n (\"yx\", 9))\n assert(bestScrabbleScore(dictionary2(), letterScores2(), list(\"yzz\")) ==\n (\"zzy\", 7))\n assert(bestScrabbleScore(dictionary2(), letterScores2(), list(\"wxz\")) ==\n None)\n print(\"Passed!\")\n\ndef testDrawChessboard():\n print(\"Testing drawChessboard()...\")\n print(\"Since this is graphics, this test is not interactive.\")\n print(\"Inspect each of these results manually to verify them.\")\n drawChessboard()\n drawChessboard(winWidth=400, winHeight=400)\n print(\"Done!\")\n\n\n\n#################################################\n# Hw4 Main\n#################################################\n\ndef testAll():\n testInverseLookAndSay()\n testBestScrabbleScore()\n testDrawChessboard()\n\ndef main():\n cs112_s18_week4_linter.lint() # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.578422486782074, "alphanum_fraction": 0.5908129215240479, "avg_line_length": 30.74257469177246, "blob_id": "999883da1cb82c6fe11466e7ac990c7ee2ed5dfe", "content_id": "1e8cf57bef57e4b00a1560754b4ab37e78ee3b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3309, "license_type": "no_license", "max_line_length": 80, "num_lines": 101, "path": "/notes/keyEventsDemo.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# keyEventsDemo.py\r\n#\r\n# keyPressed, keyReleased\r\n# with ctrl and shift\r\n\r\nfrom tkinter import *\r\n\r\ndef setEventInfo(event, data, eventName):\r\n ctrl = ((event.state & 0x0004) != 0)\r\n shift = ((event.state & 0x0001) != 0)\r\n msg = eventName + \": \"\r\n msg += \"(ctrl=\" + str(ctrl) + \")\"\r\n msg += \"(shift=\" + str(shift) + \")\"\r\n msg += \"(char=\" + event.char + \")\"\r\n msg += \"(keysym=\" + event.keysym + \")\"\r\n data.info = msg\r\n\r\ndef ignoreKey(event):\r\n # Helper function to return the key from the given event\r\n ignoreSyms = [ \"Shift_L\", \"Shift_R\", \"Control_L\", \"Control_R\", \"Caps_Lock\" ]\r\n return (event.keysym in ignoreSyms)\r\n\r\ndef keyPressed(event, data):\r\n if (ignoreKey(event) == False):\r\n setEventInfo(event, data, \"keyPressed\")\r\n if (event.keysym not in data.pressedLetters):\r\n data.pressedLetters.add(event.keysym)\r\n\r\ndef keyReleased(event, data):\r\n if (ignoreKey(event) == False):\r\n setEventInfo(event, data, \"keyReleased\")\r\n if (event.keysym in data.pressedLetters):\r\n data.pressedLetters.remove(event.keysym)\r\n\r\ndef redrawAll(canvas, data):\r\n # Draw the pressedLetters\r\n font = (\"Arial\", 16, \"bold\")\r\n msg = \"Pressed Letters: \" + str(sorted(data.pressedLetters))\r\n canvas.create_text(data.width/2, data.height/3, text=msg, font=font)\r\n # Draw the event info message\r\n font = (\"Arial\", 16, \"bold\")\r\n canvas.create_text(data.width/2, data.height*2/3, text=data.info, font=font)\r\n\r\ndef init(data):\r\n data.info = \"Key Events 
Demo\"\r\n data.pressedLetters = set()\r\n\r\ndef timerFired(data): pass\r\ndef mousePressed(event, data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n # Note changes #1:\r\n def keyWrapper(keyFn, event, canvas, data):\r\n keyFn(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n init(data)\r\n # create the root and the canvas\r\n root = Tk()\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n\r\n # Note changes #2:\r\n root.bind(\"<KeyPress>\", lambda event:\r\n keyWrapper(keyPressed, event, canvas, data))\r\n root.bind(\"<KeyRelease>\", lambda event:\r\n keyWrapper(keyReleased, event, canvas, data))\r\n\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(600, 300)\r\n\r\n" }, { "alpha_fraction": 0.5792264938354492, "alphanum_fraction": 0.591160237789154, "avg_line_length": 31.76865577697754, "blob_id": "073bc4084d93d8c71f7915f18b2f08bd55a75a9e", "content_id": "6776841b2501c89d93c38ccfd6a9d17bc87d5ac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4525, "license_type": "no_license", "max_line_length": 77, "num_lines": 134, "path": "/notes/mouseEventsDemo.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# mouseEventsDemo.py\r\n#\r\n# mousePressed, mouseMoved, mouseReleased\r\n# with left or right button\r\n# and with ctrl and shift\r\n# and mouseMotion (when no button is pressed)\r\n\r\nfrom tkinter import *\r\n\r\ndef setEventInfo(event, data, eventName):\r\n ctrl = ((event.state & 0x0004) != 0)\r\n shift = ((event.state & 0x0001) != 0) \r\n msg = \"\"\r\n if ctrl: msg += \"ctrl-\"\r\n if shift: msg += \"shift-\"\r\n msg += eventName\r\n msg += \" at \" + str((event.x, event.y))\r\n data.info = msg\r\n\r\ndef mouseMotion(event, data):\r\n setEventInfo(event, data, \"mouseMotion\")\r\n data.motionPosn = (event.x, event.y)\r\n\r\ndef leftPressed(event, data):\r\n setEventInfo(event, data, \"leftPressed\")\r\n data.leftPosn = (event.x, event.y)\r\n\r\ndef leftMoved(event, data):\r\n setEventInfo(event, data, \"leftMoved\")\r\n data.leftPosn = (event.x, event.y)\r\n\r\ndef leftReleased(event, data):\r\n setEventInfo(event, data, \"leftReleased\")\r\n data.leftPosn = (event.x, event.y)\r\n\r\ndef rightPressed(event, data):\r\n setEventInfo(event, data, \"rightPressed\")\r\n data.rightPosn = (event.x, event.y)\r\n\r\ndef rightMoved(event, data):\r\n setEventInfo(event, data, \"rightMoved\")\r\n data.rightPosn = (event.x, event.y)\r\n\r\ndef rightReleased(event, data):\r\n setEventInfo(event, data, \"rightReleased\")\r\n data.rightPosn = (event.x, 
event.y)\r\n\r\ndef redrawAll(canvas, data):\r\n # Draw the \"L\"\r\n font = (\"Arial\", 24, \"bold\")\r\n (cx, cy) = data.leftPosn\r\n canvas.create_text(cx, cy, text=\"L\", font=font)\r\n # Draw the \"R\"\r\n (cx, cy) = data.rightPosn\r\n canvas.create_text(cx, cy, text=\"R\", font=font)\r\n # Draw the \"M\"\r\n (cx, cy) = data.motionPosn\r\n canvas.create_text(cx, cy, text=\"M\", font=font)\r\n # Draw the event info message\r\n font = (\"Arial\", 16, \"bold\")\r\n canvas.create_text(300, 25, text=data.info, font=font)\r\n\r\ndef init(data):\r\n data.leftPosn = (data.width//4, data.height//2)\r\n data.rightPosn = (data.width*3//4, data.height//2)\r\n data.motionPosn = (data.width//2, data.height//2)\r\n data.info = \"Mouse Events Demo\"\r\n\r\ndef timerFired(data): pass\r\ndef keyPressed(event, data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n # Note changes #1:\r\n def mouseWrapper(mouseFn, event, canvas, data):\r\n mouseFn(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n init(data)\r\n # create the root and the canvas\r\n root = Tk()\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n\r\n # Note changes #2:\r\n root.bind(\"<Button-1>\", lambda event:\r\n mouseWrapper(leftPressed, event, canvas, data))\r\n root.bind(\"<Button-3>\", lambda event:\r\n mouseWrapper(rightPressed, event, canvas, data))\r\n canvas.bind(\"<Motion>\", lambda event:\r\n mouseWrapper(mouseMotion, event, canvas, data))\r\n canvas.bind(\"<B1-Motion>\", lambda event:\r\n mouseWrapper(leftMoved, event, canvas, data))\r\n canvas.bind(\"<B3-Motion>\", lambda event:\r\n mouseWrapper(rightMoved, event, canvas, data))\r\n root.bind(\"<B1-ButtonRelease>\", lambda event:\r\n mouseWrapper(leftReleased, event, canvas, data))\r\n root.bind(\"<B3-ButtonRelease>\", lambda event:\r\n mouseWrapper(rightReleased, event, canvas, data))\r\n\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(600, 300)\r\n" }, { "alpha_fraction": 0.6348229646682739, "alphanum_fraction": 0.6758801341056824, "avg_line_length": 52.478023529052734, "blob_id": "6aa71083979137a0ceefff296ce836332e66f8b0", "content_id": "f891603cf4fb31e794a40984d7af3b6bd5aebd90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 9913, "license_type": "no_license", "max_line_length": 274, "num_lines": 182, "path": "/notes/notes-tetris/2_3_CreatingTheFallingPiece.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html><head><title>Tetris 
for Intro/Intermediate Programmers (Fall 2017 update)</title></head>\r\n<body bgcolor=\"#ADDEFA\">\r\n\r\n<hr color=\"#e88100\">\r\n<font face=\"Arial\"><b>Tetris for Intro/Intermediate Programmers (Fall 2017 update)<br>\r\nStep 3:&nbsp; Creating and Drawing the fallingPiece</b></font>\r\n\r\n<hr color=\"#e88100\">\r\n<ul>\r\n\t<li>\r\n\t<p align=\"left\">The goal for this step is to select a random falling piece \r\n\tin a random color, to position it in the top-middle of the board, and draw \r\n\tit over the board.&nbsp; Note that the \"falling piece\" will not be falling \r\n\tafter this step.&nbsp; That comes later.&nbsp; For now, it will remain at \r\n\tthe top-middle of the board.&nbsp; Also, for testing purposes, we will add \r\n\ttemporary code that changes the falling piece whenever any key is pressed.&nbsp; \r\n\tThis code will be removed after this step.<br>\r\n&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Recall\r\nthat the falling piece is not part of the board, but drawn over the\r\nboard.&nbsp; It becomes part of the board when it can no longer fall\r\nand we place it on the board, which will not happen until several steps\r\nfrom now in our design process.<br>\r\n&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Defining the piece types:<br>\r\n\tIn this design, the falling piece is represented by a 2-dimensional list of \r\n\tbooleans, indicating whether the given cell is or is not painted in this \r\n\tpiece.&nbsp; For example, here is the definition of an S piece:<br><font color=\"#800000\">\r\n<pre> sPiece = [\r\n [ False, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, False ]\r\n ]</pre></font>\r\n\tThis defines a 2-dimensional boolean list with 2 rows and 3 columns.&nbsp; \r\n\tThe \"True\" values have been highlighted to make it clear that they form an \r\n\tS \r\n\tpattern.&nbsp; This is how we know which cells are \r\n\tpart of the falling piece.<br>\r\n&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Here are all the piece types, with the \"True\" values \r\n\thighlighted to make the shapes easier to discern (note that they are \r\n\tprovided in their standard configurations -- how they should enter the board \r\n\tfrom the top -- so, for example, the T piece is upside down):</p>\r\n<pre><font color=\"#800000\"> # Seven \"standard\" pieces (tetrominoes)\r\n\r\n iPiece = [\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]\r\n\r\n jPiece = [\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, False, False ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]\r\n\r\n lPiece = [\r\n [ False, False, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]\r\n\r\n oPiece = [\r\n [ <span style=\"background-color: 
rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]\r\n\r\n sPiece = [\r\n [ False, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, False ]\r\n ]\r\n\r\n tPiece = [\r\n [ False, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, False ],\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]\r\n\r\n zPiece = [\r\n [ <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, False ],\r\n [ False, <span style=\"background-color: rgb(192, 192, 192);\">True</span>, <span style=\"background-color: rgb(192, 192, 192);\">True</span> ]\r\n ]</font></pre>\r\n\t</li>\r\n\t<li>\r\n\t<p align=\"left\">For this design, we need to place all 7 of these piece types \r\n\tinto a single list, tetrisPieces.&nbsp; That will be a list of \r\n\t2-dimensional lists, so it is in fact a 3-dimensional list!&nbsp; Here it \r\n\tis:<br><font color=\"#800000\">\r\n<pre> tetrisPieces = [ iPiece, jPiece, lPiece, oPiece, sPiece, tPiece, zPiece ]</pre>\r\n\t</font></p></li>\r\n\t<li>\r\n\t<p align=\"left\">We also need to define colors corresponding to each of these \r\n\tpieces, and place them in a list of the same size, which we do as such:<br><font color=\"#800000\">\r\n<pre> tetrisPieceColors = [ \"red\", \"yellow\", \"magenta\", \"pink\", \"cyan\", \"green\", \"orange\" ]</pre>\r\n\t</font></p></li>\r\n\t<li>\r\n\t<p align=\"left\">Store these values in the data fields:<br>We\r\nshould define the pieces and the piece colors in our init function, and\r\nthen we should also store them in the data object, with this code:<br><font color=\"#800000\">\r\n<pre> data.tetrisPieces = tetrisPieces\r\n data.tetrisPieceColors = tetrisPieceColors</pre>\r\n\t</font></p></li>\r\n\r\n\t<li><p align=\"left\">Writing newFallingPiece:<br>\r\nThe newFallingPiece function (which takes one parameter, data, and returns nothing) is responsible for randomly choosing a new\r\npiece, setting its color, and positioning it in the middle of the top\r\nrow.&nbsp; The first step is to randomly choose an index from the\r\ntetrisPieces list. 
We haven't learned how to use random numbers yet, but the following lines of code will do it for us.<br><font color=\"#800000\">\r\n<pre> import random\r\n randomIndex = random.randint(0, len(data.tetrisPieces) - 1)</pre>\r\n </font>\r\n\r\n Then we set the\r\ndata values holding the fallingPiece and the fallingPieceColor\r\nto the indexed elements from the lists of tetrisPieces and\r\ntetrisPieceColors.<br>\r\n\t<br>\r\n\tNext, we set the top row of the falling piece (fallingPieceRow) to the \r\n\ttop row of the board (that is, to zero).&nbsp; And then we set the left column of the new falling piece \r\n\t(fallingPieceCol).&nbsp; As we want \r\n\tthe piece to emerge at the top-middle of the board, we might set this to the \r\n\tcenter, or&nbsp;cols//2.&nbsp; However, this will place the new falling piece \r\n\ta bit off to the right, since its full width would extend beyond the middle.&nbsp; \r\n\tTo compensate for this fact, we subtract half the width of the falling \r\n\tpiece, or&nbsp;fallingPieceCols//2, from the middle of the board<br>\r\n&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Drawing the falling piece:<br>\r\n\tAfter calling drawBoard, you should then call drawFallingPiece, so the falling piece is drawn <span style=\"font-style: italic;\">over the board</span>\r\n(in this way, to the user it looks like the falling piece is on the\r\nboard, but in reality it is stored separately and drawn\r\nseparately).&nbsp; To draw the falling piece (in the drawFallingPiece\r\nfunction, which takes both canvas and data), we iterate over each cell in the fallingPiece, and if the\r\nvalue of that cell is&nbsp;True, then we should draw it reusing the\r\nsame drawCell function that drawBoard uses, but in the color of the\r\nfallingPiece (rather than the color stored in the board for that row\r\nand column).&nbsp; However, we have to add the offset of the left-top\r\nrow and column (that is,&nbsp;fallingPieceRow and&nbsp;fallingPieceCol)\r\nso that the fallingPiece is properly positioned on the board.\r\nAlso, note that this step requires that we add an additional parameter\r\nto the drawCell function -- the color to fill the cell.<br>\r\n&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Initial falling piece:<br>\r\n\tWe must add one line to our init function to select the first falling piece of \r\n\tthe game (by calling the newFallingPiece function we just wrote).<br>\r\n\t&nbsp;</p></li>\r\n\t<li>\r\n\t<p align=\"left\">Temporary test code:<br>\r\n\tAs noted above, for testing purposes, we will add temporary code that \r\n\tchanges the falling piece whenever any key is pressed.&nbsp; This code will \r\n\tbe removed after this step.&nbsp; This involves the following simple key \r\n\tevent handler:<br><font color=\"#800000\">\r\n<pre> def keyPressed(event, data):\r\n # for now, for testing purposes, just choose a new falling piece\r\n # whenever ANY key is pressed!\r\n newFallingPiece(data)</pre>\r\n\t</font></p></li>\r\n\r\n<li>At the end of this stage, your Tetris board should be able to do this:<br><br>\r\n\t<img src=\"pix/step-3.gif\" border=\"0\" width=\"250\">\r\n</li>\r\n</ul>\r\n\r\n<hr color=\"#e88100\">\r\n<table style=\"border-collapse: collapse;\" id=\"table2\" border=\"0\" cellpadding=\"0\" width=\"100%\">\r\n\t<tbody><tr>\r\n\t\t<td width=\"33%\">&nbsp;</td>\r\n\t\t<td width=\"33%\">\r\n\t\t<p align=\"center\">\r\n<a href=\"2_2_CreatingTheBoard.html\"><img src=\"pix/previous.gif\" border=\"0\" height=\"15\" width=\"73\"></a>\r\n<a 
href=\"TetrisForIntroIntermediateProgrammers.html\"><img src=\"pix/home.gif\" border=\"0\" height=\"16\" width=\"19\"></a>\r\n<a href=\"2_4_MovingTheFallingPiece.html\"><img src=\"pix/next.gif\" border=\"0\" height=\"16\" width=\"73\"></a></p></td>\r\n\t\t<td width=\"33%\">\r\n\t\t<p align=\"right\"><font face=\"Arial\" size=\"2\">David Kosbie<br>\r\nCarnegie Mellon University<br>\r\n<a href=\"mailto:[email protected]\">[email protected]</a></font></p></td>\r\n\t</tr>\r\n</tbody></table>\r\n\r\n<hr color=\"#e88100\">\r\n\r\n</body></html>" }, { "alpha_fraction": 0.672025740146637, "alphanum_fraction": 0.7138263583183289, "avg_line_length": 21.923076629638672, "blob_id": "bc67086e450456c740d16974009d279587dcd980", "content_id": "c36928aed652c29bcbadfa72f72bed43e96ba298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/notes/imagesDemo3a.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# imagesDemo3a.py\r\n# create base64 encoding\r\n# for use in imagesDemo3b.py\r\n\r\nimport base64\r\n\r\ndef readBinaryFile(path):\r\n with open(path, \"rb\") as f: return f.read()\r\n\r\ndef getBase64(filename):\r\n return base64.encodebytes(readBinaryFile(filename))\r\n\r\nprint(getBase64(\"sampleImage1.gif\").decode('ascii'))\r\n" }, { "alpha_fraction": 0.5721306800842285, "alphanum_fraction": 0.5903440117835999, "avg_line_length": 28.883928298950195, "blob_id": "0f327a50850f041f8203d20f072daa5a42848a38", "content_id": "8b3683ddb429e1b03abe34912e94ec3f319a4f18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3459, "license_type": "no_license", "max_line_length": 80, "num_lines": 112, "path": "/notes/dialogs-demo2.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# dialogs-demo2.py\r\n# modal dialog, text input field, and hidden password\r\n\r\nfrom tkinter import *\r\nfrom tkinter import messagebox, simpledialog\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\nclass MyDialog(simpledialog.Dialog):\r\n def body(self, master):\r\n self.modalResult = None\r\n Label(master, text=\"User:\").grid(row=0)\r\n Label(master, text=\"Password:\").grid(row=1)\r\n self.e1 = Entry(master)\r\n self.e2 = Entry(master, show=\"*\")\r\n self.e1.grid(row=0, column=1)\r\n self.e2.grid(row=1, column=1)\r\n return self.e1 # initial focus\r\n\r\n def apply(self):\r\n first = self.e1.get()\r\n second = self.e2.get()\r\n self.modalResult = (first, second)\r\n\r\ndef showDialog(data):\r\n dialog = MyDialog(data.root)\r\n return dialog.modalResult\r\n\r\ndef button1Pressed(data):\r\n message = \"Result = \" + str(showDialog(data))\r\n data.message = message\r\n data.count += 1\r\n \r\ndef redrawAll(canvas, data):\r\n # background (fill canvas)\r\n canvas.create_rectangle(0,0,300,300,fill=\"cyan\")\r\n # print message\r\n msg = \"message: \" + str(data.message)\r\n canvas.create_text(150,130,text=msg)\r\n msg = \"count: \" + str(data.count)\r\n canvas.create_text(150,170,text=msg)\r\n\r\ndef onButton(data, buttonId):\r\n if (buttonId == 1): button1Pressed(data)\r\n\r\ndef init(data):\r\n data.message = \"none\"\r\n data.count = 0\r\n buttonFrame = Frame(data.root)\r\n b1 = Button(buttonFrame, text=\"Click me!\", command=lambda:onButton(data,1))\r\n b1.grid(row=0,column=0)\r\n 
buttonFrame.pack(side=TOP)\r\n\r\ndef mousePressed(event, data): pass\r\ndef keyPressed(event, data): pass\r\ndef timerFired(event): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n\r\n # create the root and the canvas (Note Change: do this BEFORE calling init!)\r\n root = Tk()\r\n\r\n # Store root in data so buttons can access\r\n data.root = root\r\n\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(300, 300)\r\n" }, { "alpha_fraction": 0.6915589570999146, "alphanum_fraction": 0.7169299721717834, "avg_line_length": 50.79338836669922, "blob_id": "fe569bbb53f367e1c1ea7a823bcd1fdd9ed46724", "content_id": "80abd0ec389a2487431076618f7cd52f3a8151e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 6267, "license_type": "no_license", "max_line_length": 499, "num_lines": 121, "path": "/notes/colab1.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n <title>15-112: Fundamentals of Programming</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/reset.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/bootstrap.min.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112-highlight-style.css\">\n <script src=\"../js/jquery-2.1.4.min.js\"></script>\n <script src=\"../js/highlight.pack.js\"></script>\n <script src=\"../js/bootstrap.min.js\"></script>\n <script id=\"112-script\" src=\"../js/112.js\"></script>\n <base target=\"_self\">\n</head>\n<body>\n\n<div class=\"navbar\">\n15-112 <br> Spring 18\n<br><br><a target=\"_self\" href=\"../index.html\">Home</a>\n<br><br><a target=\"_self\" href=\"../syllabus.html\">Syllabus</a>\n<br><br><a target=\"_self\" href=\"../schedule.html\">Schedule</a>\n<br><br><a target=\"_self\" href=\"../gallery.html\">Gallery</a>\n<br><br><a target=\"_self\" href=\"../staff.html\">Staff</a>\n<br><br><a target=\"_self\" href=\"../piazza.html\">Piazza</a>\n<br><br><a target=\"_self\" href=\"../autolab.html\">Autolab</a>\n<br><br><a target=\"_blank\" href=\"../oh-queue.html\">OH Queue</a>\n</div>\n\n<div class=\"content\">\n<h1>\nCMU 15-112 Spring 
2018: Fundamentals of Programming and Computer Science<br>\nColab 1 (Due Thursday 18-Jan, at 10pm)\n</h1>\n\n<hr>\n\n<ul>\n<li>This assignment is <span class=\"collaborative\">collaborative</span>.\nThat means you may work with other students enrolled in the course, and you may even help each other write code and debug. However, you must still type all of your own work, and you must fully understand the code that you submit. Even though this is collaborative, you may not directly copy any code from anyone, and you may not electronically share your code with anyone. See the syllabus for more details.</li>\n\n<li>List your collaboration partners (name and andrew id) in a comment on the first line of this file. If you collaborate with another student and do not include their name in a comment, it will be considered cheating. You may work alone if you want to, but we recommend working with others, as it generally leads to better learning.</li>\n\n<li>Be a good collaborator! Help everyone in your group, and accept\ntheir help if you need it. Don't be in a hurry to finish the problems.\nInstead, take your time and be sure that everyone in the group is\nfollowing and understanding. The goal is to learn, not just to finish.\n</li>\n<br>\n<li>To start:\n<ol>\n<li>Create a folder named 'week1'</li>\n<li>Download both\n <a href=\"colab1.py\" download>colab1.py</a>\n and\n <a href=\"cs112_s18_week1_linter.py\" download>cs112_s18_week1_linter.py</a>\n to that folder</li>\n<li>Edit colab1.py using Pyzo</li>\n<li>When you are ready, submit colab1.py to Autolab. For this colab, you may submit up to 20 times\n(which is way more than you should require),\nbut only your last submission counts.</li>\n</ol>\n</li>\n<li>Do not use strings, loops, lists, or recursion this week.</li>\n<li>Do not hardcode the test cases in your solutions.</li>\n</ul>\n\n<hr>\n\n<ol>\n\n<br><li><b>isPerfectSquare(n)</b> [15pts]<br>\nWrite the function isPerfectSquare(n) that takes a possibly-non-int value, and returns True if it is an int that is a perfect square (that is, if there exists an integer m such that m**2 == n), and False otherwise. Do not crash on non-ints nor on negative ints.\n</li>\n\n<br><li><b>perfectSquareHandler()</b> [15pts]<br>\nWrite the function perfectSquareHandler() that does not take any parameters and does not return any values. Instead, it asks the user to input a number, then prints whether or not that number is a perfect square. You must use the specific prompts shown in the examples below to pass the test cases (where the bolded numbers are user input), so you should copy the prompts directly into your code to avoid typos. Of course, your program should be able to handle any integer, not just 4 and 7.<br><br>\n\n<div class=\"python-code no-run no-viz\">\nEnter a number:<b>4</b>\nThe number 4 is a perfect square\n</div>\n\n<div class=\"python-code no-run no-viz\">\nEnter a number:<b>7</b>\nThe number 7 is not a perfect square\n</div>\n</li>\n\n<br><li><b>nearestOdd(n)</b> [25pts]<br>\nWrite the function nearestOdd(n) that takes an int or float n, and returns as an int value the nearest odd number to n. In the case of a tie, return the smaller odd value.\n</li>\n\n<br><li><b>rectanglesOverlap(x1, y1, w1, h1, x2, y2, w2, h2)</b> [25pts]<br>\nA rectangle can be described by its left, top, width, and height. 
This function takes two rectangles described this way, and returns True if the rectangles overlap at all (even if just at a point), and False otherwise.<br><br>\n\nNote: here we will represent coordinates the way they are usually represented in computer graphics, where (0,0) is at the left-top corner of the screen, and while the x-coordinate increases while you head right, the y-coordinate increases while you head down. Yes, up is down! This is quite common in computer graphics, and is how Tkinter and Brython in particular both work.\n</li>\n\n<br><li><b>getKthDigit(n, k)</b> [10pts]<br>\n Write the function getKthDigit(n, k) that takes a possibly-negative int n and a non-negative int k, and returns the kth digit of n, starting from\n 0, counting from the right. So:<br>\n &nbsp;&nbsp;&nbsp;getKthDigit(789, 0) returns 9<br>\n &nbsp;&nbsp;&nbsp;getKthDigit(789, 2) returns 7<br>\n &nbsp;&nbsp;&nbsp;getKthDigit(789, 3) returns 0<br>\n &nbsp;&nbsp;&nbsp;getKthDigit(-789, 0) returns 9\n</li>\n\n<br><li><b>setKthDigit(n, k, d=0)</b> [10pts]<br>\nWrite the function setKthDigit(n, k, d=0) that takes three integers -- n, k, and d -- where n is a possibly-negative int, k is a non-negative int, and d is a non-negative single digit (between 0 and 9 inclusive) with a default value of 0. This function returns the number n but with the kth digit replaced with d. Counting starts at 0 and goes right-to-left, so the 0th digit is the rightmost digit. For example:<br>\n&nbsp;&nbsp;&nbsp;setKthDigit(468, 0, 1) returns 461<br>\n&nbsp;&nbsp;&nbsp;setKthDigit(468, 1, 1) returns 418<br>\n&nbsp;&nbsp;&nbsp;setKthDigit(468, 2, 1) returns 168<br>\n&nbsp;&nbsp;&nbsp;setKthDigit(468, 3, 1) returns 1468<br>\n&nbsp;&nbsp;&nbsp;setKthDigit(468, 1) returns 408\n\n</li>\n</ol>\n</div>\n<hr>\n</body>\n</html>\n" }, { "alpha_fraction": 0.594277560710907, "alphanum_fraction": 0.6117310523986816, "avg_line_length": 34.03092956542969, "blob_id": "d763df1dc1b054883ffe97806051e11446f55178", "content_id": "d151bc6dcbf8c70091cb85fcce0fe71a75ae8c16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3495, "license_type": "no_license", "max_line_length": 80, "num_lines": 97, "path": "/notes/imagesDemo1.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# imagesDemo1.py\r\n# view in canvas\r\n# read from file\r\n# with transparent pixels\r\n# get size, resize (zoom and subsample)\r\n\r\n# image resized, made transparent with:\r\n# http://www.online-image-editor.com/\r\n\r\nfrom tkinter import *\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ndef redrawAll(canvas, data):\r\n # Draw a background rectangle to highlight the transparency\r\n # of the images\r\n canvas.create_rectangle(0, 10, data.width, 190, fill=\"cyan\")\r\n # Draw the demo info\r\n font = (\"Arial\", 16, \"bold\")\r\n msg = \"Image Demo #1 (read from file)\"\r\n canvas.create_text(data.width/2, 25, text=msg, font=font)\r\n # Draw the original size image on the left\r\n imageSize = ( (data.image.width(), data.image.height()) )\r\n msg = \"Full-size \" + str(imageSize)\r\n canvas.create_text(data.width/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width/5, 100, anchor=N, image=data.image)\r\n # Draw a half-size image in the middle\r\n imageSize = ( (data.halfImage.width(), data.halfImage.height()) )\r\n msg = \"Half-size \" + str(imageSize)\r\n canvas.create_text(data.width/2, 50, text=msg, 
font=font)\r\n canvas.create_image(data.width/2, 100, anchor=N, image=data.halfImage)\r\n # Draw a double-size image on the right\r\n imageSize = ( (data.doubleImage.width(), data.doubleImage.height()) )\r\n msg = \"Double-size \" + str(imageSize)\r\n canvas.create_text(data.width*4/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width*4/5, 100, anchor=N, image=data.doubleImage)\r\n\r\ndef init(data):\r\n data.image = PhotoImage(file=\"sampleImage1.gif\")\r\n data.halfImage = data.image.subsample(2,2)\r\n data.doubleImage = data.image.zoom(2,2)\r\n\r\ndef mousePressed(event, data): pass\r\ndef keyPressed(event, data): pass\r\ndef timerFired(data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n\r\n # create the root and the canvas (Note Change: do this BEFORE calling init!)\r\n root = Tk()\r\n\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(1000, 500)\r\n" }, { "alpha_fraction": 0.5480272769927979, "alphanum_fraction": 0.5999512672424316, "avg_line_length": 30.153263092041016, "blob_id": "2e00b13c93aabeff7c701f5d9adf07f50777b019", "content_id": "1cc4cf949920bc542011729bbd120457735caff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20530, "license_type": "no_license", "max_line_length": 78, "num_lines": 659, "path": "/notes/wk2_practice.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "#################################################\n# Week2 Practice\n#################################################\n\nimport cs112_s18_week2_linter\nimport math\n\n#################################################\n# Helper functions\n#################################################\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n#################################################\n# Tue 
Lecture\n#################################################\n\ndef digitCount(n):\n return 42\n\ndef hasConsecutiveDigits(n):\n return 42\n\ndef gcd(x, y):\n return 42\n\ndef pi(n):\n return 42\n\ndef h(n):\n return 42\n\ndef estimatedPi(n):\n return 42\n\ndef estimatedPiError(n):\n return 42\n\ndef sumOfDigits(n):\n return 42\n\ndef nthAdditivePrime(n):\n return 42\n\ndef nthPerfectNumber(n):\n return 42\n\ndef vowelCount(s):\n return 42\n\ndef interleave(s1, s2):\n return 42\n\ndef hasBalancedParentheses(s):\n return 42\n\n#################################################\n# Wed Recitation\n#################################################\n\ndef longestDigitRun(n):\n return 42\n\ndef longestIncreasingRun(n):\n return 42\n\ndef nthPalindromicPrime(n):\n return 42\n\ndef nthLeftTruncatablePrime(n):\n return 42\n\ndef nthCarolPrime(n):\n return 42\n\ndef rotateStringLeft(s, k):\n return 42\n\ndef rotateStringRight(s, k):\n return 42\n\ndef wordWrap(text, width):\n return 42\n\ndef largestNumber(s):\n return 42\n\n#################################################\n# Thu Lecture\n#################################################\n\ndef sumOfSquaresOfDigits(n):\n return 42\n\ndef isHappyNumber(n):\n return 42\n\ndef nthHappyNumber(n):\n return 42\n\ndef isHappyPrime(n):\n return 42\n\ndef nthHappyPrime(n):\n return 42\n\ndef mostFrequentDigit(n):\n return 42\n\ndef nthPowerfulNumber(n):\n return 42\n\ndef nthCircularPrime(n):\n return 42\n\ndef findZeroWithBisection(f, x0, x1, epsilon):\n return 42\n\ndef longestSubpalindrome(s):\n return 42\n\ndef leastFrequentLetters(s):\n return 42\n\n#################################################\n# Extra Practice\n#################################################\n\ndef sameChars(s1, s2):\n return 42\n\ndef mostFrequentLetters(s):\n return 42\n\ndef areAnagrams(s1, s2):\n return 42\n\ndef collapseWhitespace(s):\n return 42\n\ndef replace(s1, s2, s3):\n return 42\n\ndef encodeOffset(s, d):\n return 42\n\ndef decodeOffset(s, d):\n return 42\n\ndef encrypt(msg, pwd):\n return 42\n\ndef decrypt(msg, pwd):\n return 42\n\n######################################################################\n# ignore_rest: The autograder will ignore all code below here\n######################################################################\n\n#################################################\n# Test Functions\n#################################################\n\ndef testDigitCount():\n print('Test digitCount()...', end='')\n assert(digitCount(0) == 1)\n assert(digitCount(5) == 1)\n assert(digitCount(-5) == 1)\n assert(digitCount(42) == 2)\n assert(digitCount(-42) == 2)\n assert(digitCount(121) == 3)\n assert(digitCount(-121) == 3)\n assert(digitCount(-10002000) == 8)\n print('Passed')\n\ndef testHasConsecutiveDigits():\n print('Testing hasConsecutiveDigits()... ', end='')\n assert(hasConsecutiveDigits(0) == False)\n assert(hasConsecutiveDigits(123456789) == False)\n assert(hasConsecutiveDigits(1212) == False)\n assert(hasConsecutiveDigits(1212111212) == True)\n assert(hasConsecutiveDigits(33) == True)\n assert(hasConsecutiveDigits(330) == True)\n assert(hasConsecutiveDigits(3003) == True)\n assert(hasConsecutiveDigits(-1212111212) == True)\n print('Passed.')\n\ndef testGcd():\n print('Testing gcd()... 
', end='')\n    assert(gcd(3, 3) == 3)\n    assert(gcd(3**6, 3**6) == 3**6)\n    assert(gcd(3**6, 2**6) == 1)\n    assert (gcd(2*3*4*5,3*5) == 15)\n    x = 1568160 # 2**5 * 3**4 * 5**1 * 11**2\n    y = 3143448 # 2**3 * 3**6 * 7**2 * 11**1\n    g = 7128 # 2**3 * 3**4 * 11**1\n    assert(gcd(x, y) == g)\n    print('Passed.')\n\ndef testPi():\n    print('Testing pi()... ', end='')\n    assert(pi(1) == 0)\n    assert(pi(2) == 1)\n    assert(pi(3) == 2)\n    assert(pi(4) == 2)\n    assert(pi(5) == 3)\n    assert(pi(100) == 25) # there are 25 primes in the range [2,100]\n    print('Passed.')\n\ndef testH():\n    print('Testing h()... ', end='')\n    assert(almostEqual(h(0),0))\n    assert(almostEqual(h(1),1/1 )) # h(1) = 1/1\n    assert(almostEqual(h(2),1/1 + 1/2 )) # h(2) = 1/1 + 1/2\n    assert(almostEqual(h(3),1/1 + 1/2 + 1/3)) # h(3) = 1/1 + 1/2 + 1/3\n    print('Passed.')\n\ndef testEstimatedPi():\n    print('Testing estimatedPi()... ', end='')\n    assert(estimatedPi(100) == 27)\n    print('Passed.')\n\ndef testEstimatedPiError():\n    print('Testing estimatedPiError()... ', end='')\n    assert(estimatedPiError(100) == 2) # pi(100) = 25, estimatedPi(100) = 27\n    assert(estimatedPiError(200) == 0) # pi(200) = 46, estimatedPi(200) = 46\n    assert(estimatedPiError(300) == 1) # pi(300) = 62, estimatedPi(300) = 63\n    assert(estimatedPiError(400) == 1) # pi(400) = 78, estimatedPi(400) = 79\n    assert(estimatedPiError(500) == 1) # pi(500) = 95, estimatedPi(500) = 94\n    print('Passed.')\n\ndef testNthPrime():\n    print('Testing nthPrime()... ', end='')\n    assert(nthPrime(0) == 2)\n    assert(nthPrime(1) == 3)\n    assert(nthPrime(2) == 5)\n    assert(nthPrime(3) == 7)\n    assert(nthPrime(10) == 31)\n    assert(nthPrime(20) == 73)\n    assert(nthPrime(30) == 127)\n    print('Passed.')\n\ndef testNthAdditivePrime():\n    print('Testing nthAdditivePrime()... ', end='')\n    assert(nthAdditivePrime(0) == 2)\n    assert(nthAdditivePrime(1) == 3)\n    assert(nthAdditivePrime(5) == 23)\n    assert(nthAdditivePrime(10) == 61)\n    assert(nthAdditivePrime(15) == 113)\n    print('Passed.')\n\ndef testNthPerfectNumber():\n    print('Testing nthPerfectNumber()... ', end='')\n    assert(nthPerfectNumber(0) == 6)\n    assert(nthPerfectNumber(1) == 28)\n    assert(nthPerfectNumber(2) == 496) \n    assert(nthPerfectNumber(3) == 8128) # this can be slow \n    print('Passed.')\n\ndef testLongestDigitRun():\n    print('Testing longestDigitRun()... ', end='')\n    assert(longestDigitRun(117773732) == 7)\n    assert(longestDigitRun(-677886) == 7)\n    assert(longestDigitRun(5544) == 4)\n    assert(longestDigitRun(1) == 1)\n    assert(longestDigitRun(0) == 0)\n    assert(longestDigitRun(22222) == 2)\n    assert(longestDigitRun(111222111) == 1)\n    print('Passed.')\n\ndef testLongestIncreasingRun():\n    print('Testing longestIncreasingRun()... ', end='')\n    assert(longestIncreasingRun(27648923679) == 23679)\n    assert(longestIncreasingRun(123345) == 345)\n    assert(longestIncreasingRun(1232) == 123)\n    assert(longestIncreasingRun(0) == 0)\n    assert(longestIncreasingRun(1) == 1)\n    assert(longestIncreasingRun(10012301230123) == 123)\n    assert(longestIncreasingRun(12345678987654321) == 123456789)\n    print('Passed.')\n\ndef testNthPalindromicPrime():\n    print('Testing nthPalindromicPrime()... ', end='')\n    assert(nthPalindromicPrime(0) == 2)\n    assert(nthPalindromicPrime(1) == 3)\n    assert(nthPalindromicPrime(5) == 101)\n    assert(nthPalindromicPrime(10) == 313)\n    print('Passed.')\n\ndef testNthLeftTruncatablePrime():\n    print('Testing nthLeftTruncatablePrime()... 
', end='')\n    assert(nthLeftTruncatablePrime(0) == 2)\n    assert(nthLeftTruncatablePrime(10) == 53)\n    assert(nthLeftTruncatablePrime(1) == 3)\n    assert(nthLeftTruncatablePrime(5) == 17)\n    print('Passed.')\n\ndef testNthCarolPrime():\n    print('Testing nthCarolPrime()... ', end='')\n    assert(nthCarolPrime(0) == 7)\n    assert(nthCarolPrime(1) == 47)\n    assert(nthCarolPrime(3) == 3967)\n    assert(nthCarolPrime(6) == 16769023)\n    print('Passed.')\n\ndef testSumOfSquaresOfDigits():\n    print(\"Testing sumOfSquaresOfDigits()...\", end=\"\")\n    assert(sumOfSquaresOfDigits(5) == 25) # 5**2 = 25\n    assert(sumOfSquaresOfDigits(12) == 5) # 1**2 + 2**2 = 1+4 = 5\n    assert(sumOfSquaresOfDigits(234) == 29) # 2**2 + 3**2 + 4**2 = 4+9+16 = 29\n    print(\"Passed.\")\n\ndef testIsHappyNumber():\n    print(\"Testing isHappyNumber()...\", end=\"\")\n    assert(isHappyNumber(-7) == False)\n    assert(isHappyNumber(1) == True)\n    assert(isHappyNumber(2) == False)\n    assert(isHappyNumber(97) == True)\n    assert(isHappyNumber(98) == False)\n    assert(isHappyNumber(404) == True)\n    assert(isHappyNumber(405) == False)\n    print(\"Passed.\")\n\ndef testNthHappyNumber():\n    print(\"Testing nthHappyNumber()...\", end=\"\")\n    assert(nthHappyNumber(0) == 1)\n    assert(nthHappyNumber(1) == 7)\n    assert(nthHappyNumber(2) == 10)\n    assert(nthHappyNumber(3) == 13)\n    assert(nthHappyNumber(4) == 19)\n    assert(nthHappyNumber(5) == 23)\n    assert(nthHappyNumber(6) == 28)\n    assert(nthHappyNumber(7) == 31)\n    print(\"Passed.\")\n\ndef testIsHappyPrime():\n    print(\"Testing isHappyPrime()...\", end=\"\")\n    assert(isHappyPrime(1) == False)\n    assert(isHappyPrime(2) == False)\n    assert(isHappyPrime(3) == False)\n    assert(isHappyPrime(7) == True)\n    assert(isHappyPrime(10) == False)\n    assert(isHappyPrime(13) == True)\n    print(\"Passed.\")\n\ndef testNthHappyPrime():\n    print(\"Testing nthHappyPrime()...\", end=\"\")\n    assert(nthHappyPrime(0) == 7)\n    assert(nthHappyPrime(1) == 13)\n    assert(nthHappyPrime(2) == 19)\n    assert(nthHappyPrime(3) == 23)\n    assert(nthHappyPrime(4) == 31)\n    assert(nthHappyPrime(10) == 167)\n    assert(nthHappyPrime(20) == 397)\n    print(\"Passed.\")\n\ndef testMostFrequentDigit():\n    print('Testing mostFrequentDigit()... ', end='')\n    assert(mostFrequentDigit(0) == 0)\n    assert(mostFrequentDigit(1223) == 2)\n    assert(mostFrequentDigit(12233) == 2)\n    assert(mostFrequentDigit(-112233) == 1)\n    assert(mostFrequentDigit(1223322332) == 2)\n    assert(mostFrequentDigit(123456789) == 1)\n    assert(mostFrequentDigit(1234567789) == 7)\n    assert(mostFrequentDigit(1000123456789) == 0)\n    print('Passed.')\n\ndef testNthPowerfulNumber():\n    print('Testing nthPowerfulNumber()... ', end='')\n    assert(nthPowerfulNumber(0) == 1)\n    assert(nthPowerfulNumber(1) == 4)\n    assert(nthPowerfulNumber(2) == 8)\n    assert(nthPowerfulNumber(3) == 9)\n    assert(nthPowerfulNumber(4) == 16)\n    assert(nthPowerfulNumber(5) == 25)\n    assert(nthPowerfulNumber(10) == 64)\n    assert(nthPowerfulNumber(15) == 121)\n    assert(nthPowerfulNumber(20) == 196)\n    print('Passed.')\n\ndef testNthCircularPrime():\n    print('Testing nthCircularPrime()... ', end='')\n    assert(nthCircularPrime(0) == 2)\n    assert(nthCircularPrime(1) == 3)\n    assert(nthCircularPrime(2) == 5)\n    assert(nthCircularPrime(10) == 73)\n    assert(nthCircularPrime(15) == 197)\n    assert(nthCircularPrime(16) == 199)\n    print('Passed.')\n\ndef testFindZeroWithBisection():\n    print('Testing findZeroWithBisection()... 
', end='')\n def f1(x): return x*x - 2 # root at x=sqrt(2)\n x = findZeroWithBisection(f1, 0, 2, 0.000000001)\n assert(almostEqual(x, 1.41421356192)) \n def f2(x): return x**2 - (x + 1) # root at x=phi\n x = findZeroWithBisection(f2, 0, 2, 0.000000001)\n assert(almostEqual(x, 1.61803398887))\n def f3(x): return x**5 - 2**x # f(1)<0, f(2)>0\n x = findZeroWithBisection(f3, 1, 2, 0.000000001)\n assert(almostEqual(x, 1.17727855081))\n print('Passed.')\n\n\ndef testVowelCount():\n print(\"Testing vowelCount()...\", end=\"\")\n assert(vowelCount(\"abcdefg\") == 2)\n assert(vowelCount(\"ABCDEFG\") == 2)\n assert(vowelCount(\"\") == 0)\n assert(vowelCount(\"This is a test. 12345.\") == 4)\n assert(vowelCount(string.ascii_lowercase) == 5)\n assert(vowelCount(string.ascii_lowercase*100) == 500)\n assert(vowelCount(string.ascii_uppercase) == 5)\n assert(vowelCount(string.punctuation) == 0)\n assert(vowelCount(string.whitespace) == 0)\n print(\"Passed!\")\n\ndef testInterleave():\n print(\"Testing interleave()...\", end=\"\")\n assert(interleave(\"abcdefg\", \"abcdefg\") == \"aabbccddeeffgg\")\n assert(interleave(\"abcde\", \"abcdefgh\") == \"aabbccddeefgh\")\n assert(interleave(\"abcdefgh\",\"abcde\") == \"aabbccddeefgh\")\n assert(interleave(\"Smlksgeneg n a!\", \"a ie re gsadhm\") ==\n \"Sam likes green eggs and ham!\")\n assert(interleave(\"\",\"\") == \"\")\n print(\"Passed!\")\n\ndef testHasBalancedParentheses():\n print(\"Testing hasBalancedParentheses()...\", end=\"\")\n assert(hasBalancedParentheses(\"()\") == True)\n assert(hasBalancedParentheses(\"\") == True)\n assert(hasBalancedParentheses(\"())\") == False)\n assert(hasBalancedParentheses(\"()(\") == False) \n assert(hasBalancedParentheses(\")(\") == False)\n assert(hasBalancedParentheses(\"(()())\") == True)\n assert(hasBalancedParentheses(\"((()())(()(()())))\") == True)\n assert(hasBalancedParentheses(\"((()())(()((()())))\") == False)\n assert(hasBalancedParentheses(\"((()())(((()())))\") == False)\n print(\"Passed!\")\n\ndef testRotateStringLeft():\n print(\"Testing rotateStringLeft()...\", end=\"\")\n assert(rotateStringLeft(\"abcde\", 0) == \"abcde\")\n assert(rotateStringLeft(\"abcde\", 1) == \"bcdea\")\n assert(rotateStringLeft(\"abcde\", 2) == \"cdeab\")\n assert(rotateStringLeft(\"abcde\", 3) == \"deabc\")\n assert(rotateStringLeft(\"abcde\", 4) == \"eabcd\")\n assert(rotateStringLeft(\"abcde\", 5) == \"abcde\")\n assert(rotateStringLeft(\"abcde\", 25) == \"abcde\")\n assert(rotateStringLeft(\"abcde\", 28) == \"deabc\")\n print(\"Passed!\")\n\ndef testRotateStringRight():\n print(\"Testing rotateStringRight()...\", end=\"\")\n assert(rotateStringRight(\"abcde\", 0) == \"abcde\")\n assert(rotateStringRight(\"abcde\", 1) == \"eabcd\")\n assert(rotateStringRight(\"abcde\", 2) == \"deabc\")\n assert(rotateStringRight(\"abcde\", 3) == \"cdeab\")\n assert(rotateStringRight(\"abcde\", 4) == \"bcdea\")\n assert(rotateStringRight(\"abcde\", 5) == \"abcde\")\n assert(rotateStringRight(\"abcde\", 25) == \"abcde\")\n assert(rotateStringRight(\"abcde\", 28) == \"cdeab\")\n print(\"Passed!\")\n\ndef testSameChars():\n print(\"Testing sameChars()...\", end=\"\")\n assert(sameChars(\"abcabcabc\", \"cba\") == True)\n assert(sameChars(\"cba\", \"abcabcabc\") == True)\n assert(sameChars(\"abcabcabc\", \"cbad\") == False)\n assert(sameChars(\"abcabcabc\", \"cBa\") == False)\n assert(sameChars(42,\"The other parameter is not a string\") == False)\n assert(sameChars(\"\",\"\") == True)\n assert(sameChars(\"\",\"a\") == False)\n 
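# note: sameChars(\"\", \"\") == True holds vacuously -- every character of an\n    # empty string (there are none) appears in the other string -- which answers\n    # the \"(why?)\" posed in the week2 practice write-up\n    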
print(\"Passed!\")\n\ndef testMostFrequentLetters():\n print(\"Testing mostFrequentLetters()...\", end=\"\")\n assert(mostFrequentLetters(\"Cat\") == 'ACT')\n assert(mostFrequentLetters(\"A cat\") == 'A')\n assert(mostFrequentLetters(\"A cat in the hat\") == 'AT')\n assert(mostFrequentLetters(\"This is a test\") == 'ST')\n assert(mostFrequentLetters(\"This is an I test?\") == 'IST')\n assert(mostFrequentLetters(\"\") == \"\")\n print(\"Passed!\")\n\ndef testWordWrap():\n print(\"Testing wordWrap()...\", end=\"\")\n assert(wordWrap(\"abcdefghij\", 4) == \"\"\"\\\nabcd\nefgh\nij\"\"\")\n assert(wordWrap(\"a b c de fg\", 4) == \"\"\"\\\na*b\nc*de\nfg\"\"\")\n print(\"Passed!\")\n\ndef testLargestNumber():\n print(\"Testing largestNumber()...\", end=\"\")\n assert(largestNumber(\"I saw 3\") == 3)\n assert(largestNumber(\"3 I saw!\") == 3)\n assert(largestNumber(\"I saw 3 dogs, 17 cats, and 14 cows!\") == 17)\n assert(largestNumber(\"I saw 3 dogs, 1700 cats, and 14 cows!\") == 1700)\n assert(largestNumber(\"One person ate two hot dogs!\") == None)\n print(\"Passed!\")\n\ndef testLongestSubpalindrome():\n print(\"Testing longestSubpalindrome()...\", end=\"\")\n assert(longestSubpalindrome(\"ab-4-be!!!\") == \"b-4-b\")\n assert(longestSubpalindrome(\"abcbce\") == \"cbc\")\n assert(longestSubpalindrome(\"aba\") == \"aba\")\n assert(longestSubpalindrome(\"a\") == \"a\")\n print(\"Passed!\")\n\ndef testLeastFrequentLetters():\n print(\"Testing leastFrequentLetters()...\", end=\"\")\n assert(leastFrequentLetters(\"abc def! GFE'cag!!!\") == \"bd\")\n assert(leastFrequentLetters(\"abc def! GFE'cag!!!\".lower()) == \"bd\")\n assert(leastFrequentLetters(\"abc def! GFE'cag!!!\".upper()) == \"bd\")\n assert(leastFrequentLetters(\"\") == \"\")\n assert(leastFrequentLetters(\"\\t \\n&^#$\") == \"\")\n noq = string.ascii_lowercase.replace('q','')\n assert(leastFrequentLetters(string.ascii_lowercase + noq) == \"q\")\n print(\"Passed!\")\n\ndef testAreAnagrams():\n print(\"Testing areAnagrams()...\", end=\"\")\n assert(areAnagrams(\"\", \"\") == True)\n assert(areAnagrams(\"abCdabCd\", \"abcdabcd\") == True)\n assert(areAnagrams(\"abcdaBcD\", \"AAbbcddc\") == True)\n assert(areAnagrams(\"abcdaabcd\", \"aabbcddcb\") == False)\n print(\"Passed!\")\n\ndef testCollapseWhitespace():\n print(\"Testing collapseWhitespace()...\", end=\"\")\n assert(collapseWhitespace(\"a\\n\\n\\nb\") == \"a b\")\n assert(collapseWhitespace(\"a\\n \\t b\") == \"a b\")\n assert(collapseWhitespace(\"a\\n \\t b \\n\\n \\t\\t\\t c \") ==\n \"a b c \")\n print(\"Passed!\")\n\ndef testReplace():\n print(\"Testing replace()...\", end=\"\")\n (s1, s2, s3) = (\"abcde\", \"ab\", \"cd\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"abcdeabcde\", \"ab\", \"cd\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"babababa\", \"ab\", \"cd\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"abb\", \"ab\", \"a\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"\", \"ab\", \"a\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"abc\", \"\", \"q\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n (s1, s2, s3) = (\"abc\", \"ab\", \"\")\n assert(replace(s1, s2, s3) == s1.replace(s2, s3))\n print(\"Passed!\")\n\ndef testEncodeOffset():\n print(\"Testing encodeOffset()...\", end=\"\")\n assert(encodeOffset(\"ACB\", 1) == \"BDC\")\n assert(encodeOffset(\"ACB\", 2) == \"CED\")\n assert(encodeOffset(\"XYZ\", 1) == \"YZA\")\n 
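# the asserts below exercise negative offsets and repeated wraparound (|d| > 26)\n    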
assert(encodeOffset(\"ABC\", -1) == \"ZAB\")\n assert(encodeOffset(\"ABC\", -27) == \"ZAB\")\n assert(encodeOffset(\"Abc\", -27) == \"Zab\")\n assert(encodeOffset(\"A2b#c\", -27) == \"Z2a#b\")\n print(\"Passed!\")\n\ndef testDecodeOffset():\n print(\"Testing decodeOffset()...\", end=\"\")\n assert(decodeOffset(\"BDC\", 1) == \"ACB\")\n assert(decodeOffset(\"CED\", 2) == \"ACB\")\n assert(decodeOffset(\"YZA\", 1) == \"XYZ\")\n assert(decodeOffset(\"ZAB\", -1) == \"ABC\")\n assert(decodeOffset(\"ZAB\", -27) == \"ABC\")\n assert(decodeOffset(\"Zab\", -27) == \"Abc\")\n assert(decodeOffset(\"Z2a#b\", -27) == \"A2b#c\")\n print(\"Passed!\")\n\ndef testEncrypt():\n print(\"Testing encrypt()...\", end=\"\")\n assert(encrypt(\"Go Team!\", \"azby\") == \"GNUCAL\")\n assert(encrypt(\"a1m2a3z4i5n6g !?!?\", \"yes\") == \"YQSXMFE\")\n assert(encrypt(\"\", \"wow\") == \"\")\n assert(encrypt(\"Wow!\", \"AZBY\") == \"password must be all lowercase\")\n print(\"Passed!\")\n\ndef testDecrypt():\n print(\"Testing decrypt()...\", end=\"\")\n assert(decrypt(\"GNUCAL\", \"azby\") == \"GOTEAM\")\n assert(decrypt(\"YQSXMFE\", \"yes\") == \"AMAZING\")\n assert(decrypt(\"\", \"wow\") == \"\")\n print(\"Passed!\")\n\n#################################################\n# testAll and main\n#################################################\n\ndef testAll():\n testDigitCount()\n testHasConsecutiveDigits() \n testGcd()\n testPi()\n testH()\n testEstimatedPi()\n testEstimatedPiError()\n testNthAdditivePrime()\n testNthPerfectNumber() \n testVowelCount()\n testInterleave()\n testHasBalancedParentheses()\n testLongestDigitRun()\n testLongestIncreasingRun()\n testNthPalindromicPrime()\n testNthLeftTruncatablePrime()\n testNthCarolPrime()\n testSumOfSquaresOfDigits()\n testRotateStringLeft()\n testRotateStringRight()\n testWordWrap()\n testLargestNumber()\n testIsHappyNumber()\n testNthHappyNumber()\n testNthHappyPrime()\n testMostFrequentDigit()\n testNthPowerfulNumber()\n testNthCircularPrime()\n testFindZeroWithBisection()\n testLongestSubpalindrome()\n testLeastFrequentLetters()\n testSameChars()\n testMostFrequentLetters()\n testAreAnagrams()\n testCollapseWhitespace()\n testReplace()\n testEncodeOffset()\n testDecodeOffset()\n testEncrypt()\n testDecrypt()\n\ndef main():\n cs112_s18_week2_linter.lint() # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6427028179168701, "alphanum_fraction": 0.6673867702484131, "avg_line_length": 43.11913299560547, "blob_id": "bac339fe2d939c60b99ee894b1960295b1c4d92d", "content_id": "a553495ac2ba90b3bea3b2e51066b0bc225eb6ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 24996, "license_type": "no_license", "max_line_length": 692, "num_lines": 554, "path": "/notes/week2-practice.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html>\r\n<head>\r\n <title>15-112: Fundamentals of Programming</title>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/reset.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/bootstrap.min.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112-highlight-style.css\">\r\n <script src=\"../js/jquery-2.1.4.min.js\"></script>\r\n <script src=\"../js/highlight.pack.js\"></script>\r\n <script src=\"../js/bootstrap.min.js\"></script>\r\n <script id=\"112-script\" 
src=\"../js/112.js\"></script>\r\n <base target=\"_self\">\r\n</head>\r\n<body>\r\n\r\n<div class=\"navbar\">\r\n15-112 <br> Spring 18\r\n<br><br><a target=\"_self\" href=\"../index.html\">Home</a>\r\n<br><br><a target=\"_self\" href=\"../syllabus.html\">Syllabus</a>\r\n<br><br><a target=\"_self\" href=\"../schedule.html\">Schedule</a>\r\n<br><br><a target=\"_self\" href=\"../gallery.html\">Gallery</a>\r\n<br><br><a target=\"_self\" href=\"../staff.html\">Staff</a>\r\n<br><br><a target=\"_self\" href=\"../piazza.html\">Piazza</a>\r\n<br><br><a target=\"_self\" href=\"../autolab.html\">Autolab</a>\r\n<br><br><a target=\"_blank\" href=\"../oh-queue.html\">OH Queue</a>\r\n</div>\r\n\r\n<div class=\"content\">\r\n<h1>\r\nCMU 15-112: Fundamentals of Programming and Computer Science<br>\r\nWeek2 Practice (Due never)\r\n</h1>\r\n\r\n\r\n<hr>\r\n\r\n<ul>\r\n<li>These problems will help you prepare for colab2, hw2, and quiz2.</li>\r\n<li>To start:\r\n<ol>\r\n<li>Go to your folder named 'week2'</li>\r\n<li>Download both\r\n <a href=\"wk2_practice.py\" download>wk2_practice.py</a>\r\n and\r\n <a href=\"cs112_s18_week2_linter.py\" download>cs112_s18_week2_linter.py</a>\r\n to that folder</li>\r\n<li>Edit wk2_practice.py using pyzo</li>\r\n</ol>\r\n</li>\r\n<li>Do not use lists or recursion this week.</li>\r\n<li>Do not hardcode the test cases in your solutions.</li>\r\n</ul>\r\n\r\n<hr>\r\n\r\n<b>Code Tracing</b><br>\r\nWhat will this code print? Figure it out by hand, then run the code to confirm. Then slightly edit the code and try again.<br><br>\r\n<b>Loops:</b>\r\n<ul>\r\n<li><b>Trace #1 of 3:</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/MOybYhryCMo\"></span>\r\n<br><div class=\"python-code\">\r\ndef ct1(m, n):\r\n total = 0\r\n for x in range(m, n+1, 3): \r\n print('x =', x)\r\n total += x\r\n for y in range(m, m+2):\r\n print('y = ', y)\r\n total += y\r\n return total\r\n\r\nprint(ct1(1,9))\r\n</div></li>\r\n\r\n<br><li><b>Trace #2 of 3:</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/1gIucWZVUqE\"></span>\r\n<br><div class=\"python-code\">\r\ndef ct2(n):\r\n k = 0\r\n total = 0\r\n while (n >= k):\r\n print('k =', k)\r\n for i in range(k):\r\n total += n%10\r\n n //= 10\r\n print(i, n%10, total)\r\n k += 1\r\n print('total =', total)\r\n\r\nprint(ct2(1234))\r\n</div></li>\r\n\r\n<br><li><b>Trace #3 of 3:</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/2CLViaCLTck\"></span>\r\n<br><div class=\"python-code\">\r\ndef ct3(z):\r\n total = 0 \r\n for y in range(z,1,-1):\r\n if (y % 2 == 0):\r\n print('skip y =', y)\r\n continue\r\n total += y\r\n if (total > 20):\r\n print('break at y =', y)\r\n break\r\n return total\r\nprint(ct3(10))\r\n</div></li>\r\n</ul>\r\n\r\n<b>Strings:</b>\r\n<ul>\r\n<li><b>Trace #1 of 3:</b>\r\n<br><div class=\"python-code\">\r\ndef ct1(s, t):\r\n result = \"\"\r\n for c in s:\r\n if (c.upper() not in \"NO!!!\"):\r\n i = t.find(c)\r\n if (result != \"\"): result += \":\"\r\n result += \"%d%s%s%s\" % (i, c, s[i], t[i])\r\n return result\r\nprint(ct1(\"net\", \"two\"))\r\n</div></li>\r\n\r\n<br><li><b>Trace #2 of 3:</b>\r\n<br><div class=\"python-code\">\r\ndef ct2(s):\r\n result = \"\"\r\n d = ord(\"a\")\r\n for c in s.lower():\r\n if (c.isalpha() and (ord(c) >= d)):\r\n result += str(ord(c) - d) + chr(d)\r\n d += 1\r\n return result\r\nprint(ct2(\"Be a CA?!?\"))\r\n</div></li>\r\n\r\n<br><li><b>Trace #3 of 3:</b>\r\n<br><div class=\"python-code\">\r\ndef ct3(s):\r\n result = 
\"\"\r\n while (len(s) > 1):\r\n result += s[:1] + s[2:4] + \".\"\r\n s = s[1:-1:2]\r\n return result + s\r\nprint(ct3(\"abcdefghi\"))\r\n</div></li>\r\n</ul>\r\n</li>\r\n\r\n<hr>\r\n\r\n<b>Reasoning Over Code</b><br>\r\nFind parameter(s) to the following functions so that they\r\nreturn True. Figure it out by hand, then run the code to confirm.\r\nThere may be more than one correct answer for each function, and\r\nyou can provide any one of them.<br><br>\r\n<b>Loops:</b>\r\n<ul>\r\n<li><b>RC #1 of 2:</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/iokRXFrRNwY\"></span>\r\n<br><div class=\"python-code\">\r\ndef rc1(n):\r\n if ((not isinstance(n, int)) or (n > 100)): return False\r\n total = 0\r\n while (n > 0):\r\n total = 10*total + n%10\r\n n //= 10\r\n return (total == 42)\r\n</div></li>\r\n\r\n<br><li><b>RC #2 of 2:</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/IXbsB5qkyfY\"></span>\r\n<br><div class=\"python-code\">\r\ndef f(n):\r\n if (n == 0): return 1\r\n n = abs(n)\r\n count = 0\r\n while (n > 0):\r\n count += 1\r\n n //= 10\r\n return count\r\n\r\ndef rc2(m):\r\n if (not(isinstance(m, int)) or (m < 0)): return False\r\n start = 0\r\n while True:\r\n count = 0\r\n for n in range(start, start+3):\r\n count += f(n)\r\n if (count > 9): break\r\n start += 1\r\n return (m == start)\r\n</div></li>\r\n\r\n</ul>\r\n\r\n<b>Strings:</b>\r\n\r\n<ul>\r\n<li><b>RC #1 of 2:</b>\r\n<br><div class=\"python-code\">\r\ndef rc1(s):\r\n if (not isinstance(s, str)): return False\r\n if ('0' in s): return False\r\n t,n = s[1:-1], int(s[0]+s[-1])\r\n return (t.isalpha() and (t == t[0]*(n//2)))\r\n</div></li>\r\n\r\n<br><li><b>RC #2 of 2:</b>\r\n<br><div class=\"python-code\">\r\ndef rc2(s, t):\r\n assert((s != \"\") and (t != \"\") and (s in t) and (s != t))\r\n result = \"\"\r\n for i in range(len(s)):\r\n if ((i % 2) == 0): result += t[i]\r\n else: result += t[-1-i]\r\n return (result == s)\r\n</div></li>\r\n</ul>\r\n\r\n\r\n<hr>\r\n\r\n<b>Free Response (Problem-Solving)</b>\r\n<br><br>\r\n<b>Tue Lecture</b>\r\n\r\n<ol>\r\n\r\n<br><li><b>digitCount(n)</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/zzwlH7bJqyQ\"></span>\r\n<br>\r\nWrite the function digitCount(n) that takes a possibly-negative int and returns the number of digits in it. So, digitCount(12323) returns 5, digitCount(0) returns 1, and digitCount(-111) returns 3. One way you could do this would be to return len(str(abs(n))), but you cannot do that, since you may not use strings here! 
This can be solved with logarithms, but seeing as this is \"loops week\", you should instead simply repeatedly remove the ones digit until you cannot.\r\n</li>\r\n\r\n<br><li><b>hasConsecutiveDigits(n)</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/8EMWfkU0QH8\"></span>\r\n<br>\r\nWrite the function hasConsecutiveDigits(n) that takes a possibly- negative int value n and returns True if that number contains two consecutive digits that are the same, and False otherwise.\r\n</li>\r\n\r\n<br><li><b>gcd(m, n)</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/BRXcAQMYy4w\"></span>\r\n<br>\r\n[Note: to receive any credit, you must solve this problem\r\nusing Euclid's algorithm, and by no other means.\r\nIn particular, do not just loop through all integers\r\nless than min(m,n) and find the common factors that way --\r\nit is much too slow!]<br>\r\nAccording to Euclid, the greatest\r\ncommon divisor, or gcd, can be found like so:\r\n<br><font class=\"snippet\">\r\n&nbsp;&nbsp;&nbsp;gcd(x,y) == gcd(y, x%y)\r\n</font><br>\r\nWe can use that to quickly find gcd's. For example:\r\n<br><font class=\"snippet\">\r\n &nbsp;&nbsp;&nbsp;\r\n gcd(270,250) == gcd(250, 20) # 270 % 250 == 20\r\n<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\r\n\r\n == gcd(20, 10) # 250 % 20 == 10\r\n<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\r\n\r\n == gcd(10, 0) # 20 % 10 == 0\r\n</font><br>\r\nWhen we get to gcd(x,0), the answer is x. So gcd(270, 250)\r\nis 10. With this in mind, write the function gcd(x,y) that\r\ntakes two positive integers x and y and returns their gcd\r\nusing Euclid's gcd algorithm.\r\n</li>\r\n\r\n<br><li><b>countingPrimes</b><br>\r\nDo the \"Counting Primes\" problem\r\n<a target=\"_blank\" href=\"http://www.kosbie.net/cmu/fall-13/15-112/handouts/hw2.html#Counting_Primes\">here</a>.\r\n\r\n</li>\r\n\r\n<br><li><b>nthAdditivePrime(n)</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/WyNDXIj7vBg\"></span>\r\n<br>\r\nWrite the function nthAdditivePrime(n) that takes a non-negative\r\nint n and returns the nth Additive Prime, which is a prime number\r\nsuch that the sum of its digits is also prime. For example,\r\n113 is prime and 1+1+3==5 and 5 is also prime, so 113 is an Additive\r\nPrime.\r\n</li>\r\n\r\n<br><li><b>nthPerfectNumber(n)</b>\r\n<span class=\"play-video\"\r\n data-src=\"https://www.youtube.com/embed/yCp1eHYOA3w\"></span>\r\n<br>\r\nWrite the function nthPerfectNumber(n) that takes a non-negative\r\ninteger n and returns the nth perfect number, starting at n=0,\r\nwhere a number is perfect if it is the sum of its positive\r\ndivisors less than itself. For example, 6 is perfect because\r\n6 = 1 + 2 + 3. Also, 28 is perfect because\r\n28 = 1 + 2 + 4 + 7 + 14. The next one is 496, then 8128.\r\nFor full credit, you need to use a faster version, which uses\r\nthe same observation that sped up isPrime, so that you\r\nonly have to search for factors up to the square root of n.\r\n</li>\r\n\r\n<br><li><b>vowelCount(s)</b><br>\r\nWrite the function vowelCount(s), that takes a string s, and returns the number of vowels in s, ignoring case, so \"A\" and \"a\" are both vowels. The vowels are \"a\", \"e\", \"i\", \"o\", and \"u\". So, for example, (\"Abc def!!! a? 
yzyzyz!\") returns 3 (two a's and one e).\r\n</li>\r\n\r\n<br><li><b>interleave(s1, s2)</b><br>\r\nWrite the function interleave(s1, s2) that takes two strings, s1 and s2, and interleaves their characters starting with the first character in s1. For example, interleave('pto', 'yhn') would return the string \"python\". If one string is longer than the other, concatenate the rest of the remaining string onto the end of the new string. For example ('a#', 'cD!f2') would return the string \"ac#D!f2\". Assume that both s1 and s2 will always be strings.</li>\r\n\r\n<br><li><b>hasBalancedParentheses(s)</b><br>\r\nWrite the function hasBalancedParentheses, which takes a string and returns True if that code has balanced parentheses and False otherwise (ignoring all non-parentheses in the string). We say that parentheses are balanced if each right parenthesis closes (matches) an open (unmatched) left parenthesis, and no left parentheses are left unclosed (unmatched) at the end of the text. So, for example, \"( ( ( ) ( ) ) ( ) )\" is balanced, but \"( ) )\" is not balanced, and \"( ) ) (\" is also not balanced. Hint: keep track of how many right\r\nparentheses remain unmatched as you iterate over the string.</li>\r\n\r\n</ol>\r\n\r\n<hr>\r\n\r\n<b>Wed Recitation</b>\r\n\r\n<ol start=\"10\">\r\n\r\n<br><li><b>longestDigitRun(n)</b><br>\r\nWrite the function longestDigitRun(n) that takes a possibly-negative int value n and returns the digit that has the longest consecutive run, or the smallest such digit if there is a tie. So, longestDigitRun(117773732) returns 7 (because there is a run of 3 consecutive 7's), as does longestDigitRun(-677886).\r\n</li>\r\n\r\n<br><li><b>longestIncreasingRun(n)</b><br>\r\nWrite the function longestIncreasingRun that takes in a positive int value n and returns the longest increasing run of digits. For example longestIncreasingRun(1232) would return 123 and longestIncreasingRun(27648923679) returns 23679. If there is a tie in run length, the larger of the two runs should be returned. So longestIncreasingRun(123345) would return 345.\r\n</li>\r\n\r\n<br><li><b>nthPalindromicPrime(n)</b><br>\r\nWrite the function nthPalindromicPrime(n). See\r\n<a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Palindromic_prime\">here</a>\r\nfor details. So nthPalindromicPrime(0) returns 2, and nthPalindromicPrime(10) returns 313.\r\n</li>\r\n\r\n<br><li><b>nthLeftTruncatablePrime(n)</b><br>\r\nWrite the function nthLeftTruncatablePrime(n). See\r\n<a target=\"_blank\" href=\"http://en.wikipedia.org/wiki/Truncatable_prime\">here</a>\r\nfor details. So nthLeftTruncatablePrime(0) returns 2, and nthLeftTruncatablePrime(10) returns 53.\r\n</li>\r\n\r\n<br><li><b>nthCarolPrime(n)</b><br>\r\nWrite the function nthCarolPrime(n), which takes a non-negative int and returns the nth Carol Prime, which is a prime number of the form ((2**k - 1)**2 - 2) for some value positive int k. For example, if k equals 3, ((2**3 - 1)**2 -2) equals 47, which is prime, and so 47 is a Carol Prime. The first several Carol primes are: 7, 47, 223, 3967, 16127, 1046527, 16769023,... As such, nthCarolPrime(0) returns 7.\r\n<br><br>\r\nNote: You must use a reasonably efficient approach that quickly works up to n==9, which will return a 12-digit answer! In particular, this means you cannot just edit isPrime. 
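One possible shape for this, shown purely as a sketch: enumerate Carol numbers directly by stepping k, and test only those candidates. The primality helper named fastIsPrime below is an assumption -- this page does not define it, and a plain trial-division isPrime will be too slow around n==9:\r\n<pre>\r\ndef nthCarolPrime(n):\r\n    found = -1\r\n    k = 0\r\n    while True:\r\n        k += 1\r\n        carol = (2**k - 1)**2 - 2\r\n        if (fastIsPrime(carol)): # fastIsPrime is assumed, not provided here\r\n            found += 1\r\n            if (found == n):\r\n                return carol\r\n</pre>\r\n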
Hint: as the sketch above suggests, generate only the Carol numbers themselves, and then test just those for primality as you go.\r\n</li>\r\n<br><li><b>rotateStringLeft(s, k)</b><br>\r\nWrite the function rotateStringLeft that takes a string s and a non-negative\r\ninteger k, and returns the string s rotated k places to the left.</li>\r\n\r\n<br><li><b>rotateStringRight(s, k)</b><br>\r\nWrite the function rotateStringRight that takes a string s and a non-negative\r\ninteger k, and returns the string s rotated k places to the right.</li>\r\n\r\n<br><li><b>\r\n<a name=\"wordWrap\"></a>wordWrap(text, width)\r\n</b><br>\r\nWrite the function wordWrap(text, width) that takes a string of text (containing only lowercase letters or spaces) \r\nand a positive integer width, and returns a possibly-multiline string that matches the original string, only with line\r\nwrapping at the given width. So wordWrap(\"abc\", 3) just returns \"abc\", but wordWrap(\"abc\",2) returns a 2-line \r\nstring, with \"ab\" on the first line and \"c\" on the second line. After you complete word wrapping in this way, only \r\nthen: All spaces at the start and end of each resulting line should be removed, and then all remaining spaces should \r\nbe converted to dashes (\"-\"), so they can be easily seen in the resulting string. Here are some test cases for you:\r\n<pre>\r\n    assert(wordWrap(\"abcdefghij\", 4) == \"\"\"\\\r\nabcd\r\nefgh\r\nij\"\"\")\r\n    assert(wordWrap(\"a b c de fg\", 4) == \"\"\"\\\r\na-b\r\nc-de\r\nfg\"\"\")\r\n</pre></li>\r\n\r\n<br><li><b>largestNumber(text)</b><br>\r\nWrite the function largestNumber(text) that takes a string of text and returns the largest int value that occurs within that text, or None if no such value occurs. You may assume that the only numbers in the text are non-negative integers and that numbers are always composed of consecutive digits (without commas, for example). For example:\r\n<pre>\r\n    largestNumber(\"I saw 3 dogs, 17 cats, and 14 cows!\")\r\n</pre>\r\nreturns 17 (the int value 17, not the string \"17\"). And\r\n<pre>\r\n    largestNumber(\"One person ate two hot dogs!\")\r\n</pre>\r\nreturns None (the value None, not the string \"None\").</li>\r\n\r\n</ol>\r\n<hr>\r\n\r\n<b>Thu Lecture</b>\r\n\r\n<ol start=\"19\">\r\n\r\n<br><li><b>Happy Primes</b><br>\r\nBackground: read the first paragraph from\r\n<a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Happy_number\">the Wikipedia page on happy numbers</a>.\r\nAfter some thought, we see that no matter what number we start with, when we keep replacing the number by the sum of the squares of its digits, we'll always either arrive at 4 (unhappy) or at 1 (happy). With that in mind, we want to write the function nthHappyNumber(n). However, to write that function, we'll first need to write isHappyNumber(n) (right?). And to write that function, we'll first need to write sumOfSquaresOfDigits(n). And that's top-down design! Here we go....\r\n<br><br>\r\nNote: the autograder will grade each of the following functions, so they are required. However, they also are\r\nhere specifically because they are just the right helper\r\nfunctions to make nthHappyNumber(n) easier to write!\r\n<ol type=\"a\">\r\n<br>\r\n<li><b>sumOfSquaresOfDigits(n)</b><br>\r\nWrite the function sumOfSquaresOfDigits(n) which takes a non-negative integer and returns the sum of the squares of its digits. 
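One possible sketch, shown only as an illustration (any correct digit-by-digit loop is fine):\r\n<pre>\r\ndef sumOfSquaresOfDigits(n):\r\n    # peel off one digit at a time, squaring each\r\n    total = 0\r\n    while (n > 0):\r\n        total += (n % 10)**2\r\n        n //= 10\r\n    return total\r\n</pre>\r\n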
Here are some test assertions for you\r\n(note that in the hw2.py starter file, instead\r\nof assert, these use assertEqual):\r\n<pre>\r\nassert(sumOfSquaresOfDigits(5) == 25) # 5**2 = 25\r\nassert(sumOfSquaresOfDigits(12) == 5) # 1**2 + 2**2 = 1+4 = 5\r\nassert(sumOfSquaresOfDigits(234) == 29) # 2**2 + 3**2 + 4**2 = 4 + 9 + 16 = 29\r\n</pre>\r\n</li>\r\n\r\n<li><b>isHappyNumber(n)</b><br>\r\nWrite the function isHappyNumber(n) which takes a possibly-negative integer and returns True if it is happy and False otherwise. Note that all numbers less than 1 are not happy. Here are some test assertions for you:\r\n<pre>\r\nassert(isHappyNumber(-7) == False)\r\nassert(isHappyNumber(1) == True)\r\nassert(isHappyNumber(2) == False)\r\nassert(isHappyNumber(97) == True)\r\nassert(isHappyNumber(98) == False)\r\nassert(isHappyNumber(404) == True)\r\nassert(isHappyNumber(405) == False)\r\n</pre>\r\n</li>\r\n\r\n<li><b>nthHappyNumber(n)</b><br>\r\nWrite the function nthHappyNumber(n) which takes a non-negative integer and returns the nth happy number (where the 0th happy number is 1). Here are some test assertions for you:\r\n<pre>\r\nassert(nthHappyNumber(0) == 1)\r\nassert(nthHappyNumber(1) == 7)\r\nassert(nthHappyNumber(2) == 10)\r\nassert(nthHappyNumber(3) == 13)\r\nassert(nthHappyNumber(4) == 19)\r\nassert(nthHappyNumber(5) == 23)\r\nassert(nthHappyNumber(6) == 28)\r\nassert(nthHappyNumber(7) == 31)\r\n</pre>\r\n</li>\r\n\r\n<li><b>nthHappyPrime(n)</b><br>\r\nA happy prime is a number that is both happy and prime. Write the function nthHappyPrime(n) which takes a non-negative integer and returns the nth happy prime number (where the 0th happy prime number is 7). \r\n</li>\r\n</ol>\r\n</li>\r\n\r\n<br><li><b>mostFrequentDigit(n)</b><br>\r\nWrite the function mostFrequentDigit(n), that takes a non-negative integer n and returns the digit from 0 to 9 that occurs most frequently in it, with ties going to the smaller digit.\r\n</li>\r\n\r\n<br><li><b>nthPowerfulNumber(n)</b><br>\r\nWrite the function nthPowerfulNumber(n). See\r\n<a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Powerful_number\">here</a>\r\nfor details. So nthPowerfulNumber(0) returns 1, and nthPowerfulNumber(10) returns 64.\r\n</li>\r\n\r\n<br><li><b>nthCircularPrime(n)</b><br>\r\nWrite the function nthCircularPrime that takes a non-negative int n and returns the nth Circular prime, which is a prime number that does not contain any 0's and such that all the numbers resulting from rotating its digits are also prime. The first Circular primes are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97, 113, 131, 197... To see why 197 is a Circular prime, note that 197 is prime, as is 971 (rotated left), as is 719 (rotated left again).\r\n</li>\r\n\r\n<br><li><b>findZeroWithBisection(f, x0, x1, epsilon)</b><br>\r\nWrite the function findZeroWithBisection(f, x0, x1, epsilon) as described <a target=\"_blank\" href=\"http://www.kosbie.net/cmu/spring-13/15-112/handouts/hw3.html#findZeroWithBisection\">here</a>.\r\n</li>\r\n<br><li><b>longestSubpalindrome(s)</b><br>\r\nWrite the function longestSubpalindrome(s), that takes a string s and returns the longest palindrome that occurs as consecutive characters (not just letters, but any characters) in s. So: \r\n<pre>\r\n longestSubpalindrome(\"ab-4-be!!!\") \r\n</pre>\r\nreturns \"b-4-b\". If there is a tie, return the lexicographically larger value -- in Python, a string s1 is lexicographically greater than a string s2 if (s1 > s2). 
So: \r\n<pre>\r\n    longestSubpalindrome(\"abcbce\") \r\n</pre>\r\nreturns \"cbc\", since (\"cbc\" > \"bcb\"). Note that unlike the previous functions, this function is case-sensitive (so \"A\" is not treated the same as \"a\" here). Also, from the explanation above, we see that longestSubpalindrome(\"aba\") is \"aba\", and longestSubpalindrome(\"a\") is \"a\".</li>\r\n\r\n<br><li><b>leastFrequentLetters(s)</b><br>\r\nWrite the function leastFrequentLetters(s), that takes a string s, and ignoring case (so \"A\" and \"a\" are treated the same), returns a lowercase string containing the least-frequent alphabetic letters that occur in s, each included only once in the result and then in alphabetic order. So: \r\n<pre>\r\n    leastFrequentLetters(\"aDq efQ? FB'daf!!!\") \r\n</pre>\r\nreturns \"be\". Note that digits, punctuation, and whitespace are not letters! Also note that seeing as we have not yet covered lists, sets, maps, or efficiency, you are not expected to write the most efficient solution. Finally, if s does not contain any alphabetic characters, the result should be the empty string (\"\").</li>\r\n\r\n</ol>\r\n<hr>\r\n\r\n\r\n<b>Extra Practice</b>\r\n\r\n<ol start=\"26\">\r\n\r\n<br><li><b>sameChars(s1, s2)</b><br>\r\nWrite the function sameChars(s1, s2) that takes two strings and returns True if the two strings are composed of the same characters (though perhaps in different numbers and in different orders) -- that is, if every character that is in the first string, is in the second, and vice versa -- and False otherwise. This test is case-sensitive, so \"ABC\" and \"abc\" do not contain the same characters. The function returns False if either parameter is not a string, but returns True if both strings are empty (why?).</li>\r\n\r\n\r\n<br><li><b>mostFrequentLetters(s)</b><br>\r\nWrite the function mostFrequentLetters(s) that takes a string s and returns the letter that occurs the most frequently in it. Your test should be case-insensitive, so \"A\" and \"a\" are the same, though your return value should always be uppercase. And if there is a tie, you should return a string with all the most frequent letters in alphabetic order. You should ignore non-letters. And if there are no letters, you should return the empty string.</li>\r\n\r\n<br><li><b>areAnagrams(s1, s2)</b><br>\r\nWrite the function areAnagrams(s1, s2) that takes two strings, s1 and s2,\r\nthat you may assume contain only upper and/or lower case letters, and returns True if the strings are anagrams, and False otherwise. Two strings are\r\nanagrams if each can be reordered into the other. Treat \"a\" and \"A\" as\r\nthe same letters (so \"Aba\" and\r\n\"BAA\" are anagrams). You may not use sort() or sorted() or any other\r\nlist-based functions or approaches. Hint: you may use s.count(), which\r\ncould be quite handy here.</li>\r\n\r\n<br><li><b>collapseWhitespace(s)</b><br>\r\nWithout using the s.replace() method, write the function collapseWhitespace(s), that takes a string s and returns an equivalent string except that each occurrence of whitespace in the string is replaced by a single space. So, for example, collapseWhitespace(\"a\\t\\t\\tb\\n\\nc\") replaces the three tabs with a single space, and the two newlines with another single space, returning \"a b c\". 
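One possible sketch, shown only as an illustration: emit a single space at the start of each whitespace run and skip the rest of the run:\r\n<pre>\r\ndef collapseWhitespace(s):\r\n    result = \"\"\r\n    for c in s:\r\n        if (c.isspace()):\r\n            # add a space only at the start of a whitespace run\r\n            if (not result.endswith(\" \")):\r\n                result += \" \"\r\n        else:\r\n            result += c\r\n    return result\r\n</pre>\r\n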
Here are a few more test cases for you:\r\n<pre>\r\n    assert(cw(\"a\\nb\") == \"a b\")\r\n    assert(cw(\"a\\n \\t b\") == \"a b\")\r\n    assert(cw(\"a\\n \\t b \\n\\n \\t\\t\\t c \") == \"a b c \")\r\n</pre>\r\nOnce again, do not use s.replace() in your solution.\r\n</li>\r\n\r\n<br><li><b>replace(s1, s2, s3)</b><br>\r\nWithout using the builtin method s.replace(), write its equivalent. Specifically, write the function replace(s1, s2, s3) that returns a string equal to s1.replace(s2, s3), but again without calling s.replace().</li>\r\n\r\n<br><li><b>encodeOffset(s, d)</b><br>\r\nWrite the function encodeOffset(s, d) that takes a string and a possibly-negative int offset d (for \"delta\"), and returns the string formed by replacing each letter in s with the letter d steps away in the alphabet. So:\r\n<pre>\r\n    encodeOffset(\"ACB\", 1) returns \"BDC\"\r\n    encodeOffset(\"ACB\", 2) returns \"CED\"\r\n</pre>\r\nThis works with wraparound, so encodeOffset(\"XYZ\", 1) returns \"YZA\". It also works with negative offsets, so encodeOffset(\"ABC\", -1) returns \"ZAB\", and the wraparound repeats when the offset's magnitude exceeds 26, so encodeOffset(\"ABC\", -27) returns \"ZAB\". It is case-preserving, so encodeOffset(\"Abc\", -27) returns \"Zab\". And it does not affect non-alphabetic characters (non-letters), so encodeOffset(\"A2b#c\", -27) returns \"Z2a#b\".\r\n</li>\r\n\r\n<br><li><b>decodeOffset(s, d)</b><br>\r\nWrite the function decodeOffset(s, d) that takes a string that was encoded by encodeOffset using the given offset d, and returns the original string. \r\n</li>\r\n\r\n<br><li><b>encrypt and decrypt</b><br>\r\nWrite the encrypt and decrypt functions described\r\nin part 6 (Simple Encryption)\r\n<a target=\"_blank\" href=\"http://www.kosbie.net/cmu/spring-14/15-112/handouts/hw3.html\">here</a>.</li>\r\n\r\n<br><li><b>Mastermind</b><br>\r\nWrite the Mastermind functions described\r\n<a target=\"_blank\" href=\"http://www.kosbie.net/cmu/spring-11/15-110/notes/recitation3.html\">here</a>. (Note: this exercise is not in the starter file.) 
</li>\r\n\r\n</ol>\r\n\r\n<hr>\r\n</div>\r\n</body>\r\n</html>\r\n" }, { "alpha_fraction": 0.553508996963501, "alphanum_fraction": 0.5966425538063049, "avg_line_length": 31.248119354248047, "blob_id": "46006491a11b66cc4904a87985444161ae18e215", "content_id": "577adf2a0ce50b5907de3fc5efe64435efd2c2dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4289, "license_type": "no_license", "max_line_length": 78, "num_lines": 133, "path": "/notes/colab2.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "#################################################\n# Colab2\n#################################################\n\nimport cs112_s18_week2_linter\nimport math\nimport string\n\n#################################################\n# Helper functions\n#################################################\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n#################################################\n# Colab2 problems\n#################################################\n\ndef rotateNumber(x):\n return 42\n\ndef isCircularPrime(x):\n return 42\n\ndef nthCircularPrime(n):\n return 42\n\ndef countLowercaseUpToPercent(s):\n return 42\n\ndef longestCommonSubstring(s1, s2):\n return 42\n\ndef gradebookSummary(gradebookFilename):\n return 42\n\n#################################################\n# Colab2 Test Functions\n################################################\n\ndef testRotateNumber():\n print('Testing rotateNumber()... ', end='')\n assert(rotateNumber(1234) == 4123)\n assert(rotateNumber(4123) == 3412)\n assert(rotateNumber(3412) == 2341)\n assert(rotateNumber(2341) == 1234)\n assert(rotateNumber(5) == 5)\n assert(rotateNumber(111) == 111)\n print('Passed!')\n\ndef testIsCircularPrime():\n print('Testing isCircularPrime()... ', end='')\n assert(isCircularPrime(2) == True)\n assert(isCircularPrime(11) == True)\n assert(isCircularPrime(13) == True)\n assert(isCircularPrime(79) == True)\n assert(isCircularPrime(197) == True)\n assert(isCircularPrime(1193) == True)\n print('Passed!')\n\ndef testNthCircularPrime():\n print('Testing nthCircularPrime()... 
', end='')\n assert(nthCircularPrime(0) == 2)\n assert(nthCircularPrime(4) == 11)\n assert(nthCircularPrime(5) == 13)\n assert(nthCircularPrime(11) == 79)\n assert(nthCircularPrime(15) == 197)\n assert(nthCircularPrime(25) == 1193)\n print('Passed!')\n\ndef testCountLowercaseUpToPercent():\n print('Testing countLowercaseUpToPercent()...', end='')\n assert(countLowercaseUpToPercent(\"abCDe\") == 3)\n assert(countLowercaseUpToPercent(\"WxyZ\") == 2)\n assert(countLowercaseUpToPercent(\"Journey Before %Destination\") == 11)\n assert(countLowercaseUpToPercent(\"%Testing, testing, 123%\") == 0)\n assert(countLowercaseUpToPercent(\"Here`s some {weird} chars\") == 18)\n assert(countLowercaseUpToPercent(\"\") == 0)\n print('Passed!')\n\ndef testLongestCommonSubstring():\n print(\"Testing longestCommonSubstring()...\", end=\"\")\n assert(longestCommonSubstring(\"abcdef\", \"abqrcdest\") == \"cde\")\n assert(longestCommonSubstring(\"abcdef\", \"ghi\") == \"\")\n assert(longestCommonSubstring(\"\", \"abqrcdest\") == \"\")\n assert(longestCommonSubstring(\"abcdef\", \"\") == \"\")\n assert(longestCommonSubstring(\"abcABC\", \"zzabZZAB\") == \"AB\")\n print(\"Passed!\")\n\ndef testGradebookSummary():\n print(\"Testing gradebookSummary()...\", end=\"\")\n assert(gradebookSummary(\"sampleFiles/gradebook1.txt\") == \n \"wilma\\t92.67\\nfred\\t90.40\\nbetty\\t88.00\")\n assert(gradebookSummary(\"sampleFiles/gradebook2.txt\") == \n \"wilma\\t92.67\\nfred\\t90.40\\nbetty\\t88.00\")\n assert(gradebookSummary(\"sampleFiles/small1.txt\") == \n \"fred\\t0.00\")\n assert(gradebookSummary(\"sampleFiles/small2.txt\") == \n \"fred\\t-1.00\\nwilma\\t-2.00\")\n assert(gradebookSummary(\"sampleFiles/small3.txt\") == \n \"fred\\t100.50\")\n assert(gradebookSummary(\"sampleFiles/small4.txt\") == \n \"fred\\t49.00\\nwilma\\t50.00\")\n print(\"Passed!\")\n\n#################################################\n# Colab2 Main\n################################################\n\ndef testAll():\n testRotateNumber()\n testIsCircularPrime()\n testNthCircularPrime()\n testCountLowercaseUpToPercent()\n testLongestCommonSubstring()\n testGradebookSummary()\n\ndef main():\n cs112_s18_week2_linter.lint() # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6671727299690247, "alphanum_fraction": 0.6774628758430481, "avg_line_length": 42.57143020629883, "blob_id": "4ba1e72e36a5993c81f6a11243159e4a15d33da4", "content_id": "6cc1a857f0f167c77111d68ce09ffb8c0d06576c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5928, "license_type": "no_license", "max_line_length": 479, "num_lines": 133, "path": "/notes/week5-practice.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html>\r\n<head>\r\n <title>15-112: Fundamentals of Programming</title>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/reset.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/bootstrap.min.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112-highlight-style.css\">\r\n <script src=\"../js/jquery-2.1.4.min.js\"></script>\r\n <script src=\"../js/highlight.pack.js\"></script>\r\n <script src=\"../js/bootstrap.min.js\"></script>\r\n <script id=\"112-script\" src=\"../js/112.js\"></script>\r\n <base target=\"_self\">\r\n</head>\r\n<body>\r\n\r\n<div class=\"navbar\">\r\n15-112 <br> Spring 
18\r\n<br><br><a target=\"_self\" href=\"index.html\">Home</a>\r\n<br><br><a target=\"_self\" href=\"syllabus.html\">Syllabus</a>\r\n<br><br><a target=\"_self\" href=\"schedule.html\">Schedule</a>\r\n<br><br><a target=\"_self\" href=\"gallery.html\">Gallery</a>\r\n<br><br><a target=\"_self\" href=\"staff.html\">Staff</a>\r\n<br><br><a target=\"_self\" href=\"piazza.html\">Piazza</a>\r\n<br><br><a target=\"_self\" href=\"autolab.html\">Autolab</a>\r\n<br><br><a target=\"_blank\" href=\"oh-queue.html\">OH Queue</a>\r\n</div>\r\n\r\n<div class=\"content\">\r\n<h1>\r\nCMU 15-112: Fundamentals of Programming and Computer Science<br>\r\nWeek 5-6 Practice (Due never)\r\n</h1>\r\n\r\n\r\n<hr>\r\n\r\n<ul>\r\n<li>These problems will help you prepare for hw5/hw6 and for quiz5/quiz6.</li>\r\n\r\n<li>No starter files this week.</li>\r\n<li>Do not use recursion this week.</li>\r\n<li>Do not hardcode the test cases in your solutions.</li>\r\n</ul>\r\n\r\n<hr>\r\n\r\n<ol>\r\n\r\n<li><b>Some Worked Examples Using Lists:</b><br>\r\n<ol>\r\n<li><a target=\"_blank\" href=\"http://www.cs.cmu.edu/~112/notes/notes-2d-lists-examples.html#wordSearch1\"><b>\r\nWord Search\r\n</b></a></li>\r\n<li><a target=\"_blank\" href=\"notes-2d-lists-examples.html#wordSearch2\"><b>\r\nWord Search Redux\r\n</b></a></li>\r\n<li><a target=\"_blank\" href=\"notes-2d-lists-examples.html#connect4\"><b>\r\nConnect4\r\n</b></a></li>\r\n<li><a target=\"_blank\" href=\"notes-2d-lists-examples.html#othello\"><b>\r\nOthello\r\n</b></a></li>\r\n</ol>\r\n</li>\r\n\r\n<br><li><b>makeMagicSquare(n)</b><br>\r\nWrite the function makeMagicSquare(n) that takes a positive odd integer n and returns an nxn magic square by following De La Loubere's Method as described\r\n<a target=\"_blank\" href=\"http://britton.disted.camosun.bc.ca/magicsq/magic.html\">here</a>.\r\nIf n is not a positive odd integer, return None.\r\n</li>\r\n\r\n<br><li><b>isLatinSquare(a)</b><br>\r\nWrite the function isLatinSquare(a) that takes a 2d list and returns \r\n True if it is a\r\n <a target=\"_blank\" href=\"http://en.wikipedia.org/wiki/Latin_square\">Latin square</a>\r\nand False otherwise.\r\n</li>\r\n\r\n<br><li><b>isKnightsTour(a)</b><br>\r\n Background:&nbsp; A &quot;<a target=\"_blank\" href=\"http://en.wikipedia.org/wiki/Knight's_tour\">knight's \r\n tour</a>&quot; in chess is a sequence of legal knight moves such that the knight \r\n visits every square exactly once. We can represent a (supposed) \r\n knight's tour as an NxN list of the integers from 1 to N<sup>2</sup> listing \r\n the positions in order that the knight occupied on the tour.&nbsp; If it is \r\n a legal knight's tour, then all the numbers from 1 to N<sup>2</sup> will be \r\n included and each move from k to (k+1) will be a legal knight's move. \r\n With this in mind, write the function isKnightsTour(board) that takes such a \r\n 2d list of integers and returns True if it represents a legal knight's tour \r\n and False otherwise.\r\n</li>\r\n\r\n<br><li><b>nQueensChecker(a)</b><br>\r\nBackground: The \"N Queens\" problem asks if we can place N queens on an NxN chessboard such that no two queens are attacking each other. For most values of N, there are many ways to solve this problem. 
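(As an aside, the pairwise attack test itself is compact: two queens attack each other exactly when they share a row, a column, or a diagonal. The helper below is hypothetical -- its name and signature are ours, not part of the required interface:)\r\n<pre>\r\ndef queensAttack(r1, c1, r2, c2):\r\n    # same row, same column, or same diagonal\r\n    return ((r1 == r2) or (c1 == c2) or (abs(r1 - r2) == abs(c1 - c2)))\r\n</pre>\r\n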
Here, you will write the function nQueensChecker(board) that takes a 2d list of booleans where True indicates a queen is present and False indicates a blank cell, and returns True if this NxN board contains N queens all of which do not attack any others, and False otherwise.\r\n</li>\r\n\r\n<br><li><b>makeOthelloMove(board, row, col, player)</b><br>\r\nBackground: read about the game of\r\n <a target=\"_blank\" href=\"http://en.wikipedia.org/wiki/Reversi\">Othello</a> \r\n (also known as Reversi).&nbsp; Maybe even play it briefly (say,\r\n <a target=\"_blank\" href=\"http://www.freegames.ws/games/boardgames/othello/othello.htm\">\r\n here</a>).&nbsp; We can represent an Othello board as an NxN list of values \r\n -- 'w' for white, 'b' for black, and the empty string for empty (of course).&nbsp; \r\n If we number the rows from the top and columns from the left, write the \r\n function makeOthelloMove(board, row, col, player) that takes such a board, a \r\n row, a col, and a player ('w' or 'b'), and, if it is legal for the given \r\n player to place a piece at the given position, the function <i>destructively</i> \r\n modifies the board to reflect this move (flipping pieces as needed), and it \r\n returns <i>the number of pieces flipped</i>.&nbsp; If the move is not legal, \r\n the function does not modify the board and returns 0.\r\n</li>\r\n\r\n<br><li><b>Games, games, games!</b><br>\r\nConsole-based 2d board games (human-human mainly, but maybe a simple human-computer game) such as:\r\n<ol>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Draughts\">Checkers</a> </li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Chess\">Chess </a></li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Isola_(board_game)\">\r\n Isola </a> </li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Fox_games#Fox_and_Hounds\">\r\n Fox and Hounds </a> </li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Backgammon\">Backgammon </a></li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Stratego\">Stratego </a></li>\r\n<li> <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Board_game\">Or many, many others...</a></li>\r\n</ol>\r\n\r\n</li>\r\n\r\n</ol>\r\n<hr>\r\n</div>\r\n</body>\r\n</html>\r\n" }, { "alpha_fraction": 0.7161865830421448, "alphanum_fraction": 0.7353909611701965, "avg_line_length": 56.401573181152344, "blob_id": "799b4d7414ce2de1d756ece7cbffc332a4a7a685", "content_id": "0a60c023d8ba1a572097e71c0b1b72c3c3337952", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 7290, "license_type": "no_license", "max_line_length": 500, "num_lines": 127, "path": "/notes/colab2.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n <title>15-112: Fundamentals of Programming</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/reset.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/bootstrap.min.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112.css\">\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112-highlight-style.css\">\n <script src=\"../js/jquery-2.1.4.min.js\"></script>\n <script src=\"../js/highlight.pack.js\"></script>\n <script src=\"../js/bootstrap.min.js\"></script>\n <script id=\"112-script\" src=\"../js/112.js\"></script>\n <base target=\"_self\">\n</head>\n<body>\n\n<div class=\"navbar\">\n15-112 <br> Spring 
18\n<br><br><a target=\"_self\" href=\"../index.html\">Home</a>\n<br><br><a target=\"_self\" href=\"../syllabus.html\">Syllabus</a>\n<br><br><a target=\"_self\" href=\"../schedule.html\">Schedule</a>\n<br><br><a target=\"_self\" href=\"../gallery.html\">Gallery</a>\n<br><br><a target=\"_self\" href=\"../staff.html\">Staff</a>\n<br><br><a target=\"_self\" href=\"../piazza.html\">Piazza</a>\n<br><br><a target=\"_self\" href=\"../autolab.html\">Autolab</a>\n<br><br><a target=\"_blank\" href=\"../oh-queue.html\">OH Queue</a>\n</div>\n\n<div class=\"content\">\n<h1>\nCMU 15-112 Spring 2018: Fundamentals of Programming and Computer Science<br>\nColab 2 (Due Thursday 25-Jan, at 10pm)\n</h1>\n\n<hr>\n\n<ul>\n<li>This assignment is <span class=\"collaborative\">collaborative</span>.\nThat means you may work with other students enrolled in the course, and you may even help each other write code and debug. However, you must still type all of your own work, and you must fully understand the code that you submit. Even though this is collaborative, you may not directly copy any code from anyone, and you may not electronically share your code with anyone. See the syllabus for more details.</li>\n\n<li>List your collaboration partners (name and andrew id) in a comment on the first line of this file. If you collaborate with another student and do not include their name in a comment, it will be considered cheating. You may work alone if you want to, but we recommend working with others, as it generally leads to better learning.</li>\n\n<li>Be a good collaborator! Help everyone in your group, and accept\ntheir help if you need it. Don't be in a hurry to finish the problems.\nInstead, take your time and be sure that everyone in the group is\nfollowing and understanding. The goal is to learn, not just to finish.\n</li>\n<br>\n<li>To start:\n<ol>\n<li>Go to your folder named 'week2'</li>\n<li>Download\n <a href=\"colab2.py\" download>colab2.py</a>,\n <a href=\"cs112_s18_week2_linter.py\" download>cs112_s18_week2_linter.py</a>,\n and\n <a href=\"sampleFiles.zip\">sampleFiles.zip</a>\n to that folder. Unzip sampleFiles.zip in that folder.</li>\n<li>Edit colab2.py using Pyzo</li>\n<li>When you are ready, submit colab2.py to Autolab. For this colab, you may submit up to 10 times, but only your last submission counts.</li>\n</ol>\n</li>\n<li>Do not use lists or recursion this week.</li>\n<li>Do not hardcode the test cases in your solutions.</li>\n</ul>\n\n<hr>\n\n<ol>\n\n<br><li><b>nthCircularPrime</b> [25 pts]<br>\nA circular prime is a number with the property that any rotation of that number's digits is prime. In this case, rotation refers to cycling the digits of a number; for example, the rotations of 1234 are 1234, 2341, 3412, and 4123. You can read more about this on <a href=\"https://en.wikipedia.org/wiki/Circular_prime\">the Wikipedia page</a>. Single-digit primes are all circular, of course. To find the nth circular prime, you'll need to write isPrime and three other functions:\n\n<ol><br><li><b>rotateNumber</b> [10 pts]<br>\nThis function takes a number, x, and rotates that number's digits by one place. This would turn the number 1234 to 4123. You should work with the number input directly instead of casting it to a string.</li>\n\n<br><li><b>isCircularPrime</b> [10 pts]<br>\nThis function takes a number, x, and determines whether that number is a circular prime. 
To do this, you'll need to check whether every rotation of the number is prime.</li>\n\n<br><li><b>nthCircularPrime</b> [5 pts]<br>\nThis function takes a number, n, and returns the nth circular prime.</li>\n\n</ol></li>\n\n<br><li><b>countLowercaseUpToPercent</b> [25 pts]<br>\nWrite the function countLowercaseUpToPercent(s) that takes a possibly-empty string and returns the number of lowercase letters that occur in the string before the first percent sign (%). If no percent signs occur, the function should return the total number of lowercase characters in the string.</li>\n\n<br><li><b>longestCommonSubstring</b> [25 pts]<br>\nWrite the function, longestCommonSubstring(s1, s2), that takes two possibly-empty strings and returns the longest string that occurs in both strings (and returns the empty string if either string is empty). For example:\n<pre>\n  longestCommonSubstring(\"abcdef\", \"abqrcdest\") returns \"cde\"\n  longestCommonSubstring(\"abcdef\", \"ghi\") returns \"\" (the empty string)\n</pre>\nIf there are two or more longest common substrings, return the\nlexicographically smaller one (ie, just use \"<\" to compare the strings). So, for example:\n<pre>\n  longestCommonSubstring(\"abcABC\", \"zzabZZAB\") returns \"AB\" and not \"ab\"\n</pre><br>\n\n<b>Hint:</b> Start by solving a simpler problem: how would you find and return the longest-matching substring starting from the beginning of each of the strings? Under this restriction:\n<pre>\n  longestCommonSubstring*(\"abcdef\", \"abqrcdest\") returns \"ab\"\n</pre>\nNow imagine you have a helper function that implements that simpler version of longestCommonSubstring. With that helper function, you can solve longestCommonSubstring by generating <i>all possible combinations</i> of the starting places of s1 and s2, and calling the helper function with each. This can help you identify which sequence of matching characters is the longest.\n</li>\n\n<br><li><b>gradebookSummary</b> [25 pts]<br>\nFor this problem, we'll assume that gradebooks are stored in .txt files. Each row of the gradebook file contains a student's name (one word, all lowercase), followed by one or more comma-separated integer grades. A gradebook always contains at least one student, and each row always contains at least one grade. Gradebooks can also contain blank lines and lines starting with the \"#\" character, which should be ignored.<br><br>\n\nWith this in mind, write the function gradebookSummary(gradebookFilename) that takes the filename of a gradebook as an argument and returns a summary of the gradebook as a string. This summary string should show each student followed by a tab followed by their average grade (rounded to the hundredth place). 
The summary string should have the students listed in their original order (separated by newlines, but without a newline at the end), but should get rid of any comments or blank lines.<br><br>\n\nFor example, here is a test case:\n<pre>\n# the following string is the content of the file gradebook1.txt\n\"\"\"# ignore blank lines and lines starting with #'s\nwilma,91,93,94\nfred,80,85,90,97,100\nbetty,88\"\"\"\nassert(gradebookSummary(\"gradebook1.txt\") == \"wilma\\t92.67\\nfred\\t90.40\\nbetty\\t88.00\")\n</pre>\n<b>Hint:</b> you most likely will want to use both s.split(\",\") and s.splitlines() in your solution.\n</li> \n\n</ol>\n</div>\n<hr>\n</body>\n</html>\n" }, { "alpha_fraction": 0.602245569229126, "alphanum_fraction": 0.603201150894165, "avg_line_length": 82.72000122070312, "blob_id": "4cdfec75c12fa1f6da09580e754ad8704f1f6c6b", "content_id": "001a8b2d1bb5d83ff5d70c6ce42d0bc40201c6c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4186, "license_type": "no_license", "max_line_length": 349, "num_lines": 50, "path": "/wordToTA.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "import random\n\nletterDict = {\n    \"a\": ['<td><img src=\"staff-photos/yuzhes.jpg\"/>Alex (yuzhes)</td>', '<td><img src=\"staff-photos/ambikac.jpg\"/>Ambika (ambikac)</td>',\n         '<td><img src=\"staff-photos/agwu.jpg\"/>Amy (agwu)</td>', '<td><img src=\"staff-photos/arestrad.jpg\"/>Andrea (arestrad)</td>',\n         '<td><img src=\"staff-photos/aschreff.jpg\"/>Andrew (aschreff)</td>', '<td><img src=\"staff-photos/asilbaug.jpg\"/>Anne (asilbaug)</td>',\n         '<td><img src=\"staff-photos/ahavanur.jpg\"/>Apoorva (ahavanur)</td>', '<td><img src=\"staff-photos/aschick.jpg\"/>Austin (aschick)</td>',\n         '<td><img src=\"staff-photos/armanh.jpg\"/>Arman (armanh)</td>'],\n    \"b\": ['<td><img src=\"staff-photos/ybb.png\"/>Bulut (ybb)</td>'],\n    \"c\": ['<td><img src=\"staff-photos/mengyinf.jpg\"/>Cathy (mengyinf)</td>', '<td><img src=\"staff-photos/cwurman.jpg\"/>Chaya (cwurman)</td>'],\n    \"d\": ['<td><img src=\"staff-photos/dzq.jpg\"/>Doug (dzq)</td>'],\n    \"e\": ['<td><img src=\"staff-photos/edryer.jpg\"/>Eddie (edryer)</td>', '<td><img src=\"staff-photos/eclinch.jpg\"/>Eric (eclinch)</td>', '<td><img src=\"staff-photos/eyluo.jpg\"/>Eugene (eyluo)</td>'],\n    \"f\": ['<td><img src=\"staff-photos/fmarsh.jpg\"/>Fletcher (fmarsh)</td>'],\n    \"g\": ['<td><img src=\"staff-photos/kghiam.jpg\"/>Kamyar (k<strong>g</strong>hiam)</td>'],\n    \"h\": ['<td><img src=\"staff-photos/hshalaby.png\"/>Habiba (hshalaby)</td>', '<td><img src=\"staff-photos/hnelson1.jpg\"/>Henry (hnelson1)</td>'],\n    \"i\": ['<td><img src=\"staff-photos/aykilinc.jpg\"/>Ike (aykilinc)</td>'],\n    \"j\": ['<td><img src=\"staff-photos/jxgong.jpg\"/>Jason G. (jxgong)</td>', '<td><img src=\"staff-photos/jasonh1.jpg\"/>Jason H. 
(jasonh1)</td>', '<td><img src=\"staff-photos/judyz.jpg\"/>Judy (judyz)</td>', '<td><img src=\"staff-photos/oweijin.jpg\"/>Justyn (oweijin)</td>'],\n \"k\": ['<td><img src=\"staff-photos/kghiam.jpg\"/>Kamyar (kghiam)</td>', '<td><img src=\"staff-photos/kdchin.jpg\"/>Kyle (kdchin)</td>'],\n \"l\": ['<td><img src=\"staff-photos/ldegroot.jpg\"/>Lisanne (ldegroot)</td>', '<td><img src=\"staff-photos/ethrashe.jpg\"/>Lizzy (ethrashe)</td>'],\n \"m\": ['<td><img src=\"staff-photos/mbgardne.jpg\"/>Madeline (mbgardne)</td>', '<td><img src=\"staff-photos/ymkong.jpg\"/>Matt (ymkong)</td>', '<td><img src=\"staff-photos/mnowrooz.jpg\"/>Mina (mnowrooz)</td>'],\n \"n\": ['<td><img src=\"staff-photos/nanakis.jpg\"/>Nanaki (nanakis)</td>', '<td><img src=\"staff-photos/nviggian.jpg\"/>Nick V. (nviggian)</td>', '<td><img src=\"staff-photos/nawilson.jpg\"/>Nick W. (nawilson)</td>', '<td><img src=\"staff-photos/nraju.png\"/>Nitya (nraju)</td>'],\n \"o\": ['<td><img src=\"staff-photos/oweiss.jpg\"/>Olly (oweiss)</td>', '<td><img src=\"staff-photos/ouk.jpg\"/>Omkar (ouk)</td>'],\n \"p\": ['<td><img src=\"staff-photos/plocula.jpg\"/>Pranathi (plocula)</td>', ],\n \"q\": ['<td><img src=\"staff-photos/dzq.jpg\"/>Doug (dz<strong>q</strong>)</td>'],\n \"r\": ['<td><img src=\"staff-photos/raahuja.jpg\"/>Rahul (raahuja)</td>', '<td><img src=\"staff-photos/ramgopav.jpg\"/>Ramgopal (ramgopav)</td>', '<td><img src=\"staff-photos/raunaksg.jpg\"/>Raunak (raunaksg)</td>', '<td><img src=\"staff-photos/rishabhc.jpg\"/>Rishabh (rishabhc)</td>', '<td><img src=\"staff-photos/rkaufman.jpg\"/>Roman (rkaufman)</td>'],\n \"s\": ['<td><img src=\"staff-photos/sbhartiy.jpg\"/>Sanjna (sbhartiy)</td>'],\n \"t\": ['<td><img src=\"staff-photos/cowarang.jpg\"/>Tian (cowarang)</td>'],\n \"u\": ['<td><img src=\"staff-photos/uar.jpg\"/>Udit (uar)</td>'],\n \"v\": ['<td><img src=\"staff-photos/vbaskar.png\"/>Vishal (vbaskar)</td>'],\n \"w\": ['<td><img src=\"staff-photos/oweiss.jpg\"/>Olly (o<strong>w</strong>eiss)</td>'],\n \"x\": ['<td><img src=\"staff-photos/xinhuig.jpg\"/>Xinhui (xinhuig)</td>'],\n \"y\": ['<td><img src=\"staff-photos/yongyiz.jpg\"/>Yongyi (yongyiz)</td>'],\n \"z\": ['<td><img src=\"staff-photos/yongyiz.jpg\"/>Yongyi (yongyi<strong>z</strong>)</td>'],\n}\n\ndef wordToTA(word, anagram=False):\n if anagram:\n wordList = list(word)\n random.shuffle(wordList)\n word = ''.join(wordList)\n\n out = \"<tr>\\n\"\n for c in word:\n out += (\"\\t\" + random.choice(letterDict[c]) + \"\\n\")\n out += \"</tr>\"\n\n return out\n\nword = input(\"Word: \")\nprint(wordToTA(word))\n" }, { "alpha_fraction": 0.5668696761131287, "alphanum_fraction": 0.5948842763900757, "avg_line_length": 28.634328842163086, "blob_id": "5469efc5e542d9b1a7a120a542444952d975969d", "content_id": "cde9bfb9aaccea08afb6dfecddb07f35c0e7f769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4105, "license_type": "no_license", "max_line_length": 80, "num_lines": 134, "path": "/notes/deltaGraphicsDemo2.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# deltaGraphicsDemo2.py\r\n\r\n# Fast version of deltaGraphicsDemo1.py\r\n# This time, using delta graphics.\r\n\r\n# Displays lots (~5000) random rectangles that do\r\n# not move.\r\n\r\n# Then moves an oval over the top of them. 
Also changes\r\n# the oval's size and color to show that we can.\r\n\r\n# Only delta updated the oval, not the rectangles\r\n\r\nfrom tkinter import *\r\nimport random\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ndef drawRectangles(canvas, data):\r\n for (left, top, right, bottom, color) in data.rects:\r\n canvas.create_rectangle(left, top, right, bottom, fill=color)\r\n\r\ndef drawOval(canvas, data):\r\n left = data.ovalX % 400\r\n width = 5 + left // 5\r\n color = hexColor(left * 255// 400, left * 255// 400, 0)\r\n data.oval = canvas.create_oval(left, 250, left+width, 300, fill=color)\r\n \r\n\r\ndef deltaDrawOval(canvas, data):\r\n left = data.ovalX % 400\r\n width = 5 + left // 5 \r\n color = hexColor(left * 255// 400, left * 255// 400, 0)\r\n # change oval's bounds\r\n canvas.coords(data.oval, (left, 250, left+width, 300) )\r\n # change oval's color\r\n canvas.itemconfig(data.oval, fill=color)\r\n\r\ndef redrawAll(canvas, data):\r\n drawRectangles(canvas, data)\r\n drawOval(canvas, data)\r\n\r\ndef deltaDraw(canvas, data):\r\n deltaDrawOval(canvas, data)\r\n\r\ndef timerFired(data):\r\n data.ovalX += 10\r\n\r\ndef hexColor(red, green, blue):\r\n return (\"#%02x%02x%02x\" % (red, green, blue))\r\n\r\ndef randomColor():\r\n red = random.randint(0, 255)\r\n green = random.randint(0, 255)\r\n blue = random.randint(0, 255)\r\n return hexColor(red, green, blue)\r\n\r\ndef init(data):\r\n data.rects = []\r\n rectCount = 5000\r\n for i in range(rectCount):\r\n left = random.randint(0,450)\r\n right = left + random.randint(5,50)\r\n top = random.randint(0,450)\r\n bottom = top + random.randint(5,50)\r\n color = randomColor()\r\n data.rects.append((left, top, right, bottom, color))\r\n data.ovalX = 200\r\n data.timerDelay = 1\r\n\r\ndef mousePressed(event, data): pass\r\ndef keyPressed(event, data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def deltaDrawWrapper(canvas, data):\r\n if (data.readyForDeltaDraw == True):\r\n deltaDraw(canvas, data)\r\n canvas.update()\r\n else:\r\n redrawAllWrapper(canvas, data)\r\n\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n data.readyForDeltaDraw = True\r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n # redrawAllWrapper(canvas, data)\r\n deltaDrawWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n data.readyForDeltaDraw = False\r\n\r\n # create the root and the canvas (Note Change: do this BEFORE calling init!)\r\n root = Tk()\r\n\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, 
data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(500, 500)\r\n" }, { "alpha_fraction": 0.5992917418479919, "alphanum_fraction": 0.621628999710083, "avg_line_length": 33.29807662963867, "blob_id": "a4ff1f0ed0a147e601d53870cc8ec95757836aab", "content_id": "f10a3129d8aef99e1cf2e4da48b76d2b5b42721c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3671, "license_type": "no_license", "max_line_length": 80, "num_lines": 104, "path": "/notes/imagesDemo3b.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# imagesDemo3b.py\r\n# same as imagesDemo1.py but here we read from base64 encoding\r\n# that we created in imagesDemo3a.py\r\n\r\n# view in canvas\r\n# read from base64 encoding\r\n# with transparent pixels\r\n# get size, resize (zoom and subsample)\r\n\r\nimg64 = \"\"\"R0lGODlheAB4APU/ABoCERIkGm0BOlNYECMHel4Dew5JRhRqdjF1bKF7GvlHD/hvEZIEc70EcPAC\r\nf5RdT/JqSRmKPTeWNVSlL3O1KxaMXReRfyqWb1yEaW+xaJefJryuJPeUE47CJ6PMJLXVIcbdHvjA\r\nFdnmHPLqGOTIJ/LwNfaWW7HRTLzYf9zmUBgliGUvgRhAlBhWnhlpqBl+sUBGjGFprr9QoRmVnRqW\r\nt0ekgWysy/y7ltKEuubmhZKXxt6t1KrH2PXmxNfg6QAAACH5BAUAAD8ALAAAAAB4AHgAAAb/wJ9w\r\nSCwaj8ik8udrOn283XJKrVqv2KNPp6tdKmCwZUwgFAqrmM6Xbbvf1qYuBoOpCKpD2FIZkwtlZoJp\r\nPDxscIiJVjwxLCwqkJF5YmJjM2VngYAEFjM0NC86iqOkQj0oFBKOq5IqLBGVfhaBtIGdMy8ujys6\r\nUqW/Vj0lIh8dFBMtq6yQLGF9srW1FZ65LJgMEDfA20g3KSEjIiAexwfKrpKwfLKz0WUqnZ/JKmcM\r\nCgrZPdzcPQsLHCMCFjtWQdkjSZTWdXJXxsA0UPPMNLhHUdu+Uj1IKODAARwxchIktEh27tGeZ2QY\r\nHoiX644Zigoc3LN4EVGOFCJEhFgQAtyI/w/kJkRIRtIROj19ULbDI+0WjYj2YMI0cagmlh4gsuYU\r\nwTHgT2MTJAwtyqrFHlmQ3uGBBGYGrogKFkiFCaOqVSo5TmTdK4JEz3AgwEaIcGAkWUfPlKaNtLat\r\nPGsF5MqduwJSjLtUTnzYvHkviL/EjIWEZbioigAGAgQAwLo16wBrJ7FMRuBBXH9xKa44aMYXZiM9\r\ngHrwwJlz1oDiBMMyZ4BAAAHQG0hv4KB69ekMBGQHgPqA22q1/YnHDcGoJlG/ifQIOLw9ceMkBIIV\r\nmxoAA+v48+vP3+C1i3n+cDDePQjM41I9dt2VA3KBdeBecSAA5sEEE6wmAHX7ZajhdQJwp//CChyN\r\nJ545j2hSAAMypJcDaOM46N5wm+X0wQQDaLfhjTg2IEAACfwj4D8LYDASb2ac2IAMCXLD0V/hFNPB\r\nky9u5sFzOFZpJQMADODjAgn8FxEmJzJwpFUmdORTkx48qaaDHQxQo5VwVimdAFpyUEM1RJ4xHXVJ\r\nkgIBRx0x6CQFa2pAZZyIyhnAA7kMmZYZDIg5XYrb3BBgTx59ZAwFnNaIYaKgbqijAV6WWEakDUgq\r\nnQy+jcIDCxD4yGRoYFEQwH2h5npjf8pco+qeDfTpBg+GJWCmVyAAJdqhujabIZZG3RHptL82gIOr\r\nXroAgwaBhqOpp7pKp+F0ufa3VpjTpkr/bgOtvhGDC//B2wIGswL1Jqg6vgaAhjuydmGoWKJLrXSS\r\nMlBAu1nY0AK8DMOLQXw5eXBvnDp2t7ALBvDLwkgRDGCAdp9eKcAZAqNq8BkrwGFDwyzDe0JOE18J\r\ngAEWgPLCzS4QwO8BDgsVgQGshZyjfQZTezLJBVCahQ80uJDL0/DiPAMFMe8qgAEGRODUzblknKEA\r\nhblA4WB9tPmvzPVMmwnSB7cBNc5cO+00CwLA2WFY6rDEtdf7CcBCLsdI0EcNIpQwwgD7WqnjiSQX\r\nSXLRWdyAC9eUU/6x4llOEHgfnoBis85f8/xCB2H18cFWI2hQNb8EGIyJ4/UkjcWfJlRu/3vdVe54\r\nDIWa+/GJ5Tu/UAPpsFwAwlY5SQC00F8DUAvbYV5bRQ+AavCCzTbfzOyGO0rAO+8SxAOK0y/wrR/Y\r\nL2RAuuATaCUjGJdX6Tzsjkd6orBF9PAnoBtkgMAnnoufqK7Wh5B8bwJbIx/o+taC0RGvAhTwzAcE\r\nB4YDAIB5+tERLdhmMBjQgAdU+BGgzJQBAB4AdzcCQN5mEBIDhqVzT6OB+fKDvsCU7j1ZoQDZLBGA\r\n3M0PEI/z4M1oMAVL+QhQmCLBnVA4LgL6YQYXaGFYEEgNeMmQXy7IQDGEIgGgSEkslcjZBXEkAEGQ\r\nDAYuwN4Hl2AC8SARU+AYAK42pMIVuv8lilKUwO/+QwMV7CwDNpSAizaDNzEc4A4qSFwKgUiA6wHw\r\nkfjrR4BGCMcEMDFDAPAOH57hFhrgsYVVXNgM8SMAF2RlfVBKk1hg0QfGEECR/AIAGh9JyzUioY1u\r\nfGNPBkDGAwBQIZ2kQQRaGJLxJWOU1gHbCGw4ASiJhmwFcSUBLrkfADiylgC0QRJuc0QzmYma/LEm\r\nAGdgiWDOYDCgbBQLCsAvBJTAhoRSU1jAeBBEQiIAGCSlL7H5SCT0IAE9muSSdpnP6wCgZp5IKDmD\r\n+YJzDnOY6uzh1zCwTNJx6kmcWuUhWxGJMW7IANfkJwiNgApyaMBYb9wALPvmFnPSoKX/nWvaAQYz\r\nGAvowhr8mkBFNXfRwBHmHJEwikeftU9+fuIIaSKUUrlFgo4MoKAOaMAALvBSmFr1dzebKU3/wwKJ\r\n9k0DyeFUT0u3jEeYRoD7aUA
AOpnQlzYNBitoVw/UpFROUWgDIQCndfrTAaDUwKowBWCjtDqYZKyU\r\nlBrYqeaSmgqDOPZRN7qaW6vaAkgxQHpDQEGa6GrXwOm1OgLQgHA6kIG/ApYajTILTc2xwPNtYJm7\r\no8BAJlAYZRjGsScUFQDcoos7sE1pQmjPmsR6DHxyT0rDActpQ4kYmuL0a4kFwTEiuNkClaY0ymip\r\ncXf2KBNFSj3C2Wxdj3HY/AwgWcnF/ygVb1HOUmVtMKnh12tB4L0IbtFLhimVI1oAU7RmcC0mYhwD\r\niJAD5LIJo1T7rAMA4AH0JreuF7gAOdk7A5K4gqZedW3qvJeVD2BAXiMBsWEoTE4FL7i79UPREAps\r\n4KQao7z4AYAEiJPeTU33ArKYsIVVMJjPCiABIghJB8SRgZYxbCRjeIEl/IBMUqK4SKgaQgqShdzN\r\nkg7G1WFAAMRSX87ujkJKGUM9s9ZkBwhgA6nQ6Qg8gIuo5UJeLujEE8uZW36h+HHfFcKUqRzeJ8mR\r\ney2k6TDrCr6QsMMChyzDAXj5tQ0MJrEiyB7OosaOCct5DEPNYABcAqZ0+UI8JAi1gf8ZraEtB1rQ\r\nBvxyoJWS6DLwawDDBMedbHfpQyfFD9vFJKcdt4IHPEAfPbjNQDNl4gHM89SCroD3lj0arfmhAnfw\r\nsVg6UgPB2uzQz2YlJ+us60D0ugNZIQEIcvADIw7bKyIgdVoNwGxB07QCZCNmsv1wgDLk82rG2sDv\r\nsIptPkBTKTOYY98MgAHZQigr5MalrM4kAgULgNnIdje8wfDQdyelAlimEwc2UO1xkvPWyl62xTk5\r\nA4cnwEXtMU4KfmCCuAyb2HR8NzFXKfF/JzsMZf4xB2g5BkrUQKx4G/kTM5zBAagpSh84wQ9idUQm\r\n/UTg+lHhWXr+BYrD4t/OmDi8c/7/gJj2vAIXaOaDd7fKiUsgihQqc1SNfnSUJ/0H3DTTmTrwWbXu\r\nMMeTzWoYru6MTVag7mv9OBgkEKF3KsuugSbmFGl0owFcdE3ugbvLB+qVBEA1ANp+YjD36CVzHODz\r\nzlieqFATdg3Ex3DjILu7Zy5FA0DVAMZY09E9AHc3wjEcCYA6fuze93IK9s1DIhIe/Pvqf2xAA0Kx\r\nuruXP/OnbsjxxI2nmoKdS6c7/1nvjUVLfy+vZbjEPnHSctbJRv7lP5SYrn9+9MWqJh9sLMQsK19B\r\nG2AAo9JyuW6xYKjOjG70bmqeN/dE6achTBFg00Is13Ux8DKA6xZT2NRWVWVVFgRV/xuCJa+FHIen\r\nelhHb1A1P96FKj5AFPkVYi3AgJrGNfb3SGw1Ax9DgVaTAOpzAgemgZlnCSaoHwUIJmHSAD+wMe93\r\nXboAeNczhCkYSsS3fwFwTTNQA+AjdLegdg6waQZoJD1oW9dFN3REOZ7DT8AHNLoXLgHgAg5YVQiw\r\nSWbYCVAYDXiWIo1gECLYAl9oHQBAPpUzWVj1H0DjLJhUf7VEOXLTUBawAqO3a3g2YG3oWIahYJm0\r\nEpW2eTdTgibmY3GYTKTSh5ImL0SXHwyQgymWIjrADLa1CgpGfxREcg6oPVDVHxkHAGeTIWoVNY/k\r\nh2/mcM2xQejiiejgCo6VibuHef9Tt32/sz19gxr8kgCIA1UdcjHXZDstoIh3ljYpwgOS4AgwgAAP\r\nkAEb8AA3wm6x4BRQdAC5llZXEwG8CFrGYkmTGFUzUxpEyDXhGHW6GAlAdCLSAwm+lgCY4hXX9zU1\r\n6BYW4D2ixy+YNxitRUMD4BcVEokBQBYNxDVYpo5GUU+SMFJMh0RekTrpKACsZAFfYEASIIzn814R\r\n8Er8UgGhdgIWcIS7BwCs8Ag++JBaZhC5CAkUOUn52CQPqVaD9z3HmELDRB+u9jUGkAGyNgP6p1sG\r\n4JKR8I405INAFQmHgEvnhhxMGWPew5M3GE4/+TN3kHFgADF/hQAgSUMEIJEFeT7/t9WSkAADQyBJ\r\nUxkOq7N7xqY5NPKQDqBlFBIS5lGOZuYQGfATGdBSlhRZmxYJJpaUaXkQj4AeP0B9bykCHmCXAUA6\r\nYWGXHXKVE4AAJaICMFCSYJATHdBSFxCXpFSYmZZBKoBdJTFSQiBQTkcM+9g3xjaWe0Un3yMSLokx\r\n/OIQFdBgHhCYL1UDA8CXoLUjrVhNBQADQ0IWrlAEbUR5/Ud3o0ec1QEAPMU7XOUKLyCI/AgG9hWa\r\nLUUhpxl1UBUwKKOcZHEZ6rFwPjEo6rYfYjJ6GkBcE1BhJOE0OZc1YAeZHYABk+U9tGklrPg4J1IH\r\nq8CYQ7A/ZiJ7bZKOhKkBsccp/zMAL/sFL2eZTKwUAcRxDJ3jCWGxGi6YVgAAPemyG0lyAxu3AVY2\r\nXHbpinRiZZwya9mygLtpdmkSFh5HH7eSKGWENEYjKUeQA8nlHmwyHKQ5LgDQVzVWQsYUL7r5NVpX\r\nAZsiFiqITgGKSRyELqmCMEwQXsL1IiYWdTTmHhRQRfGyMC3AnQP3bogXATE1A1q3o1ZSRrDzozyI\r\nBJr1Inoam9wzAA3GGcNRpjCEX6K0M3zXTIXEc6ykkvyxKPVTMsBlBMUBqGPaAS1qZge5F4D6VwBE\r\nqA2Uc4QFb+sDC7WkEFl5PoSiAStQiNPCpZnFZ5PKGYO5KwOQE57hAaaFWvs1Ev9XFDrwRjaas0ri\r\nA0DwE6ZSlXKqWjTpsgRY0RnG8QHhdjiKsgG2CgIIJD4vQBZppKbnowfuVkgPQUsG4KDqKFrh9QEa\r\nkDaXNQV75hnh5hMbcJyuOADUKgIU8ES/YxBiCKoW8K1lZwFD1FCMilixuhka8AAroGJUoBXH8xm3\r\nlzqX2h9GZ4p95JKO4AncSkPesXpg1BbuGKJSBavPCgIk8ACYxaxbAUdOlzphCloBgAAk1wJBdbEs\r\nuDNwuno7NKEtmDsBsAHu+rNZcQU50FS6dCYjkHuYQzMtxVHxAKrkRHMPZUjj6YoAoALgBrSegQU5\r\nULQjYLQfMLVC2QkyO41Na7P//Wp+FRCQuRMJW3E84vC245YFZRICcme0ARGvIRpVO1JvrcA5NHCh\r\noOUdLAS1P3Ol8LgYYIU8W7FybUBJNxkQdJsAl0pKrIFIzVC2oeMJzUaOrIgoVxMtZQBp3hIO5NYG\r\nN0C3+XgmO8ERlpS3tek8DoG5feMdNKCjF+K6oJWawlcADyACyDECpesGfvGwO7ElHCC5uLtXrWEA\r\nB5Cf9QG2ZEQqBnIgaOAVjAsHTaWyIjJC8oooOjKJOiIuodIfPBN8hHgiG1AC+oAIPQBHHHAPInJE\r\nnauH9EtDYSg3O+ZdDbC+iXC6PcFN4zFCHGC49YsoWMIyBmKAkWoT7wsT8csR/zBTlQVMMVcDfwxT\r\nT53GAPjjBi1HEZPhRjyxTONAwBNMteaApgr4JZiQMtvQwQ7sRt4yEDUSny
UsJzviCkB4MXnCwtzg\r\nwrkhHj6RLPOxGuQ6wf2xkIqZgPh1IDXhAz4MvwARw08yTx8zvzVMSldzANlaIj6oxDJLADGwwaTw\r\nxMciDrM1AeN6Bh0CveHSIXyoTmmxX7taGmyJGT4AAR58LE0yH0VDMmucvAM0MyEBQyPhErpIEuwo\r\nxr/QAzcAxaBBKxPwAALGNm4CHZJCMTpCJ7D2PZ2TnYHghhtjA4q8DW7JIrO1qvZjP2ewAaLxvKx4\r\nu8CyHa9hAHT5ZRRSM3CMCf+6uAotMMoXMbeZMhAJqzZq8wAkMCFg1AkIAFADYIzOjDUIILif8HPR\r\nRyFU1TRfcgZ24AhhnB5JQD0+ERoa8KPKKm4gkTcZICIbESSB5QnD839f9jvZbDCPoE3erAQ9sCDL\r\nVAwPQDCpYjThgMzqMAMBEr8YEFifMAMIBnQTQFWeDGUx4Kr3nD9mrAH+/KMeARJiQNDx6w8YcAvA\r\nOFz0ic15crITza4fIAMXTTCRktGFxJEd7dEI/QnixdANvcUuocEnfRUqvScFAwEuncwIgBsG3c6f\r\nIH0LPQFFNg8wgKA7fQU+0NPY0QAPABCpR0EW8AC4kRu38dEznQErGn0W0AKDMMCaT+0GO4ADUt0A\r\nIQyZL20bXD0ZCnDQMz0DyuJlE6ADZn3WiBDV0tHW59wHWj0X90DXm0fTwmUMJ8C/fF0KPsDIECPQ\r\nfYDHhD3XVwVALnICKJADjN3Y+3AKqSAGlV3YIG2HPBC8nu3NTcADNmACJgABsH0PlF0DNlDbNlAI\r\nTcDXQQAAOw==\"\"\"\r\n\r\nfrom tkinter import *\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ndef redrawAll(canvas, data):\r\n # Draw a background rectangle to highlight the transparency\r\n # of the images\r\n canvas.create_rectangle(0, 10, data.width, 190, fill=\"cyan\")\r\n # Draw the demo info\r\n font = (\"Arial\", 16, \"bold\")\r\n msg = \"Image Demo #3 (read from base64)\"\r\n canvas.create_text(data.width/2, 25, text=msg, font=font)\r\n # Draw the original size image on the left\r\n imageSize = ( (data.image.width(), data.image.height()) )\r\n msg = \"Full-size \" + str(imageSize)\r\n canvas.create_text(data.width/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width/5, 100, anchor=N, image=data.image)\r\n # Draw a half-size image in the middle\r\n imageSize = ( (data.halfImage.width(), data.halfImage.height()) )\r\n msg = \"Half-size \" + str(imageSize)\r\n canvas.create_text(data.width/2, 50, text=msg, font=font)\r\n canvas.create_image(data.width/2, 100, anchor=N, image=data.halfImage)\r\n # Draw a double-size image on the right\r\n imageSize = ( (data.doubleImage.width(), data.doubleImage.height()) )\r\n msg = \"Double-size \" + str(imageSize)\r\n canvas.create_text(data.width*4/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width*4/5, 100, anchor=N, image=data.doubleImage)\r\n\r\nimport urllib.request\r\nimport base64\r\n\r\ndef init(data):\r\n data.image = PhotoImage(data=img64)\r\n data.halfImage = data.image.subsample(2,2)\r\n data.doubleImage = data.image.zoom(2,2)\r\n\r\ndef mousePressed(event, data): pass\r\ndef keyPressed(event, data): pass\r\ndef timerFired(data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n\r\n # create the root and the canvas (Note 
Change: do this BEFORE calling init!)\r\n root = Tk()\r\n\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(1000, 600)\r\n" }, { "alpha_fraction": 0.5796618461608887, "alphanum_fraction": 0.5984758138656616, "avg_line_length": 33.88888931274414, "blob_id": "55c7647d54734dcb44a1828275a8e6d0232c2ff8", "content_id": "7251b17a73ea6eaccc90f7dfd8688197242b2d32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4199, "license_type": "no_license", "max_line_length": 80, "num_lines": 117, "path": "/notes/imagesDemo4.py", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "# imagesDemo4.py\r\n\r\n# based on imagesDemo1.py,\r\n# but here we read/write pixels\r\n\r\nfrom tkinter import *\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ndef redrawAll(canvas, data):\r\n # Draw a background rectangle to highlight the transparency\r\n # of the images\r\n canvas.create_rectangle(0, 10, data.width, 190, fill=\"cyan\")\r\n # Draw the demo info\r\n font = (\"Arial\", 16, \"bold\")\r\n msg = \"Image Demo #4 (read/write pixels)\"\r\n canvas.create_text(data.width/2, 25, text=msg, font=font)\r\n # Draw the original size image on the left\r\n imageSize = ( (data.image.width(), data.image.height()) )\r\n msg = \"Full-size in Gray \" + str(imageSize)\r\n canvas.create_text(data.width/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width/5, 100, anchor=N, image=data.image)\r\n # Draw a half-size image in the middle\r\n imageSize = ( (data.halfImage.width(), data.halfImage.height()) )\r\n msg = \"Half-size \" + str(imageSize)\r\n canvas.create_text(data.width/2, 50, text=msg, font=font)\r\n canvas.create_image(data.width/2, 100, anchor=N, image=data.halfImage)\r\n # Draw a double-size image on the right\r\n imageSize = ( (data.doubleImage.width(), data.doubleImage.height()) )\r\n msg = \"Double-size in Red \" + str(imageSize)\r\n canvas.create_text(data.width*4/5, 50, text=msg, font=font)\r\n canvas.create_image(data.width*4/5, 100, anchor=N, image=data.doubleImage)\r\n\r\ndef getRGB(image, x, y):\r\n return tuple(map(int, image.get(x, y).split(\" \")))\r\n\r\ndef setRGB(image, x, y, red, green, blue):\r\n image.put(hexColor(red, green, blue), to=(x,y))\r\n \r\ndef hexColor(red, green, blue):\r\n return (\"#%02x%02x%02x\" % (red, green, blue))\r\n\r\ndef grayScale(image):\r\n for x in range(image.width()):\r\n for y in range(image.height()):\r\n (red, green, blue) = getRGB(image, x, y)\r\n gray = (red*30 + green*59 + blue*11)//100\r\n setRGB(image, x, y, gray, gray, gray)\r\n\r\ndef redScale(image, onlyHalf=False):\r\n for x in range(image.width()):\r\n for y in range(image.height()):\r\n (red, green, blue) = getRGB(image, x, y)\r\n setRGB(image, x, y, red, 0, 0)\r\n\r\ndef init(data):\r\n data.image = PhotoImage(file=\"sampleImage1.gif\")\r\n data.halfImage = data.image.subsample(2,2)\r\n data.doubleImage = data.image.zoom(2,2)\r\n grayScale(data.image)\r\n redScale(data.doubleImage)\r\n\r\ndef mousePressed(event, data): pass\r\ndef keyPressed(event, data): pass\r\ndef 
timerFired(data): pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n\r\n # create the root and the canvas (Note Change: do this BEFORE calling init!)\r\n root = Tk()\r\n\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(1000, 500)\r\n" }, { "alpha_fraction": 0.625475287437439, "alphanum_fraction": 0.6470215320587158, "avg_line_length": 36.4878044128418, "blob_id": "5d7c7ed75310e85f83aaa08244b2961edc50ca89", "content_id": "200b97d4d7aebc495cca6c2e665f20ca6d39d3b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1578, "license_type": "no_license", "max_line_length": 79, "num_lines": 41, "path": "/notes/notes-design-testing-and-debugging.html", "repo_name": "austin-schick/puzzlehunt-website", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html>\r\n<head>\r\n <title>15-112: Fundamentals of Programming</title>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/reset.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/bootstrap.min.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112.css\">\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../css/112-highlight-style.css\">\r\n <script src=\"../js/jquery-2.1.4.min.js\"></script>\r\n <script src=\"../js/highlight.pack.js\"></script>\r\n <script src=\"../js/bootstrap.min.js\"></script>\r\n <script id=\"112-script\" src=\"../js/112.js\"></script>\r\n <base target=\"_self\">\r\n</head>\r\n<body>\r\n\r\n<div class=\"navbar\">\r\n15-112 <br> Spring 18\r\n<br><br><a target=\"_self\" href=\"../index.html\">Home</a>\r\n<br><br><a target=\"_self\" href=\"../syllabus.html\">Syllabus</a>\r\n<br><br><a target=\"_self\" href=\"../schedule.html\">Schedule</a>\r\n<br><br><a target=\"_self\" href=\"../gallery.html\">Gallery</a>\r\n<br><br><a target=\"_self\" href=\"../staff.html\">Staff</a>\r\n<br><br><a target=\"_self\" href=\"../piazza.html\">Piazza</a>\r\n<br><br><a target=\"_self\" href=\"../autolab.html\">Autolab</a>\r\n<br><br><a target=\"_blank\" href=\"../oh-queue.html\">OH Queue</a>\r\n</div>\r\n\r\n<div class=\"content\">\r\n<h1>\r\nCMU 15-112: Fundamentals of Programming and Computer Science<br>\r\nClass Notes: Top-Down Design + Testing + 
Debugging\r\n</h1>\r\n<hr>\r\nThis week's lecture includes a discussion about\r\ntop-down design, as well as testing and debugging.\r\nThere are no corresponding notes, so if you miss the\r\nlecture or have questions about it, please contact your CA.\r\n</div>\r\n</body>\r\n</html>\r\n" } ]
18
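colab2.html above asks for rotateNumber to cycle a number's digits without casting to a string. Below is a minimal arithmetic sketch of that rotation; it is one possible approach, not the course's reference solution, and the function name simply mirrors the handout.

def rotateNumber(x):
    # Move the last digit of x to the front: 1234 -> 4123.
    # Single-digit (and zero) inputs rotate to themselves.
    if x < 10:
        return x
    digits = 0
    temp = x
    while temp > 0:
        digits += 1
        temp //= 10
    return (x % 10) * 10**(digits - 1) + x // 10

assert rotateNumber(1234) == 4123
assert rotateNumber(7) == 7

Applying it once per digit cycles through every rotation that isCircularPrime has to test.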
lexjox777/Python-Instance-method
https://github.com/lexjox777/Python-Instance-method
0e11a60000f55cb916cf08ae9e377dfc4876a26e
d51f0ab105257f03752e3a4a4b0ddf1ebe1d7ae9
02ab92eef6a11c8eaa30d0e1272d341e354c67d4
refs/heads/master
2023-03-27T14:57:33.629101
2021-03-28T18:18:48
2021-03-28T18:18:48
352,404,930
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6007194519042969, "alphanum_fraction": 0.6474820375442505, "avg_line_length": 18.928571701049805, "blob_id": "5711d7071fd2f09bf3db6caa1bb1fdb3b45282d9", "content_id": "aa3f6eab54063ae91ab5d8ccf517e0604c7d1473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/main.py", "repo_name": "lexjox777/Python-Instance-method", "src_encoding": "UTF-8", "text": "class Student:\n def __init__(self,scores = []):\n self.scores = scores\n\n def avg(self):\n return round(sum(self.scores)/ len(self.scores))\n\n# objects / instance\n\nkings = Student(scores = [2,3,5,3,5,10])\ntayo= Student(scores= [4,5,8,8,12])\n\nprint(kings.avg())\nprint(tayo.avg())" } ]
1
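main.py above declares __init__(self, scores = []). Python builds that default list once, at definition time, so every Student created without an explicit scores argument would share the same list. The two instances in the file pass scores explicitly, so the pitfall stays latent there, but the usual None-sentinel rewrite avoids it entirely. A sketch, keeping the same class shape as the file above:

class Student:
    def __init__(self, scores=None):
        # None as the sentinel: each instance gets its own fresh list.
        self.scores = scores if scores is not None else []

    def avg(self):
        return round(sum(self.scores) / len(self.scores))

kings = Student(scores=[2, 3, 5, 3, 5, 10])
print(kings.avg())  # 5  (28 / 6 rounds to 5)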
samuelmahr/reminders-appsync-api
https://github.com/samuelmahr/reminders-appsync-api
48aedfa50842e51d93d1a647a27e2073b2a3e669
021722deb9f4a0e4ae9d82230699209d45e7eba9
39e3e07fea4db1669209dcb7a5ba605626cf8fca
refs/heads/master
2020-06-19T19:32:27.737243
2019-07-15T00:03:59
2019-07-15T00:03:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6033155918121338, "alphanum_fraction": 0.663706362247467, "avg_line_length": 29.160715103149414, "blob_id": "545bfd311902430b948ffabbd2e23e46d494ac15", "content_id": "2892cb2020fbcd992e4b8c958474a56c237c7b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1689, "license_type": "no_license", "max_line_length": 110, "num_lines": 56, "path": "/tests/test_reminders_firehose_transformer.py", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "import base64\nimport json\nimport os\n\nfrom lambda_functions import reminders_firehose_transformer as function_code\nfrom tests.mocks import context_mock as context\n\nDATA_AS_JSON = {\n 'title': 'Test Get Missing Keys',\n 'createdTimestamp': 1563146089\n}\n\nEXPECTED_RECORD_AS_JSON = {\n 'title': 'Test Get Missing Keys',\n 'createdTimestamp': 1563146089,\n 'notes': '',\n 'dueTimestamp': '',\n 'location': '',\n 'remindTimestamp': '',\n 'priority': ''\n}\n\nFIREHOSE_EVENT = {\n 'invocationId': 'invocationIdExample',\n 'deliveryStreamArn': 'arn:aws:kinesis:EXAMPLE',\n 'region': 'us-east-1',\n 'records': [\n {\n 'recordId': '49546986683135544286507457936321625675700192471156785154',\n 'approximateArrivalTimestamp': 1563146089,\n 'data': 'changeme'\n }\n ]\n}\n\n\ndef test_get_missing_keys():\n reminder_keys = os.environ['REMINDER_KEYS'].split(',')\n missing_data = function_code.get_missing_keys(DATA_AS_JSON)\n assert len(missing_data.keys()) == 5\n for key, value in missing_data.items():\n if key in reminder_keys:\n assert not value\n assert 'title' not in missing_data\n assert 'createdTimestamp' not in missing_data\n\n\ndef test_handler():\n event = FIREHOSE_EVENT.copy()\n data = base64.b64encode(json.dumps(DATA_AS_JSON, separators=(',', ':'), sort_keys=True).encode()).decode()\n event['records'][0]['data'] = data\n expected_data_str = json.dumps(EXPECTED_RECORD_AS_JSON, separators=(',', ':'), sort_keys=True) + '\\n'\n expected_data = base64.b64encode(expected_data_str.encode()).decode()\n\n response = function_code.handler(event, context.Context(20))\n assert response['records'][0]['data'] == expected_data\n" }, { "alpha_fraction": 0.6299524307250977, "alphanum_fraction": 0.6378763914108276, "avg_line_length": 27.68181800842285, "blob_id": "7f73b343431086cd855c66fbadda19940a7e3544", "content_id": "d5f3b2a118a46efe471a6391255fb03711c05e07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1262, "license_type": "no_license", "max_line_length": 89, "num_lines": 44, "path": "/lambda_functions/reminders_firehose_transformer.py", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "\"\"\"This lambda adds missing keys to the firehose record for reminder\"\"\"\n\nimport base64\nimport json\nimport logging.config\nimport os\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\nREMINDER_KEYS = os.environ['REMINDER_KEYS'].split(',')\n\n\ndef handler(event, context):\n \"\"\"handler function, manages transaction transformation\"\"\"\n records = list()\n for record in event['records']:\n new_record = {\n 'recordId': record['recordId'],\n 'result': 'Ok'\n }\n\n reminder = json.loads(base64.b64decode(record['data']))\n reminder.update(**get_missing_keys(reminder))\n\n reminder_str = json.dumps(reminder, separators=(',', ':'), sort_keys=True) + '\\n'\n data = base64.b64encode(reminder_str.encode())\n new_record.update({'data': data.decode()})\n 
records.append(new_record)\n\n LOGGER.debug('Time left to execute %s ms', context.get_remaining_time_in_millis())\n\n return {\n 'records': records\n }\n\n\ndef get_missing_keys(reminder):\n \"\"\"Ensures reminder has all necessary data even if it is just an empty string\"\"\"\n missing_keys = dict()\n for key in REMINDER_KEYS:\n if key not in reminder:\n missing_keys.update({key: ''})\n\n return missing_keys\n" }, { "alpha_fraction": 0.3847839832305908, "alphanum_fraction": 0.49499061703681946, "avg_line_length": 26.299144744873047, "blob_id": "b5d52ba04f299d39c7374c0d3392a317d4fc6e3d", "content_id": "5cd404b3bd237a46abd1458b4550f8837993431d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3194, "license_type": "no_license", "max_line_length": 123, "num_lines": 117, "path": "/tests/test_dynamo_to_firehose.py", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "import json\n\nfrom lambda_functions import reminders_dynamo_to_firehose as function_code\n\nINSERT_RECORD = {\n 'eventID': 'c4ca4238a0b923820dcc509a6f75849b',\n 'eventName': 'INSERT',\n 'eventVersion': '1.1',\n 'eventSource': 'aws:dynamodb',\n 'awsRegion': 'us-east-1',\n 'dynamodb': {\n 'Keys': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'NewImage': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'ApproximateCreationDateTime': 1428537600,\n 'SequenceNumber': '4421584500000000017450439091',\n 'SizeBytes': 26,\n 'StreamViewType': 'NEW_AND_OLD_IMAGES'\n },\n 'eventSourceARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/ExampleTableWithStream/stream/2015-06-27T00:48:05.899'\n}\nMODIFY_RECORD = {\n 'eventID': 'c81e728d9d4c2f636f067f89cc14862c',\n 'eventName': 'MODIFY',\n 'eventVersion': '1.1',\n 'eventSource': 'aws:dynamodb',\n 'awsRegion': 'us-east-1',\n 'dynamodb': {\n 'Keys': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'NewImage': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n },\n 'notes': {\n 'S': 'new notes'\n }\n },\n 'OldImage': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'ApproximateCreationDateTime': 1428537600,\n 'SequenceNumber': '4421584500000000017450439092',\n 'SizeBytes': 59,\n 'StreamViewType': 'NEW_AND_OLD_IMAGES'\n },\n 'eventSourceARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/ExampleTableWithStream/stream/2015-06-27T00:48:05.899'\n}\nREMOVE_RECORD = {\n 'eventID': 'eccbc87e4b5ce2fe28308fd9f2a7baf3',\n 'eventName': 'REMOVE',\n 'eventVersion': '1.1',\n 'eventSource': 'aws:dynamodb',\n 'awsRegion': 'us-east-1',\n 'dynamodb': {\n 'Keys': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'OldImage': {\n 'title': {\n 'S': 'Reminder 1'\n },\n 'createdTimestamp': {\n 'N': '1563140810'\n }\n },\n 'ApproximateCreationDateTime': 1428537600,\n 'SequenceNumber': '4421584500000000017450439093',\n 'SizeBytes': 38,\n 'StreamViewType': 'NEW_AND_OLD_IMAGES'\n },\n 'eventSourceARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/ExampleTableWithStream/stream/2015-06-27T00:48:05.899'\n}\n\n\ndef test_build_firehose_record_insert():\n assert True\n\n\ndef test_build_firehose_record_modify():\n assert True\n\n\ndef test_build_firehose_record_remove():\n assert True\n" }, { "alpha_fraction": 0.6751080751419067, "alphanum_fraction": 0.6831377148628235, 
"avg_line_length": 32.72916793823242, "blob_id": "839b6526721467d10e06752ff68cfa54fbd48753", "content_id": "88e1bc13fafb96bf34b4b4f210e7fad3aa7e87af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 104, "num_lines": 48, "path": "/lambda_functions/reminders_dynamo_to_firehose.py", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "\"\"\"Logs reminders to firehose\"\"\"\nimport json\nimport logging.config\nimport os\n\nimport boto3\nfrom boto3.dynamodb.types import TypeDeserializer\n\nDESERIALIZER = TypeDeserializer()\nFIREHOSE_CLIENT = boto3.client('firehose')\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\nREMINDERS_FIREHOSE = os.environ['REMINDERS_FIREHOSE_STREAM_NAME']\n\n\ndef handler(event, context):\n \"\"\"handler function works as controller function\"\"\"\n firehose_records = list()\n for record in event['Records']:\n firehose_records.append(build_firehose_record(record))\n\n put_records_on_firehose(firehose_records)\n LOGGER.debug('Time left to execute %s ms', context.get_remaining_time_in_millis())\n return {'Complete': True}\n\n\ndef build_firehose_record(record):\n \"\"\"builds firehose record based on event type\"\"\"\n if record['eventName'] == 'REMOVE':\n image = {k: DESERIALIZER.deserialize(\n v) for k, v in record['dynamodb']['OldImage'].items()}\n else:\n image = {k: DESERIALIZER.deserialize(\n v) for k, v in record['dynamodb']['NewImage'].items()}\n\n image.update({'event': record['eventName']})\n return json.dumps(image, separators=(',', ':'), sort_keys=True) + '\\n'\n\n\ndef put_records_on_firehose(firehose_records):\n \"\"\"Break list of firehose records into lists of 500 and put on firehose\"\"\"\n firehose_record_chunks = [firehose_records[i:i + 500] for i in range(0, len(firehose_records), 500)]\n\n for chunk in firehose_record_chunks:\n FIREHOSE_CLIENT.put_record_batch(\n DeliveryStreamName=REMINDERS_FIREHOSE,\n Records=chunk\n )\n" }, { "alpha_fraction": 0.8176795840263367, "alphanum_fraction": 0.8176795840263367, "avg_line_length": 29.33333396911621, "blob_id": "11ef122af9aee4b37a18e303448ee464bdf72eab", "content_id": "1aa0603a3a98514285f0c0fadc3092a80654af5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 181, "license_type": "no_license", "max_line_length": 93, "num_lines": 6, "path": "/pytest.ini", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "[pytest]\nenv =\n REMINDER_KEYS=title,createdTimestamp,notes,dueTimestamp,location,remindTimestamp,priority\n REMINDERS_FIREHOSE=reminders-stream-mock\nlog_cli=true\nlog_level=INFO" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 62, "blob_id": "96dcd1c15bca13ad69fd316d6e35a70fce7b5c11", "content_id": "2cbca4fc7840910e24d44a5b33ded59861dc9e10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 101, "num_lines": 2, "path": "/README.md", "repo_name": "samuelmahr/reminders-appsync-api", "src_encoding": "UTF-8", "text": "# reminders-appsync-api\nAppSync API deployed with Serverless framework. The reminder data structure is based on iOS reminders\n" } ]
6
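reminders_firehose_transformer.py above base64-decodes each Firehose record, back-fills any missing reminder fields with empty strings, and re-encodes the JSON with a trailing newline. A self-contained sketch of that round-trip follows; the key list is copied from the REMINDER_KEYS value in pytest.ini above, which here stands in for the real deployment's environment variable.

import base64
import json

# Assumed to match the REMINDER_KEYS env var used by the tests above.
REMINDER_KEYS = ['title', 'createdTimestamp', 'notes', 'dueTimestamp',
                 'location', 'remindTimestamp', 'priority']

def transform(data_b64):
    # Decode, back-fill missing keys, re-encode newline-terminated.
    reminder = json.loads(base64.b64decode(data_b64))
    for key in REMINDER_KEYS:
        reminder.setdefault(key, '')
    out = json.dumps(reminder, separators=(',', ':'), sort_keys=True) + '\n'
    return base64.b64encode(out.encode()).decode()

raw = base64.b64encode(b'{"title":"Reminder 1","createdTimestamp":1563140810}').decode()
print(base64.b64decode(transform(raw)).decode())

The trailing newline matters when the delivery target is S3: it keeps the delivered files at one JSON object per line.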
LeandroSales96/topJogos
https://github.com/LeandroSales96/topJogos
ca39a1426402dc30d8cbb56f3a428b4dd02e48e9
caf8b7cd473fb5e999fbe964a1e3e759e7313e21
d82f015c042e4e43286eafffb05fc1be95116ba1
refs/heads/master
2021-07-01T19:15:55.439699
2017-09-13T16:49:30
2017-09-13T16:49:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.470080703496933, "alphanum_fraction": 0.5093236565589905, "avg_line_length": 24.78358268737793, "blob_id": "a807d1e74d0cb32c304128b025f4193953083619", "content_id": "0e708ac375ff61d8296ba132d1f7225c65bf6765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3593, "license_type": "no_license", "max_line_length": 79, "num_lines": 134, "path": "/rodando3.py", "repo_name": "LeandroSales96/topJogos", "src_encoding": "UTF-8", "text": "import pygame\r\nimport random\r\n\r\n#cores\r\nazul = (0,127,255)\r\npreto = (0,0,0)\r\nvermelho = (255,0,0)\r\ncinza = (100,100,100)\r\nbranco = (255,255,255)\r\n\r\nclass Peixe(pygame.sprite.Sprite):\r\n\r\n def __init__(self,color,width,height):\r\n super().__init__()\r\n self.image = pygame.Surface([width,height])\r\n self.image.fill(color)\r\n self.rect = self.image.get_rect()\r\n\r\n def update(self):\r\n #move_peixe\r\n direcao = random.randrange(-2,2)\r\n if direcao == -1:\r\n self.rect.x -= 10\r\n elif direcao == 0:\r\n self.rect.y -= 10\r\n elif direcao == 1:\r\n self.rect.x += 10\r\n else:\r\n self.rect.y += 10\r\n \r\n \r\n #nao deixar passar dos limites de Y\r\n if self.rect.y > 641 | self.rect.y < 0:\r\n self.rect.y = 0\r\n elif self.rect.x > 641 | self.rect.x < 0:\r\n self.rect.x = 0\r\n elif self.rect.x > 641 & self.rect.y > 641:\r\n self.rect.x,self.rect.y = 0,0\r\n\r\nclass Tubarao(Peixe):\r\n\r\n def update(self):\r\n pos = pygame.mouse.get_pos()\r\n self.rect.x = pos[0]\r\n self.rect.y = pos[1]\r\n \r\n\r\n#def main():\r\n#variaveis\r\npygame.init()\r\ntela_width,tela_height = 640,640\r\ntela = pygame.display.set_mode([640,640])\r\n#new changes\r\nblock_list = pygame.sprite.Group()\r\nall_sprites_list = pygame.sprite.Group()\r\nfor i in range(50):\r\n posicao_X = random.randrange(0,tela_width,10)\r\n posicao_Y = random.randrange(0,tela_height,10)\r\n peixe = Peixe(vermelho,10,10)\r\n peixe.rect.x = posicao_X\r\n peixe.rect.y = posicao_Y\r\n block_list.add(peixe)\r\n all_sprites_list.add(peixe)\r\n \r\n#defincoes do game\r\npygame.display.set_caption(\"Jogo do tubarao\")\r\nrelogio = pygame.time.Clock()\r\n \r\n#objetos\r\ngrid = [[1]*8 for n in range(8)]\r\n#tubarao = pygame.Rect(10,10,10,10)\r\n#peixe = pygame.Rect(50,10,10,10)\r\ntubarao = Tubarao(cinza,10,10)\r\nall_sprites_list.add(tubarao)\r\n \r\n \r\n \r\n \r\nsair = False\r\n\r\nwhile sair != True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sair = True\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n tubarao = tubarao.move(10,0) \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n tubarao = tubarao.move(-10,0)\r\n if event.key == pygame.K_RIGHT:\r\n tubarao = tubarao.move(10,0)\r\n \r\n\r\n\r\n #fps \r\n relogio.tick(10)\r\n \r\n #tabuleiro\r\n tela.fill(azul)\r\n x,y = 10,640\r\n for linha in grid:\r\n for coluna in linha:\r\n pygame.draw.line(tela, (0, 0, 0), (x,0), (x, y))\r\n pygame.draw.line(tela, (0, 0, 0), (0,x), (y, x))\r\n x += 10\r\n y += 10\r\n \r\n #tubarao sem sprite\r\n #if tubarao.colliderect(peixe):\r\n # print(\"Houve colisao\")\r\n # tubarao.x = peixe.x\r\n # print(\"Tubarao comeu\")\r\n #pygame.draw.rect(tela,cinza,tubarao)\r\n #pygame.draw.rect(tela,vermelho,peixe)\r\n\r\n #colisao\r\n all_sprites_list.update()\r\n blocks_hit_list = pygame.sprite.spritecollide(tubarao,block_list,False)\r\n for peixe in blocks_hit_list: \r\n all_sprites_list.remove(peixe)\r\n \r\n \r\n \r\n \r\n #tela_on\r\n all_sprites_list.draw(tela)\r\n 
#pygame.display.update()\r\n pygame.display.flip()\r\n \r\n \r\npygame.quit()\r\n\r\n\r\n#main()\r\n\r\n\r\n" } ]
1
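The wrap-around logic in Peixe.update can also be written without the if/elif chain: modular arithmetic keeps a coordinate on the board in a single expression. A pygame-free sketch, assuming the same 640x640 board and 10-pixel steps used above:

import random

WIDTH = HEIGHT = 640

def step(x, y):
    # One random 10-pixel move, matching the four cases in Peixe.update,
    # then wrap at the edges with modulo instead of boundary branches.
    dx, dy = random.choice([(-10, 0), (0, -10), (10, 0), (0, 10)])
    return (x + dx) % WIDTH, (y + dy) % HEIGHT

x, y = 630, 0
for _ in range(100):
    x, y = step(x, y)
    assert 0 <= x < WIDTH and 0 <= y < HEIGHT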
josephmarch/Web-Design-Challenge
https://github.com/josephmarch/Web-Design-Challenge
9770de079ae50cc6d419d744dc0b03ab4ad990f3
1a1d2d23720eaebebade2e97538cd9ea8a0785ff
eaa21689f7614e398f5ae522318a18d1714c0a68
refs/heads/main
2023-08-14T19:27:51.759243
2021-10-05T03:39:41
2021-10-05T03:39:41
411,075,577
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7055555582046509, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 29.16666603088379, "blob_id": "8c3d202cb2c1713623629bcd35e2a49ff6c14351", "content_id": "57a9f75be27f7fb2f825c468e17a3305c4d66add", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/dftoHTML.py", "repo_name": "josephmarch/Web-Design-Challenge", "src_encoding": "UTF-8", "text": "import pandas as pd\ndf = pd.read_csv(\"Resources/cities.csv\", index_col=0)\nhtml = df.to_html()\ntext_file = open(\"Resources/cities.html\", \"w\")\ntext_file.write(html)\ntext_file.close()" }, { "alpha_fraction": 0.8289855122566223, "alphanum_fraction": 0.8289855122566223, "avg_line_length": 27.75, "blob_id": "dafd0f0345ada14ae39a59e0f4630cc0526077ff", "content_id": "277dcd79276c8436aafa672d6371ba6a88ad519d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 345, "license_type": "no_license", "max_line_length": 133, "num_lines": 12, "path": "/README.md", "repo_name": "josephmarch/Web-Design-Challenge", "src_encoding": "UTF-8", "text": "# Web-Design-Challenge\nLatitude Analysis Dashboard with Attitude\n\nDeployed: https://josephmarch.github.io/Web-Design-Challenge/\n\nLanding page: index.html\n\nComparison page: comparison.html\n\nData page: data.html\n\nVisualizations: visualizations/maxt.html, visualizations/humidity.html, visualizations/cloudiness.html, visualizations/windspeed.html\n" } ]
2
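dftoHTML.py above routes df.to_html() through a manually opened file handle. Two lighter variants are sketched below with a small inline frame, so it runs without the Resources CSV; note that recent pandas versions accept a file path directly as to_html's buf argument.

import pandas as pd

df = pd.DataFrame({'City': ['Oslo', 'Lima'], 'Lat': [59.9, -12.0]})

# Variant 1: let pandas open and close the file itself.
df.to_html('cities.html')

# Variant 2: keep the explicit handle, but close it via a context manager.
with open('cities2.html', 'w') as f:
    f.write(df.to_html())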
TechNoteGit/UnitTest
https://github.com/TechNoteGit/UnitTest
eeb570520d9cc68776d6e1c38b4dd441aae0b95c
005e715e648b6cdcad67d2ff1284c8d57be57715
41210eaecad9ceba829518babfafff37dad9c874
refs/heads/main
2023-08-06T14:32:35.732152
2021-09-20T12:14:16
2021-09-20T12:14:16
408,355,632
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6381322741508484, "alphanum_fraction": 0.6381322741508484, "avg_line_length": 21.34782600402832, "blob_id": "faa1a68e4dd3d162e25b441e662886db77ed9a64", "content_id": "6fc746ddee65dbbb231935be7528f6013bb98333", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "permissive", "max_line_length": 62, "num_lines": 23, "path": "/file_operation.py", "repo_name": "TechNoteGit/UnitTest", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport pickle\nimport os\n\n\ndef file_read(target_file):\n with open(target_file, 'rb') as f:\n return pickle.load(f)\n\n\ndef file_write(target_file):\n current_time = datetime.now()\n with open(target_file, 'wb') as f:\n pickle.dump(current_time, f)\n\n\nif __name__ == '__main__':\n current_path = os.path.dirname(os.path.realpath(__file__))\n target_file = current_path + '/test_file'\n\n file_write(target_file)\n now = file_read(target_file)\n print(str(now))\n" }, { "alpha_fraction": 0.650130569934845, "alphanum_fraction": 0.650130569934845, "avg_line_length": 26.35714340209961, "blob_id": "a80b91af0bf5019a8add8922e84ec9de66721c65", "content_id": "e34c45ac73084c52d200820e994aa58e95cf2e11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "permissive", "max_line_length": 55, "num_lines": 28, "path": "/test/test_file_operation.py", "repo_name": "TechNoteGit/UnitTest", "src_encoding": "UTF-8", "text": "import unittest\nimport file_operation\nimport pickle\nfrom datetime import datetime\nfrom unittest.mock import Mock\nfrom unittest.mock import patch, mock_open\n\n\nclass TestFileOperation(unittest.TestCase):\n def test_file_read(self):\n current_time = datetime.now()\n read_data = pickle.dumps(current_time)\n mock = mock_open(read_data=read_data)\n with patch('builtins.open', mock):\n ret = file_operation.file_read('test_file')\n\n self.assertEqual(current_time, ret)\n\n def test_file_write(self):\n mock = mock_open()\n with patch('builtins.open', mock):\n file_operation.file_write('test_file')\n\n mock.assert_called_once_with('test_file', 'wb')\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 9, "blob_id": "57948212e45070cbde9e59faa757d0a5fb3f3c84", "content_id": "13b33fda4a321434409966bfa51ade984469f9fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "permissive", "max_line_length": 10, "num_lines": 2, "path": "/README.md", "repo_name": "TechNoteGit/UnitTest", "src_encoding": "UTF-8", "text": "# UnitTest\nUnitTest\n" } ]
3
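The tests above patch builtins.open with mock_open, so no file is ever touched. A complementary sketch that runs the same pickle round-trip against a real temporary directory follows; the paths are throwaway.

import os
import pickle
import tempfile
from datetime import datetime

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'test_file')
    now = datetime.now()
    with open(target, 'wb') as f:
        pickle.dump(now, f)           # mirrors file_write above
    with open(target, 'rb') as f:
        assert pickle.load(f) == now  # mirrors file_read above

mock_open keeps the suite hermetic and fast; the temporary-directory variant trades a little speed for end-to-end coverage of the actual serialization that the mocks bypass.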
jevinkeffers/Python_Frequency_Patterns
https://github.com/jevinkeffers/Python_Frequency_Patterns
2d540da35ba5e1b0167a66059eeb2c0b4d37794e
c1fa168dfe626ce4acc307c1987f44d25c6d3aa8
b62115f2e094d19f0bd312ed65b7b9c571ff5cd8
refs/heads/master
2022-11-29T07:05:30.660832
2020-08-14T16:53:46
2020-08-14T16:53:46
287,570,782
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3621346950531006, "alphanum_fraction": 0.3672172725200653, "avg_line_length": 16.909090042114258, "blob_id": "744aaeab5dfa3f16bb2331950de82075e7c04cc6", "content_id": "ac23f5a8e8b48936cd42839ea00e7e99a1eae846", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 40, "num_lines": 44, "path": "/3-Anagrams.py", "repo_name": "jevinkeffers/Python_Frequency_Patterns", "src_encoding": "UTF-8", "text": "def some_function(a,b):\n # if len(a)==len(b):\n # pass\n # else:\n # return False\n\n # dict_a = {}\n # dict_b = {}\n\n for char in a:\n if char in b:\n pass\n else:\n return False\n return True\n \n # for i in a:\n # if i in dict_a:\n # dict_a[i] += 1\n # else: \n # dict_a[i] = 1\n\n # for j in b:\n # if j in dict_b:\n # dict_b[j] += 1\n # else: \n # dict_b[j] = 1\n\n # for k in dict_a:\n # if k in dict_b:\n # if dict_a[k] == dict_b[k]:\n # pass\n # else:\n # return False\n # else:\n # return False\n # return True\n\na = \"pie\"\nb = \"eip\"\nc = \"pies\"\nd = \"pwe\"\n\nprint(some_function(a, d))" }, { "alpha_fraction": 0.4155937135219574, "alphanum_fraction": 0.4370529353618622, "avg_line_length": 21.934425354003906, "blob_id": "6f3747b4abc88e81dcbeb8301fd2b4fa0423b665", "content_id": "743eb3c9ac3979f0099624bfc5d9ddfb42c5294d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 74, "num_lines": 61, "path": "/2-Power_Comp.py", "repo_name": "jevinkeffers/Python_Frequency_Patterns", "src_encoding": "UTF-8", "text": "# Ex 1 \n\n\n# #ERIC'S SOLUTION\n# def some_function(a,b):\n \n# for i in a:\n# if (i*i) in b:\n# pass\n# else: \n# return False\n# return True\n\n# print(some_function(a, e))\n\n\n#DAVID'S SOLUTION\n# def power_of(a,b):\n# dict_a = {}\n# dict_b = {}\n# if len(a) == len(b):\n# for i in range(len(a)):\n# base[index] = a[index]\n# for index in range(len(b)):\n# base[index] = b[index]\n# for key in base.keys():\n# for k in compare.keys():\n# if (base[key] * base[key]) == compare[k]:\n# num_of_matches += 1\n# else:\n# return print(\"ERROR: The lengths of lists do not match, cannot\")\n\n#COMPROMISE\ndef some_function(a,b):\n dict_a = {}\n dict_b = {}\n boolean_stop = False\n if len(a) == len(b):\n for i in a:\n dict_a[i] = i\n for k in b:\n dict_b[k] = k\n for x in dict_a:\n for y in dict_b:\n if (dict_a[x]**2) == dict_b[y]:\n boolean_return = True\n break\n else:\n boolean_return = False\n boolean_stop = boolean_return\n return boolean_stop\n \n#outside the function\na = [1,2,3,4]\nb = [1,4,9,16]\nc = [1,4,5,6]\nd = [1,4,4,2]\ne = [1,16,9,4]\nf = [1,2,3,4,5]\n\nprint(some_function(a,e))" } ]
2
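The uncommented some_function in 3-Anagrams.py checks membership only, so it reports "pie" and "pies" (and "aab" and "abb") as anagrams; the commented-out frequency-dictionary version, together with the length check, is the sound approach. A compact sketch of that pattern with collections.Counter, which also covers the squares comparison from 2-Power_Comp.py:

from collections import Counter

def is_anagram(a, b):
    # Anagrams are equal multisets: same letters with the same counts.
    return Counter(a) == Counter(b)

def is_squares_of(a, b):
    # True when b contains exactly the squares of a, in any order.
    return Counter(x * x for x in a) == Counter(b)

assert is_anagram("pie", "eip")
assert not is_anagram("pie", "pies")   # membership alone misses this
assert not is_anagram("aab", "abb")    # ...and this
assert is_squares_of([1, 2, 3, 4], [1, 16, 9, 4])
assert not is_squares_of([1, 2, 3, 4], [1, 4, 4, 2])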
mtasende/disaster-response
https://github.com/mtasende/disaster-response
4ee2249160b1540526b405d247ce4a0dbcb168ff
fed024d77ad9ab4cf409e8f65659ed8d4d95c007
0e41f1244eac22bcb8aa0ad956a6cda405c6adaf
refs/heads/master
2022-12-12T12:26:10.628903
2018-11-23T19:25:41
2018-11-23T19:25:41
157,442,052
0
0
MIT
2018-11-13T20:28:29
2018-11-23T19:25:50
2022-12-08T01:17:59
Jupyter Notebook
[ { "alpha_fraction": 0.8217054009437561, "alphanum_fraction": 0.8217054009437561, "avg_line_length": 41.83333206176758, "blob_id": "9e19baca0ce95aec7892c96f4ad9b79e95223ad7", "content_id": "74c964cb027c19e9ad2e74045861994744c13049", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 258, "license_type": "permissive", "max_line_length": 108, "num_lines": 6, "path": "/install.sh", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\npip install -r requirements.txt\npip install .\npython data/process_data.py data/disaster_messages.csv data/disaster_categories.csv data/DisasterResponse.db\npython models/train_classifier.py data/DisasterResponse.db models/classifier.pkl\n\n" }, { "alpha_fraction": 0.5823529362678528, "alphanum_fraction": 0.5892494916915894, "avg_line_length": 32.76712417602539, "blob_id": "606914d46dbf23a995c4b22cda7bc6dbc33ea7b0", "content_id": "71d51351cb736504d3b19a1da7795aee1e75b80b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4930, "license_type": "permissive", "max_line_length": 79, "num_lines": 146, "path": "/models/model.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom sqlalchemy import create_engine\nimport re\nfrom data.process_data import MESSAGES_TABLE\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk import pos_tag\nfrom nltk.corpus import wordnet\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, \\\n f1_score\n\n\nclass Model(object):\n \"\"\"\n Implements the model interface.\n It also serves as the base class for more complex models.\n \"\"\"\n\n def __init__(self):\n self.model = None\n\n def load_data(self, database_filepath):\n \"\"\"\n Get the data from the database.\n\n Args:\n database_filepath(str): The path of the sqlite database.\n\n Returns:\n X(pandas.DataFrame): The input messages to classify.\n y(pandas.DataFrame): The desired output labels.\n category_names(list(str)): The names of the labels' categories.\n \"\"\"\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table(MESSAGES_TABLE, engine)\n X = df.loc[:, 'message']\n y = df.iloc[:, 4:]\n category_names = y.columns.tolist()\n\n return X, y, category_names\n\n def tokenize(self, text):\n \"\"\" Basic tokenization function. 
\"\"\"\n # Case normalization\n temp_text = text.lower()\n\n # Punctuation removal\n temp_text = re.sub(r'[^a-zA-Z0-9]', ' ', temp_text)\n\n # Tokenize\n tokens = word_tokenize(temp_text)\n\n # Stop Word Removal\n stop_words = stopwords.words(\"english\")\n tokens = [word for word in tokens if word not in stop_words]\n\n # Part-of-Speech Tagging\n tokens = [(token[0], self.get_wordnet_pos(token[1]))\n for token in pos_tag(tokens)]\n\n # Lemmatization\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(*token) for token in tokens]\n\n # Stemming\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(word) for word in tokens]\n\n return tokens\n\n def build_model(self):\n \"\"\" Build a pipeline to preprocess and classify text. \"\"\"\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline\n\n def tune_params(self, X_train, Y_train):\n \"\"\" Grid search for better parameters. Return the best model found. \"\"\"\n return self.model # No hyper-parameter tuning\n\n def evaluate_model(self, model, X_test, Y_test, category_names):\n \"\"\"\n Print some evaluation metrics:\n - Accuracy\n - Precision\n - Recall\n - F1-score\n \"\"\"\n y_pred = model.predict(X_test)\n\n results = list()\n for i in range(y_pred.shape[1]):\n acc = accuracy_score(Y_test.values[:, i], y_pred[:, i])\n prec = precision_score(Y_test.values[:, i], y_pred[:, i],\n average='macro')\n rec = recall_score(Y_test.values[:, i], y_pred[:, i],\n average='macro')\n f1 = f1_score(Y_test.values[:, i], y_pred[:, i], average='macro')\n results.append({'accuracy': acc,\n 'precision': prec,\n 'recall': rec,\n 'f1': f1})\n results_df = pd.DataFrame(results, index=category_names)\n print('-' * 100)\n print(results_df)\n print('-' * 100)\n print(results_df.describe())\n print('-' * 100)\n print('Main metric [f1-score]: {}'.format(results_df['f1'].mean()))\n print('-' * 100)\n\n def save_model(self, model, model_filepath):\n \"\"\" Save the model to a pickle. 
\"\"\"\n joblib.dump(model, model_filepath)\n\n @staticmethod\n def get_wordnet_pos(treebank_tag):\n \"\"\"\n Transforms from Treebank tags to wordnet tags.\n As discussed here:\n https://stackoverflow.com/questions/15586721/\n wordnet-lemmatization-and-pos-tagging-in-python\n \"\"\"\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN # If unknown, return the default value\n" }, { "alpha_fraction": 0.6870503425598145, "alphanum_fraction": 0.6954436302185059, "avg_line_length": 35.260868072509766, "blob_id": "aca91545ddaabcd79b9e3cbcb90f4df82d949e80", "content_id": "bf3e3d29e44eb28cca8c906c94455275ea486147", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "permissive", "max_line_length": 77, "num_lines": 23, "path": "/models/model_002_xgboost.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from models.model import Model\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom xgboost import XGBClassifier\n\n\nclass Model002(Model):\n \"\"\"\n This is the first, and most basic, model.\n It uses a simple NLP pipeline and a RandomForest as a classifier.\n This class uses an XGBoost classifier instead of the default one.\n \"\"\"\n\n def build_model(self):\n \"\"\" Build a pipeline to preprocess and classify text. \"\"\"\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(XGBClassifier(random_state=2018)))\n ])\n self.model = pipeline\n return pipeline\n" }, { "alpha_fraction": 0.47647058963775635, "alphanum_fraction": 0.6941176652908325, "avg_line_length": 15.190476417541504, "blob_id": "2f76fbc998d43a7742371eda1d99d8c4663a69c1", "content_id": "2957a8eb0c46c8c2ff12ba41a740b8f82dfc31cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 680, "license_type": "permissive", "max_line_length": 23, "num_lines": 42, "path": "/requirements.txt", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "boto==2.49.0\nboto3==1.9.50\nbotocore==1.12.50\nbz2file==0.98\ncertifi==2018.10.15\nchardet==3.0.4\nClick==7.0\ndata-utils-mt==0.4\ndecorator==4.3.0\ndocutils==0.14\nFlask==1.0.2\ngensim==3.6.0\ngunicorn==19.9.0\nidna==2.7\nipython-genutils==0.2.0\nitsdangerous==1.1.0\nJinja2==2.10\njmespath==0.9.3\njsonschema==2.6.0\njupyter-core==4.4.0\nMarkupSafe==1.1.0\nnbformat==4.4.0\nnltk==3.4\nnumpy==1.15.4\npandas==0.23.4\nplotly==3.4.1\npython-dateutil==2.7.5\npytz==2018.7\nrequests==2.20.1\nretrying==1.3.3\ns3transfer==0.1.13\nscikit-learn==0.20.0\nscipy==1.1.0\nsingledispatch==3.4.0.3\nsix==1.11.0\nsklearn==0.0\nsmart-open==1.7.1\nSQLAlchemy==1.2.14\ntraitlets==4.3.2\nurllib3==1.24.1\nWerkzeug==0.14.1\nxgboost==0.81\n" }, { "alpha_fraction": 0.651583731174469, "alphanum_fraction": 0.6651583909988403, "avg_line_length": 21.100000381469727, "blob_id": "e2155f7ecca390f9076948ee628680101b04b586", "content_id": "cec8792ae01fde78cf1adbf22336344a953ad953", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "permissive", 
"max_line_length": 45, "num_lines": 10, "path": "/setup.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from setuptools import find_packages, setup\n\nsetup(\n name='disaster-response',\n packages=find_packages(),\n version='0.1.0',\n description='Disaster response pipeline',\n author='Miguel Tasende',\n license='MIT',\n)\n" }, { "alpha_fraction": 0.6278742551803589, "alphanum_fraction": 0.632097601890564, "avg_line_length": 32.82539749145508, "blob_id": "f6d51b0452130b5088eb56e9f5a0700522837cde", "content_id": "6275469c0a73334bc801b64cb1a5c18436e99606", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2131, "license_type": "permissive", "max_line_length": 80, "num_lines": 63, "path": "/data/process_data.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "import sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\nMESSAGES_TABLE = 'messages'\n\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\" Get the messages and categories from CSV files. \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n return messages.merge(categories, on='id', how='left')\n\n\ndef categories_split(df):\n \"\"\" Separate the categories in their own columns. \"\"\"\n ohe_categories = pd.DataFrame(df.categories.str.split(';').apply(\n lambda x: {e.split('-')[0]: int(e.split('-')[1]) for e in x}).tolist())\n return df.join(ohe_categories).drop('categories', axis=1)\n\n\ndef clean_data(df):\n \"\"\" Prepare the data for ML use. \"\"\"\n df = df.drop_duplicates().reset_index(drop=True)\n return categories_split(df)\n\n\ndef save_data(df, database_filename):\n \"\"\" Save the data to a sqlite database. \"\"\"\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql(MESSAGES_TABLE, engine, index=False, if_exists='replace',\n chunksize=1000)\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6116035580635071, "alphanum_fraction": 0.6196615695953369, "avg_line_length": 30.417720794677734, "blob_id": "2531ad76287ad5a92cbbf182cb94ff80ea62e588", "content_id": "0378ab71b9ca9cb8de3094cc8884c696f87a6330", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2482, "license_type": "permissive", "max_line_length": 80, "num_lines": 79, "path": "/models/train_classifier.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "import sys\nfrom sklearn.model_selection import train_test_split\n# from models.model import Model\nfrom models.model_004_xgboost_gridsearch import Model004\nfrom time import time\n\ncurrent_model = Model004() # Change this to use another model\n\n\ndef load_data(database_filepath):\n \"\"\" Wrapper function. \"\"\"\n return current_model.load_data(database_filepath)\n\n\ndef build_model():\n \"\"\" Wrapper function. \"\"\"\n return current_model.build_model()\n\n\ndef tune_params(model, X_train, Y_train):\n \"\"\" Wrapper function. \"\"\"\n return current_model.tune_params(X_train, Y_train)\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\" Wrapper function. \"\"\"\n current_model.evaluate_model(model, X_test, Y_test, category_names)\n\n\ndef save_model(model, model_filepath):\n \"\"\" Wrapper function. \"\"\"\n current_model.save_model(model, model_filepath)\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n print('Building model...')\n model = build_model()\n\n print('Hyperparameter Tuning.')\n print('-'*120)\n print('WARNING: THIS PROCESS TAKES ABOUT 1 HOUR OR EVEN MORE...')\n print('-' * 120)\n tic = time()\n model = tune_params(model, X_train, Y_train)\n toc = time()\n print('Hyperparameter tuning time: {} seconds'.format(toc - tic))\n\n print('Training model...')\n tic = time()\n model.fit(X_train, Y_train)\n toc = time()\n print('Training time: {} seconds'.format(toc - tic))\n\n print('Evaluating model...')\n tic = time()\n evaluate_model(model, X_test, Y_test, category_names)\n toc = time()\n print('Evaluation time: {} seconds'.format(toc - tic))\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.707446813583374, "alphanum_fraction": 0.7118794322013855, "avg_line_length": 50.272727966308594, "blob_id": "8a05d9fec9c5287b9c96b9117c4a691828e6792d", "content_id": "f74be547803011a4de35631a7dd9f1886e3cc3e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2338, "license_type": "permissive", "max_line_length": 468, "num_lines": 44, "path": "/README.md", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "# Disaster Response Pipeline Project\nA Machine Learning system that uses Natural Language Processing to identify relevant messages in social networks, when a dissaster occurs.\n\n## Installation\nTo install the project follow the instructions below:\n```\n$ ./install.sh\n```\nThe script will first preprocess the data and save it into a sqlite database. You can change the database path by modifying the script. Then a model will be trained and saved into pickle format, to be used later, in the web app.\n\n**NOTE: Expect the installation process to take a long time (about one hour may be normal).**\n\n## Project Motivation\nWhen a disaster occurs many messages are sent through different channels (e.g.: News, Social Networks, or directly to responders). It is very important to detect to which responders the messages should arrive (Firefighters, Police, Ambulance, etc.). To that end a quick and accurate classification of those messages is very useful. It is normally not good enough to look for keywords, and that's why a Machine Learning approach could give the solution to this problem.\n\n## File Descriptions\n```\n├── LICENSE\n├── README.md\n├── disaster_app <- Web app main dir\n│   └── templates <- HTML templates for the web app\n├── data <- Data and data wrangling scripts\n├── deploy.sh <- Script to deploy the app to Heroku\n├── disaster_app.py <- Main script for the web app\n├── install.sh <- Installation script\n├── models <- ML models and training scripts\n├── notebooks <- Exploratory notebooks\n├── requirements.txt\n├── run.sh <- Script to run the web app\n└── setup.py\n```\n\n## How to interact with the project\nAfter the installation is complete, run the `run.sh` script. A server will be listening in http://127.0.0.1:5000. 
Use a browser to access to that address.\n\n**NOTE: Please don't execute `$python run.py` directly.\nYou can execute `$python disaster_app.py` if you want.**\n\n## Licensing, Authors, Acknowledgements, etc.\nCode released under the [MIT](https://github.com/mtasende/airbnb-analysis/blob/master/LICENSE) license.\n\nThis project was authored by Miguel Tasende.\n\nIt was created as part of Udacity's Data Scientist Nanodegree.\n" }, { "alpha_fraction": 0.6432119011878967, "alphanum_fraction": 0.6556291580200195, "avg_line_length": 35.60606002807617, "blob_id": "57aff02d2c05cc1a178d61162acab594bd298325", "content_id": "86870b0f633fe2a0dd70bc155a086eebeb81f71d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "permissive", "max_line_length": 79, "num_lines": 33, "path": "/models/model_004_xgboost_gridsearch.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from models.model import Model\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom xgboost import XGBClassifier\n\n\nclass Model004(Model):\n \"\"\"\n This model uses a simple NLP pipeline and an XGBoost classifier.\n It performs Grid Search on some parameters.\n \"\"\"\n\n def build_model(self):\n \"\"\" Build a pipeline to preprocess and classify text. \"\"\"\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(XGBClassifier(random_state=2018)))\n ])\n self.model = pipeline\n return pipeline\n\n def tune_params(self, X_train, Y_train):\n \"\"\" Grid search for better parameters. Return the best model found. 
\"\"\"\n parameters = {\n 'clf__estimator__max_depth': [3, 6],\n 'clf__estimator__subsample': [0.5, 1.0]\n }\n cv = GridSearchCV(self.model, parameters, cv=3, n_jobs=-1)\n cv.fit(X_train, Y_train)\n return cv.best_estimator_\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 13.666666984558105, "blob_id": "28bc7bca1b9cbbe763afcea3cc4bf17cb61a388d", "content_id": "570cb4a16f83a0855e21984b9653ea5ce42c343a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 44, "license_type": "permissive", "max_line_length": 22, "num_lines": 3, "path": "/run.sh", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\npython disaster_app.py\n" }, { "alpha_fraction": 0.7486437559127808, "alphanum_fraction": 0.7486437559127808, "avg_line_length": 22, "blob_id": "0afcde4b6f08fed86d6267c191f9a0a63e67280f", "content_id": "1e1863a47f0d7246d2d9a430ed28ef2b16ed9a6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 553, "license_type": "permissive", "max_line_length": 63, "num_lines": 24, "path": "/deploy.sh", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -rf app_deploy\nmkdir app_deploy\ncp -r disaster_app app_deploy/\ncp README.md app_deploy/\ncp LICENSE app_deploy/\ncp -r models app_deploy/\ncp -r data app_deploy/\ncp requirements.txt app_deploy/\ncd app_deploy\necho \"from disaster_app import app\" > disaster_app.py\necho \"web gunicorn disaster_app:app\" > Procfile\ngit init\ngit add .\ngit commit -m \"First commit\"\n# git commit -m \"Updating the app\"\n# git remote add heroku https://git.heroku.com/diaster-app.git \n\nheroku login\nheroku create disaster-app-mt\ngit remote -v\n\ngit push heroku master\n\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 14.399999618530273, "blob_id": "56dd8bde8393c19ce5c0585b84ac8676aff5ba02", "content_id": "b9be611102ed8e3799eb70d41a805e0d57a4bf58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "permissive", "max_line_length": 28, "num_lines": 5, "path": "/disaster_app/__init__.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from flask import Flask\n\napp = Flask(__name__)\n\nfrom disaster_app import run\n" }, { "alpha_fraction": 0.5959232449531555, "alphanum_fraction": 0.6127098202705383, "avg_line_length": 38.71428680419922, "blob_id": "a80874ab5cebf1bd50eeacf3a69aa59e7535af01", "content_id": "db5ff4bdca9f55db992df252d5dbcd59b26ff57b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "permissive", "max_line_length": 79, "num_lines": 21, "path": "/models/model_001_grid_search.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from models.model import Model\nfrom sklearn.model_selection import GridSearchCV\n\n\nclass Model001(Model):\n \"\"\"\n This is the first, and most basic, model.\n It uses a simple NLP pipeline and a RandomForest as a classifier.\n The only added feature with respect to the Model class is a Grid Search.\n \"\"\"\n def tune_params(self, X_train, Y_train):\n \"\"\" Grid search for better parameters. Return the best model found. 
\"\"\"\n parameters = {\n # 'features__text_pipeline__vect__ngram_range': [(1, 1), (1, 2)],\n 'tfidf__smooth_idf': [True, False],\n 'clf__estimator__max_depth': [None, 7]\n # 'clf__estimator__n_estimators': [10, 100],\n }\n cv = GridSearchCV(self.model, parameters, n_jobs=-1)\n cv.fit(X_train, Y_train)\n return cv.best_estimator_\n" }, { "alpha_fraction": 0.5843729376792908, "alphanum_fraction": 0.5952068567276001, "avg_line_length": 32.10869598388672, "blob_id": "bc6da7af3ccd95ee6dba00a2efb7ebae74f498cb", "content_id": "ab4773c89b460c5a8c27a723f64a88ce6f34cbbe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3046, "license_type": "permissive", "max_line_length": 71, "num_lines": 92, "path": "/models/model_003_word2vec.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from models.model import Model\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.base import BaseEstimator, TransformerMixin\n# from xgboost import XGBClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom gensim.models import KeyedVectors\nimport re\nimport numpy as np\n\nPRE_TRAINED_W2V_PATH = '../data/GoogleNews-vectors-negative300.bin'\n\n\nclass Model003(Model):\n \"\"\"\n This is the first, and most basic, model.\n It uses a simple NLP pipeline and a RandomForest as a classifier.\n This class uses an XGBoost classifier instead of the default one.\n \"\"\"\n\n def tokenize(self, text):\n \"\"\" Basic tokenization function. \"\"\"\n # Case normalization\n temp_text = text.lower()\n\n # Punctuation removal\n temp_text = re.sub(r'[^a-zA-Z0-9]', ' ', temp_text)\n\n # Tokenize\n tokens = word_tokenize(temp_text)\n\n # Stop Word Removal\n stop_words = stopwords.words(\"english\")\n tokens = [word for word in tokens if word not in stop_words]\n\n return tokens\n\n def build_model(self):\n \"\"\" Build a pipeline to preprocess and classify text. \"\"\"\n pipeline = Pipeline([\n ('word2vec', Word2VecTransformer(\n filepath=PRE_TRAINED_W2V_PATH,\n tokenizer=self.tokenize\n )),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n random_state=2018, n_jobs=-1), n_jobs=-1))\n ])\n self.model = pipeline\n return pipeline\n\n\nclass Word2VecTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Transforms words into word2vec vectors. \"\"\"\n\n def __init__(self, filepath, tokenizer):\n super().__init__()\n self.filepath = filepath\n self.tokenizer = tokenizer\n self.model = None\n\n def fit(self, X, y):\n \"\"\" Get Google's pre-trained word2vec model. \"\"\"\n self.model = KeyedVectors.load_word2vec_format(self.filepath,\n binary=True)\n return self\n\n def transform(self, X):\n \"\"\" Transform the sentences to vectors. \"\"\"\n X_tokenized = [self.tokenizer(sentence) for sentence in X]\n X_tr = list()\n null_sentences_count = 0\n for sentence in X_tokenized:\n sentence_tr = list()\n for word in sentence:\n try:\n sentence_tr.append(self.model[word])\n except KeyError:\n pass\n if len(sentence_tr) == 0:\n null_sentences_count += 1\n X_tr.append(np.zeros(300))\n else:\n X_tr.append(np.mean(sentence_tr, axis=0))\n\n print('Found {} non-convertible sentences. 
'.format(\n null_sentences_count) + 'That is {}% of the total.'.format(\n (100 * null_sentences_count/len(X_tokenized))\n ))\n\n return np.stack(X_tr, axis=0)\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 12.333333015441895, "blob_id": "30f2832b3bd4414729cffacba92a293f80287e96", "content_id": "84ea86ae3b2f1f6dfabb45788ca0689b28571e4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "permissive", "max_line_length": 28, "num_lines": 3, "path": "/disaster_app.py", "repo_name": "mtasende/disaster-response", "src_encoding": "UTF-8", "text": "from disaster_app import app\n\napp.run()\n" } ]
15
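Note on the record above: the tokenize method in models/model.py chains case normalization, punctuation removal, stop-word filtering, POS tagging, lemmatization, and stemming. A condensed runnable sketch of the same steps, with the POS-tagging refinement omitted for brevity (assumes the NLTK punkt, stopwords, and wordnet corpora are already downloaded):

import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer

def tokenize(text):
    # Lowercase, then replace everything except letters/digits with spaces.
    text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())
    stop_words = set(stopwords.words('english'))
    tokens = [t for t in word_tokenize(text) if t not in stop_words]
    lemmatizer, stemmer = WordNetLemmatizer(), PorterStemmer()
    # Lemmatize first (dictionary form), then stem (crude suffix stripping).
    return [stemmer.stem(lemmatizer.lemmatize(t)) for t in tokens]

print(tokenize("We urgently need water and medical supplies!"))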
cosmic119/WebCamTest
https://github.com/cosmic119/WebCamTest
47ddaf6227f6648b0b89d09eba7804f2441d50e0
8a3f5f49417f924a243154c3e1a7a3b973ffbdbc
0f8faf6577cb54efd0a5b2f7accd7fec9c194c80
refs/heads/master
2020-03-26T08:10:11.490028
2018-08-14T08:12:17
2018-08-14T08:12:17
144,689,275
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5098073482513428, "alphanum_fraction": 0.5374703407287598, "avg_line_length": 38.47569274902344, "blob_id": "b768907fb43d3a6ffb4319f548dfcd852ead4bd2", "content_id": "85e84629da64454361e2636e7666ffaddea33d52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22738, "license_type": "no_license", "max_line_length": 154, "num_lines": 576, "path": "/facial_expression_simulator.py", "repo_name": "cosmic119/WebCamTest", "src_encoding": "UTF-8", "text": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom collections import defaultdict\nfrom tensorflow.python.ops import init_ops\nimport dlib\n\n\nclass DummyScope(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n\nclass GPUNetworkBuilder(object):\n \"\"\"This class provides convenient methods for constructing feed-forward\n networks with internal data layout of 'NCHW'.\n \"\"\"\n\n def __init__(self,\n is_training,\n dtype=tf.float32,\n activation='RELU',\n use_batch_norm=True,\n batch_norm_config={'decay': 0.9,\n 'epsilon': 1e-4,\n 'scale': True,\n 'zero_debias_moving_mean': False},\n use_xla=False):\n self.dtype = dtype\n self.activation_func = activation\n self.is_training = is_training\n self.use_batch_norm = use_batch_norm\n self.batch_norm_config = batch_norm_config\n self._layer_counts = defaultdict(lambda: 0)\n if use_xla:\n self.jit_scope = tf.contrib.compiler.jit.experimental_jit_scope\n else:\n self.jit_scope = DummyScope\n\n def _count_layer(self, layer_type):\n idx = self._layer_counts[layer_type]\n name = layer_type + str(idx)\n self._layer_counts[layer_type] += 1\n return name\n\n def _get_variable(self, name, shape, dtype=None,\n initializer=None, seed=None):\n if dtype is None:\n dtype = self.dtype\n if initializer is None:\n initializer = init_ops.glorot_uniform_initializer(seed=seed)\n elif (isinstance(initializer, float) or\n isinstance(initializer, int)):\n initializer = tf.constant_initializer(float(initializer))\n return tf.get_variable(name, shape, dtype, initializer)\n\n def _to_nhwc(self, x):\n return tf.transpose(x, [0, 2, 3, 1])\n\n def _from_nhwc(self, x):\n return tf.transpose(x, [0, 3, 1, 2])\n\n def _bias(self, input_layer):\n num_outputs = input_layer.get_shape().as_list()[1]\n biases = self._get_variable('biases', [num_outputs], input_layer.dtype,\n initializer=0)\n if len(input_layer.get_shape()) == 4:\n return tf.nn.bias_add(input_layer, biases,\n data_format='NCHW')\n else:\n return input_layer + biases\n\n def _batch_norm(self, input_layer, scope):\n return tf.contrib.layers.batch_norm(input_layer,\n is_training=self.is_training,\n scope=scope,\n 
data_format='NCHW',\n                                            fused=True,\n                                            **self.batch_norm_config)\n\n    def _bias_or_batch_norm(self, input_layer, scope, use_batch_norm):\n        if use_batch_norm is None:\n            use_batch_norm = self.use_batch_norm\n        if use_batch_norm:\n            return self._batch_norm(input_layer, scope)\n        else:\n            return self._bias(input_layer)\n\n    def input_layer(self, input_layer):\n        \"\"\"Converts input data into the internal format\"\"\"\n        with self.jit_scope():\n            x = self._from_nhwc(input_layer)\n            x = tf.cast(x, self.dtype)\n            # Rescale and shift to [-1,1]\n            x = x * (1. / 127.5) - 1\n        return x\n\n    def conv(self, input_layer, num_filters, filter_size,\n             filter_strides=(1, 1), padding='SAME',\n             activation=None, use_batch_norm=None):\n        \"\"\"Applies a 2D convolution layer that includes bias or batch-norm\n        and an activation function.\n        \"\"\"\n        num_inputs = input_layer.get_shape().as_list()[1]\n        kernel_shape = [filter_size[0], filter_size[1],\n                        num_inputs, num_filters]\n        strides = [1, 1, filter_strides[0], filter_strides[1]]\n        with tf.variable_scope(self._count_layer('conv')) as scope:\n            kernel = self._get_variable('weights', kernel_shape,\n                                        input_layer.dtype)\n            if padding == 'SAME_RESNET':  # ResNet models require custom padding\n                kh, kw = filter_size\n                rate = 1\n                kernel_size_effective = kh + (kw - 1) * (rate - 1)\n                pad_total = kernel_size_effective - 1\n                pad_beg = pad_total // 2\n                pad_end = pad_total - pad_beg\n                padding = [[0, 0], [0, 0],\n                           [pad_beg, pad_end], [pad_beg, pad_end]]\n                input_layer = tf.pad(input_layer, padding)\n                padding = 'VALID'\n            x = tf.nn.conv2d(input_layer, kernel, strides, padding=padding, data_format='NCHW')\n            x = self._bias_or_batch_norm(x, scope, use_batch_norm)\n            x = self.activate(x, activation)\n            return x\n\n    def deconv(self, input_layer, num_filters, filter_size,\n               filter_strides=(2, 2), padding='SAME',\n               activation=None, use_batch_norm=None):\n        \"\"\"Applies a 'transposed convolution' layer that includes bias or\n        batch-norm and an activation function.\n        \"\"\"\n        num_inputs = input_layer.get_shape().as_list()[1]\n        ih, iw = input_layer.get_shape().as_list()[2:]\n        output_shape = [-1, num_filters,\n                        ih * filter_strides[0], iw * filter_strides[1]]\n        kernel_shape = [filter_size[0], filter_size[1],\n                        num_filters, num_inputs]\n        strides = [1, 1, filter_strides[0], filter_strides[1]]\n        with tf.variable_scope(self._count_layer('deconv')) as scope:\n            kernel = self._get_variable('weights', kernel_shape,\n                                        input_layer.dtype)\n            x = tf.nn.conv2d_transpose(input_layer, kernel, output_shape,\n                                       strides, padding=padding,\n                                       data_format='NCHW')\n            x = self._bias_or_batch_norm(x, scope, use_batch_norm)\n            x = self.activate(x, activation)\n        return x\n\n    def activate(self, input_layer, funcname=None):\n        \"\"\"Applies an activation function\"\"\"\n        if isinstance(funcname, tuple):\n            params = funcname[1:]\n            funcname = funcname[0]\n        if funcname is None:\n            funcname = self.activation_func\n        if funcname == 'LINEAR':\n            return input_layer\n        activation_map = {\n            'RELU': tf.nn.relu,\n            'RELU6': tf.nn.relu6,\n            'ELU': tf.nn.elu,\n            'SIGMOID': tf.nn.sigmoid,\n            'TANH': tf.nn.tanh,\n            'LRELU': lambda x, name: tf.maximum(params[0] * x, x, name=name)\n        }\n        return activation_map[funcname](input_layer, name=funcname.lower())\n\n    def pool(self, input_layer, funcname, window_size,\n             window_strides=(2, 2),\n             padding='VALID'):\n        \"\"\"Applies spatial pooling\"\"\"\n        pool_map = {\n            'MAX': tf.nn.max_pool,\n            'AVG': tf.nn.avg_pool\n        }\n        kernel_size = [1, 1, window_size[0], window_size[1]]\n        kernel_strides = [1, 1, window_strides[0], 
window_strides[1]]\n return pool_map[funcname](input_layer, kernel_size, kernel_strides,\n padding, data_format='NCHW',\n name=funcname.lower())\n\n def spatial_avg(self, input_layer):\n \"\"\"Averages over spatial dimensions (4D->2D)\"\"\"\n return tf.reduce_mean(input_layer, [2, 3], name='spatial_avg')\n\n def fully_connected(self, input_layer, num_outputs, activation=None):\n \"\"\"Applies a fully-connected set of weights\"\"\"\n num_inputs = input_layer.get_shape().as_list()[1]\n kernel_size = [num_inputs, num_outputs]\n with tf.variable_scope(self._count_layer('fully_connected')):\n kernel = self._get_variable('weights', kernel_size,\n input_layer.dtype)\n x = tf.matmul(input_layer, kernel)\n x = self._bias(x)\n x = self.activate(x, activation)\n return x\n\n def inception_module(self, input_layer, name, cols):\n \"\"\"Applies an inception module with a given form\"\"\"\n with tf.name_scope(name):\n col_layers = []\n col_layer_sizes = []\n for c, col in enumerate(cols):\n col_layers.append([])\n col_layer_sizes.append([])\n x = input_layer\n for l, layer in enumerate(col):\n ltype, args = layer[0], layer[1:]\n if ltype == 'conv':\n x = self.conv(x, *args)\n elif ltype == 'pool':\n x = self.pool(x, *args)\n elif ltype == 'share':\n # Share matching layer from previous column\n x = col_layers[c - 1][l]\n else:\n raise KeyError(\"Invalid layer type for \" +\n \"inception module: '%s'\" % ltype)\n col_layers[c].append(x)\n catdim = 1\n catvals = [layers[-1] for layers in col_layers]\n x = tf.concat(catvals, catdim)\n return x\n\n\ndef inference_googlenet(net, input_layer):\n \"\"\"GoogLeNet model\n https://arxiv.org/abs/1409.4842\n \"\"\"\n net.use_batch_norm = False\n\n def inception_v1(net, x, k, l, m, n, p, q):\n cols = [[('conv', k, (1, 1))],\n [('conv', l, (1, 1)), ('conv', m, (3, 3))],\n [('conv', n, (1, 1)), ('conv', p, (5, 5))],\n [('pool', 'MAX', (3, 3), (1, 1), 'SAME'), ('conv', q, (1, 1))]]\n return net.inception_module(x, 'incept_v1', cols)\n\n print('input_layer=', input_layer)\n x = net.input_layer(input_layer)\n print('x=', x)\n x = net.conv(x, 64, (7, 7), (2, 2))\n print('x=', x)\n x = net.pool(x, 'MAX', (3, 3), padding='SAME')\n print('x=', x)\n x = net.conv(x, 64, (1, 1))\n print('x=', x)\n x = net.conv(x, 192, (3, 3))\n print('x=', x)\n x = net.pool(x, 'MAX', (3, 3), padding='SAME')\n print('x=', x)\n x = inception_v1(net, x, 64, 96, 128, 16, 32, 32)\n x = inception_v1(net, x, 128, 128, 192, 32, 96, 64)\n x = net.pool(x, 'MAX', (3, 3), padding='SAME')\n x = inception_v1(net, x, 192, 96, 208, 16, 48, 64)\n x = inception_v1(net, x, 160, 112, 224, 24, 64, 64)\n x = inception_v1(net, x, 128, 128, 256, 24, 64, 64)\n x = inception_v1(net, x, 112, 144, 288, 32, 64, 64)\n x = inception_v1(net, x, 256, 160, 320, 32, 128, 128)\n x = net.pool(x, 'MAX', (3, 3), padding='SAME')\n x = inception_v1(net, x, 256, 160, 320, 32, 128, 128)\n x = inception_v1(net, x, 384, 192, 384, 48, 128, 128)\n x = net.spatial_avg(x)\n return x\n\n\ndef eval_func(images, var_scope):\n net = GPUNetworkBuilder(\n False, dtype=tf.float32, use_xla=False)\n # images = net._to_nhwc(images)\n # model_func = inference_googlenet\n print('>>> eval_func: images=', images)\n output = inference_googlenet(net, images)\n logits_g = net.fully_connected(output, 8, activation='LINEAR')\n if logits_g.dtype != tf.float32:\n logits_g = tf.cast(logits_g, tf.float32)\n with tf.device('/cpu:0'):\n logits_g = tf.nn.softmax(logits_g)\n\n return logits_g\n\n\ndef sess_eval(sess, image, logits_g):\n with tf.Graph().as_default() as 
g:\n flogits_g = sess.run([logits_g], feed_dict={images: image})\n gender_result = None\n\n global gender\n # print('values=',flogits_g, flogits_a)\n gender_result = gender[np.argmax(flogits_g[0])]\n # print('argmax g=', np.argmax(flogits_g[0]))\n # print('argmax a=', np.argmax(flogits_a[0]))\n print('gender=', gender_result)\n return gender_result\n\n\n# graph construction for evaluation\ngender = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']\nwith tf.Graph().as_default() as g:\n # Get images and labels for CIFAR-10.\n images = tf.placeholder(dtype=tf.uint8, shape=[256, 256, 3])\n checkpoint_dir = './ckpt_data_facial_expression'\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n # Build inference Graph.\n # print('>>>>> input original size = ', res_images)\n # in case of images less than or larger than 227x227\n # images = tf.image.resize_images(images, [227,227] )\n # print('>>>>> input resized = ',images)\n with tf.variable_scope('GPU_%i' % 0, reuse=tf.AUTO_REUSE) as var_scope, \\\n tf.name_scope('tower_%i' % 0):\n images1 = tf.image.central_crop(images, 224. / 256.)\n images2 = tf.image.resize_images(images1, [224, 224], tf.image.ResizeMethod.BILINEAR, align_corners=False)\n # res_images = tf.cast(images, dtype=tf.float32)\n # images3 = tf.image.per_image_standardization(images2)\n res_images = tf.reshape(images2, [1, 224, 224, 3])\n logits_g = eval_func(res_images, var_scope)\n\n # Restore the moving average version of the learned variables for eval.\n saver = tf.train.Saver()\n\n gpu_options = tf.GPUOptions(allocator_type='BFC', allow_growth=True)\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/cifar10_train/model.ckpt-0,\n # extract global_step from it.\n # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n sys.exit()\n\n(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\nprint('cv version=', major_ver, minor_ver, subminor_ver)\nif __name__ == '__main__':\n # if __package__ is None:\n # import sys\n # from os import path\n # print(path.dirname( path.dirname( path.abspath(__file__) ) ))\n # sys.path.append(path.dirname( path.dirname( path.abspath(__file__) ) ))\n # import eval_googleNet\n\n # Set up tracker.\n # Instead of MIL, you can also use\n tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MULTI']\n tracker_type = tracker_types[6]\n\n if int(major_ver) < 3:\n tracker = cv2.Tracker_create(tracker_type)\n else:\n if tracker_type == 'BOOSTING':\n tracker = cv2.TrackerBoosting_create()\n if tracker_type == 'MIL':\n tracker = cv2.TrackerMIL_create()\n if tracker_type == 'KCF':\n tracker = cv2.TrackerKCF_create()\n if tracker_type == 'TLD':\n tracker = cv2.TrackerTLD_create()\n if tracker_type == 'MEDIANFLOW':\n tracker = cv2.TrackerMedianFlow_create()\n if tracker_type == 'GOTURN':\n tracker = cv2.TrackerGOTURN_create()\n if tracker_type == 'MULTI':\n tracker = cv2.MultiTracker_create()\n\n video = cv2.VideoCapture(0)\n\n # Exit if video not opened.\n if not video.isOpened():\n print(\"Could not open video\")\n video.release()\n cv2.destroyAllWindows()\n sys.exit()\n\n # Read first frame.\n ok, frame = 
video.read()\n if not ok:\n print('Cannot read video file')\n video.release()\n cv2.destroyAllWindows()\n sys.exit()\n print('frame.shape=', frame.shape)\n\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # create a CLAHE object (Arguments are optional).\n # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n # gray = clahe.apply(gray)\n\n # Define an initial bounding box with face detector\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n faces = face_cascade.detectMultiScale(frame, 1.3, 5)\n # face_cascade = cv2.CascadeClassifier('lpb_cascade.xml')\n # faces = face_cascade.detectMultiScale(frame, scaleFactor= 1.1, minNeighbors=8, minSize=(55, 55), flags=cv2.CASCADE_SCALE_IMAGE)\n print('type of faces = ', type(faces))\n no_faces = len(faces)\n\n while no_faces < 1:\n ok, frame = video.read()\n # lor(frame, cv2.COLOR_BGR2GRAY)\n # create a CLAHE object (Arguments are optional).\n # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n # gray = clahe.apply(gray)\n faces = face_cascade.detectMultiScale(frame, 1.3, 5)\n no_faces = len(faces)\n print('no faces = ', no_faces)\n\n # Initialize multi-tracker with first frame and bounding box\n print('len(faces)=', len(faces))\n bbox = list(faces)\n no_faces = len(faces)\n gender_list = []\n gender_count = [0, 0]\n bbox_list = []\n\n\n def check_faces(no_faces, faces, bbox, frame):\n for i in range(no_faces):\n print(faces[i])\n bbox[i] = tuple(faces[i])\n print('bbox[', i, ']=', bbox[i])\n p1 = (bbox[i][0], bbox[i][1])\n p2 = ((bbox[i][0] + bbox[i][2]), (bbox[i][1] + bbox[i][3]))\n\n image = frame[p1[0]:p2[0], p1[1]:p2[1]]\n print(image.shape)\n if image.shape[0] <= 10:\n break\n if image.shape[1] <= 10:\n break\n print('p1=', p1, 'p2=', p2)\n if image is None:\n break\n image = cv2.resize(image, (256, 256))\n\n gender = sess_eval(sess, image, logits_g)\n gender_list.append(gender)\n bbox_list.append(bbox[i])\n ok = tracker.add(cv2.TrackerBoosting_create(), frame, bbox[i])\n\n\n check_faces(no_faces, faces, bbox, frame)\n\n frame_count = 0\n\n while video.isOpened():\n frame_count = (frame_count + 1) % 20\n # Read a new frame\n ok, frame = video.read()\n if not ok:\n break\n\n # Start timer\n timer = cv2.getTickCount()\n\n # Update tracker\n ok, bbox = tracker.update(frame)\n # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n # gray = clahe.apply(gray)\n\n faces = face_cascade.detectMultiScale(frame, 1.3, 5)\n no_faces = len(faces)\n\n if no_faces > 0:\n tracker = None\n tracker = cv2.MultiTracker_create()\n\n bbox = None\n bbox = list(faces)\n gender_list[:] = []\n bbox_list[:] = []\n\n # Over python3.3 code\n # gender_list.clear()\n # age_list.clear()\n # bbox_list.clear()\n check_faces(no_faces, faces, bbox, frame)\n\n # print('ok=', ok, 'bbox=', bbox, 'no faces = ', len(bbox))\n # print('ok=', ok, 'bbox=', bbox, 'no faces = ', no_faces)\n\n # if len(bbox) < 1:\n # print('retry - face detect')\n # faces = face_cascade.detectMultiScale(frame, 1.3, 5)\n\n # rno_faces = len(bbox)\n # while not ok:\n # print('not ok')\n # ok, frame = video.read()\n # ok, bbox = tracker.update(frame)\n # rno_faces = len(bbox)\n # print('ok=', ok, 'bbox=', bbox, 'no faces = ', rno_faces)\n\n # Calculate Frames per secorinnd (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);\n i = 0\n # starting with here, treat with multiple face bbox\n while gender_list:\n # Draw bounding box\n # Tracking success\n bbox = bbox_list.pop()\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + 
bbox[2]), int(bbox[1] + bbox[3]))\n cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n # if not gender_list:\n # print('no_gender_list, i=',i)\n # break\n # if not age_list:\n # print('no age_list, i=',i)\n # break\n gender_result = gender_list.pop()\n print('gender_result=', gender_result)\n if gender_result == 'MAN':\n gender_count[1] = gender_count[1] + 1\n else:\n gender_count[0] = gender_count[0] + 1\n\n cv2.putText(frame, \"face\" + str(i) + \", result: \" + gender_result,(int(bbox[0]), int(bbox[1])),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n i = i + 1\n # Tracking failure\n # cv2.putText(frame, \"Tracking failure detected\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\n # Display tracker type on frame\n cv2.putText(frame, tracker_type + \" Tracker\", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)\n cv2.putText(frame, '[WOMEN, MEN]: ' + str(gender_count), (10, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255),\n 2)\n\n # Display FPS on frame\n # cv2.putText(frame, \"FPS : \" + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)\n\n if gender_count[0] < gender_count[1]:\n cv2.putText(frame, 'Majority: MAN', (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n elif gender_count[0] > gender_count[1]:\n cv2.putText(frame, 'Majority: WOMAN', (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n else:\n cv2.putText(frame, 'Majority: EQUAL', (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n # Display result\n cv2.imshow(\"Tracking\", frame)\n\n # gender_count = [0, 0]\n\n # Exit if ESC pressed\n k = cv2.waitKey(10)\n if k == 27:\n break\n\nvideo.release()\ncv2.destroyAllWindows()\n" } ]
1
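Note on the record above: facial_expression_simulator.py seeds an OpenCV MultiTracker from Haar-cascade face detections before classifying each face crop with the TensorFlow model. A minimal sketch of just the detection loop (the window name is illustrative; cv2.data.haarcascades is where pip-installed OpenCV ships its cascade files, whereas the script above loads the XML from its working directory):

import cv2

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
video = cv2.VideoCapture(0)

while video.isOpened():
    ok, frame = video.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Same detector parameters as the script above: scale step 1.3, 5 neighbors.
    for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('Faces', frame)
    if cv2.waitKey(10) == 27:  # ESC to quit
        break

video.release()
cv2.destroyAllWindows()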
MicahRamirez/collatz_accept_script
https://github.com/MicahRamirez/collatz_accept_script
3596fadabd0fa553f710de8292819ecf5517f546
c8c7f45685832f4cad4509e823b8e13ed27797b1
348c6f8f45698e569df57bb1c94bc3a546dfa967
refs/heads/master
2021-03-12T19:17:36.043496
2015-09-10T19:59:24
2015-09-10T19:59:24
41,834,531
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5516816973686218, "avg_line_length": 23.350000381469727, "blob_id": "ec5b5cb226062fdd1d7ba74a5f06a9d36bca500a", "content_id": "609a5fea86d4cc5f68bea37505288a3967cbda1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2438, "license_type": "no_license", "max_line_length": 84, "num_lines": 100, "path": "/collatz_accept_test.py", "repo_name": "MicahRamirez/collatz_accept_script", "src_encoding": "UTF-8", "text": "import random \n\nf = open('./bad_numbers.txt', 'r')\n#data structs\nbad_map = {}\n\nfor line in f:\n b_num = int(line.strip('\\n'))\n bad_map[b_num] = 0\n\nf.close()\n\nfile_write = open('./acceptance_test.txt','w')\n\n#lazy cache\ncyc_cache = {}\nbad_list = list(bad_map.keys())\nmap_to_blist = {}\nbad_list.sort()\n\n#associate the \"bad number\" with its' respective index in a sorted list\nfor i in range(0, len(bad_list)):\n map_to_blist[bad_list[i]] = i\n\n#where n is an integer\ndef cycle_length(n):\n init_num = n\n cyc_len = 1\n while(n > 1):\n if(n in cyc_cache):\n #minus one to offset the init cyc length of 1 for both this and in cache\n cyc_cache[init_num] = cyc_len + cyc_cache[n] - 1\n return cyc_cache[init_num]\n if(n % 2 == 0):\n n = n >> 1\n else:\n n = (n << 1) + n + 1\n cyc_len += 1\n cyc_cache[init_num] = cyc_len\n return cyc_len\n\n#preferred input is where n < k\n#determines the max cycle length in a range\n#pre n < k\ndef range_cyc_length(n , k ):\n assert n < k\n max_cyc = 0\n #k + 1 because need inclusive\n for i in range(n, k + 1):\n if i in cyc_cache:\n cur_cyc = cyc_cache[i]\n else: \n cur_cyc = cycle_length(i)\n if(max_cyc < cur_cyc):\n max_cyc = cur_cyc\n return max_cyc\n\ndef check_bad_in_range(start, fin):\n assert start < fin\n #check the range from start to fin for bad numbers\n for i in range(start, fin + 1):\n if i in bad_map:\n start = i + 1\n bad_idx = map_to_blist[i]\n next_bad_num = bad_list[bad_idx + 1]\n fin = random.randint(start, next_bad_num)\n\n return (start, fin)\n\ncollatz_in = open('./XMR73-RunCollatz.in', 'w')\ncollatz_out = open('./XMR73-RunCollatz.out','w')\nwrite_in = \"\"\nwrite_out = \"\"\n\nfor i in range(0, 10):\n n = random.randint(0, 1000000)\n k = random.randint(0, 1000000)\n\n while(n in bad_map):\n n = random.randint(0, 1000000)\n\n while(k in bad_map):\n k = random.randint(0, 1000000)\n\n if n > k:\n swap = n\n n = k\n k = swap\n #unpack the tuple\n n, k = check_bad_in_range(n, k)\n\n max_cycl_length = range_cyc_length(n, k)\n write_in += str(n) + ' ' + str(k) + '\\n'\n write_out += str(n) + ' ' + str(k) + ' ' + str(max_cycl_length) + '\\n'\n\ncollatz_in.write(write_in)\ncollatz_out.write(write_out)\n\ncollatz_in.close()\ncollatz_out.close()\n\n\n\n" }, { "alpha_fraction": 0.8086956739425659, "alphanum_fraction": 0.8086956739425659, "avg_line_length": 37.33333206176758, "blob_id": "673b265c794fbb7e7ae2224a6cff39c1c10e8e0c", "content_id": "90d2db2d6b9a56c91730d1bb0bfd0e17101dd4e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 89, "num_lines": 3, "path": "/README.md", "repo_name": "MicahRamirez/collatz_accept_script", "src_encoding": "UTF-8", "text": "# collatz_accept_script\n\nAutomated the generation of acceptance testing for http://www.spoj.com/problems/PROBTNPO/\n" } ]
2
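Note on the record above: cycle_length in collatz_accept_test.py memoizes results and uses bit shifts, n >> 1 for halving and (n << 1) + n + 1 for 3n+1. A minimal memoized sketch of the same computation in plain arithmetic (function names are illustrative, not from the repo):

cache = {1: 1}

def cycle_length(n):
    # Collatz step: n/2 for even n, 3n+1 for odd n; cache every result.
    if n not in cache:
        cache[n] = 1 + cycle_length(n // 2 if n % 2 == 0 else 3 * n + 1)
    return cache[n]

def max_cycle_length(lo, hi):
    # Inclusive range, matching range_cyc_length in the script above.
    return max(cycle_length(i) for i in range(lo, hi + 1))

print(max_cycle_length(1, 10))  # 20, reached at n = 9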
nickit94/ParserOnlineShops
https://github.com/nickit94/ParserOnlineShops
b737c525e35acdeb663582de7b199a799446c584
f48c021124593a6ece8d0d32ceb1290bea928305
ad78740bb57b5dcff7f5472e90fed39123133fc3
refs/heads/main
2023-06-10T17:02:19.322192
2021-07-04T11:15:50
2021-07-04T11:15:50
382,831,553
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5925533175468445, "alphanum_fraction": 0.5949648022651672, "avg_line_length": 28.962427139282227, "blob_id": "d1a31558a3c7e367093e75074a58c6f1d01dad3c", "content_id": "10d14de3d4c1848a983da20d3a171f5c6ad6df42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11967, "license_type": "no_license", "max_line_length": 116, "num_lines": 346, "path": "/modules/common/helper.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "\"\"\"\nГлавный общий файл-хэлпер с общими для многих файлов функциями и константами\n\"\"\"\nimport sys\nimport collections\nimport logging\nimport random\nimport os\nfrom datetime import datetime, timedelta\nimport modules.common.file_worker as fw\n\n\nlog_name = \"logs/log-\" + datetime.now().strftime(\"%Y.%m.%d-%H.%M\") + \".txt\"\n# Следующие 2 строчки: расскомментировать, если бой, закомментировать, если тест.\n# logging.basicConfig(handlers=[logging.FileHandler(filename=log_name, encoding='utf-8', mode='w')],\n# level=logging.INFO)\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('header')\n\n\ndef del_old_logs():\n \"\"\"\n Удаление старых логов\n \"\"\"\n name = \"log-\" + (datetime.now() - timedelta(days=2)).strftime(\"%Y.%m.%d-\")\n\n for element in os.scandir(\"logs/\"):\n if element.is_file() and name in element.name:\n os.remove(\"logs/\" + element.name)\n\n\ndef get_proxy():\n \"\"\"\n Получить proxy из файла\n \"\"\"\n proxy_list = fw.FileWorker.list_data.load(PROXY_PATH)\n\n if not proxy_list:\n logger.error(\"ОШИБКА PROXY, СПИСОК ПУСТ\")\n return None\n\n proxy = proxy_list[random.randint(0, len(proxy_list) - 1)]\n logger.info(\"Выбран PROXY: {}\".format(proxy))\n\n return proxy\n\n\ndef is_all_elem_equal_in_tuple_list(elements, pos):\n \"\"\"\n Проверить все элементы на равенство по заданной позиции\n \"\"\"\n if not elements or len(elements) == 1:\n return True\n\n data = elements[0][pos]\n for item in elements:\n if item[pos] != data:\n return False\n\n return True\n\n\ndef replace_value_from_dictionary(dictionary: dict, value):\n \"\"\"\n Проверка наличия @key_name в словаре замен. 
В случае, если такой ключ найден -\n возвращается значение из словаря.\n \"\"\"\n if not dictionary:\n return value\n\n # Поиск в строке названия фраз из списка исключения и их замена\n for key, val in dictionary.items():\n if key in value:\n value = value.replace(key, val)\n logger.info(\"Нашел модель в словаре исключений телеграм, key={}\".format(key))\n\n return value\n\n\ndef find_allowed_model_names(model_name):\n \"\"\"\n Поиск названия из списка известных моделей\n \"\"\"\n for item in ALLOWED_MODEL_NAMES_LIST_FOR_BASE:\n if item.lower() == model_name.lower():\n return True\n\n return False\n\n\ndef find_in_namedtuple_list(namedtuple_list, brand_name=None, model_name=None, shop=None, category=None, color=None,\n ram=None, rom=None, price=None, img_url=None, url=None, rating=None, num_rating=None,\n product_code=None, date_time=None, avg_actual_price=None,\n hist_min_price=None, hist_min_shop=None, hist_min_date=None, diff_cur_avg=None,\n limit_one=False):\n \"\"\"\n Поиск элемента по любым параметрам в любом namedtuple\n \"\"\"\n if not namedtuple_list:\n return []\n\n result_list = []\n for item in namedtuple_list:\n if brand_name and getattr(item, 'brand_name', None) != brand_name:\n continue\n if model_name and getattr(item, 'model_name', None) != model_name:\n continue\n if shop and getattr(item, 'shop', None) != shop:\n continue\n if category and getattr(item, 'category', None) != category:\n continue\n if color and getattr(item, 'color', None) != color:\n continue\n if ram and getattr(item, 'ram', None) != ram:\n continue\n if rom and getattr(item, 'rom', None) != rom:\n continue\n if img_url and getattr(item, 'img_url', None) != img_url:\n continue\n if url and getattr(item, 'url', None) != url:\n continue\n if rating and getattr(item, 'rating', None) != rating:\n continue\n if num_rating and getattr(item, 'num_rating', None) != num_rating:\n continue\n if product_code and getattr(item, 'product_code', None) != product_code:\n continue\n if date_time and getattr(item, 'date_time', None) != date_time:\n continue\n if price and getattr(item, 'price', None) != price:\n continue\n if avg_actual_price and getattr(item, 'avg_actual_price', None) != avg_actual_price:\n continue\n if hist_min_price and getattr(item, 'hist_min_price', None) != hist_min_price:\n continue\n if hist_min_shop and getattr(item, 'hist_min_shop', None) != hist_min_shop:\n continue\n if hist_min_date and getattr(item, 'hist_min_date', None) != hist_min_date:\n continue\n if diff_cur_avg and getattr(item, 'diff_cur_avg', None) != diff_cur_avg:\n continue\n\n result_list.append(item)\n if limit_one:\n break\n\n return result_list\n\n\ndef find_min_price_in_prices_list(price_list):\n \"\"\"\n Вернет список с одним или несколькими магазинами и разными цветами, но с самыми низкими ценами\n \"\"\"\n pos_price, pos_shop, pos_datetime, pos_color, pos_url = 0, 1, 2, 3, 4\n\n # Если в списке все цены равны (не важно сколько магазинов) или список пуст - возвращаем список без изменений\n if is_all_elem_equal_in_tuple_list(price_list, pos_price):\n return price_list\n\n # Если в списке цены разные, но магазин один или несколько - находим самые низкие цены не зависимо от магазина\n result = []\n min_price = min(price_list)[pos_price]\n for item in price_list:\n if item[pos_price] == min_price:\n result.append(item)\n\n return result\n\n\ndef per_num_of_num(a, b):\n \"\"\"\n Процент числа от числа\n \"\"\"\n return float(100.0 - (a / b * 100.0))\n\n\n# ----------------------------- ПУТИ 
-----------------------------\nROOT_PATH = \"C:/Py_Projects/ParserOnlineShop/\" # sys.path[1] + '/'\n# Путь к webdriver\nWD_PATH = ROOT_PATH + \"venv/WebDriverManager/chromedriver.exe\"\n# Путь для файла с логами изменений цен\nPRICE_CHANGES_PATH = ROOT_PATH + \"data/cache/dif_price.csv\"\n# Путь для файла с результатами парсинга\nCSV_PATH = ROOT_PATH + \"data/cache/goods.csv\"\nCSV_PATH_RAW = ROOT_PATH + \"data/cache/\"\n# Путь к proxy\nPROXY_PATH = ROOT_PATH + \"data/proxy/proxy.txt\"\n# Пути к словарям\nEXCEPT_MODEL_NAMES_PATH = ROOT_PATH + \"data/dictionaries/except_model_names.dic\"\nEXCEPT_MODEL_NAMES_TELEGRAM_PATH = ROOT_PATH + \"data/dictionaries/except_model_names_telegram.dic\"\nSTATS_PRODS_DICTIONARY_PATH = ROOT_PATH + \"data/dictionaries/stats_prods_from_telegram.dic\"\nSTATS_SHOPS_DICTIONARY_PATH = ROOT_PATH + \"data/dictionaries/stats_shops_from_telegram.dic\"\nMESSAGES_IN_TELEGRAM_LIST_PATH = ROOT_PATH + \"data/databases/msg_in_telegram.csv\"\nNUM_POSTS_IN_TELEGRAM_PATH = ROOT_PATH + \"data/databases/num_posts_in_telegram.data\"\nLIST_MODEL_NAMES_BASE_PATH = ROOT_PATH + \"data/databases/list_model_names_base.dat\"\nUNDEFINED_MODEL_NAME_LIST_PATH = ROOT_PATH + \"data/databases/undefined_model_name.dat\"\nUNDEFINED_MODEL_NAME_LIST_LOCK_PATH = ROOT_PATH + \"data/databases/undefined_model_name.lock\"\nCRASH_DATA_PATH = ROOT_PATH + \"data/databases/crash_data.dat\"\nBOT_ACCOUNT_PATH = ROOT_PATH + \"modules/data_sender/telegram/my_account\"\nIMAGE_FOR_SEND_IN_TELEGRAM_PATH = ROOT_PATH + \"data/cache/for_send/\"\n\n# ----------------------------- КОЛЛЕКЦИЯ -----------------------------\n\n# Список разрешенных названий моделей для добавления в БД\nALLOWED_MODEL_NAMES_LIST_FOR_BASE = []\n# Словарь исключений названий моделей\nEXCEPT_MODEL_NAMES_DICT = {}\n# Единое название для всех восстановленных айфонов\nREBUILT_IPHONE_NAME = \"\"\n# Список слов, которые необходимо исключать из названий цветов\nIGNORE_WORDS_FOR_COLOR = []\n\n\n# ---------------- ПЕРЕМЕННЫЕ ДЛЯ РЕФЕРАЛЬНЫХ ССЫЛОК ----------------\nREF_LINK_MVIDEO = ''\nREF_LINK_MTS = ''\nREF_LINK_ELDORADO = ''\nREF_LINK_CITILINK = ''\n\nDOMAIN_DNS = 'dns.ru'\nDOMAIN_MVIDEO = 'mvideo.ru'\nDOMAIN_MTS = 'mts.ru'\nDOMAIN_ELDORADO = 'eldorado.ru'\nDOMAIN_CITILINK = 'citilink.ru'\n\n\n# ---------------- ПЕРЕМЕННЫЕ ДЛЯ РЕФЕРАЛЬНЫХ ССЫЛОК ----------------\n\n# Коллекция для хранения результатов парсинга одного товара (смартфоны)\nParseResult = collections.namedtuple(\n 'ParseResult',\n (\n 'shop',\n 'category',\n 'brand_name',\n 'model_name',\n 'color',\n 'ram',\n 'rom',\n 'price',\n 'img_url',\n 'url',\n 'rating',\n 'num_rating',\n 'product_code',\n ),\n)\n\n# Коллекция для хранения результатов парсинга одного товара (смартфоны)\nPriceChanges = collections.namedtuple(\n 'PriceChanges',\n (\n 'shop',\n 'category',\n 'brand_name',\n 'model_name',\n 'color',\n 'ram',\n 'rom',\n 'img_url',\n 'url',\n 'date_time',\n 'price',\n 'avg_actual_price',\n 'hist_min_price',\n 'hist_min_shop',\n 'hist_min_date',\n 'diff_cur_avg',\n ),\n)\n\n# -------------------- СПИСОК СООБЩЕНИЙ ТЕЛЕГРАМ ---------------------- #\n\n# Коллекция для хранения результатов парсинга одного товара (смартфоны)\nMessagesInTelegram = collections.namedtuple(\n 'MessagesInTelegram',\n (\n 'message_id',\n 'category',\n 'brand_name',\n 'model_name',\n 'ram',\n 'rom',\n 'price',\n 'avg_actual_price',\n 'img_url',\n 'where_buy_list',\n 'hist_min_price',\n 'hist_min_shop',\n 'hist_min_date',\n 'post_datetime',\n 'text_hash',\n 'is_actual',\n ),\n)\n\n# -------------------- НАЗВАНИЯ 
МАГАЗИНОВ ДЛЯ ТЕЛЕГРАМ ---------------------- #\n\nTRUE_SHOP_NAMES = [\n 'М.видео',\n 'Эльдорадо',\n 'DNS',\n 'DNS Технопоинт',\n 'МТС',\n 'Ситилинк',\n 'RBT.ru',\n 'Онлайнтрейд',\n 'Связной',\n 'ТехноСити',\n 'Билайн',\n 'МегаФон',\n 'е2е4',\n 'НОУ-ХАУ',\n 're:Store',\n 'Официальный интернет-магазин Samsung',\n 'Официальный интернет-магазин Huawei',\n 'Ozon',\n 'Wildberries',\n 'Sony Store',\n 'Tmall',\n]\n\n# ----------------------------- ТАБЛИЦЫ В БД ----------------------------- #\n\n# Список названий магазинов\nSHOPS_NAME_LIST = [\n ('мвидео',),\n ('эльдорадо',),\n ('dns',),\n ('технопоинт',),\n ('мтс',),\n ('ситилинк',),\n ('rbt',),\n ('онлайнтрейд',),\n ('связной',),\n ('техносити',),\n ('билайн',),\n ('мегафон',),\n ('e2e4',),\n ('ноу-хау',),\n]\n# Список категорий\nCATEGORIES_NAME_LIST = [\n ('смартфоны',),\n ('ноутбуки',),\n]\n" }, { "alpha_fraction": 0.5888787508010864, "alphanum_fraction": 0.5918413996696472, "avg_line_length": 32.36882019042969, "blob_id": "0e1656733799855d970fdeef017398b72464eb5b", "content_id": "23de64414645daa7ce7ee7e675fe2a24938bea88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9544, "license_type": "no_license", "max_line_length": 109, "num_lines": 263, "path": "/modules/common/sql_req.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "# --\n# Создание БД\ncreate_database_query = \"CREATE DATABASE \"\n\n# --------------------- СОЗДАНИЕ ТАБЛИЦ ------------------------------\n\n# Таблица Категории - categories_name_table\ncreate_categories_name_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS categories_name_table (\n ID_Category SERIAL PRIMARY KEY,\n Category_Name VARCHAR(50) NOT NULL\n );\n\"\"\"\n\n# Таблица: Магазины - shops_name_table\ncreate_shops_name_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS shops_name_table (\n ID_Shop_Name SERIAL PRIMARY KEY,\n Shop_Name VARCHAR(20) NOT NULL\n );\n\"\"\"\n\n# Таблица: Продукты - products_table\ncreate_products_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS products_table (\n ID_Product SERIAL PRIMARY KEY,\n ID_Category INTEGER REFERENCES categories_name_table(ID_Category) NOT NULL,\n Brand_Name VARCHAR(20) NOT NULL,\n Model_Name VARCHAR(100) NOT NULL,\n Total_Rating REAL\n );\n\"\"\"\n\n# Таблица: Комплектации телефонов - versions_phones_table\ncreate_versions_phones_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS versions_phones_table (\n ID_Ver_Phone SERIAL PRIMARY KEY,\n ID_Product INTEGER NOT NULL,\n RAM INTEGER NOT NULL,\n ROM INTEGER NOT NULL,\n Img_URL VARCHAR(200) NOT NULL,\n \n FOREIGN KEY (ID_Product)\n REFERENCES products_table(ID_Product) MATCH SIMPLE\n ON UPDATE NO ACTION\n ON DELETE CASCADE\n NOT VALID\n );\n\"\"\"\n\n# Таблица: В каком магазине купить продукт - shop_buy_table\ncreate_shops_phones_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS shops_phones_table (\n ID_Shop_Phone SERIAL PRIMARY KEY,\n ID_Shop_Name INTEGER REFERENCES shops_name_table(ID_Shop_Name) NOT NULL,\n ID_Product INTEGER NOT NULL,\n ID_Ver_Phone INTEGER NOT NULL,\n URL_Product VARCHAR(200) NOT NULL,\n Product_Code VARCHAR(20) NOT NULL,\n Color VARCHAR(50) NOT NULL,\n Local_Rating REAL,\n Num_Local_Rating INTEGER,\n Bonus_Rubles INTEGER,\n \n FOREIGN KEY (ID_Ver_Phone)\n REFERENCES versions_phones_table(ID_Ver_Phone) MATCH SIMPLE\n ON UPDATE NO ACTION\n ON DELETE CASCADE\n NOT VALID\n );\n\"\"\"\n\n# Таблица: Цены всех товаров - prices_phones_table\ncreate_prices_phone_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS 
" }, { "alpha_fraction": 0.5888787508010864, "alphanum_fraction": 0.5918413996696472, "avg_line_length": 32.36882019042969, "blob_id": "0e1656733799855d970fdeef017398b72464eb5b", "content_id": "23de64414645daa7ce7ee7e675fe2a24938bea88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9544, "license_type": "no_license", "max_line_length": 109, "num_lines": 263, "path": "/modules/common/sql_req.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "# --\n# Database creation\ncreate_database_query = \"CREATE DATABASE \"\n\n
# --------------------- TABLE CREATION ------------------------------\n\n
# Table: Categories - categories_name_table\ncreate_categories_name_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS categories_name_table (\n        ID_Category SERIAL PRIMARY KEY,\n        Category_Name VARCHAR(50) NOT NULL\n    );\n\"\"\"\n\n
# Table: Shops - shops_name_table\ncreate_shops_name_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS shops_name_table (\n        ID_Shop_Name SERIAL PRIMARY KEY,\n        Shop_Name VARCHAR(20) NOT NULL\n    );\n\"\"\"\n\n
# Table: Products - products_table\ncreate_products_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS products_table (\n        ID_Product SERIAL PRIMARY KEY,\n        ID_Category INTEGER REFERENCES categories_name_table(ID_Category) NOT NULL,\n        Brand_Name VARCHAR(20) NOT NULL,\n        Model_Name VARCHAR(100) NOT NULL,\n        Total_Rating REAL\n    );\n\"\"\"\n\n
# Table: Phone configurations - versions_phones_table\ncreate_versions_phones_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS versions_phones_table (\n        ID_Ver_Phone SERIAL PRIMARY KEY,\n        ID_Product INTEGER NOT NULL,\n        RAM INTEGER NOT NULL,\n        ROM INTEGER NOT NULL,\n        Img_URL VARCHAR(200) NOT NULL,\n\n        FOREIGN KEY (ID_Product)\n            REFERENCES products_table(ID_Product) MATCH SIMPLE\n            ON UPDATE NO ACTION\n            ON DELETE CASCADE\n            NOT VALID\n    );\n\"\"\"\n\n
# Table: Which shop sells the product - shops_phones_table\ncreate_shops_phones_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS shops_phones_table (\n        ID_Shop_Phone SERIAL PRIMARY KEY,\n        ID_Shop_Name INTEGER REFERENCES shops_name_table(ID_Shop_Name) NOT NULL,\n        ID_Product INTEGER NOT NULL,\n        ID_Ver_Phone INTEGER NOT NULL,\n        URL_Product VARCHAR(200) NOT NULL,\n        Product_Code VARCHAR(20) NOT NULL,\n        Color VARCHAR(50) NOT NULL,\n        Local_Rating REAL,\n        Num_Local_Rating INTEGER,\n        Bonus_Rubles INTEGER,\n\n        FOREIGN KEY (ID_Ver_Phone)\n            REFERENCES versions_phones_table(ID_Ver_Phone) MATCH SIMPLE\n            ON UPDATE NO ACTION\n            ON DELETE CASCADE\n            NOT VALID\n    );\n\"\"\"\n\n
# Table: Prices of all products - prices_phones_table\ncreate_prices_phone_table_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS prices_phones_table (\n        ID SERIAL PRIMARY KEY,\n        ID_Shop_Name INTEGER REFERENCES shops_name_table(ID_Shop_Name) NOT NULL,\n        ID_Product INTEGER NOT NULL,\n        ID_Shop_Phone INTEGER NOT NULL,\n        Price INTEGER NOT NULL,\n        Datetime TIMESTAMP NOT NULL,\n\n        FOREIGN KEY (ID_Shop_Phone)\n            REFERENCES shops_phones_table(ID_Shop_Phone) MATCH SIMPLE\n            ON UPDATE NO ACTION\n            ON DELETE CASCADE\n            NOT VALID\n    );\n\"\"\"\n\n
# ----------------------- VIEW CREATION --------------------------\n\n
# Create a view of the general table, where all tables are joined into one\ncreate_view_general_table_query = \"\"\"\n    CREATE VIEW general_table AS\n        SELECT products_table.id_product, products_table.id_category, brand_name, model_name, total_rating,\n            versions_phones_table.id_ver_phone, ram, rom, img_url, \n            shops_phones_table.id_shop_phone, shops_phones_table.id_shop_name, url_product, product_code, color, \n            local_rating, num_local_rating, bonus_rubles,\n            id, price, datetime\n        FROM products_table\n        JOIN versions_phones_table USING (id_product)\n        JOIN shops_phones_table USING (id_ver_phone)\n        JOIN prices_phones_table USING (id_shop_phone)\n\"\"\"\n\n
# --------------------------- DB QUERIES -----------------------------\n\n
# Find a product in products_table\nselect_id_product_query = \"\"\"\n    SELECT id_product FROM products_table \n    WHERE \n        brand_name = %s AND \n        model_name = %s\n\"\"\"\n\n
# Find a configuration in versions_phones_table\nselect_id_ver_phone_query = \"\"\"\n    SELECT id_ver_phone FROM versions_phones_table \n    WHERE \n        id_product = %s AND \n        (ram = %s or ram = 0) AND\n        rom = %s\n\"\"\"\n\n
# Check whether a shop carries the given configuration, in shops_phones_table\nselect_id_shop_phone_query = \"\"\"\n    SELECT id_shop_phone \n    FROM shops_phones_table \n    WHERE \n        id_ver_phone = %s AND \n        id_shop_name = %s AND\n        url_product = %s\n\"\"\"\n\n
# Check whether the given shop has a price for the given configuration, in prices_phones_table\nselect_price_in_price_phone_query = \"\"\"\n    SELECT price \n    FROM prices_phones_table \n    WHERE \n        id_shop_phone = %s\n\"\"\"\n\n
select_img_url_query = \"\"\"\n    SELECT img_url \n    FROM general_table\n    WHERE \n        id_shop_name=%s AND \n        brand_name=%s AND \n        model_name=%s AND \n        ram=%s AND \n        rom=%s \n    LIMIT 1\n\"\"\"\n\n
# --------------------------- WRITING TO THE DB ----------------------------\n\n
# Populate the shop names table\ninsert_into_shops_name_table_query = \"INSERT INTO shops_name_table (Shop_Name) VALUES %s\"\n\n
# Populate the category names table\ninsert_into_categories_name_table_query = \"INSERT INTO categories_name_table (Category_Name) VALUES %s\"\n\n
# Insert a price into prices_phones_table\ninsert_into_prices_phones_table_query = \"\"\"\n    INSERT INTO prices_phones_table (id_shop_name, id_product, id_shop_phone, price, datetime) \n    VALUES %s\n\"\"\"\n\n
insert_into_shops_phones_table_query = \"\"\"\n    INSERT INTO shops_phones_table (id_shop_name, id_product, id_ver_phone, url_product, product_code, color,\n        local_rating, num_local_rating, bonus_rubles)\n    VALUES %s\n    RETURNING id_shop_phone\n\"\"\"\n\n
insert_into_versions_phones_table_query = \"\"\"\n    INSERT INTO versions_phones_table (id_product, ram, rom, img_url)\n    VALUES %s\n    RETURNING id_ver_phone\n\"\"\"\n\n
insert_into_products_table_query = \"\"\"\n    INSERT INTO products_table (id_category, brand_name, model_name, total_rating)\n    VALUES %s\n    RETURNING id_product\n\"\"\"\n\n
# --------------------------- SEARCH ----------------------------\n\n
# Find all (historical) prices by brand name, model, ROM and RAM, sorted by date in descending order\n
search_all_prices_by_version_query = \"\"\"\n    SELECT price, id_shop_name, datetime\n    FROM general_table\n    WHERE brand_name = %s AND \n        model_name = %s AND \n        (ram = %s or ram = 0) AND \n        rom = %s\n    ORDER BY datetime DESC\n\"\"\"\n\n
# Find the minimum (historical) price by brand name, model, ROM and RAM\n
search_min_historical_price_by_version_query = \"\"\"\n    SELECT price, id_shop_name, datetime::DATE\n    FROM general_table\n    WHERE brand_name = %s AND \n        model_name = %s AND \n        (ram = %s or ram = 0) AND \n        rom = %s\n    ORDER BY price ASC LIMIT 1\n\"\"\"\n\n
# Find only the current prices (those with the most recent date) across all shops and colors\n
search_actual_prices_by_version_query = \"\"\"\n    SELECT price, id_shop_name, datetime, color, general_table.url_product\n    FROM general_table\n    JOIN (\n        SELECT url_product, MAX(datetime) as MaxDate \n        FROM general_table\n        WHERE brand_name = %s AND \n            model_name = %s AND \n            (ram = %s OR ram = 0) AND \n            rom = %s\n        GROUP BY url_product\n    ) AS group_table\n    ON general_table.datetime = group_table.MaxDate AND \n        general_table.url_product = group_table.url_product\n\"\"\"\n\n
# Find only the current prices (those with the most recent date) for all colors in one specific shop\n
search_actual_prices_by_version_and_shop_query = \"\"\"\n    SELECT price, id_shop_name, general_table.url_product\n    FROM general_table\n    JOIN (\n        SELECT url_product, MAX(datetime) as MaxDate \n        FROM general_table\n        WHERE brand_name = %s AND \n            model_name = %s AND \n            (ram = %s OR ram = 0) AND \n            rom = %s AND\n            id_shop_name = %s\n        GROUP BY url_product\n    ) AS group_table\n    ON general_table.datetime = group_table.MaxDate AND \n        general_table.url_product = group_table.url_product\n\"\"\"\n\n
# ----------------------- DATA UPDATES ---------------------------\n\n
# Update the datetime of a price\nupdate_datetime_in_price_phone_table_query = \"\"\"\n    UPDATE prices_phones_table \n    SET datetime = now() \n    WHERE id =\n        (SELECT id \n        FROM general_table \n        WHERE id_product = %s AND\n            id_ver_phone = %s AND\n            id_shop_phone = %s AND\n            price = %s\n        )\n\"\"\"\n
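\n# Illustrative sketch (editor's addition): how these query strings are meant to be used with a\n
# psycopg2-style cursor (the project normally goes through its own DataBase wrapper). The\n
# connection parameters below are placeholders.\n#\n
# import psycopg2\n# conn = psycopg2.connect(dbname='parser', user='postgres', password='...', host='127.0.0.1')\n
# with conn.cursor() as cur:\n#     cur.execute(select_id_product_query, ('apple', 'iphone 11'))\n
#     row = cur.fetchone()  # -> (id_product,) or None\n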
" }, { "alpha_fraction": 0.650737464427948, "alphanum_fraction": 0.650737464427948, "avg_line_length": 32.900001525878906, "blob_id": "76c71209bcf0e80d0adb0bbaa4adccfcc5ceefc6", "content_id": "f34468e3323e2a319b7ce54ddbff2e8df7ef4318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2223, "license_type": "no_license", "max_line_length": 118, "num_lines": 50, "path": "/modules/data_validator/data_validator.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import modules.common.helper as h\n\nlogger = h.logging.getLogger('DataValidator')\n\n\n
class DataValidator:\n    \"\"\"\n    ONE OF THE CORE MODULES OF THE PROJECT - DataValidator\n    Receives the data produced by DataReceiver, filters it, and returns the result\n    \"\"\"\n\n
    def __init__(self, data_receiver_result_list):\n        self.data_for_validation_list = data_receiver_result_list\n\n
    @staticmethod\n    def validation_item(item):\n        \"\"\"\n        MANDATORY METHOD\n        The core of DataValidator - validation of a single item. Made static so that it can be called from other\n        modules, to avoid pushing the same list (of parsed data, for example) through several passes (one per module).\n\n        By calling this method, e.g. in DBInserter, DataValidator does not have to be run separately (via run),\n        which saves one full pass over the input data list.\n        If that list consists of thousands of items, the savings become noticeable.\n        \"\"\"\n
        if item.category and item.shop and item.brand_name and item.model_name and \\\n                item.color and item.img_url and item.product_code and item.rom and item.price:\n            return True\n\n        return False\n\n
    def __validation_list(self):\n        \"\"\"\n        MANDATORY METHOD\n        Validation of the data list - a check for empty key fields\n        \"\"\"\n
        result = []\n        for item in self.data_for_validation_list:\n            if DataValidator.validation_item(item):\n                result.append(item)\n\n        return result\n\n
    def run(self):\n        \"\"\"\n        MANDATORY METHOD\n        Start DataValidator\n        \"\"\"\n        return self.__validation_list()\n
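\n\n# Illustrative sketch (editor's addition): typical usage with the output of a DataReceiver run.\n
# 'receiver_result' is assumed to be a list of h.ParseResult tuples.\n#\n
# validator = DataValidator(receiver_result)\n# valid_items = validator.run()  # full filtered list\n
# ok = DataValidator.validation_item(receiver_result[0])  # or check a single item inline\n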
" }, { "alpha_fraction": 0.7474600672721863, "alphanum_fraction": 0.7561683654785156, "avg_line_length": 30.363636016845703, "blob_id": "455ee22251f2b4be9f04612fa2e244d8526f6359", "content_id": "9774d27f1510233d3574d1432057ea37a4d493e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 121, "num_lines": 22, "path": "/main.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import modules.common.helper as h\nfrom modules.runner.runner import Runner\n\n\nlogger = h.logging.getLogger('Main')\n\n\n
if __name__ == '__main__':\n    runner = Runner()\n    runner.run()\n\n\n
\"\"\"\nProject mothballed on 10.05.21\n
The entire project code has been reworked - a full refactoring and a change of structure. Nothing has been tested.\n
To do:\n    - Check that all the parsers are still up to date.\n
    - There are open questions about the module that inserts data into the DB.\n
    - Also re-check the telegram_sender.py code for async functions and compare it with the old versions. Lots of warnings in the console.\n
    - Check the reading of the dictionaries of existing models; it feels like half of them are not picked up.\n
    - The ROOT_PATH path is set manually, for debugging.\n\"\"\"" }, { "alpha_fraction": 0.5812349915504456, "alphanum_fraction": 0.5856512188911438, "avg_line_length": 37.64371109008789, "blob_id": "08086f64201796464edd45e664b11db41d8f9d9b", "content_id": "4097e1679b5dd40b5696bade04698f7d0bf4098a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15395, "license_type": "no_license", "max_line_length": 119, "num_lines": 334, "path": "/modules/data_receiver/parsers/dns_parse.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport re\n\n
import selenium.common.exceptions as se\nfrom selenium.webdriver.common.by import By\n
from selenium.webdriver.support.expected_conditions import presence_of_element_located\n
from modules.data_receiver.parsers.parse_base import ParseBase\nimport modules.common.helper as h\n
from modules.common.file_worker import FileWorker\n\nDNS_REBUILT_IPHONE = ' \"как новый\"'\nlogger = h.logging.getLogger('dnsparse')\n\n\n
# Parse the model name (extract the model name, color and ROM)\ndef dns_parse_model_name(name):\n
    # Remove non-breaking spaces\n    name = name.replace(u'\\xc2\\xa0', u' ')\n    name = name.replace(u'\\xa0', u' ')\n
    # Lowercase\n    name = name.lower()\n
    # Specifications taken from the name\n    characteristics = re.findall(r'\\[.*]', name)[0]\n    name = name.replace(characteristics, '')\n    ram = dns_parse_specifications(characteristics)\n\n
    # Refurbished phones (iPhone only). If the word is present - remove it\n    rebuilt = h.REBUILT_IPHONE_NAME if (DNS_REBUILT_IPHONE in name) else ''\n    name = name.replace(DNS_REBUILT_IPHONE if rebuilt else '', '')\n
    # Remove the year, if present\n    year = re.findall(r' 20[1,2]\\d ', name)\n    year = year[0] if year else ''\n
    # Remove the screen diagonal at the beginning of the string\n    name = name.partition(' ')[2]\n
    # Extract the color\n    color = name[name.find('гб ') + len('гб '):]\n
    # Extract the ROM\n    rom = re.findall(r'\\d+\\sгб', name)[0]\n
    # In case the name also contains RAM separated by /\n    ram_rom = re.findall(r'\\d+[/]\\d+\\sгб', name)\n
    # Remove RAM/ROM or just ROM from the model name\n    name = name.replace(ram_rom[0] if ram_rom else rom, '')\n
    # Strip everything except digits from the ROM string\n    rom = re.findall(r'\\d+', rom)\n    rom = int(rom[0]) if rom else 0\n
    # Remove the color, the brand name and the word \"смартфон\" from the model string\n    name = name.replace(color, '').replace('смартфон', '').replace(year, '').replace(' nfc ', ' ').replace(' 5g ', ' ')\n
    # Remove extra spaces\n    name = ' '.join(name.split())\n    name += rebuilt\n\n
    # Check the name against the dictionary of model name exceptions\n    name = h.replace_value_from_dictionary(h.EXCEPT_MODEL_NAMES_DICT, name)\n\n
    # Check the model name against the dictionary of allowed models\n    if not h.find_allowed_model_names(name):\n        logger.info(\"New model detected that is not in the base yet = '{}'\".format(name))\n        FileWorker.list_data.save(h.UNDEFINED_MODEL_NAME_LIST_PATH, data=name, overwrite=False)\n        return None, None, None, 0, 0\n\n
    # Extract the brand name\n    brand_name = name.split()[0]\n    model_name = name.replace(brand_name, '').strip()\n\n
    return brand_name, model_name, color, ram, rom\n\n\n
# Parse the specifications (extract the RAM)\ndef dns_parse_specifications(specifications):\n
    # Lowercase\n    specifications = specifications.lower()\n
    # Extract the ram value from the specifications string\n    ram = re.findall(r'\\d+\\sгб', specifications)\n
    # Strip everything except digits from the RAM string if it is not empty, otherwise 0\n    ram = re.findall(r'\\d+', ram[0])[0] if ram else 0\n\n
    return int(ram)\n
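\n\n# Illustrative sketch (editor's addition): the behaviour of the two helpers above on a made-up\n
# DNS-style listing title (real site wording may differ, and the model must be present in the\n
# allowed-names list for a non-empty result):\n#\n
#   dns_parse_model_name('6.1 Смартфон Apple iPhone 11 64 ГБ черный [iOS, 4 ГБ]')\n
#   -> ('apple', 'iphone 11', 'черный', 4, 64)  # brand, model, color, RAM, ROM\n#\n
#   dns_parse_specifications('[ios, 4 гб]')  # -> 4\n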
\n\nclass DNSParse(ParseBase):\n    \"\"\"\n    IMPLEMENTATION OF ONE OF THE CORE MODULES OF THE PROJECT - DataReceiver\n    Implementation of the ParseBase base class\n    Parses data from the DNS shop\n    \"\"\"\n
    def __init__(self):\n        super().__init__(domain='https://www.dns-shop.ru', shop='dns', logger=logger, category=\"смартфоны\")\n        self.container_css_selector = 'div.catalog-product.ui-button-widget'\n\n
    def _wd_city_selection_catalog(self):\n        \"\"\"\n        City selection algorithm for all possible situations on the catalog page\n        \"\"\"\n
        # Find the city selection header and the city name\n        city_head = self._wd_find_elem(By.XPATH, \"//i[@class='location-icon']\")\n        city_head_text = self._wd_find_elem(By.XPATH, \"//div[@class='w-choose-city-widget-label']\")\n
        if not city_head or not city_head_text:\n            self.logger.error(\"Cannot find the element with the current city on the page\")\n            return False\n\n
        # If the site header shows the wrong city - click it and select the right one\n        if self.current_city.lower() not in city_head_text.text.lower():\n\n
            if not self._wd_click_elem(city_head):\n                self.logger.error(\"Cannot click the city name to change it\")\n                return False\n\n            time.sleep(1)\n\n
            # Look for the city in the predefined list of major cities (data-role is assumed here;\n            # the dump contained the corrupted attribute name 'databases-role')\n
            city_list = self._wd_find_all_elems_with_timeout(By.XPATH, \"//span[@data-role='big-cities']\")\n
            if city_list:\n                for item in city_list:\n                    if self.current_city.lower() in item.text.lower():\n                        time.sleep(0.5)\n                        return self._wd_ac_click_elem(item)\n
            else:\n                self.logger.info(\"The required city is not in the list, trying to type it manually\")\n\n
            # If the predefined list does not contain the required city - find the input and type it into the search\n
            input_city = self._wd_find_elem_with_timeout(By.XPATH, \"//input[@data-role='search-city']\")\n
            if not input_city:\n                self.logger.error(\"Cannot find the city input field\")\n                return False\n\n
            # Click the text input form\n            if not self._wd_ac_click_elem(input_city):\n                self.logger.error(\"Cannot click the text input form\")\n                return False\n            time.sleep(1)\n\n
            # Type the city name letter by letter\n            for char in self.current_city:\n                self._wd_ac_send_keys(input_city, char)\n\n
            # Find the required city in the resulting list\n            city_list = self._wd_find_all_elems_with_timeout(By.XPATH, \"//li[@class='modal-row']/a/span/mark\")\n
            if city_list:\n                for item in city_list:\n                    if self.current_city.lower() in item.text.lower():\n                        time.sleep(0.5)\n                        return self._wd_ac_click_elem(item)\n
            else:\n                self.logger.error(\"The required city is not in the input list, exiting\")\n                return False\n\n        return True\n\n
    def _wd_city_selection_product(self):\n        \"\"\"\n        City selection algorithm for all possible situations on the product page\n        \"\"\"\n        pass\n\n
    def _wd_check_load_page_catalog(self):\n        \"\"\"\n        Check by key divs that the catalog page has loaded completely\n        \"\"\"\n
        # Wait for the prices to load\n        if not self._wd_find_elem_with_timeout(By.XPATH,\n                                               \"//div[contains(@class, 'product-buy__price')]\"):\n            return False\n\n
        self.logger.info(\"Page loaded\")\n        return True\n\n
    def _wd_check_load_page_product(self):\n        \"\"\"\n        Check by key divs that the product page has loaded completely\n        \"\"\"\n        pass\n\n
    def _wd_open_browser_catalog(self, url):\n        \"\"\"\n        Start the browser, load the initial catalog page, select the city\n        \"\"\"\n
        if not super()._wd_open_browser_catalog(url=url):\n            return False\n\n        self._wd_scroll_down()\n        return True\n\n
    def _wd_open_browser_product(self, url):\n        \"\"\"\n        Start the browser, load the initial product page, select the city\n        \"\"\"\n        pass\n\n
    def _wd_next_page(self):\n        \"\"\"\n        Go to the given page num_page via a click (to imitate a user)\n        \"\"\"\n        for num_try in range(3):\n\n
            if num_try and not self._wd_check_load_page_catalog():\n                self.logger.error(\"Failed to load the page in __wd_next_page (1)\")\n                self.driver.refresh()\n                continue\n\n
            if num_try:\n                self._wd_scroll_up()\n\n
            # Find the button of the next page\n            num_page_elem = self._wd_find_elem(By.XPATH,\n                                               \"//a[@class='pagination-widget__page-link' and text()='{}']\".\n                                               format(self.cur_page))\n
            if not num_page_elem:\n                self.logger.info(\"Reached the end of the catalog\")\n                return False\n\n
            # Click - go to the next page\n            if not self._wd_ac_click_elem(num_page_elem):\n                self.logger.error(\"Cannot click the page in __wd_next_page\")\n                self.driver.refresh()\n                continue\n\n
            # Special delay between page switches to imitate a user\n            time.sleep(self.wait_between_pages_sec)\n\n
            # Wait until the page loads\n            if not self._wd_check_load_page_catalog():\n                self.logger.error(\"Failed to load the page in __wd_next_page (2)\")\n                self.driver.refresh()\n                continue\n\n
            # Scroll\n            self._wd_scroll_down()\n\n
            # A DNS quirk - when switching pages the content sometimes does not change. If so - refresh the page\n
            try:\n                self.wait.until_not(presence_of_element_located((By.XPATH, \"//a[@href='{}']\".format(\n                    self.pr_result_list[-5].url.replace(self.domain, '')))))\n\n
                self.logger.info(\"Cur_page = {}\".format(self.cur_page))\n                self.cur_page += 1\n                return True\n
            except se.TimeoutException:\n                print(\"Element -5 did not disappear in time, refreshing\")\n                self.logger.error(\"TimeoutException in __wd_next_page, refreshing the page\")\n                self.driver.refresh()\n                continue\n
            except IndexError:\n                self.logger.error('pr_result_list[-5] turned out to be empty, list index out of range')\n                return False\n
        else:\n            self.logger.error(\"!! Failed to switch to page #{} after 3 attempts !!\".format(self.cur_page))\n            return False\n\n
    def _parse_product_page(self, html, url):\n        \"\"\"\n        Method for parsing the html of a product page\n        \"\"\"\n        pass\n\n
    def _parse_catalog_block(self, block):\n        \"\"\"\n        Parse the data of a single block\n        \"\"\"\n
        # Model name and URL\n        model_name_url_block = block.select_one('a.catalog-product__name.ui-link.ui-link_black')\n
        if not model_name_url_block:\n            self.logger.warning(\"No model name and URL\")\n            return\n
        else:\n            url = self.domain + model_name_url_block.get('href')\n            model_name = model_name_url_block.text\n\n
        # Pre-order check\n        if [item.text for item in block.select(\"button\") if item.text == \"Предзаказ\"]:\n            self.logger.info(\"Item '{}' is on pre-order, skipping\".format(model_name))\n            return\n\n
        # Product image link\n        img_url = block.select_one('img.loaded')\n
        if not img_url:\n            self.logger.warning(\"No img url\")\n            return\n
        else:\n            img_url = img_url.get('src')\n\n
        # Product rating\n        rating_block = block.select_one('a.catalog-product__rating.ui-link.ui-link_black')\n
        if not rating_block:\n            rating = 0\n            num_rating = 0\n
        else:\n            rating = float(rating_block.get('data-rating'))\n\n
            # Number of reviews\n            num_rating = re.findall(r'\\d+\\.*\\d*k*', rating_block.text)\n
            if num_rating:\n                num_rating = num_rating[0]\n                num_rating = int(float(num_rating.replace('k', '')) * 1000) if 'k' in num_rating \\\n                    else int(num_rating)\n
            else:\n                num_rating = 0\n\n
        # Product code (data-code is assumed here; the dump contained the corrupted 'databases-code')\n
        product_code = block.get('data-code')\n        if not product_code:\n            self.logger.warning(\"No product code\")\n\n
        # Price\n        price = block.select_one('div.product-buy__price')\n
        if not price:\n            print(\"DNS: NO PRICE !!!!!!\")\n            self.logger.warning(\"No price\")\n            return\n
        else:\n            price = int(re.findall(r'\\d+', price.text.replace(' ', ''))[0])\n\n
        # Parse the model name\n        brand_name, model_name, color, ram, rom = dns_parse_model_name(model_name)\n
        if not brand_name or not model_name or not color or not rom:\n            self.logger.warning(\"No brand name, model name, color or rom\")\n            return\n\n
        # Add the obtained results to the collection\n        self._add_to_pr_result_list(brand_name, model_name, color, price, ram, rom,\n                                    img_url, url, rating, num_rating, product_code)\n\n\n
if __name__ == '__main__':\n    import main\n\n    time_start = time.time()\n
    main.load_allowed_model_names_list_for_base()\n    main.load_exceptions_model_names()\n    main.read_config()\n\n
    parser = DNSParse()\n\n    parser.run_catalog('https://www.dns-shop.ru/catalog/17a8a01d16404e77/smartfony/')\n
    logger.info(f\"Execution time: {time.time() - time_start} s\")\n" }, { "alpha_fraction": 0.5648199319839478, "alphanum_fraction": 0.567451536655426, "avg_line_length": 30.30878257751465, "blob_id": "60065d70469fa80b15a6f98040b77e937efcb14a", "content_id": "f297e0c7476a8d6a7e872fca4114d8a29388a98e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12709, "license_type": "no_license", "max_line_length": 120, "num_lines": 353, "path": "/modules/data_receiver/parsers/parse_base.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "from abc import ABC, abstractmethod\nimport time\nimport configparser\n\n
import bs4\nimport selenium.common.exceptions as se\nfrom selenium import webdriver\n
from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\n
from selenium.webdriver.support.expected_conditions import presence_of_element_located, presence_of_all_elements_located\n
from selenium.webdriver.common.keys import Keys\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\n\n
class ParseBase(ABC):\n    \"\"\"\n    IMPLEMENTATION OF ONE OF THE CORE MODULES OF THE PROJECT - DataReceiver\n    Abstract base class for all parsers that use Selenium\n    \"\"\"\n\n
    def __init__(self, domain, shop, logger, category, is_proxy=False, cur_page=2):\n        self.logger = logger\n        self.container_css_selector = None\n\n
        options = webdriver.ChromeOptions()\n        options.add_argument('--headless')\n        options.add_argument('--disable-blink-features=AutomationControlled')\n\n
        # options.add_argument(\"window-size=1920,1080\")\n        # options.add_argument(\"--disable-notifications\")\n
        if is_proxy:\n            options.add_argument(\"--proxy-server=%s\" % h.get_proxy())\n\n
        try:\n            self.driver = webdriver.Chrome(executable_path=h.WD_PATH, options=options)\n
        except se.WebDriverException as e:\n            self.logger.error(\"COULD NOT INITIALIZE THE WEBDRIVER, {}\".format(e))\n            self.driver = None\n            return\n\n
        self.driver.implicitly_wait(1.5)\n        self.wait = WebDriverWait(self.driver, 20)\n        self.pr_result_list = []\n        self.cur_page = cur_page\n
        # Shop data\n        self.domain = domain\n        self.shop = shop\n        self.category = category\n
        # Config\n        self.config = configparser.ConfigParser()\n        self.config.read('config.ini', encoding=\"utf-8\")\n        self.current_city = self.config.defaults()['current_city']\n        self.wait_between_pages_sec = int(self.config.defaults()['wait_between_pages_sec'])\n\n
    def _wd_find_elem(self, by, xpath):\n        \"\"\"\n        Find a single element, without a timeout\n        \"\"\"\n
        try:\n            result = self.driver.find_element(by, xpath)\n            return result\n        except (se.NoSuchElementException, se.TimeoutException):\n            return None\n\n
    def _wd_find_all_elems(self, by, xpath):\n        \"\"\"\n        Find all elements, without a timeout\n        \"\"\"\n
        try:\n            result = self.driver.find_elements(by, xpath)\n            return result\n        except se.NoSuchElementException:\n            return None\n\n
    def _wd_find_elem_with_timeout(self, by, element):\n        \"\"\"\n        Find an element, with a timeout\n        \"\"\"\n
        try:\n            result = self.wait.until(presence_of_element_located((by, element)))\n            return result\n        except se.TimeoutException:\n            return None\n\n
    def _wd_find_all_elems_with_timeout(self, by, element):\n        \"\"\"\n        Find all elements, with a timeout\n        \"\"\"\n
        try:\n            result = self.wait.until(presence_of_all_elements_located((by, element)))\n            return result\n        except se.TimeoutException:\n            return None\n\n
    def _wd_ac_send_keys(self, element, keys):\n        \"\"\"\n        Send keys to an element via ActionChains\n        \"\"\"\n
        if not element:\n            return False\n\n
        try:\n            ActionChains(self.driver).move_to_element(element).send_keys(keys).perform()\n
        except Exception as e:\n            self.logger.error(\"Could not send the keys via ActionChains, {}\".format(e))\n            return False\n\n        return True\n\n
    def _wd_ac_click_elem(self, element):\n        \"\"\"\n        Wrapper for clicking an element via ActionChains\n        \"\"\"\n
        if not element:\n            return False\n\n
        try:\n            ActionChains(self.driver).move_to_element(element).click().perform()\n
        except Exception as e:\n            self.logger.error(\"Could not click the element via ActionChains, {}\".format(e))\n            return False\n\n        return True\n\n
    def _wd_click_elem(self, element):\n        \"\"\"\n        Wrapper for clicking an element via click\n        \"\"\"\n
        if not element:\n            return False\n\n
        try:\n            element.click()\n            return True\n
        except Exception as e:\n            self.logger.error(\"Could not click the element via click, {}\".format(e))\n            return False\n\n
    def _wd_scroll_down(self, count_press=7, timeout=0.2):\n        \"\"\"\n        Scroll down to load the products on the page\n        \"\"\"\n
        for _ in range(count_press):\n            ActionChains(self.driver).send_keys(Keys.PAGE_DOWN).perform()\n            time.sleep(timeout)\n\n
    def _wd_scroll_up(self, count_press=7, timeout=0.2):\n        \"\"\"\n        Scroll up to load the products on the page\n        \"\"\"\n
        for _ in range(count_press):\n            ActionChains(self.driver).send_keys(Keys.PAGE_UP).perform()\n            time.sleep(timeout)\n\n
    def _multiple_func_call(self, fun, count=3, true_result=True):\n        \"\"\"\n        Call the given function repeatedly until it returns true_result\n        \"\"\"\n
        for i in range(count):\n            if fun() == true_result:\n                break\n            self.logger.warning(\"[{}/{}] Function {} in the MULTI_CALL loop returned NotTrueResult\".format(i + 1, count, fun))\n
        else:\n            self.logger.error(\"All attempts in the MULTI_CALL loop for method {} failed\".format(fun))\n            return False\n\n
        self.logger.info(\"Successful call of method {} in the MULTI_CALL loop\".format(fun))\n        return True\n\n
    def _add_to_pr_result_list(self, brand_name, model_name, color, price, ram, rom,\n                               img_url, url, rating, num_rating, product_code):\n        \"\"\"\n        Append the parsed data of one block to the resulting list of all parsed positions\n        \"\"\"\n
        if 'apple' in brand_name.lower():\n            ram = 0\n\n
        # Add the obtained results to the collection\n        self.pr_result_list.append(h.ParseResult(\n            shop=self.shop,\n            category=self.category.lower(),\n            brand_name=brand_name.lower(),\n            model_name=model_name.lower(),\n            color=color.lower(),\n            price=price,\n            ram=ram,\n            rom=rom,\n            img_url=img_url.lower(),\n            url=url.lower(),\n            rating=rating,\n            num_rating=num_rating,\n            product_code=product_code.lower(),\n        ))\n\n
    def _wd_get_cur_page(self):\n        \"\"\"\n        Get the current page source\n        \"\"\"\n
        try:\n            return self.driver.page_source\n
        except Exception as e:\n            self.logger.error(\"Could not get the page source, {}\".format(e))\n            return None\n\n
    def _wd_close_browser(self):\n        \"\"\"\n        Shut down the browser\n        \"\"\"\n
        self.logger.info(\"Shutting down\")\n        if self.driver:\n            self.driver.quit()\n\n
    def _parse_catalog_page(self, html):\n        \"\"\"\n        Parse the catalog blocks\n        \"\"\"\n
        if not self.container_css_selector:\n            raise AttributeError('self.container_css_selector not initialized in child class.')\n\n
        soup = bs4.BeautifulSoup(html, 'lxml')\n\n
        # Container with the elements\n        container = soup.select(self.container_css_selector)\n        for block in container:\n            self._parse_catalog_block(block)\n        del container\n\n
    def _save_result(self):\n        \"\"\"\n        Save the entire result to a csv file\n        \"\"\"\n
        FileWorker.csv_data.save(path=h.CSV_PATH_RAW + self.shop + '.csv',\n                                 data=self.pr_result_list, namedtuple_type=h.ParseResult)\n\n
    def run_catalog(self, url, cur_page=None):\n        \"\"\"\n        Start the parser for a catalog\n        \"\"\"\n
        if not self.driver:\n            self._wd_close_browser()\n            return None\n\n
        if not self._wd_open_browser_catalog(url):\n            self.logger.error(\"Open browser fail\")\n            self._wd_close_browser()\n            return None\n\n
        if cur_page:\n            self.cur_page = cur_page + 1\n\n
        while True:\n            html = self._wd_get_cur_page()\n            self._parse_catalog_page(html)\n            if not self._wd_next_page():\n                break\n\n
        self._wd_close_browser()\n        self._save_result()\n        return self.pr_result_list\n\n
    def run_product(self, url):\n        \"\"\"\n        Start the parser for a product\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_open_browser_catalog(self, url):\n        \"\"\"\n        Start the browser, load the initial catalog page, select the city\n        \"\"\"\n
        try:\n            self.driver.get(url)\n
        except Exception as e:\n            self.logger.error(\"Could not load the page, {}\".format(e))\n            return False\n\n
        # Wait until the page loads, give it 3 attempts, since on first launch the site often returns an empty page\n
        if not self._multiple_func_call(self._wd_check_load_page_catalog):\n            self.logger.error(\"Failed to load the page in _wd_open_browser [base]\")\n            return False\n\n
        # City selection\n        if not self._wd_city_selection_catalog():\n            self.logger.info(\"Cannot select the city\")\n            return False\n\n        time.sleep(2)\n\n
        # Wait until the page loads\n        if not self._wd_check_load_page_catalog():\n            self.logger.error(\"Failed to load the page in _wd_open_browser [base] (2)\")\n            return False\n\n        return True\n\n
    @abstractmethod\n    def _wd_city_selection_catalog(self):\n        \"\"\"\n        City selection algorithm for all possible situations on the catalog page\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_city_selection_product(self):\n        \"\"\"\n        City selection algorithm for all possible situations on the product page\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_check_load_page_catalog(self):\n        \"\"\"\n        Check by key divs that the catalog page has loaded completely\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_check_load_page_product(self):\n        \"\"\"\n        Check by key divs that the product page has loaded completely\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_open_browser_product(self, url):\n        \"\"\"\n        Start the browser, load the initial product page, select the city\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _wd_next_page(self):\n        \"\"\"\n        Go to the given page num_page via a click (to imitate a user)\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _parse_product_page(self, html, url):\n        \"\"\"\n        Method for parsing the html of a product page\n        \"\"\"\n        pass\n\n
    @abstractmethod\n    def _parse_catalog_block(self, block):\n        \"\"\"\n        Parse the data of a single block\n        \"\"\"\n        pass\n
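\n\n# Illustrative sketch (editor's addition): the minimal shape of a concrete parser built on\n
# ParseBase. Method bodies here are stubs; real implementations live in dns_parse.py etc.\n#\n
# class ExampleShopParse(ParseBase):\n#     def __init__(self):\n
#         super().__init__(domain='https://example.com', shop='example',\n
#                          logger=h.logging.getLogger('exampleparse'), category='смартфоны')\n
#         self.container_css_selector = 'div.product-card'\n#\n
#     def _wd_open_browser_catalog(self, url): return super()._wd_open_browser_catalog(url)\n
#     def _wd_city_selection_catalog(self): return True   # site-specific in practice\n
#     def _wd_city_selection_product(self): pass\n#     def _wd_check_load_page_catalog(self): return True\n
#     def _wd_check_load_page_product(self): pass\n#     def _wd_open_browser_product(self, url): pass\n
#     def _wd_next_page(self): return False               # single-page stub\n
#     def _parse_product_page(self, html, url): pass\n#     def _parse_catalog_block(self, block): pass\n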
" }, { "alpha_fraction": 0.5846552848815918, "alphanum_fraction": 0.5877059102058411, "avg_line_length": 33.87234115600586, "blob_id": "7d06542aaccd2af3147f81f0a17dcb430961fe29", "content_id": "8c178dc2ccf7d865c59625cdec4b8a82d85fdd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7357, "license_type": "no_license", "max_line_length": 178, "num_lines": 188, "path": "/modules/data_sender/telegram/bot_helper.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import ast\nimport csv\nimport time\nfrom datetime import datetime\n\n
import modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\nfrom modules.common.image_creator import ImageCreator\n\nlogger = h.logging.getLogger('Bot')\n\n\n
# -------------------------- STATISTICS -------------------------- #\n\n
def inc_stats_products(dictionary: dict, brand_name, model_name):\n    \"\"\"\n    Update the product statistics dictionary\n    \"\"\"\n
    full_name = \"{} {}\".format(brand_name, model_name)\n
    if full_name in dictionary:\n        dictionary[full_name] += 1\n    else:\n        dictionary[full_name] = 1\n\n\n
def inc_stats_shops(dictionary: dict, shop_list):\n    \"\"\"\n    Update the shop statistics dictionary\n    \"\"\"\n
    for shop_item in shop_list:\n        shop_name = h.SHOPS_NAME_LIST[shop_item - 1][0]\n
        if shop_name in dictionary:\n            dictionary[shop_name] += 1\n        else:\n            dictionary[shop_name] = 1\n\n\n
# -------------------------- REFERRAL LINKS -------------------------- #\n\n
def convert_url_for_ref_link(url):\n    \"\"\"\n    Convert a url into the special form used in referral links\n    \"\"\"\n
    return url.replace(':', '%3A').replace('/', '%2F').strip()\n\n\n
def get_ref_link(url):\n    \"\"\"\n    Get a referral link\n    \"\"\"\n
    # Mvideo\n    if h.DOMAIN_MVIDEO in url:\n        return h.REF_LINK_MVIDEO + convert_url_for_ref_link(url)\n\n
    # MTS\n    if h.DOMAIN_MTS in url:\n        return h.REF_LINK_MTS + convert_url_for_ref_link(url)\n\n
    # Citilink\n    if h.DOMAIN_CITILINK in url:\n        return h.REF_LINK_CITILINK + convert_url_for_ref_link(url)\n\n
    # Eldorado\n    if h.DOMAIN_ELDORADO in url:\n        return h.REF_LINK_ELDORADO + convert_url_for_ref_link(url)\n\n    return url\n\n\n
# -------------------------- DICTIONARIES -------------------------- #\n\n
def load_num_posts():\n    \"\"\"\n    Read the number of all posts and the number of current posts\n    \"\"\"\n
    data_num_post = FileWorker.list_data_int.load(h.NUM_POSTS_IN_TELEGRAM_PATH)\n
    num_all_post, num_actual_post = data_num_post \\\n        if data_num_post and len(data_num_post) == 2 else (0, 0)\n\n
    return num_all_post, num_actual_post\n\n\n
def load_msg_in_telegram_list():\n    \"\"\"\n    Load the data about the messages in the telegram channel. FileWorker is not suitable for this task\n    because the read data needs extra processing\n    \"\"\"\n
    posts_in_telegram_list = []\n\n
    # Message ID,Category,Brand Name,Model Name,Ram,Rom,Price,Avg Actual Price,Img Url,Where Buy List,Hist Min Price,Hist Min Shop,Hist Min Date,Post Datetime,Text Hash,Is Actual\n
    with open(h.MESSAGES_IN_TELEGRAM_LIST_PATH, 'r', encoding='UTF-8') as f:\n        reader = csv.DictReader(f)\n        for row in reader:\n            posts_in_telegram_list.append(h.MessagesInTelegram(\n                message_id=int(row['Message ID']),\n                category=row['Category'],\n                brand_name=row['Brand Name'],\n                model_name=row['Model Name'],\n                ram=int(row['Ram']),\n                rom=int(row['Rom']),\n                price=int(row['Price']),\n                avg_actual_price=float(row['Avg Actual Price']),\n                img_url=row['Img Url'],\n                where_buy_list=ast.literal_eval(row['Where Buy List']),\n                hist_min_price=int(row['Hist Min Price']),\n                hist_min_shop=int(row['Hist Min Shop']),\n                hist_min_date=datetime.strptime(str(row['Hist Min Date']), '%Y-%m-%d %H:%M:%S.%f'),\n                post_datetime=datetime.strptime(str(row['Post Datetime']), '%Y-%m-%d %H:%M:%S.%f'),\n                text_hash=row['Text Hash'],\n                is_actual=(row['Is Actual'] == 'True'),\n            ))\n\n
    return posts_in_telegram_list\n\n\n
# ----- HELPER FUNCTIONS FOR THE OUTDATED-POSTS ALGORITHM ----- #\n\n
def irr_post_search_data_in_stock(act_price_data_list, pr_product_in_stock_list):\n    \"\"\"\n    For outdated posts: from all the data, find only the items that are in stock\n    \"\"\"\n
    pos_price, pos_shop, pos_datetime, pos_color, pos_url = 0, 1, 2, 3, 4\n\n
    act_price_data_in_stock_list = []\n    for act_price_data_item in act_price_data_list:\n        if h.find_in_namedtuple_list(pr_product_in_stock_list, url=act_price_data_item[pos_url],\n                                     limit_one=True):\n            act_price_data_in_stock_list.append(act_price_data_item)\n\n
    return act_price_data_in_stock_list\n\n\n
def irr_post_add_item_in_msg_in_telegram_list(msg_telegram_list, max_element, item, new_hash, is_actual):\n    \"\"\"\n    For outdated posts: add an item to the telegram message list\n    \"\"\"\n
    new_item = h.MessagesInTelegram(message_id=item.message_id, category=item.category, brand_name=item.brand_name,\n                                    model_name=item.model_name, ram=item.ram, rom=item.rom,\n                                    price=item.price, avg_actual_price=item.avg_actual_price,\n                                    img_url=item.img_url, where_buy_list=item.where_buy_list,\n                                    hist_min_price=item.hist_min_price, hist_min_shop=item.hist_min_shop,\n                                    hist_min_date=item.hist_min_date, post_datetime=item.post_datetime,\n                                    text_hash=new_hash, is_actual=is_actual)\n\n
    # Check for list overflow\n    if len(msg_telegram_list) >= max_element:\n        logger.info(\"The telegram post list is full, trying to remove an outdated one\")\n\n
        # Find the index of the first outdated post\n        indx = 0\n        for msg_item in msg_telegram_list:\n            if not msg_item.is_actual:\n                break\n            indx += 1\n\n
        # Remove the old outdated one\n        if indx < len(msg_telegram_list):\n            msg_telegram_list.pop(indx)\n            logger.info(\"Removing element #{}\".format(indx))\n
        else:\n            logger.warning(\"Cannot remove, there are no outdated posts\")\n\n
    msg_telegram_list.append(new_item)\n\n\n
# -------------------- IMAGE -------------------- #\n\n
def create_and_save_img_for_edit_post(img_url, is_actual):\n    \"\"\"\n    Generate an image and save it to disk.\n    Returns the full path to the saved image\n    \"\"\"\n
    img = ImageCreator(img_url)\n    if not img.check():\n        logger.error(\"No IMG in edit post\")\n        return None\n\n
    # Apply the stamp\n    if not is_actual:\n        img.draw_stamp().darken()\n    else:\n        img.lighten()\n\n
    img_name = 'img_{}.jpg'.format(datetime.now().timestamp())\n    img.save_as_jpg(h.IMAGE_FOR_SEND_IN_TELEGRAM_PATH, img_name)\n    time.sleep(1)\n\n
    return h.IMAGE_FOR_SEND_IN_TELEGRAM_PATH + img_name\n
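\n\n# Illustrative sketch (editor's addition): get_ref_link() wraps a shop URL with the configured\n
# referral prefix; for any other domain the URL is returned unchanged. Sample values only.\n#\n
# get_ref_link('https://www.mvideo.ru/products/some-phone')\n
# -> h.REF_LINK_MVIDEO + 'https%3A%2F%2Fwww.mvideo.ru%2Fproducts%2Fsome-phone'\n
# get_ref_link('https://www.dns-shop.ru/product/x')  # -> returned as-is\n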
Из\n этих данных этот класс оставляет только те данные, которые необходимо отправить дальше, на вывод.\n \"\"\"\n\n def __init__(self, pr_data_after_bd_list, pr_parse_result_list):\n \"\"\"\n :param pr_data_after_bd_list: список данных, которые отфильтровал DBInserter в процессе добавления данных в БД\n :param pr_parse_result_list: список данных, которые пришли после DataValidator (до БД)\n \"\"\"\n self.pc_self_result_list = []\n self.pr_price_change_list = pr_data_after_bd_list\n self.pr_parse_result_list = pr_parse_result_list\n\n self.db = DataBase()\n self.config = configparser.ConfigParser()\n self.config.read('config.ini', encoding=\"utf-8\")\n self.min_diff_price_per = float(self.config.defaults()['min_diff_price_per'])\n self.best_shop_for_img_url = (self.config.defaults()['best_shops_for_img_url']).lower().split(', ')\n\n def __check_price_for_benefit(self, price, brand_name, model_name, ram, rom):\n \"\"\"\n Проверка списка товаров с измененной ценой на выгодное предложение\n \"\"\"\n pos_price, pos_shop, pos_datetime, pos_color, pos_url = 0, 1, 2, 3, 4\n null_result = (None, None, None)\n\n # Получить список всех актуальных цен на данную комплектацию: price, id_shop_name, datetime, color, url_product\n act_price_data_list = self.db.execute_read_query(sr.search_actual_prices_by_version_query,\n (brand_name, model_name, ram, rom))\n if not act_price_data_list:\n return null_result\n\n # Определить, данный товар продается только в одном магазине или нет\n is_one_shop = h.is_all_elem_equal_in_tuple_list(act_price_data_list, pos_shop)\n # Поиск исторического минимума цены\n all_price_data_list = self.db.execute_read_query(sr.search_all_prices_by_version_query,\n (brand_name, model_name, ram, rom))\n if not all_price_data_list:\n return null_result\n\n logger.info((\"-\" * 50) + \"\\n\" + \"hist origin: {}\".format(all_price_data_list))\n\n # Если магазин один, то удалить последние добавленные актуальные цены для нормального расчета средней цены\n indx = 0\n if is_one_shop:\n last_datetime = all_price_data_list[0][pos_datetime]\n for item in all_price_data_list:\n if (last_datetime - item[pos_datetime]).total_seconds() < 1:\n indx += 1\n else:\n break\n logger.info('One shop: indx = {}, new hist: {}'.format(indx, all_price_data_list[indx:]))\n hist_min_price = min(all_price_data_list[indx:])\n else:\n hist_min_price = min(all_price_data_list)\n\n # Поиск средней цены для одного магазина или нескольких\n avg_price = ((price + hist_min_price[pos_price]) / 2) if is_one_shop \\\n else sum(item[pos_price] for item in act_price_data_list) / len(act_price_data_list)\n\n logger.info('price = {}, hist_min_price = {}'.format(price, hist_min_price[pos_price]))\n logger.info('is_one_shop: {}'.format(is_one_shop))\n logger.info(\"check_price: len = {}, prices_list = {}\".format(len(act_price_data_list), act_price_data_list))\n logger.info(\"avg_price = {}\".format(avg_price))\n logger.info(\"hist_min_price res = {}\".format(hist_min_price))\n\n # Оставить в списке только товары в наличии (которые есть в списке с результатами всех парсеров)\n act_price_in_stock_data_list = []\n for item in act_price_data_list:\n if h.find_in_namedtuple_list(self.pr_parse_result_list, url=item[pos_url], limit_one=True):\n act_price_in_stock_data_list.append(item)\n\n # Оставить только самые минимальные цены из товаров в наличии\n min_act_price_in_stock_data_list = h.find_min_price_in_prices_list(act_price_in_stock_data_list)\n\n # Сравнение минимальной цены (любой, они равны) со средней. 
Если цена не выгодная - очистить список\n if h.per_num_of_num(min_act_price_in_stock_data_list[0][pos_price], avg_price) < self.min_diff_price_per or \\\n avg_price - min_act_price_in_stock_data_list[0][pos_price] < 1500:\n min_act_price_in_stock_data_list.clear()\n\n logger.info('YES' if min_act_price_in_stock_data_list else 'NO')\n return min_act_price_in_stock_data_list, avg_price, hist_min_price\n\n def __check_prices(self, pr_price_change_list=None):\n \"\"\"\n Запуск проверки товаров с измененной ценой на поиск выгоды\n \"\"\"\n pos_price, pos_shop, pos_datetime, pos_color, pos_url = 0, 1, 2, 3, 4\n\n if not pr_price_change_list:\n pr_price_change_list = self.pr_price_change_list\n\n for item in pr_price_change_list:\n result_list, avg_price, hist_min_price = \\\n self.__check_price_for_benefit(item.price, item.brand_name, item.model_name, item.ram, item.rom)\n\n if not result_list or not avg_price or not hist_min_price:\n continue\n\n for item_result in result_list:\n # Для исключительных ситуаций: проверка, что такого элемента с такой ценой и цветом еще нет в списке\n if h.find_in_namedtuple_list(self.pc_self_result_list, url=item_result[pos_url], limit_one=True):\n continue\n\n # Ссылу на изображение необходимо вытянуть из предпочтительных магазинов\n img_url = None\n for best_shop_item in self.best_shop_for_img_url:\n img_url = h.find_in_namedtuple_list(\n self.pr_parse_result_list,\n brand_name=item.brand_name, model_name=item.model_name, shop=best_shop_item, limit_one=True)\n if img_url and (\"http\" in img_url[0].img_url):\n img_url = img_url[0].img_url\n break\n else:\n img_url = None\n\n self.pc_self_result_list.append(h.PriceChanges(\n shop=item_result[pos_shop],\n category=item.category,\n brand_name=item.brand_name,\n model_name=item.model_name,\n color=item_result[pos_color],\n ram=item.ram,\n rom=item.rom,\n img_url=img_url if img_url else item.img_url,\n url=item_result[pos_url],\n date_time=datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\"),\n price=item_result[pos_price],\n avg_actual_price=int(avg_price),\n hist_min_price=hist_min_price[pos_price],\n hist_min_shop=hist_min_price[pos_shop],\n hist_min_date=hist_min_price[pos_datetime],\n diff_cur_avg=int(avg_price - item_result[pos_price]),\n ))\n\n def run(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Запуск проверки данных\n \"\"\"\n self.db.connect_or_create(\"parser\", \"postgres\", \"1990\", \"127.0.0.1\", \"5432\")\n self.__check_prices()\n self.db.disconnect()\n\n # Сохранение результата\n FileWorker.csv_data.save(h.PRICE_CHANGES_PATH, data=self.pc_self_result_list,\n namedtuple_type=h.PriceChanges)\n\n return self.pc_self_result_list\n" }, { "alpha_fraction": 0.6541679501533508, "alphanum_fraction": 0.6578865647315979, "avg_line_length": 26.117647171020508, "blob_id": "853e0044c692388b3de5ac0cf49037dc47d5214d", "content_id": "36876089d79a0b66eeeecc64c4d86d55110efc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3698, "license_type": "no_license", "max_line_length": 97, "num_lines": 119, "path": "/modules/runner/runner_helper.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import os\nimport requests\nimport configparser\n\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\nlogger = h.logging.getLogger('Runner')\n\n\nBOT_TOKEN = ''\nBOT_CHAT_ID = 0\nCOUNT_CRASH = 0\nMAX_COUNT_CRASH_FOR_ALARM = 3\n\n\ndef load_result_from_csv(name):\n \"\"\"\n Загрузить данные с csv, чтобы не парсить 
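\n\n# Illustrative numeric sketch (editor's addition) of the deal condition used above, with made-up\n
# numbers and assuming h.per_num_of_num returns the percentage gap between the two values: a price\n
# qualifies only if it is at least min_diff_price_per percent below the average AND at least\n
# 1500 rubles below it.\n#\n
# avg_price = 50000; price = 46000; min_diff_price_per = 5.0\n
# percentage gap: (50000 - 46000) / 50000 * 100 = 8.0\n
# 8.0 >= 5.0 and (50000 - 46000) >= 1500 -> the offer is kept\n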
" }, { "alpha_fraction": 0.6541679501533508, "alphanum_fraction": 0.6578865647315979, "avg_line_length": 26.117647171020508, "blob_id": "853e0044c692388b3de5ac0cf49037dc47d5214d", "content_id": "36876089d79a0b66eeeecc64c4d88d55110efc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3698, "license_type": "no_license", "max_line_length": 97, "num_lines": 119, "path": "/modules/runner/runner_helper.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import os\nimport requests\nimport configparser\n\n
import modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\nlogger = h.logging.getLogger('Runner')\n\n\n
BOT_TOKEN = ''\nBOT_CHAT_ID = 0\nCOUNT_CRASH = 0\nMAX_COUNT_CRASH_FOR_ALARM = 3\n\n\n
def load_result_from_csv(name):\n    \"\"\"\n    Load data from csv so the site does not have to be parsed again\n    \"\"\"\n
    return FileWorker.csv_data.load(h.CSV_PATH_RAW + name, namedtuple_type=h.ParseResult)\n\n\n
def read_config():\n    \"\"\"\n    Read data from config.ini\n    \"\"\"\n    global BOT_TOKEN, BOT_CHAT_ID\n\n
    config = configparser.ConfigParser()\n    config.read('config.ini', encoding=\"utf-8\")\n
    h.REBUILT_IPHONE_NAME = ' ' + config.defaults()['rebuilt_iphone_name']\n
    h.IGNORE_WORDS_FOR_COLOR = config['parser']['color_ignore'].lower().split('\\n')\n\n
    h.REF_LINK_MVIDEO = config['admitad']['ref_link_mvideo']\n    h.REF_LINK_MTS = config['admitad']['ref_link_mts']\n    h.REF_LINK_ELDORADO = config['admitad']['ref_link_eldorado']\n    h.REF_LINK_CITILINK = config['admitad']['ref_link_citilink']\n\n
    BOT_TOKEN = config['bot-test']['token']\n    BOT_CHAT_ID = int(config['bot-test']['chat_id'])\n\n\n
def load_data():\n    \"\"\"\n    Read the entire project configuration before runner.run is started\n    \"\"\"\n    global COUNT_CRASH\n\n
    # Read the dictionary of model name exceptions\n    h.EXCEPT_MODEL_NAMES_DICT = FileWorker.dict_data.load(h.EXCEPT_MODEL_NAMES_PATH)\n\n
    # Read the list of model names allowed to be added to the DB\n    h.ALLOWED_MODEL_NAMES_LIST_FOR_BASE = FileWorker.list_data.load(h.LIST_MODEL_NAMES_BASE_PATH)\n\n
    # Read how many times in a row the system has crashed\n    COUNT_CRASH = FileWorker.list_data_int.load(h.CRASH_DATA_PATH)\n
    COUNT_CRASH = COUNT_CRASH[0] \\\n        if COUNT_CRASH and len(COUNT_CRASH) == 1 else 0\n\n
    # Read data from config.ini\n    read_config()\n\n\n
def create_lock_file():\n    \"\"\"\n    Create a lock file that forbids the service bot to read the exceptions file\n    \"\"\"\n
    delete_lock_file()\n    with open(h.UNDEFINED_MODEL_NAME_LIST_LOCK_PATH, 'w') as f:\n        pass\n\n\n
def delete_lock_file():\n    \"\"\"\n    Delete the lock file\n    \"\"\"\n
    if os.path.isfile(h.UNDEFINED_MODEL_NAME_LIST_LOCK_PATH):\n        os.remove(h.UNDEFINED_MODEL_NAME_LIST_LOCK_PATH)\n\n\n
def send_alarm_in_telegram(msg):\n    \"\"\"\n    Send a message to telegram when the COUNT_CRASH counter overflows\n    \"\"\"\n
    method = 'https://api.telegram.org/bot' + BOT_TOKEN + '/sendMessage'\n    r = requests.post(method, data={\n        'chat_id': BOT_CHAT_ID,\n        'text': msg,\n    })\n\n
    if r.status_code != 200:\n        logger.error(\"CANNOT SEND THE MESSAGE TO TELEGRAM\")\n\n\n
def inc_count_crash(shop_name):\n    \"\"\"\n    Increment the system crash counter\n    \"\"\"\n    global COUNT_CRASH\n\n
    logger.error(\"{} crashed, Count Crash = {}\".format(shop_name, COUNT_CRASH))\n    COUNT_CRASH += 1\n\n
    if COUNT_CRASH == MAX_COUNT_CRASH_FOR_ALARM:\n        # send_alarm_in_telegram(\"Fix me\")\n        print(\"!FIX ME!\")\n        return\n\n
    FileWorker.list_data.save(h.CRASH_DATA_PATH, data=COUNT_CRASH)\n\n\n
def clear_count_crash():\n    \"\"\"\n    Reset the system crash counter\n    \"\"\"\n    global COUNT_CRASH\n    COUNT_CRASH = 0\n    FileWorker.list_data.save(h.CRASH_DATA_PATH, data=COUNT_CRASH)\n" }, { "alpha_fraction": 0.5821570754051208, "alphanum_fraction": 0.5847810506820679, "avg_line_length": 38.02702713012695, "blob_id": "504b7f6088c5f35fa6fe3ff8c54e246e5c56fcae", "content_id": "fd78130a91043f3430a6e5c35adff737 20768251", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16954, "license_type": "no_license", "max_line_length": 120, "num_lines": 370, "path": "/modules/data_receiver/parsers/citilink_parse.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport re\n\n
from selenium.webdriver.common.by import By\nfrom modules.data_receiver.parsers.parse_base import ParseBase\n\n
import modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\n
logger = h.logging.getLogger('citilinkparse')\nCITILINK_REBUILT_IPHONE = '\"как новый\"'\n\n\n
def citilink_parse_model_name(name):\n    \"\"\"\n    Parse the model name (extract the brand name, model name and color)\n    \"\"\"\n
    # Guard against malformed names\n    if len(name.split()) < 3 or not name.count(','):\n        return None, None, None\n
    # Remove non-breaking spaces\n    name = name.replace(u'\\xc2\\xa0', u' ')\n    name = name.replace(u'\\xa0', u' ')\n
    # Lowercase\n    name = name.lower()\n
    name = name.replace('dual sim', '').replace('dual cam', '').replace(' lte ', ' ').replace(' nfc ', ' '). \\\n        replace(' 5g ', ' ').replace(' 4g ', ' ').replace(' 3g ', ' ').replace('«', '').replace('»', '')\n
    # Refurbished phones (iPhone only). If the word is present - remove it\n    rebuilt = h.REBUILT_IPHONE_NAME if (CITILINK_REBUILT_IPHONE in name) else ''\n    name = name.replace(CITILINK_REBUILT_IPHONE, '')\n
    # Color\n    color = name[name.rfind(','):].replace(',', '').replace('(product)', '').strip()\n
    # Exception for the listed brands\n    model_code = ''\n    if 'bq' in name or 'blackview' in name or 'alcatel' in name:\n        model_code = ' ' + name[name.find(',') + 1:name.rfind(',')].strip()\n
    # Remove the model codes\n    name = name[:name.find(',')]\n
    # Remove all parentheses\n    brackets = re.findall(r\"\\(.+?\\)\", name)\n    for item in brackets:\n        name = name.replace(item, '')\n
    # Extract the RAM and ROM sizes, if present\n    ram_rom = re.findall(r'\\d*/*\\d+ *(?:gb|tb)', name)\n    ram_rom = ram_rom[0] if ram_rom else ''\n
    # Remove the year, if present\n    year = re.findall(r' 20[1,2]\\d ', name)\n    year = year[0] if year else ''\n
    # Remove extra words from the model name\n    name = name.replace('смартфон', '').replace(ram_rom, '').replace(color, ''). \\\n        replace(year, '').replace('  ', ' ').strip()\n    name += model_code + rebuilt\n\n
    # Check the name against the dictionary of model name exceptions\n    name = h.replace_value_from_dictionary(h.EXCEPT_MODEL_NAMES_DICT, name)\n\n
    # Check the model name against the dictionary of allowed models\n    if not h.find_allowed_model_names(name):\n        logger.info(\"New model detected that is not in the base yet = '{}'\".format(name))\n        FileWorker.list_data.save(h.UNDEFINED_MODEL_NAME_LIST_PATH, data=name, overwrite=False)\n        return None, None, None\n\n
    # Extract the brand name\n    brand_name = name.split()[0]\n    model_name = name.replace(brand_name, '').strip()\n\n
    return brand_name, model_name, color\n
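\n\n# Illustrative sketch (editor's addition): expected behaviour of citilink_parse_model_name on a\n
# made-up Citilink listing title (assuming the model is present in the allowed-names list):\n#\n
#   citilink_parse_model_name('Смартфон APPLE iPhone 11 64Gb, черный')\n
#   -> ('apple', 'iphone 11', 'черный')\n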
\\\n 'ProductCardHorizontal.js--ProductCardInListing.js--ProductCardInWishlist'\n\n def _wd_city_selection_catalog(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций на странице каталога\n \"\"\"\n city = self._wd_find_elem_with_timeout(By.CLASS_NAME, \"MainHeader__city\")\n if not city:\n self.logger.error(\"Не найдено поле с названием города\")\n return False\n\n # Если указан неверный город\n if self.current_city.lower() not in city.text.lower():\n self.logger.info(\"Неверный город\")\n\n # Клик по городу\n if not self._wd_ac_click_elem(city):\n self.logger.error(\"Не могу нажать на кнопку выбора города\")\n return False\n\n self.logger.info(\"Клик по городу\")\n\n # Получить список всех городов и если есть нужный, кликнуть по нему\n city_list = self._wd_find_all_elems_with_timeout(By.CLASS_NAME, \"CitiesSearch__main-cities-list-item\")\n if city_list:\n for item in city_list:\n if self.current_city.lower() in item.text.lower():\n time.sleep(1.5)\n return self._wd_ac_click_elem(item)\n\n self.logger.info(\"Не вижу нужный город в списке, пробую вбить вручную\")\n\n # Поиск поля для ввода города\n input_city = self._wd_find_elem_with_timeout(By.XPATH, \"//input[@type='search']\")\n if not input_city:\n self.logger.error(\"Не найдено поле, куда вводить новый город\")\n return False\n\n # Кликнуть на форму для ввода текста\n time.sleep(1)\n if not self._wd_ac_click_elem(input_city):\n self.logger.error(\"Не могу кликнуть на форму для ввода текста\")\n return False\n\n # Ввод названия города по буквам\n for char in self.current_city:\n self._wd_ac_send_keys(input_city, char)\n time.sleep(0.2)\n\n time.sleep(2)\n\n # Выбор города из сгенерированного списка городов\n input_city_item = self._wd_find_elem_with_timeout(By.XPATH,\n \"//a[@databases-search='{}']\".format(\n self.current_city.lower()))\n if not input_city_item:\n self.logger.error(\"Не найдено элементов при вводе города\")\n return False\n\n # Клик по нему\n if not self._wd_ac_click_elem(input_city_item):\n self.logger.error(\"Не могу нажать на выбранный город\")\n return False\n\n return True\n\n def _wd_city_selection_product(self):\n \"\"\"\n Алгоритм выбора города для всех возмодных ситуаций на странице продукта\n \"\"\"\n pass\n\n def _wd_check_load_page_catalog(self):\n \"\"\"\n Проверка по ключевым div-ам что страница каталога прогружена полностью\n \"\"\"\n # Ожидание прогрузки цен\n if not self._wd_find_elem_with_timeout(By.CLASS_NAME,\n \"ProductCardVerticalPrice__price-current_current-price\" if self.is_grid\n else \"ProductCardHorizontal__price_current-price\"):\n return False\n\n self.logger.info(\"Page loaded\")\n return True\n\n def _wd_check_load_page_product(self):\n \"\"\"\n Проверка по ключевым div-ам что страница продукта прогружена полностью\n \"\"\"\n pass\n\n def _wd_select_list_view(self):\n \"\"\"\n Переключение каталога в вид списка\n \"\"\"\n # Если есть этот тег в html коде, значит сейчас стоит табличный вид, переключаем на список\n if self._wd_find_elem(By.XPATH,\n \"//label[@class='ProductCardCategoryList__icon ProductCardCategoryList__icon_grid \"\n \"ProductCardCategoryList__icon-active']\"):\n\n # Переключение с табличного вида на список\n listing_views = self._wd_find_elem_with_timeout(By.XPATH,\n \"//span[@class='gray-icon IconFont IconFont_size_m \"\n \"IconFont_list']\")\n if not listing_views:\n self.logger.error(\"Не могу найти listing views\")\n return False\n\n # Клик\n if not self._wd_ac_click_elem(listing_views):\n self.logger.error(\"Не могу нажать на кнопку в 
__select_list_view\")\n return False\n\n self.is_grid = False\n\n return True\n\n def _wd_open_browser_catalog(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы каталога, выбор города\n \"\"\"\n if not super()._wd_open_browser_catalog(url=url):\n return False\n\n # Переключение на отображение товаров в виде списка\n if not self._wd_select_list_view():\n self.logger.error(\"Не смог переключить отображение товара в виде списока\")\n return False\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_open_browser (2)\")\n return False\n\n return True\n\n def _wd_open_browser_product(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы продукта, выбор города\n \"\"\"\n pass\n\n def _wd_next_page(self):\n \"\"\"\n Переход на заданную страницу num_page через клик (для имитации пользователя)\n \"\"\"\n for num_try in range(3):\n\n if num_try and not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n # Поиск следующей кнопки страницы\n num_page_elem = self._wd_find_elem(By.XPATH,\n f\"//a[@databases-page='{self.cur_page}']\")\n if not num_page_elem:\n self.logger.info(\"Достигнут конец каталога\")\n return False\n\n # Клик - переход на следующую страницу\n if not self._wd_ac_click_elem(num_page_elem):\n self.logger.error(\"Не могу кликнуть на страницу в __wd_next_page\")\n self.driver.refresh()\n continue\n\n # Специальная задержка между переключениями страниц для имитации юзера\n time.sleep(self.wait_between_pages_sec)\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n no_in_stock = self._wd_find_all_elems(By.XPATH, '//span[contains(text(), \"Узнать о поступлении\")]')\n if no_in_stock and len(no_in_stock) == 48:\n self.logger.info(\"Вся страница неактуальна, выход\")\n return False\n\n self.cur_page += 1\n return True\n else:\n self.logger.error(\"!! 
После 3 попыток не получилось переключить страницу #{} !!\".format(self.cur_page))\n return False\n\n def _parse_product_page(self, html, url):\n \"\"\"\n Метод для парсинга html страницы продукта\n \"\"\"\n pass\n\n def _parse_catalog_block(self, block):\n \"\"\"\n Метод для парсинга html страницы товара\n \"\"\"\n\n # Название модели\n full_name = block.select_one('a.ProductCardHorizontal__title.Link.js--Link.Link_type_default')\n if not full_name:\n self.logger.warning(\"No model name and URL\")\n return\n else:\n url = full_name.get('href')\n full_name = full_name.text.replace('\\n', '').replace(' ', ' ').strip()\n\n # Проверка на наличие\n if [item.text for item in block.select('button[type=\"button\"]') if \"Узнать о поступлении\" in item.text]:\n self.logger.info(\"Товара '{}' нет в наличии, пропуск\".format(full_name))\n return\n\n # Исключение\n if 'clevercel' in full_name.lower():\n self.logger.info('CLEVERCEL - Skip')\n return\n\n # URL\n if not url:\n self.logger.warning(\"No URL\")\n return\n else:\n url = self.domain + url\n\n # Ссылка на изображение товара\n img_url = block.select_one('div.ProductCardHorizontal__picture-hover_part.'\n 'js--ProductCardInListing__picture-hover_part')\n if not img_url:\n self.logger.warning(\"No img url\")\n return\n else:\n img_url = img_url.get('data-src')\n\n # Рейтинг товара и на основании скольки отзывов построен\n rating, num_rating = 0, 0\n rating_and_num_rating = block.select(\n 'div.Tooltip__content.js--Tooltip__content.ProductCardHorizontal__tooltip__content.Tooltip__content_center')\n if rating_and_num_rating:\n for item in rating_and_num_rating:\n if 'рейтинг' in item.text.lower():\n rating = float(re.findall(r'\\d+.\\d+', item.text)[0].replace(',', '.'))\n if 'отзыв' in item.text.lower():\n num_rating = int(re.findall(r'\\d+', item.text)[0])\n\n # Код продукта\n product_code = \"None\"\n\n # RAM, ROM\n ram, rom = 0, 0\n characteristics = block.select('li.ProductCardHorizontal__properties_item')\n if not characteristics:\n self.logger.error(\"Нет характеристик\")\n return\n else:\n for item in characteristics:\n if 'оперативн' in item.text.lower():\n ram = int(re.findall(r'\\d+', item.text)[0])\n if 'встроенн' in item.text.lower():\n rom = int(re.findall(r'\\d+', item.text)[0])\n\n # Цена\n price = block.select_one('span.ProductCardHorizontal__price_current-price')\n if not price:\n self.logger.warning(\"No price\")\n return\n else:\n price = int(re.findall(r'\\d+', price.text.replace(' ', ''))[0])\n\n # Парсинг названия модели\n brand_name, model_name, color = citilink_parse_model_name(full_name)\n if not brand_name or not model_name or not color:\n self.logger.warning(\"No brand name, model name or color\")\n return\n\n # Добавление полученных результатов в коллекцию\n self._add_to_pr_result_list(brand_name, model_name, color, price, ram, rom,\n img_url, url, rating, num_rating, product_code)\n\n\nif __name__ == '__main__':\n import main\n\n time_start = time.time()\n main.load_allowed_model_names_list_for_base()\n main.load_exceptions_model_names()\n main.read_config()\n\n parser = CitilinkParse()\n parser.run_catalog('https://www.citilink.ru/catalog/mobile/smartfony/')\n logger.info(f\"Время выполнения: {time.time() - time_start} сек\")\n" }, { "alpha_fraction": 0.6104835271835327, "alphanum_fraction": 0.6168097853660583, "avg_line_length": 32.530303955078125, "blob_id": "bb0972b8c2d7c98f4f638a0e48b3bf138c4b6562", "content_id": "43b5a40b0d222549ded98b14ab64a390a8f2893a", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 4990, "license_type": "no_license", "max_line_length": 112, "num_lines": 132, "path": "/modules/runner/runner.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "from time import time\n\nimport modules.runner.runner_helper as rh\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\nfrom modules.data_receiver.parsers.dns_parse import DNSParse\nfrom modules.data_receiver.parsers.mvideo_parse import MVideoParse\nfrom modules.data_receiver.parsers.mts_parse import MTSParse\nfrom modules.data_receiver.parsers.eldorado_parse import EldoradoParse\nfrom modules.data_receiver.parsers.citilink_parse import CitilinkParse\nfrom modules.data_validator.data_validator import DataValidator\nfrom modules.data_checker.data_checker import DataChecker\nfrom modules.db_inserter.db_inserter import DbInserter\nfrom modules.data_sender.telegram.bot import Bot as DataSender\n\nlogger = h.logging.getLogger('Runner')\n\n\nclass Runner:\n def __init__(self):\n self.data_receiver_result_list = []\n self.data_validator_result_list = []\n self.db_inserter_result_list = []\n self.data_checker_result_list = []\n\n self.setup()\n\n @staticmethod\n def setup():\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Конфигурация проекта\n \"\"\"\n h.del_old_logs()\n rh.load_data()\n\n def __run_one_parser(self, parser_class, url, name=\"\"):\n \"\"\"\n Запустить один парсер\n \"\"\"\n parser = parser_class()\n result = parser.run_catalog(url=url)\n # result = rh.load_result_from_csv(\"mts.csv\")\n if not result:\n rh.inc_count_crash(name)\n return\n\n self.data_receiver_result_list.extend(result)\n\n def receiver_stage(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Этап 1: Получение сырых данных\n \"\"\"\n rh.create_lock_file()\n\n self.__run_one_parser(MVideoParse, name=\"Мвидео\",\n url=\"https://www.mvideo.ru/smartfony-i-svyaz-10/smartfony-205?sort=price_asc\")\n\n # self.__run_one_parser(MTSParse, name=\"МТС\",\n # url=\"https://shop.mts.ru/catalog/smartfony/\")\n #\n # self.__run_one_parser(DNSParse, name=\"ДНС\",\n # url=\"https://www.dns-shop.ru/catalog/17a8a01d16404e77/smartfony/\")\n #\n # self.__run_one_parser(CitilinkParse, name=\"Ситилинк\",\n # url=\"https://www.citilink.ru/catalog/mobile/smartfony/\")\n #\n # self.__run_one_parser(EldoradoParse, name=\"Эльдорадо\",\n # url=\"https://www.eldorado.ru/c/smartfony/\")\n\n rh.delete_lock_file()\n rh.clear_count_crash()\n FileWorker.csv_data.save(h.CSV_PATH, data=self.data_receiver_result_list, namedtuple_type=h.ParseResult)\n\n def validator_stage(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Этап 2: Валидация сырых данных\n \"\"\"\n validator = DataValidator(self.data_receiver_result_list)\n self.data_validator_result_list = validator.run()\n\n def inserter_stage(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Этап 3: Добавление валидных данных в БД и выборка по определенным критериям\n \"\"\"\n inserter = DbInserter(self.data_validator_result_list)\n self.db_inserter_result_list = inserter.run()\n\n def checker_stage(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Этап 4: Выборка данных для отправки после выборки с БД\n \"\"\"\n checker = DataChecker(self.db_inserter_result_list, self.data_validator_result_list)\n self.data_checker_result_list = checker.run()\n\n def sender_stage(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Этап 5: Отправка данных\n \"\"\"\n with DataSender() as bot:\n bot.checking_irrelevant_posts(self.data_validator_result_list)\n bot.send_posts(self.data_checker_result_list)\n\n 
def run(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Запуск всего проекта\n \"\"\"\n # result_list = load_result_from_csv(\"goods2.csv\")\n time_start = time()\n\n # Этап 1: Получение сырых данных\n self.receiver_stage()\n\n # Этап 2: Валидация сырых данных\n self.validator_stage()\n\n # Этап 3: Добавление валидных данных в БД и выборка по определенным критериям\n self.inserter_stage()\n\n # Этап 4: Выборка данных для отправки после выборки с БД\n self.checker_stage()\n\n # Этап 5: Отправка данных\n self.sender_stage()\n\n logger.info(f\"Время выполнения: {time() - time_start} сек\")\n" }, { "alpha_fraction": 0.5743376016616821, "alphanum_fraction": 0.5802441239356995, "avg_line_length": 39.03828811645508, "blob_id": "8d521cf8f2aeaded44df063bb8ba39a6ec2190a3", "content_id": "67606dfad0f55c0cde6ce477e17978d87ee9850f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21195, "license_type": "no_license", "max_line_length": 120, "num_lines": 444, "path": "/modules/data_receiver/parsers/mvideo_parse.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import re\nimport time\n\nimport selenium.common.exceptions as se\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as ec\n\nfrom modules.data_receiver.parsers.parse_base import ParseBase\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\nlogger = h.logging.getLogger('mvideoparse')\nMVIDEO_REBUILT_IPHONE = ' восст.'\n\n\ndef mvideo_parse_model_name(name):\n \"\"\"\n Парсинг названия модели (получить название модели, цвет и ROM)\n \"\"\"\n # Защита от неправильных названий\n if len(name.split()) < 5:\n return None, None, None\n # Убираем неразрывные пробелы\n name = name.replace(u'\\xc2\\xa0', u' ')\n name = name.replace(u'\\xa0', u' ')\n # Восстановленные телефоны (только для iphone). 
Если есть слово - удалить\n rebuilt = h.REBUILT_IPHONE_NAME if (MVIDEO_REBUILT_IPHONE in name.lower()) else ''\n name = name.replace(MVIDEO_REBUILT_IPHONE, '')\n # Оборачивание скобками названия модели, если их не было\n last_word = name.split()[-1]\n if last_word.isupper() and \\\n not ('(' in last_word) and \\\n not (')' in last_word):\n name = name.replace(last_word, '({})'.format(last_word))\n # Понижение регистра\n name = name.lower()\n # Удалить nfc и 5g\n name = name.replace(' nfc ', ' ').replace(' 5g ', ' ')\n # Удалить все скобки\n brackets = re.findall(r\"\\(.+?\\)\", name)\n for item in brackets:\n name = name.replace(item, '')\n # Удалить год, если есть\n year = re.findall(r' 20[1,2]\\d ', name)\n year = year[0] if year else ''\n # Получить размер ROM\n rom = re.findall(r'\\d*[gb]*\\+*\\d+(?:gb|tb)', name)\n rom = (rom[0]) if rom else \"\"\n # Получить ЦВЕТ\n # Получить 2 слова цвета\n color1, color2 = name.split()[-2:] if name.split()[-1] != rom \\\n else name.split()[-3:-1]\n # Если первое слово цвета состоит только из букв и длиннее 2 символов и отсутствует в игнор-листе - добавить\n # к итоговому цвету\n color1 = color1 if (\n color1.isalpha() and len(color1) > 2 and not (color1.strip() in h.IGNORE_WORDS_FOR_COLOR)) else \"\"\n color = color1 + \" \" + color2 if (color1.isalpha() and len(color1) > 2) else color2\n # Удалить первую часть\n name = name.replace('смартфон', '').replace(rom, '').replace(year, '').replace(' ', ' ')\n # Убрать вторую часть лишних слов из названия\n name = name.replace(color, '').replace(' ', ' ').strip()\n name += rebuilt\n\n # Проверка названия в словаре исключений названий моделей\n name = h.replace_value_from_dictionary(h.EXCEPT_MODEL_NAMES_DICT, name)\n\n # Проверка названия модели в словаре разрешенных моделей\n if not h.find_allowed_model_names(name):\n logger.info(\"Обнаружена новая модель, отсутствующая в базе = '{}'\".format(name))\n FileWorker.list_data.save(h.UNDEFINED_MODEL_NAME_LIST_PATH, data=name, overwrite=False)\n return None, None, None\n\n # Получить название бренда\n brand_name = name.split()[0]\n model_name = name.replace(brand_name, '').strip()\n\n return brand_name, model_name, color\n\n\nclass MVideoParse(ParseBase):\n \"\"\"\n РЕАЛИЗАЦИЯ ОДНОГО ИЗ ОСНОВНЫХ МОДУЛЕЙ ПРОЕКТА - DataReceiver\n Реализация базового класса ParseBase\n Парсит данные с магазина МВидео\n \"\"\"\n def __init__(self):\n super().__init__(domain=\"https://www.mvideo.ru\", shop=\"mvideo\", logger=logger, category=\"смартфоны\")\n self.container_css_selector = 'div.product-cards-layout__item'\n\n def _wd_city_selection_catalog(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций для страницы каталога\n \"\"\"\n city = self._wd_find_elem_with_timeout(By.XPATH, \"//span[@class='location-text top-navbar-link ng-tns-c147-1']\")\n if not city:\n self.logger.error(\"Не найдено поле с названием города\")\n return False\n\n # Если указан неверный город\n if self.current_city.lower() not in city.text.lower():\n self.logger.info(\"Неверный город\")\n\n # Клик по городу\n if not self._wd_ac_click_elem(city):\n self.logger.error(\"Не могу нажать на кнопку выбора города\")\n return False\n\n self.logger.info(\"Клик по городу\")\n\n # Получить список всех городов и если есть нужный, кликнуть по нему\n city_list = self._wd_find_all_elems_with_timeout(By.CLASS_NAME, \"location-select__location\")\n if city_list:\n for item in city_list:\n if self.current_city.lower() in item.text.lower():\n time.sleep(1.5)\n return self._wd_ac_click_elem(item)\n        
else:\n self.logger.warning(\"Нет списка городов, попробую вбить вручную\")\n\n # Поиск поля для ввода города\n input_city = self._wd_find_elem_with_timeout(By.CLASS_NAME, \"location-select__input-wrap\")\n if not input_city:\n self.logger.error(\"Не найдено поле, куда вводить новый город\")\n return False\n\n # Кликнуть на форму для ввода текста\n time.sleep(1)\n if not self._wd_ac_click_elem(input_city):\n self.logger.error(\"Не могу нажать на форму ввода текста\")\n return False\n\n # Ввод названия города по буквам\n for char in self.current_city:\n self._wd_ac_send_keys(input_city, char)\n time.sleep(0.2)\n\n # Если не поставить задержку, окно закрывает, а город не применяет\n time.sleep(1.5)\n\n # Выбор города из сгенерированного списка городов\n input_city_item = self._wd_find_elem_with_timeout(By.XPATH, \"//li[@data-index='0']\")\n if not input_city_item:\n self.logger.error(\"Не найдено элементов при вводе города\")\n return False\n\n # Клик по нему\n if not self._wd_ac_click_elem(input_city_item):\n self.logger.error(\"Не могу нажать на выбранный город\")\n return False\n\n return True\n\n def _wd_city_selection_product(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций для страницы продукта\n \"\"\"\n pass\n\n def _wd_check_load_page_catalog(self):\n \"\"\"\n Проверка по ключевым div-ам что страница каталога прогружена полностью\n \"\"\"\n # Ожидание прогрузки пагинации\n if not self._wd_find_elem_with_timeout(By.CLASS_NAME, \"pagination\"):\n return False\n\n # Ожидание прогрузки цен\n if not self._wd_find_elem_with_timeout(By.CLASS_NAME, \"price__main-value\"):\n return False\n\n # Ожидание прогрузки изображения товара\n if not self._wd_find_elem_with_timeout(By.CLASS_NAME, \"product-picture__img\"):\n return False\n\n # Ожидание прогрузки переключателя вида товара\n if not self._wd_find_elem_with_timeout(By.XPATH, \"//div[@class='listing-view-switcher__inner-area']\"):\n return False\n\n self.logger.info(\"Page loaded\")\n return True\n\n def _wd_check_load_page_product(self):\n \"\"\"\n Проверка по ключевым div-ам что страница товара прогружена полностью\n \"\"\"\n pass\n\n def __wd_select_list_view(self):\n \"\"\"\n Переключение на отображение товаров в виде списка\n \"\"\"\n # Если есть этот тег в html коде, значит сейчас стоит табличный вид, переключаем на список\n if self._wd_find_elem(By.XPATH,\n \"//div[@class='listing-view-switcher__pointer listing-view-switcher__pointer--grid']\"):\n # Переключение с табличного вида на список\n listing_views = self._wd_find_elem_with_timeout(By.XPATH,\n \"//div[@class='listing-view-switcher__inner-area']\")\n if not listing_views:\n self.logger.error(\"Не могу найти listing views\")\n return False\n\n # Клик\n if not self._wd_ac_click_elem(listing_views):\n self.logger.error(\"Не могу нажать на кнопку в __select_list_view\")\n return False\n\n # Но если нет и тега list (вид списка) - то ошибка\n elif not self._wd_find_elem(By.XPATH,\n \"//div[@class='listing-view-switcher__pointer \"\n \"listing-view-switcher__pointer--list']\"):\n self.logger.error(\"Не вижу тегов для переключения вида товара\")\n return False\n\n return True\n\n def __wd_mvideo_switch_num_prod_in_catalog(self):\n \"\"\"\n Метод только для мвидео. 
Переключает кол-во отображаемых товаров на\n странице каталога с 24 до 72\n \"\"\"\n # Найти кнопку выбора кол-ва товаров на странице\n but_show24 = self._wd_find_elem_with_timeout(By.XPATH, \"//span[contains(text(),'Показывать по 24')]\")\n if but_show24:\n self._wd_ac_click_elem(but_show24)\n item_show72 = self._wd_find_elem_with_timeout(By.XPATH, \"//div[contains(text(),'Показывать по 72')]\")\n\n # Переключиться на 72 товара на странице\n if item_show72:\n self._wd_ac_click_elem(item_show72)\n\n def _wd_open_browser_catalog(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы каталога, выбор города\n \"\"\"\n if not super()._wd_open_browser_catalog(url):\n return False\n\n # Переключение на отображение товаров в виде списка\n if not self.__wd_select_list_view():\n self.logger.error(\"Не смог переключить отображение товара в виде списка\")\n return False\n\n self._wd_scroll_down(count_press=13)\n self.__wd_mvideo_switch_num_prod_in_catalog()\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_open_browser (2)\")\n return False\n\n # Скролл\n self._wd_scroll_down(count_press=35)\n return True\n\n def _wd_open_browser_product(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы парсинга, выбор города\n \"\"\"\n pass\n\n def _wd_next_page(self):\n \"\"\"\n Переход на заданную страницу num_page через клик (для имитации пользователя)\n \"\"\"\n for num_try in range(3):\n\n if num_try and not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n # Поиск следующей кнопки страницы\n num_page_elem = self._wd_find_elem(By.XPATH, \"//li[@class='page-item number-item ng-star-inserted']/\"\n \"a[text()={}]\".format(self.cur_page))\n if not num_page_elem:\n self.logger.info(\"Достигнут конец каталога\")\n return False\n\n # Клик - переход на следующую страницу\n if not self._wd_ac_click_elem(num_page_elem):\n self.logger.error(\"Не могу кликнуть на страницу в __wd_next_page\")\n return False\n\n # Специальная задержка между переключениями страниц для имитации юзера\n time.sleep(self.wait_between_pages_sec)\n\n # Скролл вниз\n self._wd_scroll_down(count_press=35)\n\n no_in_stock = self._wd_find_all_elems(By.XPATH, '//div[contains(text(), \"Нет в наличии\")]')\n if no_in_stock and len(no_in_stock) == 72:\n self.logger.info(\"Вся страница неактуальна, выход\")\n return False\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (2)\")\n self.driver.refresh()\n continue\n\n # Особенность МВидео - при переключении страницы, пока сайт ждет ответ от сервера,\n # оставляет старые данные с эффектом размытия. Ждем, пока они не исчезнут\n try:\n self.wait.until_not(ec.presence_of_element_located((By.XPATH, \"//a[@href='{}']\".format(\n self.pr_result_list[-5].url))))\n except se.TimeoutException:\n self.logger.error('Не пропадает телефон с прошлой страницы, не могу прогрузить текущую')\n self.driver.refresh()\n continue\n except IndexError:\n self.logger.error(\n 'По непонятной причине список pr_result_list[-5] оказался пуст, выход за границы списка')\n return False\n\n self.cur_page += 1\n return True\n else:\n self.logger.error(\"!! 
После 3 попыток не получилось переключить страницу #{} !!\".format(self.cur_page))\n return False\n\n def _parse_product_page(self, html, url):\n \"\"\"\n Метод для парсинга html страницы продукта\n \"\"\"\n pass\n\n def _parse_catalog_block(self, block):\n \"\"\"\n Метод для парсинга html страницы товара\n \"\"\"\n # Название модели и URL\n model_name_url_block = block.select_one('a.product-title__text')\n\n # Проверка на баг мвидео - наличие в названии модели фразы PDA\n if model_name_url_block and ('pda' in model_name_url_block.text.lower()):\n self.logger.warning(\"PDA detected\")\n return\n\n if not model_name_url_block:\n self.logger.warning(\"No model name and URL\")\n return\n else:\n url = model_name_url_block.get('href')\n full_name = model_name_url_block.text.replace('\\n', '').strip()\n\n # Проверка наличия кнопки 'В корзину'\n if not [item.text for item in block.select('span') if (\"В корзину\" in item.text)]:\n self.logger.info(\"Нет кнопки 'В корзину', {} {}\".format(full_name, url))\n return\n\n # Проверка на предзаказ\n if [item.text for item in block.select(\"span.button__label.ng-star-inserted\") if item.text == \"Предзаказ\"]:\n self.logger.info(\"Товар '{}' по предзаказу, пропуск\".format(full_name))\n return\n\n # Проверка на наличие\n if [item.text for item in block.select(\"div.product-notification\") if \"Нет в наличии\" in item.text]:\n self.logger.info(\"Товара '{}' нет в наличии, пропуск\".format(full_name))\n return\n\n # Ссылка на изображение товара\n img_url = block.select_one('img.product-picture__img.product-picture__img--list')\n if not img_url:\n self.logger.warning(\"No img url\")\n return\n else:\n img_url = img_url.get('src')\n if img_url.startswith(\"//\"):\n img_url = \"https:\" + img_url\n\n # Рейтинг товара\n rating = block.select_one('span.stars-container')\n if not rating:\n rating = 0\n else:\n rating = re.findall(r'\\d+.\\d+', rating.text)\n rating = rating[0] if rating else 0\n\n # На основании скольки отзывов построен рейтинг\n num_rating = block.select_one('span.product-rating__feedback.product-rating__feedback--with-link')\n if not num_rating:\n num_rating = 0\n else:\n num_rating = re.findall(r'\\d+', num_rating.text)\n num_rating = num_rating[0] if num_rating else 0\n\n # Парсинг значений RAM и ROM\n ram, rom = 0, 0\n specifications = block.select('li.product-feature-list__item.product-feature-list__item--undefined')\n if not specifications:\n self.logger.warning(\"No RAM and ROM\")\n return\n else:\n for item in specifications:\n if \"ram\" in item.text.lower():\n ram = int(re.findall(r'\\d+', item.text)[0])\n if \"rom\" in item.text.lower():\n rom = int(re.findall(r'\\d+', item.text)[0])\n\n # Парсинг цен\n promo_price = block.select_one('p.price__main-value.price__main-value--old')\n # Если есть блок акции - берем цену с него\n if promo_price:\n price = int(re.findall(r'\\d+', promo_price.text.replace(u'\\xa0', ''))[0])\n else:\n price = block.select_one('p.price__main-value')\n if not price:\n self.logger.warning(\"No price\")\n return\n else:\n price = int(re.findall(r'\\d+', price.text.replace(u'\\xa0', ''))[0])\n\n # Код продукта\n if len(url) > 8:\n product_code = url[-8:]\n else:\n self.logger.warning(\"No product code\")\n return\n\n # Парсинг названия модели\n brand_name, model_name, color = mvideo_parse_model_name(full_name)\n if not brand_name or not model_name or not color:\n self.logger.warning(\"No brand name, model name, color or not in the list 
of allowed\")\n return\n\n # Добавление полученных результатов в коллекцию\n self._add_to_pr_result_list(brand_name, model_name, color, price, ram, rom,\n img_url, url, rating, num_rating, product_code)\n\n\nif __name__ == '__main__':\n import main\n\n time_start = time.time()\n # main.load_allowed_model_names_list_for_base()\n # main.load_exceptions_model_names()\n # main.read_config()\n\n parser = MVideoParse()\n parser.run_catalog('https://www.mvideo.ru/smartfony-i-svyaz-10/smartfony-205?sort=price_asc')\n logger.info(f\"Время выполнения: {time.time() - time_start} сек\")\n" }, { "alpha_fraction": 0.5685046315193176, "alphanum_fraction": 0.5742588639259338, "avg_line_length": 37.242340087890625, "blob_id": "fcabc39633e96b70e1c363d2df7842a90ccba6ad", "content_id": "2f7709bec369493cdd20af71784a0df44e720be0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16285, "license_type": "no_license", "max_line_length": 142, "num_lines": 359, "path": "/modules/data_receiver/parsers/mts_parse.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport re\n\nfrom selenium.webdriver.common.by import By\nfrom modules.data_receiver.parsers.parse_base import ParseBase\n\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\nlogger = h.logging.getLogger('mtsparse')\n\n\ndef mts_parse_model_name(name):\n \"\"\"\n Парсинг названия модели (получить название модели, цвет и ROM)\n \"\"\"\n # Защита от неправильных названий\n if len(name.split()) < 3:\n return \"error\", \"error\", \"error\", 0, 0\n # Убираем неразрывные пробелы\n name = name.replace(u'\\xc2\\xa0', u' ')\n name = name.replace(u'\\xa0', u' ')\n # Проверка названия в словаре исключений названий моделей\n name = h.find_and_replace_except_model_name(name)\n # Понижение регистра\n name = name.lower()\n name = name.replace('dual sim', '').replace('lte', '').replace(' nfc ', ' ').\\\n replace(' 5g ', ' ').replace('«', '').replace('»', '')\n # Удалить все скобки\n brackets = re.findall(r\"\\(.+?\\)\", name)\n for item in brackets:\n name = name.replace(item, '')\n # Только для самсунгов - удалить код модели\n samsung_code = re.findall(r'samsung ([\\w+]*?) galaxy', name)\n samsung_code = samsung_code[0] if samsung_code else ''\n # Получить размер RAM и ROM, если есть\n ram_rom = re.findall(r'\\d*/*\\d+ *(?:gb|tb)', name)\n rom, ram = 0, 0\n if ram_rom:\n ram_rom = ram_rom[0]\n if '/' in ram_rom:\n ram_rom_digit = re.findall(r'\\d+', ram_rom)\n ram = int(ram_rom_digit[0])\n rom = int(ram_rom_digit[1])\n else:\n ram = 0\n rom = int(re.findall(r'\\d+', ram_rom)[0])\n else:\n ram_rom = ''\n # Удалить год, если есть\n year = re.findall(r' 20[1,2]\\d ', name)\n year = year[0] if year else ''\n # Получить 2 слова цвета\n color1, color2 = name.split()[-2:] if name.split()[-1] != ram_rom \\\n else name.split()[-3:-1]\n # Если первое слово цвета состоит только из букв и длиннее 2 символов - добавить к итоговому цвету\n color = color1 + \" \" + color2 if (color1.isalpha() and len(color1) > 2) else color2\n # Удалить лишние слова в названии модели\n name = name.replace(ram_rom, '').replace(color, '').replace(year, ''). 
\\\n replace(samsung_code, '').replace(' ', ' ').strip()\n\n # Проверка названия в словаре исключений названий моделей\n name = h.replace_value_from_dictionary(h.EXCEPT_MODEL_NAMES_DICT, name)\n\n # Проверка названия модели в словаре разрешенных моделей\n if not h.find_allowed_model_names(name):\n logger.info(\"Обнаружена новая модель, отсутствующая в базе = '{}'\".format(name))\n h.save_undefined_model_name(name)\n FileWorker.list_data.save(h.UNDEFINED_MODEL_NAME_LIST_PATH, data=name, overwrite=False)\n return None, None, None, 0, 0\n\n # Получить название бренда\n brand_name = name.split()[0]\n model_name = name.replace(brand_name, '').strip()\n\n return brand_name, model_name, color, ram, rom\n\n\nclass MTSParse(ParseBase):\n \"\"\"\n РЕАЛИЗАЦИЯ ОДНОГО ИЗ ОСНОВНЫХ МОДУЛЕЙ ПРОЕКТА - DataReceiver\n Реализация базового класса ParseBase\n Парсит данные с магазина МТС\n \"\"\"\n def __init__(self):\n super().__init__(domain=\"https://www.shop.mts.ru\", shop=\"mts\", logger=logger, category=\"смартфоны\")\n self.container_css_selector = 'div.card-product-wrapper.card-product-wrapper--catalog'\n\n def _wd_city_selection_catalog(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций на странице каталога\n \"\"\"\n city = self._wd_find_elem_with_timeout(By.XPATH, \"//span[@class='current-region__text']\")\n if not city:\n self.logger.error(\"Не найдено поле с названием города\")\n return False\n\n # Если указан неверный город\n if not (self.current_city.lower() in city.text.lower()):\n self.logger.info(\"Неверный город\")\n\n # Клик по городу\n if not self._wd_ac_click_elem(city):\n self.logger.error(\"Не могу нажать на кнопку выбора города\")\n return False\n\n # Получить список всех городов и если есть нужный, кликнуть по нему\n city_list = self._wd_find_all_elems_with_timeout(By.CLASS_NAME, \"default-regions__item\")\n if city_list:\n for item in city_list:\n if self.current_city.lower() in item.text.lower():\n time.sleep(1.5)\n return self._wd_ac_click_elem(item)\n else:\n self.logger.warning(\"Нет списка городов, попробую вбить вручную\")\n\n # Поиск поля для ввода города\n input_city = self._wd_find_elem_with_timeout(By.XPATH, \"//div[@class='select-region-form__fieldset \"\n \"input-group__fieldset']\")\n if not input_city:\n self.logger.error(\"Не найдено поле, куда вводить новый город\")\n return False\n\n time.sleep(1)\n\n # Кликнуть на форму для ввода текста\n if not self._wd_ac_click_elem(input_city):\n self.logger.error(\"Не могу нажать на поле поиска\")\n return False\n\n # Ввод названия города по буквам\n for char in self.current_city:\n self._wd_ac_send_keys(input_city, char)\n time.sleep(0.2)\n\n # Если не поставить задержку, окно закрывает, а город не применяет\n time.sleep(1.5)\n\n # Выбор города из сгенерированного списка городов\n input_city_item = self._wd_find_elem_with_timeout(By.XPATH, \"//li[@class='list-results__item']\")\n if not input_city_item:\n self.logger.error(\"Не найдено элементов при вводе города\")\n return False\n\n # Клик по нему\n if not self._wd_ac_click_elem(input_city_item):\n self.logger.error(\"Не могу нажать на выбранный город\")\n return False\n\n return True\n\n def _wd_city_selection_product(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций на странице продукта\n \"\"\"\n pass\n\n def _wd_check_load_page_catalog(self):\n \"\"\"\n Проверка по ключевым div-ам что страница каталога прогружена полностью\n \"\"\"\n # Ожидание прогрузки цен\n if not self._wd_find_elem_with_timeout(By.CLASS_NAME, 
\"product-price__current\"):\n return False\n\n self.logger.info(\"Page loaded\")\n return True\n\n def _wd_check_load_page_product(self):\n \"\"\"\n Проверка по ключевым div-ам что страница продукта прогружена полностью\n \"\"\"\n pass\n\n def _wd_open_browser_catalog(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы каталога, выбор города\n \"\"\"\n if not super()._wd_open_browser_catalog(url=url):\n return False\n\n # Скролл страницы 1\n if not self._wd_scroll_down(count_press=10, timeout=0.3):\n self.logger.error(\"Не удалось прогрузить страницу после скролла в __wd_open_browser (3)\")\n return False\n\n time.sleep(4)\n\n # Скролл страницы 2 (подргужается автоматически)\n if not self._wd_scroll_down(count_press=10, timeout=0.3):\n self.logger.error(\"Не удалось прогрузить страницу после скролла в __wd_open_browser (4)\")\n return False\n\n time.sleep(2)\n return True\n\n def _wd_open_browser_product(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы продукта, выбор города\n \"\"\"\n pass\n\n def _wd_next_page(self):\n \"\"\"\n Переход на заданную страницу num_page через клик (для имитации пользователя)\n \"\"\"\n for num_try in range(3):\n\n if num_try and not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n # Поиск следующей кнопки страницы\n num_page_elem = self._wd_find_elem(By.XPATH, \"//div[contains(@class, 'pagination__page')]/\"\n \"a[text()='{}']\".format(self.cur_page))\n if not num_page_elem:\n self.logger.info(\"Достигнут конец каталога\")\n return False\n\n # Клик - переход на следующую страницу\n if not self._wd_ac_click_elem(num_page_elem):\n self.logger.error(\"Не могу кликнуть на страницу в __wd_next_page\")\n self.driver.refresh()\n continue\n\n # Специальная задержка между переключениями страниц для имитации юзера\n time.sleep(self.wait_between_pages_sec)\n\n no_in_stock = self._wd_find_all_elems(By.XPATH, '//div[contains(text(), \"Нет в наличии\") or contains(text(), \"Скоро в продаже\")]')\n if no_in_stock and len(no_in_stock) == 30:\n self.logger.info(\"Вся страница неактуальна, выход\")\n return False\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (2)\")\n self.driver.refresh()\n continue\n\n # Скролл вниз и ожидание прогрузки страницы\n if not self._wd_scroll_down(count_press=10, timeout=0.3):\n self.logger.error(\"Не удалось прогрузить страницу после скролла в __wd_next_page\")\n self.driver.refresh()\n continue\n\n self.cur_page += 1\n return True\n else:\n self.logger.error(\"!! 
После 3 попыток не получилось переключить страницу #{} !!\".format(self.cur_page))\n return False\n\n def _parse_product_page(self, html, url):\n \"\"\"\n Метод для парсинга html страницы продукта\n \"\"\"\n pass\n\n def _parse_catalog_block(self, block):\n \"\"\"\n Метод для парсинга html страницы товара\n \"\"\"\n # Название модели\n full_name = block.select_one('a.card-product-description__heading')\n if not full_name:\n self.logger.warning(\"No model name and URL\")\n return\n else:\n full_name = full_name.get('aria-label').replace('\\n', '').strip()\n\n # Проверка на предзаказ\n if [item.text for item in block.select(\"span.button__text\") if item.text == \"Предзаказ\"]:\n self.logger.info(\"Товар '{}' по предзаказу, пропуск\".format(full_name))\n return\n\n # Проверка на мобильный телефон\n type_product = block.select_one(\"div.card-product-description__type\")\n if type_product and \"Мобильный телефон\" in type_product.text:\n self.logger.info(\"Найден мобильный телефон, пропуск\")\n return\n\n # URL\n url = block.select_one('a.card-product-description__heading')\n if not url:\n self.logger.warning(\"No URL\")\n return\n else:\n url = self.domain + url.get('href')\n\n # Ссылка на изображение товара\n img_url = block.select_one('img.gallery__img')\n if not img_url:\n self.logger.warning(\"No img url\")\n return\n else:\n img_url = img_url.get('src')\n\n if '/resize/' in img_url:\n img_url = img_url[:img_url.index('/resize/')]\n\n # Рейтинг товара\n rating = block.select_one('span.assessment-product__text')\n if not rating:\n rating = 0\n else:\n rating = float(rating.text.replace(' ', '').replace('\\n', '').replace(',', '.'))\n\n # На основании скольки отзывов построен рейтинг\n num_rating = block.select_one('span.assessment-product__text')\n if not num_rating:\n num_rating = 0\n else:\n num_rating = int(re.findall(r'\\d+', num_rating.text)[0])\n\n # Код продукта\n product_code = \"None\"\n\n # Цена\n price = block.select_one('span.product-price__current')\n if not price:\n self.logger.warning(\"No price\")\n return\n else:\n price = int(re.findall(r'\\d+', price.text.replace(' ', ''))[0])\n\n # Попытка применить промокод\n # old_price = block.select_one('div.product-price__old')\n # promo_code = block.select('div.action-product-item.promo-action')\n # if not old_price and promo_code:\n # for item in promo_code:\n # if 'промокод' in item.text:\n # self.logger.info('Нашел промокод \"{}\", применяю'.format(item.text))\n # promo_code = re.findall(r'\\d+', item.text.replace(' ', ''))\n # promo_code = int(promo_code[0]) if promo_code else 0\n # price -= promo_code\n # break\n\n # Парсинг названия модели\n brand_name, model_name, color, ram, rom = mts_parse_model_name(full_name)\n if not brand_name or not model_name or not color:\n self.logger.warning(\"No brand name, model name or color\")\n return\n\n # Добавление полученных результатов в коллекцию\n self._add_to_pr_result_list(brand_name, model_name, color, price, ram, rom,\n img_url, url, rating, num_rating, product_code)\n\n\nif __name__ == '__main__':\n import main\n\n time_start = time.time()\n main.load_allowed_model_names_list_for_base()\n main.load_exceptions_model_names()\n main.read_config()\n\n parser = MTSParse()\n parser.run_catalog('https://shop.mts.ru/catalog/smartfony/14/', 14)\n logger.info(f\"Время выполнения: {time.time() - time_start} сек\")\n" }, { "alpha_fraction": 0.5183103084564209, "alphanum_fraction": 0.5216169357299805, "avg_line_length": 51.3636360168457, "blob_id": "5cfe36557b10eb94923458ae6e3db62724409fba", 
"content_id": "a6e69d99be32b82fa7059da7b4a1e7ec61be70d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13480, "license_type": "no_license", "max_line_length": 120, "num_lines": 231, "path": "/modules/db_inserter/db_inserter.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import configparser\n\nfrom modules.common.db_wrapper import DataBase\nfrom modules.common import sql_req as sr, helper as h\n\nlogger = h.logging.getLogger('AddingToDB')\n\n\n# Функция, которая вернет true, если хоть у одного поля поврежденные данные\ndef check_item_on_errors(item):\n if not item.category or \\\n not item.shop or \\\n not item.brand_name or \\\n not item.model_name or \\\n not item.color or \\\n not item.img_url or \\\n not item.product_code or \\\n item.rom == 0 or \\\n item.price == 0:\n return False\n else:\n return True\n\n\nclass DbInserter:\n \"\"\"\n Класс, отвечающий за распределение данных с парсеров - добавляет в базу, находит выгодные цены,\n подготавливает список выгодных товаров для отправки в телеграм бот\n \"\"\"\n\n def __init__(self, parse_result_list=None):\n self.db = DataBase()\n self.config = configparser.ConfigParser()\n self.config.read('config.ini', encoding=\"utf-8\")\n self.best_shop_for_img_url = (self.config.defaults()['best_shops_for_img_url']).lower().split(', ')\n self.pr_parse_result_list = parse_result_list\n # Базовая переменная, в которую необходимо помещать те позиции, которые были добавлены в базу и подходят для\n # следующего этапа - проверки перед публикацией\n self.pr_price_change_list = []\n\n def __insert_product_in_products_table(self, id_category_name, brand_name, model_name, total_rating):\n \"\"\"\n Добавление продукта в таблицу products_table\n \"\"\"\n id_product = self.db.execute_read_query(sr.insert_into_products_table_query,\n [(id_category_name, brand_name, model_name, total_rating), ])\n\n return id_product[0][0] if id_product else None\n\n def __insert_version_in_versions_phones_table(self, id_product, ram, rom, img_url):\n \"\"\"\n Добавление комплектации в таблицу versions_phones_table\n \"\"\"\n id_ver_phone = self.db.execute_read_query(sr.insert_into_versions_phones_table_query,\n [(id_product, ram, rom, img_url), ])\n\n return id_ver_phone[0][0] if id_ver_phone else None\n\n def __insert_shop_in_shops_phones_table(self, id_shop_name, id_product, id_ver_phone, url, product_code, var_color,\n local_rating, num_local_rating, bonus_rubles=0):\n \"\"\"\n Добавление магазина, где продается комплектация в таблицу shops_phones_table\n \"\"\"\n id_shop_phone = self.db.execute_read_query(sr.insert_into_shops_phones_table_query,\n [(id_shop_name, id_product, id_ver_phone, url, product_code,\n var_color,\n local_rating, num_local_rating, bonus_rubles), ])\n\n return id_shop_phone[0][0] if id_shop_phone else None\n\n def __insert_price_in_prices_phones_table(self, id_shop_name, id_product, id_shop_phone, price, date_time='now()'):\n \"\"\"\n Добавление цены в таблицу prices_phones_table\n \"\"\"\n self.db.execute_query(sr.insert_into_prices_phones_table_query,\n [(id_shop_name, id_product, id_shop_phone, price, date_time), ])\n\n def __add_product_to_bd(self, category_name, shop_name, brand_name, model_name, var_rom, var_ram, var_color,\n img_url, url, product_code, local_rating, num_rating, price, bonus_rubles=0):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Добавление спарсенного товара в БД\n \"\"\"\n logger.info('-' * 50)\n logger.info(\n \"-- {} {} {} {} {} {} {} 
{}\".format(shop_name, brand_name, model_name, var_rom, var_ram, var_color, url,\n price))\n\n if not self.db.connection:\n logger.warning(\"Can't execute query - no connection\")\n return 'error'\n\n try:\n id_category_name = h.CATEGORIES_NAME_LIST.index((category_name,)) + 1\n id_shop_name = h.SHOPS_NAME_LIST.index((shop_name,)) + 1\n except ValueError as e:\n logger.error(\"ERROR get category_name or shop_name = {}\".format(e))\n return 'error'\n\n id_product = self.db.execute_read_query(sr.select_id_product_query, (brand_name, model_name))\n # + Продукт присутствует в #products_table\n if id_product:\n\n logger.info(\"---id_prod = {}\".format(id_product))\n id_product = id_product[0][0]\n id_ver_phone = self.db.execute_read_query(sr.select_id_ver_phone_query,\n (id_product, var_ram, var_rom))\n # ++ Комплектация присутствует в #version_phones_table\n if id_ver_phone:\n logger.info(\"---id_ver_phone = {}\".format(id_ver_phone))\n id_ver_phone = id_ver_phone[0][0]\n id_shop_phone = self.db.execute_read_query(sr.select_id_shop_phone_query,\n (id_ver_phone, id_shop_name, url))\n\n # +++ Данную комплектацию можно купить в этом магазине в #shop_phones_table\n if id_shop_phone:\n logger.info(\"---id_shop_phone = {}\".format(id_shop_phone))\n id_shop_phone = id_shop_phone[0][0]\n price_phone = self.db.execute_read_query(sr.select_price_in_price_phone_query, (id_shop_phone,))\n\n if not price_phone:\n logger.error(\"Нет цены, id_prod = {}, \"\n \"id_ver = {}, id_shop = {}\".format(id_product, id_ver_phone, id_shop_phone))\n return 'error'\n\n # ++++ Цена данной комплектации в данном магазине не изменилась - ничего не делаем\n if price_phone[-1][0] == price:\n logger.info(\"---price_phone = {}\".format(price_phone))\n # Если ничего не изменилось - обновить дату у цены\n logger.info(\"NO CHANGE, IGNORE; \"\n \"id_prod = {}, id_ver = {}, id_shop = {}, price = {}\".format(id_product,\n id_ver_phone,\n id_shop_phone,\n price_phone[-1][0]))\n\n # ---- Цена данной комплектации в данном магазине изменилась - добавляем в список цен\n else:\n logger.info(\"Новая цена на эту комплектацию в этом магазине, добавляю цену\")\n self.__insert_price_in_prices_phones_table(id_shop_name, id_product, id_shop_phone, price)\n return 'price'\n\n # --- Данную комплектацию нельзя купить в этом магазине, магазин отсутствует в #shop_phones_table\n else:\n logger.info(\"Такой комплектации нет в данном магазине, добавляю магазин и цену\")\n id_shop_phone = self.__insert_shop_in_shops_phones_table(id_shop_name, id_product, id_ver_phone,\n url, product_code, var_color, local_rating,\n num_rating, bonus_rubles)\n self.__insert_price_in_prices_phones_table(id_shop_name, id_product, id_shop_phone, price)\n logger.info(\n \"id_prod = {}, id_ver = {}, new id_shop = {}\".format(id_product, id_ver_phone, id_shop_phone))\n return 'version'\n\n # -- Комплектация отсутствует в #version_phones_table\n else:\n logger.info(\n \"Данная комплектация отсутствует в списке комплектаций, добавляю комплектацию, магазин, цену\")\n id_ver_phone = self.__insert_version_in_versions_phones_table(id_product, var_ram, var_rom, img_url)\n id_shop_phone = self.__insert_shop_in_shops_phones_table(id_shop_name, id_product, id_ver_phone,\n url, product_code, var_color, local_rating,\n num_rating, bonus_rubles)\n self.__insert_price_in_prices_phones_table(id_shop_name, id_product, id_shop_phone, price)\n logger.info(\n \"id_prod = {}, new id_ver = {}, new id_shop = {}\".format(id_product, id_ver_phone, id_shop_phone))\n return 'shop'\n\n # - Продукт 
отсутствует в #products_table\n else:\n logger.info(\"Данный продукт отсутствует в products_table, добавляю продукт, комплектацию, магазин, цену\")\n id_product = self.__insert_product_in_products_table(id_category_name, brand_name, model_name, 0)\n id_ver_phone = self.__insert_version_in_versions_phones_table(id_product, var_ram, var_rom, img_url)\n id_shop_phone = self.__insert_shop_in_shops_phones_table(id_shop_name, id_product, id_ver_phone, url,\n product_code, var_color, local_rating, num_rating,\n bonus_rubles)\n self.__insert_price_in_prices_phones_table(id_shop_name, id_product, id_shop_phone, price)\n logger.info(\n \"new id_prod = {}, new id_ver = {}, new id_shop = {}\".format(id_product, id_ver_phone, id_shop_phone))\n return 'product'\n\n return 'error'\n\n def __add_input_list_to_db(self, pr_product_list=None):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Добавление всех товаров в базу\n \"\"\"\n if not pr_product_list and not self.pr_parse_result_list:\n logger.warning('pr_product_list is empty')\n return\n\n pr_product_list = pr_product_list or self.pr_parse_result_list\n for item in pr_product_list:\n\n # Проверка элемента на некорректные поля\n if not check_item_on_errors(item):\n logger.warning(\"Продукт {} {} с артиклом {} в магазине {} содержит 'None', SKIP\".format(\n item.brand_name, item.model_name, item.product_code, item.shop))\n continue\n\n # Сохранение данных в базу. Если цена изменилась - вернет предыдущую\n resp = self.__add_product_to_bd(\n category_name=item.category,\n shop_name=item.shop,\n brand_name=item.brand_name,\n model_name=item.model_name,\n var_color=item.color,\n var_ram=item.ram,\n var_rom=item.rom,\n price=item.price,\n img_url=item.img_url,\n url=item.url,\n product_code=item.product_code,\n local_rating=item.rating,\n num_rating=item.num_rating)\n\n # Если при добавлении товара в базу была изменена только цена -\n # добавляем в очередь на проверку выгоды\n if resp == 'price' and not h.find_in_namedtuple_list(self.pr_price_change_list, brand_name=item.brand_name,\n model_name=item.model_name, ram=item.ram, rom=item.rom,\n price=item.price, limit_one=True):\n logger.info(item)\n self.pr_price_change_list.append(item)\n\n def run(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Запуск\n \"\"\"\n self.db.connect_or_create(\"parser\", \"postgres\", \"1990\", \"127.0.0.1\", \"5432\")\n self.__add_input_list_to_db()\n self.db.disconnect()\n return self.pr_price_change_list\n\n" }, { "alpha_fraction": 0.6026785969734192, "alphanum_fraction": 0.6034482717514038, "avg_line_length": 33.01047134399414, "blob_id": "1a9b43a935544915e1d75976f2e74a6076c3bdd7", "content_id": "855376f96288ee01bd18552786c6f4072a7f57e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7434, "license_type": "no_license", "max_line_length": 123, "num_lines": 191, "path": "/modules/common/db_wrapper.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import psycopg2\nimport psycopg2.extras\nfrom psycopg2 import OperationalError\nfrom modules.common import sql_req as sr, helper as h\n\nlogger = h.logging.getLogger('DBWrapper')\n\n\nclass DataBase:\n \"\"\"\n ВСПОМОГАТЕЛЬНЫЙ КЛАСС\n Класс, реализующий взаимодействие с БД PostgreSQL.\n\n :method connect: Подключение к БД\n :method create_database: Создание базы данных\n :method connect_or_create: Попытка подключиться к запрашиваемой БД, если не получилось - создание этой БД\n :method execute_query: Отправка sql запроса в БД\n :method execute_read_query: Отправка sql запроса в БД с получением 
ответа\n :method disconnect: Отключение от БД\n :method __create_tables_and_views: Создание таблиц, если они отсутствуют и заполнение вспомогательных данными.\n Необходимо реализовать отдельные методы по созданию и заполнению таблиц и вызывать их здесь.\n \"\"\"\n def __init__(self):\n self.connection = None\n self.cursor = None\n self.db_name_basic = \"postgres\"\n\n def __create_tables_and_views(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД.\n Создание таблиц, если они отсутствуют и заполнение вспомогательных данными\n \"\"\"\n self.execute_query(sr.create_categories_name_table_query)\n self.execute_query(sr.create_shops_name_table_query)\n\n self.__insert_shops_name_table()\n self.__insert_category_name()\n\n self.execute_query(sr.create_products_table_query)\n self.execute_query(sr.create_versions_phones_table_query)\n self.execute_query(sr.create_shops_phones_table_query)\n self.execute_query(sr.create_prices_phone_table_query)\n\n self.execute_query(sr.create_view_general_table_query)\n\n def __insert_shops_name_table(self):\n \"\"\"\n Заполнить таблицу shops_name_table данными\n \"\"\"\n if not self.connection:\n logger.error(\"Can't execute read query - no connection\")\n return\n\n try:\n psycopg2.extras.execute_values(self.cursor, sr.insert_into_shops_name_table_query, h.SHOPS_NAME_LIST)\n except OperationalError as e:\n logger.error(\"The error '{}' occurred\".format(e))\n\n def __insert_category_name(self):\n \"\"\"\n Заполнить таблицу categories_name_table данными\n \"\"\"\n if not self.connection:\n logger.error(\"Can't execute read query - no connection\")\n return\n\n try:\n psycopg2.extras.execute_values(self.cursor, sr.insert_into_categories_name_table_query, h.CATEGORIES_NAME_LIST)\n except OperationalError as e:\n logger.error(\"The error '{}' occurred\".format(e))\n\n def create_database(self, db_name):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Создание базы данных\n \"\"\"\n if not self.connection:\n logger.error(\"Can't create database - no connection\")\n return False\n\n try:\n self.cursor.execute(sr.create_database_query + db_name)\n except OperationalError as e:\n logger.error(\"The error '{}' occurred\".format(e))\n return False\n\n return True\n\n def connect(self, db_name, db_user, db_password, db_host, db_port):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Соединение с базой данных\n \"\"\"\n if self.connection:\n self.disconnect()\n\n try:\n self.connection = psycopg2.connect(\n database=db_name,\n user=db_user,\n password=db_password,\n host=db_host,\n port=db_port,\n )\n\n logger.info(\"Connection to PostgreSQL DB '{}' successful\".format(db_name))\n self.connection.autocommit = True\n self.cursor = self.connection.cursor()\n except OperationalError as e:\n logger.error(\"The error '{}' occurred\".format(e))\n return False\n\n return True\n\n def connect_or_create(self, db_name, db_user, db_password, db_host, db_port):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Попытка подключиться к запрашиваемой БД, если не получилось - создание этой БД\n \"\"\"\n # Попытка подключится к запрашиваемой базе данных\n if self.connect(db_name, db_user, db_password, db_host, db_port):\n logger.info(\"Connected to Database {}\".format(db_name))\n return True\n\n # Если такой базы не существует, подключаемся к основной и создаем новую\n logger.info(\"Database '{}' not found, create '{}'\".format(db_name, db_name))\n if not self.connect(self.db_name_basic, db_user, db_password, db_host, db_port):\n logger.error(\"Basic Database '{}' not found!\".format(self.db_name_basic))\n return False\n\n # Если подключились к 
основной - создаем свою\n if not self.create_database(db_name):\n logger.error(\"Can't create new Database '{}'\".format(db_name))\n return False\n\n # Если получилось создать новую базу данных - соединяемся с ней\n logger.info(\"Data base '{}' created\".format(db_name))\n if not self.connect(db_name, db_user, db_password, db_host, db_port):\n return False\n\n self.__create_tables_and_views()\n return True\n\n def execute_query(self, query, variables=None):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Отправка sql запроса в БД\n \"\"\"\n if not self.connection:\n logger.info(\"Can't execute query - no connection\")\n return False\n\n try:\n self.cursor.execute(query, variables)\n except OperationalError as e:\n logger.info(\"The error '{}' occurred\".format(e))\n return False\n\n return True\n\n def execute_read_query(self, query, variables=None):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Чтение данных с таблицы\n \"\"\"\n if not self.connection:\n logger.error(\"Can't execute read query - no connection\")\n return None\n\n try:\n if variables:\n self.cursor.execute(query, variables)\n else:\n self.cursor.execute(query)\n\n result = self.cursor.fetchall()\n return result\n\n except OperationalError as e:\n logger.error(\"The error '{}' occurred\".format(e))\n return None\n\n def disconnect(self):\n \"\"\"\n ОБЯЗАТЕЛЬНЫЙ МЕТОД\n Отсоединение от БД\n \"\"\"\n if self.cursor:\n self.cursor.close()\n\n if self.connection:\n self.connection.close()\n" }, { "alpha_fraction": 0.5717958807945251, "alphanum_fraction": 0.5766171216964722, "avg_line_length": 35.92878341674805, "blob_id": "d715dd86641aaa0e3add8314d1ebf31161590aa4", "content_id": "f42e9f3a4daff456a74b3b30de9e406da1888393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14836, "license_type": "no_license", "max_line_length": 115, "num_lines": 337, "path": "/modules/data_receiver/parsers/eldorado_parse.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport re\n\nfrom selenium.webdriver.common.by import By\nfrom modules.data_receiver.parsers.parse_base import ParseBase\n\nimport modules.common.helper as h\nfrom modules.common.file_worker import FileWorker\n\nlogger = h.logging.getLogger('eldoradoparse')\nELDORADO_REBUILT_IPHONE = 'как новый'\n\n\ndef eldorado_parse_model_name(name):\n \"\"\"\n Парсинг названия модели (получить название модели, цвет и ROM)\n \"\"\"\n # Защита от неправильных названий\n if len(name.split()) < 5:\n return None, None, None\n # Убираем неразрывные пробелы\n name = name.replace(u'\\xc2\\xa0', u' ')\n name = name.replace(u'\\xa0', u' ')\n # Восстановленные телефоны (только для iphone). 
Если есть слово - удалить\n rebuilt = h.REBUILT_IPHONE_NAME if (ELDORADO_REBUILT_IPHONE in name.lower()) else ''\n name = name.replace(ELDORADO_REBUILT_IPHONE, '')\n # Оборачивание скобками названия модели, если их не было\n last_word = name.split()[-1]\n if last_word.isupper() and \\\n not ('(' in last_word) and \\\n not (')' in last_word):\n name = name.replace(last_word, '({})'.format(last_word))\n # Понижение регистра\n name = name.lower()\n # Удалить nfc и 5g\n name = name.replace(' nfc ', ' ').replace(' 5g ', ' ')\n # Удалить все скобки\n brackets = re.findall(r\"\\(.+?\\)\", name)\n for item in brackets:\n name = name.replace(item, '')\n # Удалить год, если есть\n year = re.findall(r' 20[1,2]\\d ', name)\n year = year[0] if year else ''\n # Получить размер ROM\n rom = re.findall(r'\\d*[gb]*[\\+/]*\\d+(?:gb|tb)', name)\n rom = (rom[0]) if rom else \"\"\n # Получить ЦВЕТ\n # Получить 2 слова цвета\n color1, color2 = name.split()[-2:] if name.split()[-1] != rom \\\n else name.split()[-3:-1]\n # Если первое слово цвета состоит только из букв и длиннее 2 символов и отсутствует в игнор-листе - добавить\n # к итоговому цвету\n color1 = color1 if (\n color1.isalpha() and len(color1) > 2 and not (color1.strip() in h.IGNORE_WORDS_FOR_COLOR)) else \"\"\n color = color1 + \" \" + color2 if (color1.isalpha() and len(color1) > 2) else color2\n # Удалить первую часть\n name = name.replace('смартфон', '').replace(rom, '').replace(year, '').replace(' ', ' ')\n # Убрать вторую часть лишних слов из названия\n name = name.replace(color, '').replace(' ', ' ').strip()\n name += rebuilt\n\n # Проверка названия в словаре исключений названий моделей\n name = h.replace_value_from_dictionary(h.EXCEPT_MODEL_NAMES_DICT, name)\n\n # Проверка названия модели в словаре разрешенных моделей\n if not h.find_allowed_model_names(name):\n logger.info(\"Обнаружена новая модель, отсутствующая в базе = '{}'\".format(name))\n FileWorker.list_data.save(h.UNDEFINED_MODEL_NAME_LIST_PATH, data=name, overwrite=False)\n return None, None, None\n\n # Получить название бренда\n brand_name = name.split()[0]\n model_name = name.replace(brand_name, '').strip()\n\n return brand_name, model_name, color\n\n\nclass EldoradoParse(ParseBase):\n \"\"\"\n РЕАЛИЗАЦИЯ ОДНОГО ИЗ ОСНОВНЫХ МОДУЛЕЙ ПРОЕКТА - DataReceiver\n Реализация базового класса ParseBase\n Парсит данные с магазина Эльдорадо\n \"\"\"\n\n def __init__(self):\n super().__init__(domain=\"https://www.eldorado.ru\", shop=\"eldorado\", logger=logger, category=\"смартфоны\")\n self.is_grid = True\n self.container_css_selector = 'li[data-dy=\"product\"]'\n\n def _wd_city_selection_catalog(self):\n \"\"\"\n Алгоритм выбора города для всех возможных ситуаций на странице каталога\n \"\"\"\n city = self._wd_find_elem_with_timeout(By.XPATH, \"//span[@class='h8xlw5-3 kLXpZr']\")\n if not city:\n self.logger.error(\"Не найдено поле с названием города\")\n return False\n\n # Если указан неверный город\n if not (self.current_city.lower() in city.text.lower()):\n self.logger.info(\"Неверный город\")\n\n # Клик по городу\n if not self._wd_ac_click_elem(city):\n self.logger.error(\"Не могу нажать на кнопку выбора города\")\n return False\n\n self.logger.info(\"Клик по городу\")\n\n # Получить список всех городов и если есть нужный, кликнуть по нему\n city_list = self._wd_find_all_elems_with_timeout(By.CLASS_NAME, \"N5ndClh\")\n if city_list:\n for item in city_list:\n if self.current_city.lower() in item.text.lower():\n time.sleep(1.5)\n return self._wd_ac_click_elem(item)\n else:\n 
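# Список быстрых городов не прогрузился - переходим к ручному вводу названия\n 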
self.logger.info(\"Не вижу нужный город в списке, пробую вбить вручную\")\n\n # Поиск поля для ввода города\n input_city = self._wd_find_elem_with_timeout(By.XPATH, \"//input[@name='region-search']\")\n if not input_city:\n self.logger.error(\"Не найдено поле, куда вводить новый город\")\n return False\n\n # Кликнуть на форму для ввода текста\n time.sleep(1)\n if not self._wd_ac_click_elem(input_city):\n self.logger.error(\"Не могу кликнуть на форму для ввода текста\")\n return False\n\n # Ввод названия города по буквам\n for char in self.current_city:\n self._wd_ac_send_keys(input_city, char)\n time.sleep(0.2)\n\n time.sleep(2)\n\n # Выбор города из сгенерированного списка городов\n input_city_item = self._wd_find_elem_with_timeout(By.XPATH, \"//span[contains(text(),'{}')]\".format(\n self.current_city))\n if not input_city_item:\n self.logger.error(\"Не найдено элементов при вводе города\")\n return False\n\n # Клик по нему\n if not self._wd_ac_click_elem(input_city_item):\n self.logger.error(\"Не могу нажать на выбранный город\")\n return False\n\n return True\n\n def __wd_city_selection_product(self):\n \"\"\"\n Алгоритм выбора города для всех возмодных ситуаций на странице продукта\n \"\"\"\n pass\n\n def _wd_city_selection_product(self):\n \"\"\"\n Алгоритм выбора города для всех возмодных ситуаций на странице продукта\n \"\"\"\n pass\n\n def _wd_check_load_page_catalog(self):\n \"\"\"\n Проверка по ключевым div-ам что страница каталога прогружена полностью\n \"\"\"\n # Ожидание прогрузки цен\n if not self._wd_find_elem_with_timeout(By.XPATH, '//span[@databases-pc=\"offer_price\"]'):\n return False\n\n self.logger.info(\"Page loaded\")\n return True\n\n def _wd_check_load_page_product(self):\n \"\"\"\n Проверка по ключевым div-ам что страница продукта прогружена полностью\n \"\"\"\n pass\n\n def _wd_open_browser_catalog(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы каталога, выбор города\n \"\"\"\n if not super()._wd_open_browser_catalog(url=url):\n return False\n\n return True\n\n def _wd_open_browser_product(self, url):\n \"\"\"\n Запуск браузера, загрузка начальной страницы продукта, выбор города\n \"\"\"\n pass\n\n def _wd_next_page(self):\n \"\"\"\n Переход на заданную страницу num_page через клик\n \"\"\"\n for num_try in range(3):\n\n if num_try and not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n # Поиск следующей кнопки страницы\n num_page_elem = self._wd_find_elem(By.XPATH, \"//a[@aria-label='Page {}']\".format(self.cur_page))\n if not num_page_elem:\n self.logger.info(\"Достигнут конец каталога\")\n return False\n\n # Клик - переход на следующую страницу\n if not self._wd_ac_click_elem(num_page_elem):\n self.logger.error(\"Не могу кликнуть на страницу в __wd_next_page\")\n self.driver.refresh()\n continue\n\n # Специальная задержка между переключениями страниц для имитации юзера\n time.sleep(self.wait_between_pages_sec)\n\n # Ждем, пока не прогрузится страница\n if not self._wd_check_load_page_catalog():\n self.logger.error(\"Не удалось прогрузить страницу в __wd_next_page (1)\")\n self.driver.refresh()\n continue\n\n no_in_stock = self._wd_find_all_elems(By.XPATH, '//span[text()=\"Нет в наличии\"]')\n if no_in_stock and len(no_in_stock) == 36:\n self.logger.info(\"Вся страница неактуальна, выход\")\n return False\n\n self.cur_page += 1\n return True\n else:\n self.logger.error(\"!! 
После 3 попыток не получилось переключить страницу #{} !!\".format(self.cur_page))\n return False\n\n def _parse_product_page(self, html, url):\n \"\"\"\n Метод для парсинга html страницы продукта\n \"\"\"\n pass\n\n def _parse_catalog_block(self, block):\n \"\"\"\n Метод для парсинга html страницы товара\n \"\"\"\n # Название модели\n full_name = block.select_one('a[databases-dy=\"title\"]')\n if not full_name:\n self.logger.warning(\"No model name and URL\")\n return\n else:\n url = full_name.get('href')\n full_name = full_name.text.replace('\\n', '').replace(' ', ' ').strip()\n\n # Проверка на \"Нет в наличии\" И предзаказ\n if [item.text for item in block.select('span') if (\"Нет в наличии\" in item.text or\n \"Оформить предзаказ\" in item.text)]:\n self.logger.info(\"Товара '{}' нет в наличии или предзаказ, пропуск\".format(full_name))\n return\n\n # URL\n if not url:\n self.logger.warning(\"No URL\")\n return\n else:\n url = self.domain + url\n\n # Ссылка на изображение товара\n img_url = block.select_one('a[href=\"{}\"] > img'.format(url.replace(self.domain, '')))\n if not img_url:\n self.logger.warning(\"No img url\")\n return\n else:\n img_url = img_url.get('src')\n\n if '/resize/' in img_url:\n img_url = img_url[:img_url.index('/resize/')]\n\n # Рейтинг товара и на основании скольки отзывов построен\n rating = len(block.select('span.tevqf5-2.fBryir'))\n num_rating = block.select_one('a[databases-dy=\"review\"]')\n if not num_rating:\n self.logger.info(\"No num rating\")\n num_rating = 0\n else:\n num_rating = int(re.findall(r'\\d+', num_rating.text)[0])\n\n # Код продукта\n product_code = \"None\"\n\n # RAM, ROM\n ram, rom = 0, 0\n characteristics = block.select('li.aKmrwMA')\n if not characteristics:\n self.logger.error(\"Нет характеристик\")\n return\n else:\n for item in characteristics:\n if 'оперативн' in item.text.lower():\n ram = int(re.findall(r'\\d+', item.text)[0])\n if 'встроенн' in item.text.lower():\n rom = int(re.findall(r'\\d+', item.text)[0])\n\n # Цена\n price = block.select_one('span[databases-pc=\"offer_price\"]')\n if not price:\n self.logger.warning(\"No price\")\n return\n else:\n price = int(re.findall(r'\\d+', price.text.replace(' ', ''))[0])\n\n # Парсинг названия модели\n brand_name, model_name, color = eldorado_parse_model_name(full_name)\n if not brand_name or not model_name or not color:\n self.logger.warning(\"No brand name, model name or color\")\n return\n\n # Добавление полученных результатов в коллекцию\n self._add_to_pr_result_list(brand_name, model_name, color, price, ram, rom,\n img_url, url, rating, num_rating, product_code)\n\n\nif __name__ == '__main__':\n import main\n\n time_start = time.time()\n main.load_allowed_model_names_list_for_base()\n main.load_exceptions_model_names()\n main.read_config()\n\n parser = EldoradoParse()\n parser.run_catalog('https://www.eldorado.ru/c/smartfony/')\n logger.info(f\"Время выполнения: {time.time() - time_start} сек\")\n" }, { "alpha_fraction": 0.5639880895614624, "alphanum_fraction": 0.5684523582458496, "avg_line_length": 28.647058486938477, "blob_id": "5ecf5d3d55dde8b0ffc0131a31a3fb11da4f7f33", "content_id": "7378d7152b8f3985db3c06089575d965cc297b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4671, "license_type": "no_license", "max_line_length": 105, "num_lines": 136, "path": "/modules/common/image_creator.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import requests\nfrom PIL import Image\nimport 
modules.common.helper as h\n\nlogger = h.logging.getLogger('post_image')\nSTAMP_PATH = h.ROOT_PATH + 'data/img/stamp.png'\nBLACKOUT_PATH = h.ROOT_PATH + 'data/img/blackout.png'\nHIGHLIGHTING_PATH = h.ROOT_PATH + 'data/img/white.png'\n\n\nclass ImageCreator:\n \"\"\"\n ВСПОМОГАТЕЛЬНЫЙ НЕОСНОВНОЙ КЛАСС (отсутствует в базовых модулях)\n Создает изображение из URL заданных размеров.\n\n :method open: Открыть изображение с диска\n :method __creation: Генерация картинки нужного размера из url\n :method check: Проверка наличия изображения\n :method get_img: Получить изображение\n :method save_as_png: Сохранение изображения на диск как png\n :method save_as_jpg: Сохранение изображения на диск как jpg\n \"\"\"\n def __init__(self, url='', width=640, height=480):\n self.W = width\n self.H = height\n self.img = None\n\n if url:\n self.__creation(url)\n\n def open(self, path):\n \"\"\"\n Открыть изображение с диска\n \"\"\"\n self.img = Image.open(path).convert('RGBA')\n\n def __creation(self, url):\n \"\"\"\n Генерация картинки нужного размера из url\n \"\"\"\n # Проверка URL\n if not (\"http\" in url):\n logger.warning(\"Дефектный URL изображения: {}\".format(url))\n return None\n\n # Загрузить изображение с url\n try:\n resp = requests.get(url, stream=True).raw\n except requests.exceptions.RequestException as e:\n logger.error(\"Can't get img from url, url={}\\ne = {}\".format(url, e))\n return None\n\n # Попытка открыть изображение средствами PIL\n try:\n raw_img = Image.open(resp)\n except IOError:\n logger.error(\"Unable to open image\")\n return None\n\n # Если высота не соответствует H - изменение размера изображения с учетом пропорций\n if raw_img.height != self.H:\n width, height = raw_img.size\n new_width = int(self.H * width / height)\n raw_img = raw_img.resize((new_width, self.H), Image.LANCZOS)\n\n self.img = Image.new('RGBA', (self.W, self.H), color='#FFFFFF')\n self.img.paste(raw_img, (int((self.W - raw_img.width) / 2), 0), 0)\n\n def check(self):\n \"\"\"\n Проверка наличия изображения\n \"\"\"\n return bool(self.img)\n\n def get_img(self):\n \"\"\"\n Получить изображение\n \"\"\"\n return self.img\n\n def save_as_png(self, path, name):\n \"\"\"\n Сохранение изображения на диск как png\n \"\"\"\n # img_png = self.img.convert('RGBA')\n self.img.save(\"{}/{}.png\".format(path, name), \"png\")\n\n def save_as_jpg(self, path, name):\n \"\"\"\n Сохранение изображения на диск как jpg\n \"\"\"\n if path and path[-1] in ['/', '\\\\']:\n path = path[:-1]\n\n if name and '.jpg' in name:\n name = name.replace('.jpg', '')\n\n img_jpg = self.img.convert('RGB')\n img_jpg.save(\"{}/{}.jpg\".format(path, name), \"jpeg\")\n\n def draw_stamp(self):\n \"\"\"\n Отрисовка штампа на изображении\n \"\"\"\n if not self.img:\n logger.error('no img in draw_stamp')\n return self\n\n logger.info(\"draw stamp on image\")\n stamp = Image.open(STAMP_PATH).convert(\"RGBA\")\n self.img.paste(stamp, (int((self.W - stamp.width) / 2), int((self.H - stamp.height) / 2)), stamp)\n # self.img.paste(stamp, (int(self.W - stamp.width), 0), stamp)\n\n return self\n\n def darken(self):\n \"\"\"\n Затемнение изображения\n \"\"\"\n logger.info(\"darken image\")\n\n blackout = Image.open(BLACKOUT_PATH).convert(\"RGBA\")\n self.img.paste(blackout, (0, 0), blackout)\n\n return self\n\n def lighten(self):\n \"\"\"\n Высветление изображения\n \"\"\"\n logger.info(\"lighten image\")\n\n white = Image.open(HIGHLIGHTING_PATH).convert(\"RGBA\")\n self.img.paste(white, (0, 0), white)\n\n return self\n" }, { "alpha_fraction": 
0.5783606767654419, "alphanum_fraction": 0.5822950601577759, "avg_line_length": 45.339778900146484, "blob_id": "be98d1b6322f95410f454c9234be0f54b048cd3b", "content_id": "1ffb77e279688bb7be6148a72b3e8f57f01d121c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19187, "license_type": "no_license", "max_line_length": 119, "num_lines": 362, "path": "/modules/data_sender/telegram/bot.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport hashlib\nfrom datetime import datetime\n\nimport modules.common.helper as h\nimport modules.data_sender.telegram.bot_helper as bh\nfrom modules.common import sql_req as sr\nfrom modules.common.db_wrapper import DataBase\nfrom modules.common.file_worker import FileWorker\nfrom modules.data_sender.telegram.telegram_sender import TelegramSender\n\nlogger = h.logging.getLogger('Bot')\n\n\nclass Bot(TelegramSender):\n \"\"\"\n РЕАЛИЗАЦИЯ ОДНОГО ИЗ ОСНОВНЫХ МОДУЛЕЙ ПРОЕКТА - DataSender\n Класс, реализующий юзербота в телеграме, который выполняет сразу несколько функций:\n - Делает новые посты в канал (с генерацией текста и картинки)\n - Перепроверяет старые посты на актуальность и, в случае неактуальности обновляет\n данные или ставит штамп\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.ignore_brands = self.config['bot-ignore']['brands'].lower().split('\\n')\n self.one_star_per = float(self.config['bot-stars']['one_star_per'])\n self.two_star_per = float(self.config['bot-stars']['two_star_per'])\n self.three_star_per = float(self.config['bot-stars']['three_star_per'])\n self.four_star_per = float(self.config['bot-stars']['four_star_per'])\n self.five_star_per = float(self.config['bot-stars']['five_star_per'])\n self.irrelevant_url_text = self.config['bot']['irrelevant_url_text']\n self.hash_tag_actual = '#' + self.config['bot']['hash_tag_actual']\n self.max_num_act_post_telegram = int(self.config['bot']['max_num_act_post_telegram'])\n\n self.pc_product_list = []\n self.posts_in_telegram_list = []\n self.db = DataBase()\n\n # Загрузка словарей\n self.except_model_names_telegram_dict = FileWorker.dict_data.load(h.EXCEPT_MODEL_NAMES_TELEGRAM_PATH)\n self.stats_prods_dict = FileWorker.dict_data_str_int.load(h.STATS_PRODS_DICTIONARY_PATH)\n self.stats_shops_dict = FileWorker.dict_data_str_int.load(h.STATS_SHOPS_DICTIONARY_PATH)\n\n self.posts_in_telegram_list = bh.load_msg_in_telegram_list()\n self.num_all_post, self.num_actual_post = bh.load_num_posts()\n\n def __generate_caption(self, category, brand_name, model_name, ram, rom, price, avg_actual_price, hist_min_price,\n hist_min_date, hist_min_shop, versions_list, is_actual):\n \"\"\"\n Генерация текста для поста (описания для изображения)\n - versions_list: список кортежей вида [(shop, url, color), (...), ...]\n \"\"\"\n\n if not versions_list:\n logger.error(\"Неизвестная ошибка с пустым versions_list, пропуск\")\n return None\n\n # НАЗВАНИЕ МОДЕЛИ с учетом словаря с исключениями названий\n caption = h.replace_value_from_dictionary(self.except_model_names_telegram_dict, '<b>{} {} {}</b>\\n'.format(\n category[0:-1].title(), brand_name.title(), model_name.title()))\n\n # КОМПЛЕКТАЦИЯ\n caption += '<b>{}/{} GB</b>\\n\\n'.format(ram, rom) \\\n if (ram and brand_name != 'apple') \\\n else '<b>{} GB</b>\\n\\n'.format(rom)\n\n # ОГОНЬКИ\n star = 0\n per = h.per_num_of_num(price, avg_actual_price)\n\n if self.one_star_per <= per < self.two_star_per:\n star = 1\n if self.two_star_per <= per < 
self.three_star_per:\n star = 2\n if self.three_star_per <= per < self.four_star_per:\n star = 3\n if self.four_star_per <= per < self.five_star_per:\n star = 4\n if self.five_star_per < per:\n star = 5\n\n caption += '🔥' * star + '\\n'\n\n # ЦЕНА\n caption += 'Выгодная цена: <b><i>{}</i></b> ₽\\n'.format(\n '{0:,}'.format(price).replace(',', ' ')\n )\n caption += '<i>(Дешевле на {}</i> ₽<i> от средней)</i>\\n\\n'.format(\n '{0:,}'.format(int(avg_actual_price - price)).replace(',', ' ')\n )\n\n # ИСТОРИЧЕСКИЙ МИНИМУМ\n if price <= hist_min_price:\n caption += '<i>Данная цена является самой низкой за всё время</i>\\n'\n else:\n date_time = datetime.strptime(str(hist_min_date), '%Y-%m-%d %H:%M:%S.%f').strftime('%d.%m.%Y')\n caption += '<i>Минимальная цена {}</i> ₽ <i>была в {} {}</i>\\n'.format(\n '{0:,}'.format(hist_min_price).replace(',', ' '),\n h.TRUE_SHOP_NAMES[hist_min_shop - 1], date_time\n )\n\n # СПИСОК ССЫЛОК ДЛЯ ПОКУПКИ\n pos_shop, pos_url, pos_color = 0, 1, 2\n shops_set = list(set(item[pos_shop] for item in versions_list))\n\n # Группировка позиций по магазину и создание списка ссылок на разные магазины с разными цветами\n hashtag_shops = ''\n links_shop_list = []\n for shop in shops_set:\n # Генерация тегов магазинов\n hashtag_shops += '#' + h.SHOPS_NAME_LIST[shop - 1][0] + ' '\n # Генерация ссылок\n urls = ''\n for product in versions_list:\n if product[pos_shop] == shop:\n urls += '<a href=\"{}\">► {}</a>\\n'.format(bh.get_ref_link(product[pos_url]),\n product[pos_color].title())\n links_shop_list.append(urls)\n\n # Генерация ссылок\n indx = 0\n for link_set in links_shop_list:\n caption += '\\nКупить в <b><u>{}</u></b>:\\n{}'.format(h.TRUE_SHOP_NAMES[shops_set[indx] - 1], link_set)\n indx += 1\n\n # ХЭШТЕГИ\n caption += '\\n#{} {}'.format(brand_name, hashtag_shops)\n if is_actual:\n caption += self.hash_tag_actual\n\n return caption\n\n def __filtering_data(self):\n \"\"\"\n Фильтрация входных данных - удаление дубликатов и применение игнор-листа\n \"\"\"\n # Удалить дубликаты, если имеются\n result = []\n for item in self.pc_product_list:\n if not result.count(item):\n result.append(item)\n self.pc_product_list = result\n\n # Удалить товары, если его бренд имеется в игнор-листе\n result = []\n for item in self.pc_product_list:\n if not self.ignore_brands.count(item.brand_name):\n result.append(item)\n self.pc_product_list = result\n\n def __prepare_and_send_all_posts(self):\n \"\"\"\n Разбор списка продуктов, группировка по цветам, отправка в телеграм\n \"\"\"\n versions_list = []\n # Проход по всему списку, группировка элементов по версии и цвету, пост группы\n while self.pc_product_list:\n # Взятие группы комплектации с разными цветами\n item = self.pc_product_list[0]\n one_version_list = h.find_in_namedtuple_list(self.pc_product_list,\n brand_name=item.brand_name, model_name=item.model_name,\n ram=item.ram, rom=item.rom, price=item.price)\n # Составление списка комплектаций\n versions_list.append(one_version_list)\n # Удаление из основного списка взятой группы one_version_list\n for item in one_version_list:\n self.pc_product_list.remove(item)\n\n # Отправка постов в телеграм. 
Звук только у последних 2-ух\n for i in range(len(versions_list)):\n self.app.loop.run_until_complete(\n self.__send_one_post(versions_list[i], dis_notify=bool(i < (len(versions_list) - 2)))\n )\n\n async def __send_one_post(self, version_list, dis_notify):\n \"\"\"\n Отправка поста в телеграм\n \"\"\"\n item = version_list[0]\n\n # Проверка на наличие такого же поста в списке актуальных сообщений\n if h.find_in_namedtuple_list(self.posts_in_telegram_list,\n brand_name=item.brand_name, model_name=item.model_name, price=item.price,\n ram=item.ram, rom=item.rom, limit_one=True):\n logger.info(\"Duplicate post, SKIP\")\n logger.info(item)\n return\n\n # Обновление счетчика постов\n self.num_all_post += 1\n self.num_actual_post += 1\n\n # Обновление словаря статистики товаров и магазинов\n bh.inc_stats_products(self.stats_prods_dict, item.brand_name, item.model_name)\n bh.inc_stats_shops(self.stats_shops_dict, list(set(item.shop for item in version_list)))\n\n # Генерация поста\n versions_list = [(item.shop, item.url, item.color) for item in version_list]\n text = self.__generate_caption(\n category=item.category, brand_name=item.brand_name, model_name=item.model_name, ram=item.ram,\n rom=item.rom, price=item.price, avg_actual_price=item.avg_actual_price,\n hist_min_price=item.hist_min_price, hist_min_date=item.hist_min_date, hist_min_shop=item.hist_min_shop,\n versions_list=versions_list, is_actual=True\n )\n\n img_path = bh.create_and_save_img_for_edit_post(img_url=item.img_url, is_actual=True)\n if not img_path:\n return\n\n for i in range(3):\n msg_id = self.send_photo_message(img_path=img_path, caption=text, dis_notify=dis_notify)\n\n if msg_id:\n self.posts_in_telegram_list.append(h.MessagesInTelegram(\n message_id=msg_id,\n category=item.category,\n brand_name=item.brand_name,\n model_name=item.model_name,\n ram=item.ram,\n rom=item.rom,\n price=item.price,\n avg_actual_price=item.avg_actual_price,\n img_url=item.img_url,\n where_buy_list=[(item.shop, item.color, item.url) for item in version_list],\n post_datetime=datetime.now(),\n hist_min_price=item.hist_min_price,\n hist_min_shop=item.hist_min_shop,\n hist_min_date=item.hist_min_date,\n text_hash=hashlib.sha256(text.encode()).hexdigest(),\n is_actual=True,\n ))\n break\n\n async def __edit_post_as_irrelevant(self, post, text, current_actual):\n \"\"\"\n Отредактировать пост как частично или полностью неактуальный\n \"\"\"\n # Если пост был неактуальный и до сих пор неактуальный - выходим, менять нечего\n if not post.is_actual and not current_actual:\n logger.info(\"Пост был и остается неактуальным, не меняем\")\n return True\n\n # Если есть изменения состояния, то обновляем пост вместе с картинкой, иначе только описание\n if post.is_actual != current_actual:\n logger.info(\"Изменение актуальности {} -> {}\".format(post.is_actual, current_actual))\n\n # Генерация новой картинки и сохранение на диск\n img_path = bh.create_and_save_img_for_edit_post(img_url=post.img_url, is_actual=current_actual)\n\n # 3 попытки изменить пост (из-за бага телеграм)\n for i in range(3):\n if self.edit_photo_message(msg_id=post.message_id, img_path=img_path, caption=text):\n logger.info(\"Успешное выполнение edit_message_media!\")\n self.num_actual_post += 1 if current_actual else (-1)\n time.sleep(1)\n return True\n\n logger.error(\"Не удалось отредактировать пост после 3 попыток\")\n return False\n\n # Если пост не менял актуальность (true=true) и хэш сообщения изменился - обновляем описание поста\n if hashlib.sha256(text.encode()).hexdigest() != 
post.text_hash:\n if not self.edit_photo_message_caption(msg_id=post.message_id, caption=text):\n return False\n\n logger.info(\"В посте ничего не изменилось\")\n return True\n\n def __checking_irrelevant_posts(self, pr_product_in_stock_list):\n \"\"\"\n Проверка неактуальных постов\n \"\"\"\n self.db.connect_or_create(\"parser\", \"postgres\", \"1990\", \"127.0.0.1\", \"5432\")\n\n # Проход по всем актуальным постам, их проверка на полную, частичную актуальность и неактуальность\n new_posts_in_telegram_list = []\n for item in self.posts_in_telegram_list:\n\n # Получить список всех актуальных цен и данных на данную комплектацию:\n act_price_data_list = self.db.execute_read_query(sr.search_actual_prices_by_version_query,\n (item.brand_name, item.model_name, item.ram, item.rom))\n # Фильтрация списка актуальных цен с учетом наличия в магазинах\n act_price_data_in_stock_list = bh.irr_post_search_data_in_stock(act_price_data_list,\n pr_product_in_stock_list)\n # Список данных с минимальными актуальными ценами в наличии\n min_act_price_data_in_stock_list = h.find_min_price_in_prices_list(act_price_data_in_stock_list)\n\n logger.info((\"-\" * 50) + \"item: {}\".format(item))\n logger.info(\"item actual: {}\".format(item.is_actual))\n logger.info(\"act_price_data_list: {}\".format(act_price_data_list))\n logger.info(\"act_price_data_in_stock_list: {}\".format(act_price_data_in_stock_list))\n logger.info(\"min_act_price_data_in_stock_list: {}\".format(min_act_price_data_in_stock_list))\n\n # Если минимальная цена отличается от цены в посте - ПОСТ ПОЛНОСТЬЮ НЕАКТУАЛЬНЫЙ\n is_actual = True\n if (min_act_price_data_in_stock_list and min_act_price_data_in_stock_list[0][0] != item.price) or \\\n not min_act_price_data_in_stock_list:\n logger.info(\"Пост полностью неактуальный - есть более выгодное(ые) предложение(ия) или акция прошла\")\n is_actual = False\n\n # Генерация списка всех товаров для одного поста и генерация текста\n if is_actual:\n versions_list = [(it[1], it[4], it[3]) for it in min_act_price_data_in_stock_list]\n else:\n versions_list = [(it[0], it[2], it[1]) for it in item.where_buy_list]\n\n new_text = self.__generate_caption(\n category=item.category, brand_name=item.brand_name, model_name=item.model_name,\n ram=item.ram, rom=item.rom, price=item.price, avg_actual_price=item.avg_actual_price,\n hist_min_price=item.hist_min_price, hist_min_date=item.hist_min_date, hist_min_shop=item.hist_min_shop,\n versions_list=versions_list, is_actual=is_actual\n )\n\n if not self.app.loop.run_until_complete(\n self.__edit_post_as_irrelevant(item, new_text, is_actual)\n ):\n logger.error(\"Не удалось отредактировать пост!\")\n is_actual = True\n\n # Сохраняем пост в список постов\n bh.irr_post_add_item_in_msg_in_telegram_list(new_posts_in_telegram_list,\n self.max_num_act_post_telegram, item,\n hashlib.sha256(new_text.encode()).hexdigest(), is_actual)\n\n self.posts_in_telegram_list = new_posts_in_telegram_list\n self.db.disconnect()\n\n def send_posts(self, pc_product_list):\n \"\"\"\n Запуск отправки новых постов\n \"\"\"\n # pc_product_list = get_data()\n if not pc_product_list:\n logger.info(\"НЕТ ДАННЫХ ДЛЯ TELEGRAM\")\n return\n\n self.pc_product_list = pc_product_list\n self.__filtering_data()\n self.__prepare_and_send_all_posts()\n\n # Сохранение словарей\n FileWorker.dict_data.save(h.STATS_PRODS_DICTIONARY_PATH, data=self.stats_prods_dict)\n FileWorker.dict_data.save(h.STATS_SHOPS_DICTIONARY_PATH, data=self.stats_shops_dict)\n FileWorker.list_data.save(h.NUM_POSTS_IN_TELEGRAM_PATH, 
data=[self.num_all_post, self.num_actual_post])\n FileWorker.csv_data.save(\n h.MESSAGES_IN_TELEGRAM_LIST_PATH, data=self.posts_in_telegram_list, namedtuple_type=h.MessagesInTelegram)\n\n def checking_irrelevant_posts(self, pr_product_in_stock_list):\n \"\"\"\n Запуск проверки на неактуальность постов\n \"\"\"\n if not pr_product_in_stock_list:\n logger.error(\"НЕТ ДАННЫХ ДЛЯ НЕАКТУАЛЬНЫХ ПОСТОВ\")\n return\n\n self.__checking_irrelevant_posts(pr_product_in_stock_list)\n\n # Сохранение словарей\n FileWorker.list_data.save(h.NUM_POSTS_IN_TELEGRAM_PATH, data=[self.num_all_post, self.num_actual_post])\n FileWorker.csv_data.save(\n h.MESSAGES_IN_TELEGRAM_LIST_PATH, data=self.posts_in_telegram_list, namedtuple_type=h.MessagesInTelegram)\n" }, { "alpha_fraction": 0.5764321088790894, "alphanum_fraction": 0.5776288509368896, "avg_line_length": 39.299034118652344, "blob_id": "8f44e772aa22285d0758c6a53b6005e0af9bf98b", "content_id": "a012c8da263fbd30ce042c9d10da74f8b6d9362f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15008, "license_type": "no_license", "max_line_length": 120, "num_lines": 311, "path": "/modules/common/file_worker.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import re\nimport csv\nfrom enum import Enum, auto\nfrom typing import Union\nfrom collections import namedtuple\nimport modules.common.helper as h\n\nlogger = h.logging.getLogger('FileWorker')\n\n\ndef convert_column_name(name):\n \"\"\"\n Конвертирование название заголовка в csv в формат поля namedtuple.\n Разрешается в заголовке использовать:\n - сколько угодно пробелов до и после названия столбца\n - разделять слова пробелами\n - использовать любой регистр\n \"\"\"\n return name.strip().replace(' ', '_').replace(' ', '_').lower()\n\n\ndef convert_namedtuple_fields_name(name):\n \"\"\"\n Конвертирование названия поля у namedtuple в название столбца.\n Разрешается в заголовке использовать:\n - сколько угодно пробелов до и после названия столбца\n - разделять слова пробелами\n - использовать любой регистр\n \"\"\"\n return name.replace('_', ' ').title()\n\n\nclass FileWorker(Enum):\n \"\"\"\n Класс, работающий с данными, которые впоследствии сохраняются в файлы. Имеет разные типы и, соответственно, разные\n методы для сохранения и загрузки данных с файлов.\n\n @list_data: обычный строковый список, никаких спецсимволов, элементы разделены \\n\n @csv_data: namedtuple, который сохраняется в csv файл. Исключение: в save() и load() появляется доп. параметр\n @dict_data: словарь вида [key]->[value] (в файле). Может быть нескольких видов, перечисленных ниже или можно\n использовать эту краткую форму (по-умолчанию str_str).\n @dict_data_int_int: надстройка над @dict_data, key:int, value:int\n @dict_data_str_str: надстройка над @dict_data, key:str, value:str (можно исп. краткую форму dict_data)\n @dict_data_str_int: надстройка над @dict_data, key:str, value:int\n @dict_data_int_str: надстройка над @dict_data, key:int, value:str\n \"\"\"\n\n list_data = auto()\n list_data_int = auto()\n list_data_str = auto()\n csv_data = auto()\n dict_data = auto()\n dict_data_int_int = auto()\n dict_data_str_str = auto()\n dict_data_str_int = auto()\n dict_data_int_str = auto()\n\n def save(self, path, data, overwrite=True, namedtuple_type=None):\n \"\"\"\n Сохранение файла на диск. 
В зависимости от типа файла выбирается определенный метод сохранения и\n подбираются определенные параметры.\n\n :param path: (str) полный путь к файлу, включая имя и расширение\n :param data: (any) данные, которые необходимо сохранить\n :param overwrite: (bool) флаг полной перезаписи файла, при False - дозаписывается в конец\n :param namedtuple_type: (type namedtuple) только для csv - тип namedtuple\n \"\"\"\n\n if not data:\n logger.error(\"Не могу сохранить данные, т.к. данных нет. Path = {}\".format(path))\n return\n\n if self is FileWorker.dict_data:\n if namedtuple_type is not None:\n raise AttributeError(\"Param 'namedtuple_type' is not used for type @dict_data\")\n self.__save_dict(data, path, overwrite)\n\n elif self is FileWorker.list_data:\n if namedtuple_type is not None:\n raise AttributeError(\"Param 'namedtuple_type' is not used for type @list_data\")\n self.__save_list(data, path, overwrite)\n\n elif self is FileWorker.csv_data:\n if namedtuple_type is None:\n raise AttributeError(\"For type @csv_data, param 'namedtuple_type' is required\")\n self.__save_csv(data, path, overwrite, namedtuple_type)\n\n def load(self, path, namedtuple_type=None):\n \"\"\"\n Чтение данных с файла. В зависимости от типа файла выбирается определенный метод чтения и\n подбираются определенные параметры.\n\n :param path: (str) полный путь к файлу, включая имя и расширение\n :param namedtuple_type: (type namedtuple) только для csv - тип namedtuple\n :return: данные, прочитанные с файла\n \"\"\"\n\n if self in [FileWorker.dict_data, FileWorker.dict_data_str_str, FileWorker.dict_data_str_int,\n FileWorker.dict_data_int_str, FileWorker.dict_data_int_int]:\n if namedtuple_type is not None:\n raise AttributeError(\"Param 'namedtuple_type' is not used for type @dict_data\")\n return self.__load_dict(path, self)\n\n elif self in [FileWorker.list_data, FileWorker.list_data_int, FileWorker.list_data_str]:\n if namedtuple_type is not None:\n raise AttributeError(\"Param 'namedtuple_type' is not used for type @list_data\")\n return self.__load_list(path, self)\n\n elif self is FileWorker.csv_data:\n if namedtuple_type is None:\n raise AttributeError(\"For type @csv_data, param 'namedtuple_type' is required\")\n return self.__load_csv(path, namedtuple_type)\n\n @staticmethod\n def __save_dict(data: dict, path, overwrite):\n \"\"\" Сохраняет словарь @data в файл @path.\n\n :param data (dict): данные, которые сохраняем в файл\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :param overwrite (bool): флаг, выбирающий режим записи - перезапись или дозаписать в конец файла \"\"\"\n\n mode = 'w' if overwrite else 'a'\n with open(path, mode, encoding='UTF-8') as f:\n for key, val in data.items():\n f.write('[{}] -> [{}]\\n'.format(key, val))\n\n @staticmethod\n def __load_dict(path, type_dict):\n \"\"\"\n Прочитать словарь @data из файла @path\n\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :return (dict): словарь, распарсенный из файла\n \"\"\"\n\n data = dict()\n try:\n with open(path, 'r', encoding='UTF-8') as f:\n for line in f:\n res = re.findall(r\"\\[.+?]\", line)\n # Отсечь кривые записи\n if len(res) != 2:\n continue\n\n key = res[0].replace('[', '').replace(']', '')\n value = res[1].replace('[', '').replace(']', '')\n\n # В зависимости от типа словаря конвертировать значения\n if type_dict is FileWorker.dict_data_int_int:\n if not key.isdigit() or not value.isdigit():\n continue\n key = int(key)\n value = int(value)\n\n if type_dict is 
FileWorker.dict_data_str_int:\n if not value.isdigit():\n continue\n value = int(value)\n\n if type_dict is FileWorker.dict_data_int_str:\n if not key.isdigit():\n continue\n key = int(key)\n\n data[key] = value\n\n except Exception as e:\n logger.error(\"Произошла ошибка при попытке открыть файл в __load_dict, path = {}, e = {}\".format(path, e))\n\n return data\n\n @staticmethod\n def __save_list(data: Union[list, str, int], path, overwrite):\n \"\"\" Сохраняет список или строку @data в файл @path.\n\n :param data (list|str): данные, которые сохраняем в файл\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :param overwrite (bool): флаг, выбирающий режим записи - перезапись или дозаписать в конец файла \"\"\"\n\n if not data or not path:\n return\n\n mode = 'w' if overwrite else 'a'\n with open(path, mode, encoding='UTF-8') as f:\n if isinstance(data, str) or isinstance(data, int):\n f.write(str(data) + '\\n')\n\n if isinstance(data, list):\n for item in data:\n f.write(str(item) + '\\n')\n\n @staticmethod\n def __load_list(path, type_list):\n \"\"\"\n Чтение списка @data из файла @path\n\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :return (list): список данных, распарсенный из файла\n \"\"\"\n data = list()\n\n try:\n with open(path, 'r', encoding='UTF-8') as f:\n for line in f:\n line = line.replace('\\n', '').replace('\\r', '')\n if not line:\n continue\n\n if type_list is FileWorker.list_data_int:\n res = re.findall(r\"\\d+\", line)\n if res:\n res = ''.join(res)\n line = int(res)\n else:\n continue\n\n if type_list in [FileWorker.list_data, FileWorker.list_data_str]:\n pass\n\n data.append(line)\n\n except Exception as e:\n logger.error(\"Произошла ошибка при попытке открыть файл в __load_list, path = {}, e = {}\".format(path, e))\n\n return data\n\n @staticmethod\n def __save_csv(data, path, overwrite, namedtuple_type):\n \"\"\"\n Сохраняет @data (список namedtuple или одиночный namedtuple) типа @namedtuple_type в файл @path.\n\n :param data (list|namedtuple_type): данные, которые сохраняем в файл\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :param overwrite (bool): флаг, выбирающий режим записи - перезапись или дозаписать в конец файла\n :param namedtuple_type (type): тип namedtuple\n \"\"\"\n\n # Проверка корректности типов данных\n if isinstance(data, list):\n if not isinstance(data[0], namedtuple_type):\n return\n elif not isinstance(data, namedtuple_type):\n return\n\n mode = 'w' if overwrite else 'a'\n with open(path, mode, newline='', encoding='utf-8') as f:\n writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)\n if overwrite:\n columns_name = [convert_namedtuple_fields_name(column) for column in namedtuple_type._fields]\n writer.writerow(columns_name)\n\n if isinstance(data, namedtuple_type):\n try:\n writer.writerow(data)\n except Exception as e:\n logger.error(\"Ошибка при попытке сохранения csv!\\nitem={}\\nerror={}\\n\".format(data, e))\n\n if isinstance(data, list):\n for item in data:\n try:\n writer.writerow(item)\n except Exception as e:\n logger.error(\"Ошибка при попытке сохранения csv!\\nitem={}\\nerror={}\\n\".format(item, e))\n\n @staticmethod\n def __load_csv(path, namedtuple_type):\n \"\"\"\n Чтение csv в список @result именнованных кортежей @namedtuple_type из файла @path\n\n :param namedtuple_type (type namedtuple): тип данных namedtuple, не объект\n :param path (str): полный путь файла для сохранения, включая имя и расширение\n :return (list): список 
данных, распарсенный из файла\n \"\"\"\n\n result = []\n\n try:\n with open(path, 'r', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n\n # Проверка совместимости namedtuple и csv\n columns_in_csv = [convert_column_name(column) for column in reader.fieldnames]\n for field in namedtuple_type._fields:\n if field not in columns_in_csv:\n print(\"Несоответствие поля {}, словари не совместимы\".format(field))\n return None\n\n # Заполнение списка данными из файла\n for row in reader:\n value_list = []\n for itt in namedtuple_type._fields:\n # Поиск названия столбца, который равен названию полю namedtuple (без учета пробелов и регистра)\n need_column_name = [column for column in reader.fieldnames\n if convert_column_name(column) == itt][0]\n value_list.append(row[need_column_name])\n\n result.append(namedtuple_type._make(value_list))\n\n except Exception as e:\n logger.error(\"Произошла ошибка при попытке открыть файл в __load_csv, path = {}, e = {}\".format(path, e))\n\n return result\n\n\"\"\"\nИспользование FileDataType\n\n1. res = FileDataType.csv_data.load(\"data/cache/s1.csv\", namedtuple_type=ParseResult)\n\n2. var = FileDataType.csv_data\n res = var.load(\"data/cache/s1.csv\", namedtuple_type=ParseResult)\n\"\"\"\n\n" }, { "alpha_fraction": 0.7225806713104248, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 24.66666603088379, "blob_id": "522fec4c1ec1ace2a96f339e5b4fb385c8977a91", "content_id": "cb7993eb663fdd600eaf562d3327980bc294dbbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 180, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/config.ini", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "[DEFAULT]\nmin_diff_price_per = 10\ncurrent_city = Новосибирск\nwait_between_pages_sec = 4\nrebuilt_iphone_name = восст.\nbest_shops_for_img_url = мвидео, МТС\n\n" }, { "alpha_fraction": 0.6115384697914124, "alphanum_fraction": 0.6163461804389954, "avg_line_length": 32.191490173339844, "blob_id": "818d153f999c876b4e89804737c30934c9bf89e9", "content_id": "fdd40bc6aae8386827f35ea02382690d1f3d64e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3662, "license_type": "no_license", "max_line_length": 118, "num_lines": 94, "path": "/modules/data_sender/telegram/telegram_sender.py", "repo_name": "nickit94/ParserOnlineShops", "src_encoding": "UTF-8", "text": "import time\nimport configparser\n\nfrom pyrogram import Client\nfrom pyrogram.types import InputMediaPhoto\nimport pyrogram.errors.exceptions as ex\n\nimport modules.common.helper as h\n\nlogger = h.logging.getLogger('TelegramSender')\n\n\nclass TelegramSender:\n \"\"\"\n Базовая реализация Sender-а для Telegram\n \"\"\"\n def __init__(self):\n self.app = None\n self.config = configparser.ConfigParser()\n self.config.read('config.ini', encoding=\"utf-8\")\n self.chat_id = int(self.config['bot']['chat_id'])\n\n def __enter__(self):\n logger.info(\"Запуск бота\")\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n logger.info(\"Остановка бота\")\n self.stop()\n\n def start(self):\n \"\"\"\n Запуск бота\n \"\"\"\n self.app = Client(h.BOT_ACCOUNT_PATH)\n print(h.BOT_ACCOUNT_PATH)\n self.app.start()\n\n def stop(self):\n \"\"\"\n Остановка бота\n \"\"\"\n self.app.stop()\n self.app = None\n\n async def send_photo_message(self, img_path, caption, dis_notify):\n \"\"\"\n Отправить сообщение с изображением\n :param img_path: полный путь 
к изображению на диске\n :param caption: описание к изображению (текст поста)\n :param dis_notify: (bool) выключить звук уведомлений\n \"\"\"\n try:\n resp = await self.app.send_photo(self.chat_id, img_path, caption, 'html', disable_notification=dis_notify)\n logger.info(\"Создан новый пост, id={}\".format(resp.message_id))\n return resp.message_id\n\n except ex.bad_request_400.MessageNotModified:\n logger.warning(\"Слишком много постов в телеграм, ожидаем 30 сек...\")\n time.sleep(30)\n\n return None\n\n async def edit_photo_message(self, msg_id, img_path, caption):\n \"\"\"\n Редактирование сообщения с изображением\n :param msg_id: id сообщения, которое необходимо отредактировать\n :param img_path: полный путь к изображению на диске\n :param caption: описание к изображению (текст поста)\n \"\"\"\n try:\n await self.app.edit_message_media(self.chat_id, msg_id, InputMediaPhoto(img_path, caption, 'html'))\n logger.info(\"edit_message_media УСПЕШНО\")\n return True\n\n except ex.bad_request_400.MessageNotModified as e:\n logger.error(\"Не удалось отредактировать пост - edit_message_media: {}\".format(e))\n\n async def edit_photo_message_caption(self, msg_id, caption):\n \"\"\"\n Изменить описание у поста с картинкой\n :param msg_id: id сообщения, которое необходимо отредактировать\n :param caption: описание к изображению (текст поста)\n \"\"\"\n try:\n await self.app.edit_message_caption(self.chat_id, msg_id, caption, 'html')\n logger.info(\"edit_message_caption УСПЕШНО\")\n time.sleep(1)\n return True\n\n except ex.bad_request_400.MessageNotModified as e:\n logger.error(\"Не удалось отредактировать пост - edit_message_caption: {}\".format(e))\n return False\n" } ]
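The `FileWorker` enum in the `modules/common/file_worker.py` record above saves and loads lists of namedtuples as CSV, matching CSV headers to namedtuple fields while ignoring case and spacing. A minimal round-trip sketch, assuming the ParserOnlineShops package is importable; the `PriceRow` type and the cache path are hypothetical, not names from the project:

```python
from collections import namedtuple

from modules.common.file_worker import FileWorker

# Hypothetical record type; any namedtuple whose field names match the
# CSV headers (ignoring case and spaces) will round-trip.
PriceRow = namedtuple('PriceRow', ['brand_name', 'model_name', 'price'])

rows = [
    PriceRow('apple', 'iphone 11', 49990),
    PriceRow('samsung', 'galaxy s20', 54990),
]

# With overwrite=True, save() first writes a header row derived from the
# field names ('Brand Name', 'Model Name', 'Price'), then one CSV line
# per namedtuple.
FileWorker.csv_data.save('data/cache/prices.csv', data=rows,
                         overwrite=True, namedtuple_type=PriceRow)

# load() maps the headers back onto the namedtuple fields; csv.DictReader
# yields strings, so numeric fields need explicit conversion afterwards.
loaded = FileWorker.csv_data.load('data/cache/prices.csv',
                                  namedtuple_type=PriceRow)
print(loaded[0].price)  # '49990' (a string, not an int)
```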
21
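The `eldorado_parse_model_name` function above extracts the ROM size and release year with regular expressions before stripping them from the title. A self-contained sketch of just those two steps, using the same patterns as the parser on a simplified, made-up product title:

```python
import re

def extract_rom_and_year(name):
    """Illustration only: mirrors the ROM and year extraction steps of
    eldorado_parse_model_name on an already simplified title."""
    name = name.lower()
    # Same pattern as the parser: matches '128gb', '8/128gb', '1tb', ...
    rom = re.findall(r'\d*[gb]*[\+/]*\d+(?:gb|tb)', name)
    rom = rom[0] if rom else ''
    # Same pattern as the parser; note that the character class [1,2]
    # also admits a literal comma, so it is slightly broader than the
    # intended 201x-202x range.
    year = re.findall(r' 20[1,2]\d ', name)
    year = year[0] if year else ''
    return rom, year

print(extract_rom_and_year('смартфон samsung galaxy s20 8/128gb 2020 gray'))
# -> ('8/128gb', ' 2020 ')
```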
cangming-Z/uiAutoTest
https://github.com/cangming-Z/uiAutoTest
5a73cb4a9880ce8f7d9f3cea74a83f375215147a
023158c808fb697c75191f68517bc12e80b70042
cc7cc6524adf7b9e1da2c1166f7d9de8f9953ec7
refs/heads/master
2023-02-20T06:42:07.401282
2021-01-22T09:13:53
2021-01-22T09:13:53
326,669,565
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6678832173347473, "alphanum_fraction": 0.6678832173347473, "avg_line_length": 23.909090042114258, "blob_id": "aa95d063a03be1cc8cc422d04a8f46900f5f4f5f", "content_id": "d7d43180f923080121802914e37d60a0c7f22816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/ui/blueprints/case_list.py", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, request\n\ncase_list_bp = Blueprint('case_list', __name__)\n\n\n@case_list_bp.route('/case_list')\ndef index():\n cook = request.cookies\n # cook.pop('user', '')\n print(cook)\n return render_template('lyear_pages_case.html')\n" }, { "alpha_fraction": 0.6118200421333313, "alphanum_fraction": 0.6158495545387268, "avg_line_length": 30.04166603088379, "blob_id": "3cf8c5a71a3df14c80ec769dd28b48ed8cfbd0fc", "content_id": "948789f738c34624c25c355179e96861a5422301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 88, "num_lines": 48, "path": "/ui/blueprints/auth.py", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "import json\n\nfrom flask import render_template, redirect, url_for, Blueprint, request, jsonify\nfrom flask_login import current_user, login_user, login_required, logout_user\n\nauth_bp = Blueprint('auth', __name__)\n\n\n@auth_bp.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n try:\n data = request.get_data()\n json_data = json.loads(data)\n username = json_data['username']\n password = json_data['password']\n\n if username == '' and password == '':\n # login_user(username)\n return 'main'\n return jsonify(message='Invalid username or password.'), 400\n except Exception as e:\n print(e)\n return jsonify(message='error')\n\n\n@auth_bp.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return jsonify(message=_('Logout success.'))\n\n\n@auth_bp.route('/register')\ndef register():\n username = 'aaa'\n password = '123'\n # generate a random account for demo use\n # username = fake.user_name()\n # # make sure the generated username was not in database\n # while User.query.filter_by(username=username).first() is not None:\n # username = fake.user_name()\n # password = fake.word()\n # user = User(username=username)\n # user.set_password(password)\n # db.session.add(user)\n # db.session.commit()\n return jsonify(username=username, password=password, message=_('Generate success.'))" }, { "alpha_fraction": 0.686274528503418, "alphanum_fraction": 0.686274528503418, "avg_line_length": 11.75, "blob_id": "044be6aa9a3768bf944f750e2bdd5a51b10c41d2", "content_id": "011d21895429818ba5ef2a9ec74cb329709c4800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/run.py", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "from ui import create_app\n\n\napp = create_app('ui')\n" }, { "alpha_fraction": 0.7202312350273132, "alphanum_fraction": 0.7248554825782776, "avg_line_length": 25.96875, "blob_id": "94d2d054fe9f0158b688653dd96d050dfb03ece5", "content_id": "af986fc92e063b7f129a13d0577f1d9f0e8429a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 883, "license_type": "no_license", "max_line_length": 58, "num_lines": 32, "path": "/ui/__init__.py", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "import os\n\nimport click\nfrom flask import Flask, render_template, jsonify, request\n# 解决ajax请求的跨域问题\nfrom flask_cors import CORS\n\nfrom ui.apis.v1 import api_v1\nfrom ui.blueprints.auth import auth_bp\nfrom ui.blueprints.case_list import case_list_bp\nfrom ui.blueprints.index import index_bp\nfrom ui.blueprints.main import main_bp\n\n\ndef create_app(config_name=None):\n if config_name is not None:\n app = Flask(config_name)\n else:\n app = Flask('UI')\n CORS(app, supports_credentials=True)\n register_blueprints(app)\n return app\n\n\ndef register_blueprints(app):\n app.register_blueprint(index_bp)\n app.register_blueprint(auth_bp)\n app.register_blueprint(main_bp)\n app.register_blueprint(case_list_bp)\n # app.register_blueprint(todo_bp)\n # app.register_blueprint(home_bp)\n app.register_blueprint(api_v1, url_prefix='/api/v1')\n\n\n" }, { "alpha_fraction": 0.6700707077980042, "alphanum_fraction": 0.7007070183753967, "avg_line_length": 36.47058868408203, "blob_id": "ea9ba7e5ed9c6bbb7164a4d218b2afc23875c8ae", "content_id": "4b3774e4432f6434475159647f661532a0ebe320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1453, "license_type": "no_license", "max_line_length": 75, "num_lines": 34, "path": "/ui/db/init.sql", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "//项目表\nCREATE TABLE `web_auto_test`.`project` (\n `id` int NOT NULL AUTO_INCREMENT COMMENT 'id',\n `project_code` varchar(255) NOT NULL COMMENT '项目编号',\n `project_name` varchar(255) NOT NULL COMMENT '项目名称',\n `project_brief` varchar(255) NULL COMMENT '项目简介',\n `remark` varchar(255) NULL COMMENT '备注',\n PRIMARY KEY (`id`, `project_code`)\n);\n\n//自定义模块表\nCREATE TABLE `web_auto_test`.`custom_module` (\n `id` int NOT NULL AUTO_INCREMENT,\n `project_code` varchar(255) NOT NULL COMMENT '项目编号',\n `custom_module_code` varchar(255) NOT NULL COMMENT '自定义模块编号',\n `custom_module_name` varchar(255) NULL COMMENT '自定义模块名',\n `remark` varchar(255) NULL COMMENT '备注',\n PRIMARY KEY (`id`, `project_code`)\n);\n\n\n//自定义模块详情表\nCREATE TABLE `web_auto_test`.`custom_module_details111` (\n `id` int NOT NULL AUTO_INCREMENT,\n `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',\n `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '更新时间',\n `custom_modul_code` varchar(255) NOT NULL COMMENT '自定义模块编号',\n `index` int NOT NULL COMMENT '执行顺序',\n `ope_type` varchar(255) NULL COMMENT '操作类型',\n `ope_type_remark` varchar(255) NULL COMMENT '操作类型描述',\n `element` text NULL COMMENT '页面元素',\n `value` varchar(255) NULL COMMENT 'url或输入文本信息',\n PRIMARY KEY (`id`)\n);" }, { "alpha_fraction": 0.6890243887901306, "alphanum_fraction": 0.6890243887901306, "avg_line_length": 19.5, "blob_id": "e376572913b64214472673c7527b46c0a430b837", "content_id": "02b800305dcf7f52dd3be086101c74e0492314dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/ui/blueprints/main.py", "repo_name": "cangming-Z/uiAutoTest", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template\n\nmain_bp = Blueprint('main', __name__)\n\n\n@main_bp.route('/main')\ndef main():\n return render_template('测试ajax.html')\n" } ]
6
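The `login()` view in the auth.py record parses the request body by hand with `json.loads(request.get_data())`. A hedged sketch of the same endpoint using Flask's built-in JSON parsing; the blueprint name is hypothetical and the credential check is a placeholder, not the project's logic:

```python
from flask import Blueprint, jsonify, request

auth_demo_bp = Blueprint('auth_demo', __name__)  # hypothetical name

@auth_demo_bp.route('/login', methods=['POST'])
def login():
    # request.get_json() replaces the manual
    # json.loads(request.get_data()) used in auth.py above;
    # silent=True returns None instead of raising on a malformed body.
    payload = request.get_json(silent=True) or {}
    username = payload.get('username', '')
    password = payload.get('password', '')
    if not username or not password:
        return jsonify(message='Invalid username or password.'), 400
    # Real credential checking (e.g. flask_login.login_user) goes here.
    return jsonify(message='ok')
```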
j-hawkins/test
https://github.com/j-hawkins/test
5464f064c85b36ae0a9cbcb2fc06d5aa8d1e032c
6992c0037f39c5d0260b54aa5271a1e29ddfaa53
df5535946c2caf2f419ef622c50353f4a88cbaa2
refs/heads/master
2022-12-17T00:26:28.323809
2020-01-16T16:06:53
2020-01-16T16:06:53
234,335,725
0
0
null
2020-01-16T14:16:25
2020-01-16T16:07:14
2022-12-08T03:27:09
Python
[ { "alpha_fraction": 0.6232876777648926, "alphanum_fraction": 0.6267123222351074, "avg_line_length": 17.600000381469727, "blob_id": "7ebe73c7a48420f47529bfc9adf8c0700bcaf921", "content_id": "74084e2b8f769550573ad4071f7c916a5be8adf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 47, "num_lines": 15, "path": "/pycode.py", "repo_name": "j-hawkins/test", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\r\nimport requests\r\n\r\nurl = [r'https://www.bbc.co.uk/news']\r\n\r\ntitles = []\r\n\r\nfor i in url:\r\n r = requests.get(i)\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n tag = 'title'\r\n results = soup.find(tag).text\r\n titles.append(results)\r\n\r\nprint(titles)" } ]
1
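The single script in this record fetches a page with `requests` and pulls out its `<title>` with BeautifulSoup, but it will happily parse an HTTP error page and crash if the tag is missing. A hardened sketch of the same scrape, adding a timeout, status check, and a missing-tag guard:

```python
import requests
from bs4 import BeautifulSoup

urls = ['https://www.bbc.co.uk/news']  # same target as pycode.py above

titles = []
for url in urls:
    # raise_for_status() turns HTTP errors into exceptions instead of
    # silently parsing an error page; timeout avoids hanging forever.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    tag = soup.find('title')
    if tag is not None:  # guard against a page without a <title>
        titles.append(tag.get_text(strip=True))

print(titles)
```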
ssv273/ML-in-prod
https://github.com/ssv273/ML-in-prod
fd70d8dd485209c76019c75067d0ba0a0406df4b
3cc6cee29145cda53b469f49d14d70d4db4bcb0e
41f4ac0ca40aee69fd3a40a9beaaed3ea836adc7
refs/heads/master
2021-05-21T15:52:12.576671
2020-04-05T10:27:58
2020-04-05T10:27:58
252,704,407
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4893842935562134, "alphanum_fraction": 0.4968152940273285, "avg_line_length": 32.814815521240234, "blob_id": "3e45c3bd59a52b7a2d724bb7382ed3588724449a", "content_id": "d0f62288c3f23767008d91e630b0bacab0ed9779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 69, "num_lines": 27, "path": "/5_predict.py", "repo_name": "ssv273/ML-in-prod", "src_encoding": "UTF-8", "text": "import pickle\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\r\n\r\nimport numpy as np\r\n\r\n\r\n\r\ndataset = pd.read_csv('Data/dataset/dataset_train.csv', sep=';')\r\nX = dataset.drop(['user_id', 'is_churned'], axis=1)\r\ny = dataset['is_churned']\r\n\r\nX_mm = MinMaxScaler().fit_transform(X)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X_mm, \r\n y, \r\n test_size=0.3,\r\n shuffle=True, \r\n stratify=y, \r\n random_state=100)\r\n\r\nwith open('models/baseline_xgb.pcl', 'rb') as f:\r\n model = pickle.load(f)\r\n\r\npredict_test = model.predict(X_test)\r\npredict_test_probas = model.predict_proba(X_test)[:, 1]\r\n\r\n" }, { "alpha_fraction": 0.5283187627792358, "alphanum_fraction": 0.5420761108398438, "avg_line_length": 40.709922790527344, "blob_id": "ed7b19d92ce2f116bd3bfabbc13ac8c02f5e98ac", "content_id": "344392b95142b93a8661dc0252fa93ea7edf91e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5774, "license_type": "no_license", "max_line_length": 128, "num_lines": 131, "path": "/3_Fit_model.py", "repo_name": "ssv273/ML-in-prod", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport xgboost as xgb\r\nfrom matplotlib import pyplot as plt\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom imblearn.over_sampling import SMOTE\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\r\nfrom sklearn.feature_selection import chi2, mutual_info_classif, RFECV\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score, precision_recall_curve, roc_curve, auc, \\\r\n log_loss, roc_auc_score, average_precision_score, confusion_matrix\r\nfrom WOE_IV import data_vars\r\nimport pickle\r\n\r\n\r\ndef evaluation(y_true, y_pred, y_prob):\r\n precision = precision_score(y_true=y_true, y_pred=y_pred)\r\n recall = recall_score(y_true=y_true, y_pred=y_pred)\r\n f1 = f1_score(y_true=y_true, y_pred=y_pred)\r\n ll = log_loss(y_true=y_true, y_pred=y_prob)\r\n roc_auc = roc_auc_score(y_true=y_true, y_score=y_prob)\r\n print('Precision: {}'.format(precision))\r\n print('Recall: {}'.format(recall))\r\n print('F1: {}'.format(f1))\r\n print('Log Loss: {}'.format(ll)) \r\n print('ROC AUC: {}'.format(roc_auc)) \r\n return precision, recall, f1, ll, roc_auc\r\n\r\ndef xgb_fit_predict(X_train, y_train, X_test, y_test):\r\n clf = xgb.XGBClassifier(max_depth=3,\r\n n_estimators=100,\r\n learning_rate=0.1,\r\n nthread=5,\r\n subsample=1.,\r\n colsample_bytree=0.5,\r\n min_child_weight = 3,\r\n reg_alpha=0.,\r\n reg_lambda=0.,\r\n seed=42,\r\n missing=1e10)\r\n\r\n clf.fit(X_train, y_train, eval_metric='aucpr', verbose=10)\r\n predict_proba_test = clf.predict_proba(X_test)\r\n predict_test = clf.predict(X_test)\r\n 
precision_test, recall_test, f1_test, log_loss_test, roc_auc_test = \\\r\n evaluation(y_test, predict_test, predict_proba_test[:, 1])\r\n return clf\r\n\r\ndef plot_importance(importance, features, name):\r\n fi = pd.DataFrame(list(zip(features, importance))).sort_values(by=1, ascending=False)\r\n plt.figure(figsize=(16,6))\r\n plt.bar(range(fi.shape[0]), fi[1], align='center')\r\n plt.xticks(range(fi.shape[0]), fi[0], rotation=90)\r\n plt.title(name)\r\n plt.show()\r\n return fi\r\n\r\ndataset = pd.read_csv('dataset/dataset_train.csv', sep=';')\r\nX = dataset.drop(['user_id', 'is_churned'], axis=1)\r\ny = dataset['is_churned']\r\n\r\nX_mm = MinMaxScaler().fit_transform(X)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X_mm, \r\n y, \r\n test_size=0.3,\r\n shuffle=True, \r\n stratify=y, \r\n random_state=100)\r\n\r\n\r\n\r\n# Apply the WoE and IV transformation\r\ndataset_raw = pd.read_csv('dataset/dataset_raw_train.csv', sep=';')\r\nX_raw = dataset_raw.drop(['user_id', 'is_churned'], axis=1)\r\ny_raw = dataset_raw['is_churned']\r\n\r\n# Compute IV\r\niv_df, iv = data_vars(X_raw, y_raw)\r\nIV = iv.sort_values('IV', ascending=False)\r\n\r\n# Compute WoE\r\nX_WOE = X_raw.copy()\r\n\r\nfor var in X_WOE.columns:\r\n small_df = iv_df.loc[iv_df['VAR_NAME'] == var]\r\n if type(small_df.loc[~small_df['MIN_VALUE'].isnull()]['MIN_VALUE'].values[0]) == str:\r\n small_df.loc[small_df['MIN_VALUE'].isnull(), 'MIN_VALUE'] = 'NaN'\r\n small_df.loc[small_df['MAX_VALUE'].isnull(), 'MAX_VALUE'] = 'NaN'\r\n else:\r\n small_df.loc[small_df['MIN_VALUE'].isnull(), 'MIN_VALUE'] = 0.\r\n small_df.loc[small_df['MAX_VALUE'].isnull(), 'MAX_VALUE'] = 0.\r\n transform_dict = dict(zip(small_df['MAX_VALUE'], small_df['WOE']))\r\n replace_cmd = ''\r\n replace_cmd1 = ''\r\n for i in sorted(transform_dict.items()):\r\n replace_cmd += str(i[1]) + ' if x <= ' + str(i[0]) + ' else '\r\n replace_cmd1 += str(i[1]) + ' if x == \"' + str(i[0]) + '\" else '\r\n replace_cmd += '0'\r\n replace_cmd1 += '0'\r\n if replace_cmd != '0':\r\n try:\r\n X_WOE[var] = X_WOE[var].apply(lambda x: eval(replace_cmd))\r\n except:\r\n X_WOE[var] = X_WOE[var].apply(lambda x: eval(replace_cmd1))\r\n\r\n\r\n\r\nX_train_WOE, X_test_WOE, y_train_WOE, y_test_WOE = train_test_split(X_WOE, \r\n y_raw, \r\n test_size=0.3,\r\n shuffle=True, \r\n stratify=y_raw, \r\n random_state=100)\r\n# Reduce the class imbalance\r\nX_train_WOE_balanced, y_train_WOE_balanced = SMOTE(random_state=42, sampling_strategy=0.3).fit_sample(X_train_WOE, y_train_WOE)\r\n\r\n# Select the significant features; for such features IV should be close neither to 0 nor to 1\r\nsignificant_features = IV.loc[(IV['IV'] >= 0.01) & (IV['IV'] <= 0.8)]['VAR_NAME']\r\nprint('Significant features:', significant_features.nunique())\r\n\r\n# Create and train the model\r\nmodel = xgb_fit_predict(X_train_WOE_balanced, y_train_WOE_balanced, X_test, y_test)\r\n\r\n# save it\r\nwith open('models/baseline_xgb.pcl', 'wb') as f:\r\n pickle.dump(model, f)\r\n\r\n" }, { "alpha_fraction": 0.6228929758071899, "alphanum_fraction": 0.6303410530090332, "avg_line_length": 39.17741775512695, "blob_id": "039a132c7d88ae3853181fc26a8f242bd9ab061b", "content_id": "e3543ef6e47dcbdc480b23bb3073ddcff28f5969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2600, "license_type": "no_license", "max_line_length": 102, "num_lines": 62, "path": "/2_processing.py", "repo_name": "ssv273/ML-in-prod", "src_encoding": "UTF-8", "text": "import time\r\nfrom datetime import datetime, timedelta\r\nimport pandas as pd\r\nfrom imblearn.over_sampling import SMOTE\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom collections import Counter\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom config import *\r\n\r\ndef time_format(sec):\r\n return str(timedelta(seconds=sec))\r\n\r\ndef prepare_dataset(dataset, \r\n dataset_type='train',\r\n dataset_path='Data/dataset/'):\r\n print(dataset_type)\r\n start_t = time.time()\r\n print('Dealing with missing values, outliers, categorical features...')\r\n \r\n # Profiles\r\n dataset['age'] = dataset['age'].fillna(dataset['age'].median())\r\n dataset['gender'] = dataset['gender'].fillna(dataset['gender'].mode()[0])\r\n dataset.loc[~dataset['gender'].isin(['M', 'F']), 'gender'] = dataset['gender'].mode()[0]\r\n dataset['gender'] = dataset['gender'].map({'M': 1., 'F':0.})\r\n dataset.loc[(dataset['age'] > 80) | (dataset['age'] < 7), 'age'] = round(dataset['age'].median())\r\n dataset.loc[dataset['days_between_fl_df'] < -1, 'days_between_fl_df'] = -1\r\n # Pings\r\n for period in range(1,len(INTER_LIST)+1):\r\n col = 'avg_min_ping_{}'.format(period)\r\n dataset.loc[(dataset[col] < 0) | \r\n (dataset[col].isnull()), col] = dataset.loc[dataset[col] >= 0][col].median()\r\n # Sessions and the rest\r\n dataset.fillna(0, inplace=True)\r\n dataset.to_csv('{}dataset_{}.csv'.format(dataset_path, dataset_type), sep=';', index=False)\r\n \r\n print('Dataset is successfully prepared and saved to {}, run time (dealing with bad values): {}'.\\\r\n format(dataset_path, time_format(time.time()-start_t))) \r\n\r\ntrain = pd.read_csv('Data/dataset/dataset_raw_train.csv', sep=';')\r\ntest = pd.read_csv('Data/dataset/dataset_raw_test.csv', sep=';')\r\nprint('train.shape is ',train.shape,'test.shape is', test.shape)\r\n\r\nprepare_dataset(dataset=train, dataset_type='train')\r\nprepare_dataset(dataset=test, dataset_type='test')\r\n\r\ntrain_new = pd.read_csv('Data/dataset/dataset_train.csv', sep=';')\r\nprint(train_new['is_churned'].value_counts())\r\n\r\nX_train = train_new.drop(['user_id', 'is_churned'], axis=1)\r\ny_train = train_new['is_churned']\r\n\r\nX_train_mm = MinMaxScaler().fit_transform(X_train)\r\nprint('Balancing the classes...')\r\n\r\nX_train_balanced, y_train_balanced = SMOTE(random_state=42, ratio=0.3). \\\r\n fit_sample(X_train_mm, y_train.values)\r\n\r\n\r\nprint('Before:', Counter(y_train.values))\r\nprint('After:', Counter(y_train_balanced))" }, { "alpha_fraction": 0.4573991000652313, "alphanum_fraction": 0.6233183741569519, "avg_line_length": 23, "blob_id": "46ed046b67a0a8f6d980703c3d7e2866f40f6bd2", "content_id": "b8c82d5d18aa6f42c1f8261184c7da77bd3d79c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/config.py", "repo_name": "ssv273/ML-in-prod", "src_encoding": "UTF-8", "text": "# Follows from the source data\r\nCHURNED_START_DATE = '2019-09-01' \r\nCHURNED_END_DATE = '2019-10-01'\r\n\r\nINTER_1 = (1,7)\r\nINTER_2 = (8,14)\r\nINTER_3 = (15,21)\r\nINTER_4 = (22,28)\r\nINTER_LIST = [INTER_1, INTER_2, INTER_3, INTER_4]" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5261083841323853, "avg_line_length": 43.47761154174805, "blob_id": "d86832fa336bf682b8e0290cac2e046183717bfc", "content_id": "4f14e506a14883915be5a990821a301c90da5786", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3046, "license_type": "no_license", "max_line_length": 110, "num_lines": 67, "path": "/4_validate.py", "repo_name": "ssv273/ML-in-prod", "src_encoding": "UTF-8", "text": "import pickle\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score, precision_recall_curve, roc_curve, auc, \\\r\n log_loss, roc_auc_score, average_precision_score, confusion_matrix\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nimport numpy as np\r\n\r\n\r\ndef plot_confusion_matrix(y_true, y_pred, classes, cmap=plt.cm.Blues):\r\n TN, FP, FN, TP = confusion_matrix(y_true, y_pred).ravel()\r\n cm = np.array([[TP, FP],\r\n [FN, TN]])\r\n cm_normalized = cm.astype('float') / cm.sum(axis=0)\r\n # Plot both matrices - basic and normalized\r\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10,10))\r\n for ax, normalize, data, title in zip(ax,\r\n [False, True], \r\n [cm, cm_normalized], \r\n ['Confusion matrix (without normalization)', \r\n 'Confusion matrix (normalized)']):\r\n im = ax.imshow(data, interpolation='nearest', cmap=cmap)\r\n divider = make_axes_locatable(ax)\r\n cax = divider.append_axes('right', size='5%', pad=0.1)\r\n fig.colorbar(im, cax=cax)\r\n ax.set(xticks=np.arange(data.shape[1]),\r\n yticks=np.arange(data.shape[0]),\r\n xticklabels=classes, \r\n yticklabels=classes,\r\n title=title,\r\n ylabel='Predicted label',\r\n xlabel='True label')\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor') \r\n # Loop over data dimensions and create text annotations\r\n fmt = '.2f' if normalize else 'd'\r\n for i in range(data.shape[0]):\r\n for j in range(data.shape[1]):\r\n ax.text(j, i, format(data[i, j], fmt), ha=\"center\", va=\"center\", \r\n color=\"white\" if data[i, j] > data.max() / 2. 
else \"black\") \r\n fig.tight_layout()\r\n return fig \r\n\r\n\r\n\r\ndataset = pd.read_csv('Data/dataset/dataset_train.csv', sep=';')\r\nX = dataset.drop(['user_id', 'is_churned'], axis=1)\r\ny = dataset['is_churned']\r\n\r\nX_mm = MinMaxScaler().fit_transform(X)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X_mm, \r\n y, \r\n test_size=0.3,\r\n shuffle=True, \r\n stratify=y, \r\n random_state=100)\r\n\r\nwith open('models/baseline_xgb.pcl', 'rb') as f:\r\n model = pickle.load(f)\r\n\r\npredict_test = model.predict(X_test)\r\npredict_test_probas = model.predict_proba(X_test)[:, 1]\r\n\r\nplot_confusion_matrix(y_test.values, predict_test, classes=['churn', 'active'])\r\nplt.show()" } ]
5
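The ML-in-prod record above balances the churn classes with imbalanced-learn's pre-0.4 API (`ratio=` and `.fit_sample`), which later releases renamed. A minimal sketch of the same step against the current names (`sampling_strategy=`, `.fit_resample`); the data shapes and the 0.3 target ratio below are illustrative assumptions, not values taken from the project:

```python
# Minimal sketch, assuming imbalanced-learn >= 0.6, where SMOTE's `ratio`
# argument became `sampling_strategy` and `fit_sample` became `fit_resample`.
# The data below is a synthetic stand-in for the repository's feature matrix.
from collections import Counter

import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(42)
X = rng.normal(size=(1000, 5))        # toy feature matrix
y = np.array([0] * 900 + [1] * 100)   # imbalanced churn labels

X_mm = MinMaxScaler().fit_transform(X)

# sampling_strategy=0.3 requests a 0.3 minority/majority ratio,
# matching the intent of the old ratio=0.3 argument.
X_bal, y_bal = SMOTE(random_state=42, sampling_strategy=0.3).fit_resample(X_mm, y)

print('Before:', Counter(y))      # Counter({0: 900, 1: 100})
print('After:', Counter(y_bal))   # Counter({0: 900, 1: 270})
```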
cygnusv/matchmaking-encryption
https://github.com/cygnusv/matchmaking-encryption
f9a4a5c552da5ca20b22cdbd3ad7ee92407c9091
02954e6036e8dbb831405553368bcf1b095486dc
abc46cbd65318bde9ea074f9cf21201fa2017d89
refs/heads/master
2020-05-30T11:46:44.997362
2019-06-01T09:44:03
2019-06-01T09:44:03
189,714,392
9
1
null
null
null
null
null
[ { "alpha_fraction": 0.7508232593536377, "alphanum_fraction": 0.7552140355110168, "avg_line_length": 51.57692337036133, "blob_id": "7d7e659ad576756c4d7551dc96d7068f56fdc57c", "content_id": "c7f35c310b93068da9712b1a02d41dd14fe5e166", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2733, "license_type": "no_license", "max_line_length": 502, "num_lines": 52, "path": "/README.md", "repo_name": "cygnusv/matchmaking-encryption", "src_encoding": "UTF-8", "text": "# The Matchmaking Encryption Bulletin Board\n\nThis repository holds a prototype implementation of a bulletin board hidden service that uses an Identity-Based Matchmaking Encryption (IB-ME) scheme. It allows clients to exchange data over the Tor network in an anonymous way, while having strong guarantees about the identities of both receivers and senders. In a nutshell, the bulletin board is composed by two parts: A web server implemented as Tor hidden service and, a command line client that permits to upload and download data from the server.\n\nA user that wants to post a message to the bulletin board can use the command line to encrypt it (using their IB-ME encryption key and an identity string policy for the intended receiver), and upload the ciphertext on the web server using the Tor network. These ciphertexts are available to anyone.\n\nA receiver can now use the client to download all the ciphertexts and try to decrypt each one, using the receiver's decryption key and the sender's identity policy. The client will report to the user the outcome of the decryption phase, showing all the successfully decrypted messages.\n\nYou can use the client application to play with the running service in [http://bjopwtc2f3umlark.onion/](http://bjopwtc2f3umlark.onion/). We created a key file so you can use the encryption and decryption keys of identities \"alice\", \"bob\", \"charlie\", and \"zelda\". We have the keys for identity \"authors\", not included in the key file. There is a message from us for each identity. Please, leave us a message too!\n\n## Client application\n\n### Dependencies\n\nThe client application is built with Python 3.6 and depends on [Charm Crypto](https://jhuisi.github.io/charm/index.html) and the `click` and `requests` libraries. 
It also requires Tor.\n\nFor installing Charm Crypto, follow [these instructions](https://jhuisi.github.io/charm/install_source.html).\n\nFor `click` and `requests`, you can install them using `pip`:\n\n pip install click\n pip install requests\n\n### Usage\n\n $ python3 client.py --help\n Usage: client.py [OPTIONS] COMMAND [ARGS]...\n\n Options:\n -u, --url TEXT URL of the bulletin board\n -l, --localhost Look for the bulletin board in http://localhost:5000\n --help Show this message and exit.\n\n Commands:\n peek Takes a gander at the bulletin board, without decrypting\n post Posts an encrypted message to the bulletin board\n read Reads encrypted messages from the bulletin board\n\n## Server\n\n### Dependencies\n\nThe server depends on the `flask` and `flask_restful` libraries.\n\nFor `flask` and `flask_restful`, you can install them using `pip`:\n\n pip install flask\n pip install flask_restful\n\n### Usage\n\n $ python3 api.py" }, { "alpha_fraction": 0.536285936832428, "alphanum_fraction": 0.5663738250732422, "avg_line_length": 31.51372528076172, "blob_id": "b7e927f636ce2a8f1b45f1e30d9aab8ebf6df2c8", "content_id": "1a44915adc0ead627977ba13d511ad8985ea9d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8309, "license_type": "no_license", "max_line_length": 113, "num_lines": 255, "path": "/ibme.py", "repo_name": "cygnusv/matchmaking-encryption", "src_encoding": "UTF-8", "text": "from charm.toolbox.pairinggroup import ZR,G1,pair\nfrom charm.toolbox.hash_module import Hash\nimport pickle\nimport base64\n\ndebug = False\n\nclass IBME():\n\n def __init__(self, groupObj=None):\n if groupObj is None:\n from charm.toolbox.pairinggroup import PairingGroup\n groupObj = PairingGroup('SS512', secparam=512) \n global group\n group = groupObj\n mask = 'ed27dbfb02752e0e16bc4502d6c732bc5f1cc92ba19b2d93a4e95c597ca42753e93550b52f82b6c13fb8cc0c2fc64487'\n self._mask = bytes.fromhex(mask)\n \n def setup(self):\n r, s, P = group.random(ZR), group.random(ZR), group.random(G1)\n P0 = r * P\n\n pk = (P, P0)\n sk = (r, s)\n if(debug):\n print(\"Public parameters...\")\n group.debug(pk)\n print(\"Secret parameters...\")\n group.debug(sk)\n return (pk, sk)\n\n def H(self, X):\n return group.hash(X, G1)\n\n def H_prime(self, X):\n # Both H and H' are computed from the same method group.hash()\n # In order to make them different, we apply a fixed mask to the\n # inputs of H'\n X = bytes([ a ^ b for (a,b) in zip(X.encode(), self._mask) ])\n return group.hash(X, G1)\n \n def skgen(self, sk, S): \n (_, s) = sk \n ek = s * self.H_prime(S)\n\n if(debug):\n print(\"Key for attrs S '{}' => {}\".format(S, ek))\n return ek\n\n def rkgen(self, sk, R): \n (r, s) = sk \n H_R = self.H(R) \n dk1 = r * H_R\n dk2 = s * H_R\n dk3 = H_R\n \n dk = (dk1, dk2, dk3)\n\n if(debug):\n print(\"Key for attrs R '{}' => {}\".format(R, dk))\n return dk\n \n \n def encrypt(self, pk, R, ek_S, M): # check length to make sure it is within n bits\n\n (P, P0) = pk\n\n u = group.random(ZR)\n t = group.random(ZR)\n \n T = t * P\n U = u * P\n\n H_R = self.H(R) \n k_R = pair(H_R, u * P0)\n\n k_S = pair(H_R, T + ek_S)\n\n enc_k_R = group.serialize(k_R)[2:-1]\n enc_k_S = group.serialize(k_S)[2:-1]\n\n V = bytes([ a ^ b ^ c for (a,b,c) in zip(M, enc_k_R, enc_k_S) ])\n\n C = (T, U, V)\n\n if(debug):\n print('\\nEncrypt...')\n print('T =>', T)\n print('u => %s' % u)\n print('U => %s' % U)\n print(\"V' =>\" % V)\n print('enc_k_R => %s' % enc_k_R)\n print('enc_k_S => %s' % enc_k_S)\n 
#group.debug(C)\n return C\n \n def decrypt(self, pk, dk, S, C):\n\n (dk1, dk2, dk3) = dk\n (T, U, V) = C\n \n k_R = pair(dk1, U)\n\n H_prime_S = self.H_prime(S)\n \n k_S = pair(dk3, T) * pair(H_prime_S, dk2)\n\n enc_k_R = group.serialize(k_R)[2:-1]\n enc_k_S = group.serialize(k_S)[2:-1]\n \n M = bytes([ a ^ b ^ c for (a,b,c) in zip(V, enc_k_R, enc_k_S) ])\n\n if(debug):\n print('\\nDecrypt....')\n print('T =>', T)\n print('U =>', U)\n print('V =>', V)\n print(\"M' =>\", M)\n return M\n\n def serialize_ciphertext(self, C):\n T, U, V = C\n T = base64.b64decode(group.serialize(T)[2:])\n U = base64.b64decode(group.serialize(U)[2:])\n return pickle.dumps((T, U, V))\n\n def deserialize_ciphertext(self, bitstring):\n T, U, V = pickle.loads(bitstring)\n T = group.deserialize(b'1:'+base64.b64encode(T))\n U = group.deserialize(b'1:'+base64.b64encode(U))\n return (T, U, V)\n\n def serialize_setup(self, S):\n pk, sk = S\n return pickle.dumps(tuple(group.serialize(x) for x in pk+sk))\n\n def deserialize_setup(self, bitstring):\n pieces = pickle.loads(bitstring)\n P, P0, r, s = tuple(group.deserialize(p) for p in pieces)\n pk = (P, P0)\n sk = (r, s)\n return pk, sk\n \n def serialize_tuple(self, input):\n return pickle.dumps(tuple(group.serialize(x) for x in input))\n\n def deserialize_tuple(self, bitstring):\n pieces = pickle.loads(bitstring)\n return tuple(group.deserialize(p) for p in pieces)\n\n\nif __name__ == \"__main__\":\n debug = True\n from charm.toolbox.pairinggroup import PairingGroup\n group = PairingGroup('SS512', secparam=512) \n ME = IBME(group)\n (master_public_key, master_secret_key) = ME.setup()\n R = 'attribute 1, attribute 2'\n S = 'attribute 3, attribute 4'\n dk = ME.rkgen(master_secret_key, R)\n ek = ME.skgen(master_secret_key, S)\n msg = b\"hello world!!!!!\"\n cipher_text = ME.encrypt(master_public_key, R, ek, msg)\n\n msg_prime = ME.decrypt(master_public_key, dk, S, cipher_text)\n assert msg == msg_prime\n\n S2 = 'attribute 5'\n msg_2 = ME.decrypt(master_public_key, dk, S2, cipher_text)\n assert msg != msg_2\n\n import timeit\n\n setup = '''\nfrom __main__ import IBME\nfrom charm.toolbox.pairinggroup import PairingGroup,pair\nfrom charm.toolbox.pairinggroup import ZR,G1,pair\ngroup = PairingGroup('SS512', secparam=512) \nME = IBME(group)\n(master_public_key, master_secret_key) = ME.setup()\nR = 'attribute 1, attribute 2'\nS = 'attribute 3, attribute 4'\ndk = ME.rkgen(master_secret_key, R)\nek = ME.skgen(master_secret_key, S)\nmsg = b\"hello world!!!!!\"\n '''\n debug = False\n iters = 10\n repetitions = 50\n print(\"\\n=====\")\n print(\"Benchmarking IB-ME...{} iters, {} repetitions\".format(iters, repetitions))\n encryption = 'cipher_text = ME.encrypt(master_public_key, R, ek, msg)'\n timer = timeit.Timer(encryption, setup=setup)\n print('Encryption time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n setup = setup + \"\\n\" + encryption\n\n decryption = 'ME.decrypt(master_public_key, dk, S, cipher_text)'\n timer = timeit.Timer(decryption, setup=setup)\n print('Decryption time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n pairing = 'pair(dk[0], ek)'\n timer = timeit.Timer(pairing, setup=setup)\n print('Pairing time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 
1000*(1.0/repetitions)*sum(timings))\n\n expo = \"master_secret_key[0] * ek\"\n timer = timeit.Timer(expo, setup=setup)\n print('Expo time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n ra = \"group.random(ZR)\"\n timer = timeit.Timer(ra, setup=setup)\n print('Random time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n h = \"ME.H(R)\"\n timer = timeit.Timer(h, setup=setup)\n print('H time (ms):')\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n h2 = \"ME.H_prime(R)\"\n timer = timeit.Timer(h2, setup=setup)\n print(\"H' time (ms):\")\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n \n setupp = \"(master_public_key, master_secret_key) = ME.setup()\"\n timer = timeit.Timer(setupp, setup=setup)\n print(\"setup time (ms):\")\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n\n rkgen = \"dk = ME.rkgen(master_secret_key, R)\"\n timer = timeit.Timer(rkgen, setup=setup)\n print(\"rkgen time (ms):\")\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n\n setupp = \"ek = ME.skgen(master_secret_key, S)\"\n timer = timeit.Timer(setupp, setup=setup)\n print(\"skgen time (ms):\")\n timings = [time/iters for time in timer.repeat(repetitions, iters)]\n print('\\tmin', 1000*min(timings), '\\tavg', 1000*(1.0/repetitions)*sum(timings))\n\n \n\n\n\n " }, { "alpha_fraction": 0.6510568857192993, "alphanum_fraction": 0.6725203394889832, "avg_line_length": 35.16470718383789, "blob_id": "c68a4cd3663babde2ae16c8f8e491dad04c96d17", "content_id": "01b295fe5f7b916394868ba888640dfb234bea44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3075, "license_type": "no_license", "max_line_length": 109, "num_lines": 85, "path": "/client.py", "repo_name": "cygnusv/matchmaking-encryption", "src_encoding": "UTF-8", "text": "from ibme import IBME\nimport requests\nimport base64\nimport click\nimport json\nimport binascii\n\nkeys = json.load(open('keys.json'))\n\nME = IBME()\nmaster_public_key = ME.deserialize_tuple(base64.urlsafe_b64decode(keys[\"public_key\"]))\ndel keys[\"public_key\"]\n\napi = \"http://bjopwtc2f3umlark.onion\"\n\ndef get_session():\n session = requests.session()\n if \".onion\" in api:\n session.proxies = {'http': 'socks5h://127.0.0.1:9050',\n 'https': 'socks5h://127.0.0.1:9050'}\n return session\n\ndef crc(data):\n return binascii.crc_hqx(data, 0).to_bytes(2, 'big')\n\n\n@click.group()\n@click.option('--url', '-u', help=\"URL of the bulletin board\")\n@click.option('--localhost', '-l', is_flag=True, help=\"Look for the bulletin board in http://localhost:5000\")\ndef cli(url, localhost):\n global api\n if localhost:\n api = \"http://localhost:5000\"\n if url:\n api = url\n print(f\"Using url {api}\")\n\n@click.command(help=\"Posts an encrypted message to the bulletin board\")\n@click.option('--receiver', prompt=\"Receiver's policy string\", help=\"Receiver's policy string\")\n@click.option('--sender', prompt=\"Sender's encryption key\", help=\"Sender's encryption key\")\n@click.option('--message', prompt=\"Message to send\", help=\"Message to send\")\ndef post(receiver, sender, message):\n ek = ME.deserialize_tuple(base64.urlsafe_b64decode(keys[sender][\"ek\"]))[0]\n \n message = message.encode()\n padded_message = crc(message) + message\n ciphertext = ME.encrypt(master_public_key, receiver, ek, padded_message)\n ctxt = ME.serialize_ciphertext(ciphertext)\n b64_ctxt = base64.urlsafe_b64encode(ctxt)\n click.echo(b\"Ciphertext: \" + b64_ctxt)\n\n res = get_session().put(f'{api}/messages', data={'message': b64_ctxt}).json()\n click.echo(f\"Index of the message: {res}\")\n\n@click.command(help=\"Takes a gander at the bulletin board, without decrypting\")\ndef peek():\n res = get_session().get(f'{api}/messages').json()\n for i, message in enumerate(res):\n click.echo(f\"({i}): {message}\")\n\ndef decrypt_ciphertext(dk, ctxt, sender):\n ctxt = base64.urlsafe_b64decode(ctxt)\n ctxt = ME.deserialize_ciphertext(ctxt)\n padded_message = ME.decrypt(master_public_key, dk, sender, ctxt)\n pad, message = padded_message[:2], padded_message[2:]\n return message if crc(message) == pad else None\n\n@click.command(help=\"Reads encrypted messages from the bulletin board\")\n@click.option('--receiver', prompt=\"Receiver's policy string\", help=\"Receiver's policy string\")\n@click.option('--sender', prompt=\"Sender's attribute string\", help=\"Sender's attribute string\")\ndef read(receiver, sender):\n dk = ME.deserialize_tuple(base64.urlsafe_b64decode(keys[receiver][\"dk\"]))\n ciphertexts = get_session().get(f'{api}/messages').json()\n\n for i, ciphertext in enumerate(ciphertexts):\n message = decrypt_ciphertext(dk, ciphertext, sender)\n if message:\n click.echo(f\"{i}: {message.decode('utf-8')}\")\n\n\ncli.add_command(post)\ncli.add_command(peek)\ncli.add_command(read)\n\nif __name__ == '__main__':\n cli()\n\n" }, { "alpha_fraction": 0.59560227394104, "alphanum_fraction": 0.6013384461402893, "avg_line_length": 24.536584854125977, "blob_id": "fa1f2fa831feb23cd789fb555f9084a2d7552cbd", "content_id": "b5949c61d0ad8e2ce6dd5931faebf798e80e8e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1046, "license_type": "no_license", "max_line_length": 148, "num_lines": 41, "path": "/api.py", "repo_name": "cygnusv/matchmaking-encryption", "src_encoding": "UTF-8", "text": "from flask import Flask, request\nfrom flask_restful import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\nmessages = []\n\nclass BulletinBoard(Resource):\n def get(self):\n return messages\n\n def put(self):\n messages.append(request.form['message'])\n for i, message in enumerate(messages):\n print(f\"{i} : {message}\")\n return len(messages) - 1\n\napi.add_resource(BulletinBoard, '/messages')\n\n\n@app.route('/')\ndef hello_world():\n\n with open('readme.html', 'r') as myfile:\n readme = myfile.read()\n\n rows = \"\"\n for i, message in enumerate(messages):\n rows += f\"<tr><td>{i}</td><td>{message}</td></tr>\"\n\n html = \"<!doctype html><html><head><title>Matchmaking Encryption Hidden Service</title></head><body>\" \\\n + readme + \"<h1><a name='messages'></a> Messages in the Bulletin Board</h1><table style='width:100%'><tr><th>Index</th><th>Message</th></tr>\" \\\n + rows + \"</table></body></html>\"\n\n print(html)\n\n return html\n\nif __name__ == '__main__':\n app.run(debug=True)" } ]
4
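client.py in the record above prepends a two-byte CRC-16/XMODEM checksum (`binascii.crc_hqx`) to each plaintext before IB-ME encryption, and a failed check after decryption marks a ciphertext that was not addressed to the given sender/receiver identities. A self-contained sketch of just that framing logic; the pairing-based scheme itself needs Charm Crypto and is not exercised here:

```python
# Sketch of the CRC framing used by client.py to detect failed IB-ME
# decryptions; only the padding logic is shown, not the encryption.
import binascii


def crc(data):
    # Two-byte CRC-16/XMODEM prefix, as in client.py.
    return binascii.crc_hqx(data, 0).to_bytes(2, 'big')


def pad(message):
    return crc(message) + message


def unpad(padded):
    # None signals a checksum mismatch, i.e. a decryption under the
    # wrong identities that produced garbage bytes.
    check, message = padded[:2], padded[2:]
    return message if crc(message) == check else None


framed = pad(b'hello world!!!!!')
assert unpad(framed) == b'hello world!!!!!'

# Any single corrupted byte is guaranteed to be caught by a CRC-16.
corrupted = framed[:2] + b'X' + framed[3:]
assert unpad(corrupted) is None
```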
Anc0/home-temp-restapi
https://github.com/Anc0/home-temp-restapi
eb21537aad9020b491ca78ebc54b526cf3fd6f6a
5ffaa0ad193048b4073464b0bc9b59a41c7221fb
71189d5d04ba1834227332aea910c70e6d653572
refs/heads/master
2020-03-24T08:42:12.110030
2018-10-21T09:33:44
2018-10-21T09:33:44
142,604,699
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5936073064804077, "alphanum_fraction": 0.5936073064804077, "avg_line_length": 29.44444465637207, "blob_id": "45670640bf9d2d6ed6f2b5973920eff4b6ff717b", "content_id": "1a3f3a9c0fb3ec4b5f2be52356f941ed07292c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 88, "num_lines": 36, "path": "/api/helpers/topic_retriever.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from api.models import Topic\n\n\nclass TopicRetriever:\n\n def __init__(self):\n pass\n\n def get_all_topics(self):\n \"\"\"\n Retrieve all topics (feasible for home use, where there is not a lot of topics).\n :return: list of all topics\n \"\"\"\n return Topic.objects.filter(display=True)\n\n def get_topics(self, topic_names):\n \"\"\"\n Retrieve topics with name in topic_names list\n :param topic_names: [list[string]] topic names\n :return: list of topics\n \"\"\"\n return Topic.objects.filter(name__in=topic_names)\n\n def get_topic(self, id=None, name=None):\n \"\"\"\n Retrieve a single topic with name\n :param topic_name: [string] name of the topic\n :return: Topic with name=topic_name\n \"\"\"\n if not id and not name:\n raise Exception(\"You have to set either topic name or topic id.\")\n if not id:\n return Topic.objects.filter(name=name)\n if not name:\n return Topic.objects.filter(id=id)\n return Topic.objects.filter(id=id, name=name)" }, { "alpha_fraction": 0.6786655187606812, "alphanum_fraction": 0.6812993884086609, "avg_line_length": 32.5, "blob_id": "a2c254072eeb27e6d1536eb113a25b853b7ddc43", "content_id": "437cd309b4730c7c1d2d32a96d571a40f5be74f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 84, "num_lines": 34, "path": "/readme.md", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "# Home temp restapi\nApi and mqtt subscribing component of the home temp project.\n\n## Setup\nThere are a few technologies that need to be installed in order for the api to work:\n\n - postgres -> $ sudo apt-get install postgresql-10\n - nginx -> $ sudo apt-get install nginx\n - supervisor -> $ sudo apt-get install supervisor\n - rabbitmq -> $ sudo apt-get install rabbitmq-server\n - pip -> $ sudo apt-get install python-pip\n\n\nConfigure postgres:\n\n - $ sudo su - postgres\n - $ psql\n - $ CREATE ROLE home_temp WITH PASSWORD 'home_temp';\n - $ ALTER ROLE home_temp with LOGIN;\n - $ CREATE DATABASE home_temp;\n - $ GRANT ALL PRIVILEGES ON DATABASE home_temp to home_temp;\n\n\nThen install the virtualenv and virtualenvwrapper and create a virtual environment:\n\n - $ pip install virtualenv virtualenvwrapper\n - $ mkvirtualenv --python=/usr/bin/python3 home-temp-restapi\n\n\nLastly create a project directory and deploy the api component of the restapi:\n\n - $ mkdir ~/home-temp-restapi\n - $ mkdir ~/home-temp-restapi/source\n - on a local machine: $ fab production_api deploy\n" }, { "alpha_fraction": 0.748062014579773, "alphanum_fraction": 0.748062014579773, "avg_line_length": 22.454545974731445, "blob_id": "65fb57a91789f0e88c7919623257712c24848f73", "content_id": "72b54c2cbbddaac26c5c4ae042360c2f78ea2f57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 72, "num_lines": 11, "path": "/api/admin.py", "repo_name": 
"Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Topic, TopicRecord\n\n\nclass TopicAdmin(admin.ModelAdmin):\n fields = ['name', 'short_name', 'display', 'temperature_offset']\n\n\nadmin.site.register(TopicRecord)\nadmin.site.register(Topic, TopicAdmin)\n" }, { "alpha_fraction": 0.5822295546531677, "alphanum_fraction": 0.5855408310890198, "avg_line_length": 40.20454406738281, "blob_id": "b69d31e48eb3a0b20ccabd4ad4aeb31940c12a23", "content_id": "7106003ce63dcfbd2da7fbd9d4402e7292ef3e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 109, "num_lines": 44, "path": "/api/helpers/topic_record_retriever.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\n\nimport pytz\n\nfrom api.helpers.topic_retriever import TopicRetriever\nfrom api.models import TopicRecord\n\n\nclass TopicRecordRetriever:\n\n def __init__(self):\n self.topics = TopicRetriever()\n\n def get_records_for_topic(self, topic_id, seconds_back=3600):\n \"\"\"\n Retrieve topic records for single topic between from and to time.\n :param topic_id: [int] topic id\n :param seconds_back: off set of starting time of the data\n :return: topic records\n \"\"\"\n topic = self.topics.get_topic(id=topic_id)[0]\n data = list(TopicRecord.objects.filter(topic=topic.id,\n created__range=(datetime.now(pytz.UTC) -\n timedelta(seconds=seconds_back),\n datetime.now(pytz.UTC))))\n for x in data:\n x.value += topic.temperature_offset\n return data\n\n def get_records_for_topics(self, topic_names=None, from_time=datetime.now(pytz.UTC) - timedelta(hours=1),\n to_time=datetime.now(pytz.UTC)):\n \"\"\"\n Retrieve topic records for multiple topics between from and to time\n :param topic_names: [list[string]] list of topic names, if the list is empty, return for all topics\n :param from_time: [datetime tz] start of the interval\n :param to_time: [datetime tz] end of the interval\n :return: topic records\n \"\"\"\n if not topic_names:\n topics = self.topics.get_all_topics()\n else:\n topics = self.topics.get_topics(topic_names)\n\n return TopicRecord.objects.filter(topic__in=topics, created__gte=from_time, created__lte=to_time)" }, { "alpha_fraction": 0.5603773593902588, "alphanum_fraction": 0.5723270177841187, "avg_line_length": 38.75, "blob_id": "53f55307bcf3d41771e8b8f4d7d0cc201186b820", "content_id": "91769b42d62470b7b318310dca316299e5269f65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1590, "license_type": "no_license", "max_line_length": 165, "num_lines": 40, "path": "/api/migrations/0001_initial.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-26 16:09\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Topic',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('modified', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='TopicRecord',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\n ('value', models.FloatField()),\n ('units', models.CharField(choices=[('CE', 'Degrees celsius'), ('FA', 'Degrees fahrenheit'), ('KE', 'Degrees kelvin')], default='CE', max_length=2)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('modified', models.DateTimeField(auto_now=True)),\n ('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Topic')),\n ],\n ),\n migrations.AddField(\n model_name='topic',\n name='last_record',\n field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='last_record', to='api.TopicRecord'),\n ),\n ]\n" }, { "alpha_fraction": 0.7568199634552002, "alphanum_fraction": 0.7575993537902832, "avg_line_length": 32.76315689086914, "blob_id": "e9dfc99b61bbd5c444b14ac939a4d93795735f11", "content_id": "2a2220f6246a726ea195f24666f6e36f30d497a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 93, "num_lines": 38, "path": "/api/views.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "import json\n\nfrom django.core.serializers import serialize\nfrom django.db import connection\nfrom django.http import HttpResponse\n\nfrom api.helpers.interpolation import Interpolation\nfrom api.helpers.topic_record_retriever import TopicRecordRetriever\nfrom api.helpers.topic_retriever import TopicRetriever\nfrom api.models import TopicRecord\n\n\ndef index(request):\n return HttpResponse(\"Hello world, this is temp rest api\")\n\n\ndef topics(request):\n data = serialize('json', TopicRetriever().get_all_topics())\n return HttpResponse(data, content_type=\"application/json\")\n\n\ndef topic(request, topic_id):\n data = serialize('json', TopicRetriever().get_topic(id=topic_id))\n return HttpResponse(data, content_type='application/json')\n\n\ndef records_for_topic(request, topic_id):\n data = serialize('json', TopicRecordRetriever().get_records_for_topic(topic_id=topic_id))\n return HttpResponse(data, content_type='application/json')\n\n\ndef records_for_topic_offset(request, topic_id, offset):\n # Get data from the database\n data = TopicRecord().get_aggregated_data(topic_id, offset)\n # Json serialize the results\n data = json.dumps(data, indent=4, sort_keys=True, default=str)\n # Return the respose\n return HttpResponse(data, content_type='application/json')\n" }, { "alpha_fraction": 0.6321428418159485, "alphanum_fraction": 0.6397321224212646, "avg_line_length": 35.129032135009766, "blob_id": "21fbe063d3712f5e9a5ace3125c535b04c142b46", "content_id": "52b5d2a75464387297eb9fe04f226a5472d48566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2240, "license_type": "no_license", "max_line_length": 133, "num_lines": 62, "path": "/api/models.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from django.db import models, connection\n\n\nclass Topic(models.Model):\n # Actual mqtt topic name\n name = models.CharField(max_length=255, unique=True)\n # User friendly displayed name\n short_name = models.CharField(max_length=255, default=\"Unnamed topic\")\n\n temperature_offset = models.FloatField(default=0)\n\n last_record = models.OneToOneField('api.TopicRecord', null=True, on_delete=models.SET_NULL, related_name='last_record')\n\n # Should the topic be displayed in the webapp\n display = models.BooleanField(default=False)\n\n created = 
models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n def set_last_record(self, topic_record):\n if not self.last_record or topic_record.created > self.last_record.created:\n self.last_record = topic_record\n self.save()\n\n def __str__(self):\n return \"Topic: {}\".format(self.name)\n\n\nclass TopicRecord(models.Model):\n\n CELSIUS = 'CE'\n FAHRENHEIT = 'FA'\n KELVIN = 'KE'\n\n UNITS = (\n (CELSIUS, 'Degrees celsius'),\n (FAHRENHEIT, 'Degrees fahrenheit'),\n (KELVIN, 'Degrees kelvin')\n )\n\n value = models.FloatField()\n units = models.CharField(max_length=2, default=CELSIUS, choices=UNITS)\n\n topic = models.ForeignKey('api.Topic', on_delete=models.CASCADE)\n\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n def get_aggregated_data(self, topic_id, offset):\n time_slice = offset * 24 * 60 / 72\n sql = \"\"\"SELECT 1 AS id, date_round(created, '{} minutes'::interval), AVG(Value)\n FROM api_topicrecord\n WHERE created >= current_timestamp - '{} day'::interval AND topic_id = {}\n GROUP BY date_round(created, '{} minutes'::interval)\n ORDER BY date_round(created, '{} minutes'::interval);\"\"\".format(time_slice, offset, topic_id, time_slice, time_slice)\n cursor = connection.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n return [{\"created\": x[1], \"value\": x[2]} for x in data]\n\n def __str__(self):\n return \"TopicRecord: {}, value: {}, at: {}\".format(self.topic.name, self.value, self.created)\n" }, { "alpha_fraction": 0.6705068945884705, "alphanum_fraction": 0.6705068945884705, "avg_line_length": 38.45454406738281, "blob_id": "a7e6b8d2af6b0b734343958fea198b70f5f3f3c3", "content_id": "9774b3be5a2b093b940e77304c9fdd5cdf3a1ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 118, "num_lines": 11, "path": "/api/urls.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('topics/', views.topics, name='topics'),\n path('topic/<int:topic_id>/', views.topic, name='topic'),\n path('topic/<int:topic_id>/records/', views.records_for_topic, name='record_for_topic'),\n path('topic/<int:topic_id>/records/<int:offset>/', views.records_for_topic_offset, name='record_for_topic_offset')\n]\n" }, { "alpha_fraction": 0.4920634925365448, "alphanum_fraction": 0.6911976933479309, "avg_line_length": 15.5, "blob_id": "b2951a349afd2c6bbeea3c60784d311b10c4bb6b", "content_id": "a65262113710ce89975614ff7e909ec6175cdb3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 693, "license_type": "no_license", "max_line_length": 27, "num_lines": 42, "path": "/requirements.txt", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "amqp==2.3.2\nasn1crypto==0.24.0\nbackcall==0.1.0\nbcrypt==3.1.4\nbilliard==3.5.0.4\ncffi==1.11.5\ncryptography==2.3\ndecorator==4.3.0\nDjango==2.0.7\ndjango-cors-headers==2.4.0\ndjango-debug-toolbar==1.9.1\ndjango-extensions==2.1.0\nextensions==0.4\nFabric3==1.14.post1\ngunicorn==19.9.0\nidna==2.7\ninvoke==1.1.0\nipython==6.4.0\nipython-genutils==0.2.0\njedi==0.12.1\nkombu==4.2.1\nnumpy==1.14.5\npaho-mqtt==1.3.1\nparamiko==2.4.1\nparso==0.3.1\npexpect==4.6.0\npickleshare==0.7.4\nprompt-toolkit==1.0.15\npsycopg2==2.7.5\nptyprocess==0.6.0\npyasn1==0.4.4\npycparser==2.18\nPygments==2.2.0\nPyNaCl==1.2.1\npytz==2018.5\nscipy==1.1.0\nsimplegeneric==0.8.1\nsix==1.11.0\nsqlparse==0.2.4\ntraitlets==4.3.2\nvine==1.1.4\nwcwidth==0.1.7\n" }, { "alpha_fraction": 0.6886792182922363, "alphanum_fraction": 0.6922169923782349, "avg_line_length": 38.13846206665039, "blob_id": "a25f8047b728a0a9ff231db602a1ec834a32523c", "content_id": "382e217249f3ea61fc78ec85496df62d0f4bf334", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 2544, "license_type": "no_license", "max_line_length": 120, "num_lines": 65, "path": "/fabfile.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "from fabric.api import sudo, run, env, prefix, task\nfrom fabric.contrib.project import rsync_project\n\nproduction_pi = '192.168.1.14'\n\nenv.user = 'pi'\nenv.hosts = [production_pi]\n\n@task\ndef production_api():\n env.remote_dir = '/home/pi/home-temp-restapi/source/'\n env.local_dir = '/home/andraz/Projects/home-temp-restapi/'\n\n env.supervisor = 'home-temp-restapi/source/conf/supervisor.home-temp-restapi.production.conf'\n env.supervisor_name = 'home-temp-restapi'\n\n env.nginx = 'home-temp-restapi/source/conf/nginx.home-temp-restapi.production.conf'\n env.nginx_file = 'nginx.home-temp-restapi.production.conf'\n\n env.virtual_env = '/home/pi/.virtualenvs/home-temp-restapi/bin/activate'\n env.requirements = '/home/pi/home-temp-restapi/source/requirements.txt'\n\n@task\ndef production_mqtt_worker():\n env.remote_dir = '/home/pi/home-temp-mqtt-worker/source/'\n env.local_dir = '/home/andraz/Projects/home-temp-restapi/'\n\n env.supervisor = 'home-temp-mqtt-worker/source/conf/supervisor.mqtt-worker.production.conf'\n env.supervisor_name = 'mqtt-worker'\n\n env.nginx = 'home-temp-mqtt-worker/source/conf/nginx.home-temp-restapi.production.conf'\n env.nginx_file = 'nginx.home-temp-restapi.production.conf'\n\n env.virtual_env = '/home/pi/.virtualenvs/home-temp-mqtt-worker/bin/activate'\n env.requirements = '/home/pi/home-temp-mqtt-worker/source/requirements.txt'\n\n@task\ndef 
deploy(requirements=False, supervisor=False, nginx=False):\n \"\"\"\n Take local project, upload it to the server with the right configuration. Then check flags and install requirements,\n update supervisor configuration and update nginx configuration.\n \"\"\"\n rsync_project(remote_dir=env.remote_dir, local_dir=env.local_dir)\n\n if requirements:\n with prefix('source {}'.format(env.virtual_env)):\n run('pip install -r {}'.format(env.requirements))\n\n if supervisor:\n sudo('cp /home/pi/{} /etc/supervisor/conf.d'.format(env.supervisor))\n sudo('supervisorctl reread')\n sudo('supervisorctl update')\n\n sudo('supervisorctl restart {}'.format(env.supervisor_name))\n\n if nginx:\n sudo('cp /home/pi/{} /etc/nginx/sites-available/'.format(env.nginx))\n try:\n sudo('rm /etc/nginx/sites-enabled/{}'.format(env.nginx_file))\n except:\n print(\"Nginx configuration is not linked... proceeding.\")\n\n sudo('ln -s /etc/nginx/sites-available/{} /etc/nginx/sites-enabled/'.format(env.nginx_file))\n sudo('nginx -s reload')\n sudo('nginx -t')\n" }, { "alpha_fraction": 0.6241846680641174, "alphanum_fraction": 0.6287004351615906, "avg_line_length": 31.68852424621582, "blob_id": "4a48f4365ebaeb103b4436979cf52363d26c3387", "content_id": "8a80be6d4e5ab4164473d57fbf64b3c61aac0a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1993, "license_type": "no_license", "max_line_length": 142, "num_lines": 61, "path": "/mqtt/helpers.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "import logging\n\nimport paho.mqtt.client as mqtt\n\nfrom api.models import Topic, TopicRecord\n\nlogger = logging.getLogger('mqtt-client')\n\n\nclass MqttClient:\n\n def __init__(self, client_id='restapi', host_ip='localhost', host_port=1883, keepalive=60, topic='temperature/#', qos=0, persistent=True):\n self.client_id = client_id\n self.host_ip = host_ip\n self.host_port = host_port\n self.keepalive = keepalive\n self.topic = topic\n self.qos=qos\n self.persistent = not persistent\n\n @staticmethod\n def on_connect(mqttc, obj, flags, rc):\n logger.info(\"rc: \" + str(rc))\n\n @staticmethod\n def on_message(mqttc, obj, msg):\n logger.info(\"Message received\")\n topic_name = str(msg._topic).split(\"'\")[1]\n try:\n topic = Topic.objects.get(name=topic_name)\n except:\n topic = Topic.objects.create(name=topic_name)\n logger.info(topic)\n topic_record = TopicRecord(value=float(str(msg.payload).split(\"'\")[1]), topic=topic)\n topic_record.save()\n topic.set_last_record(topic_record)\n\n @staticmethod\n def on_subscribe(mqttc, obj, mid, granted_qos):\n logger.info(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\n @staticmethod\n def on_log(mqttc, obj, level, string):\n logger.info(string)\n\n def run(self):\n # If you want to use a specific client id, use\n # but note that the client id must be unique on the broker. 
Leaving the client\n # id parameter empty will generate a random id for you.\n mqttc = mqtt.Client(self.client_id, clean_session=self.persistent)\n\n mqttc.on_message = self.on_message\n mqttc.on_connect = self.on_connect\n mqttc.on_subscribe = self.on_subscribe\n # Uncomment to enable debug messages\n # mqttc.on_log = on_log\n\n mqttc.connect(self.host_ip, self.host_port, self.keepalive)\n mqttc.subscribe(self.topic, self.qos)\n\n mqttc.loop_forever()" }, { "alpha_fraction": 0.5336787700653076, "alphanum_fraction": 0.5854922533035278, "avg_line_length": 20.44444465637207, "blob_id": "e6d1a5dfbe23d5b04329d5d27e054e9fe4d4530f", "content_id": "f76d43e8924a9841723a503881337052b1b0f1ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/api/migrations/0004_topic_temperature_offset.py", "repo_name": "Anc0/home-temp-restapi", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-11 11:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0003_topic_short_name'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='topic',\n name='temperature_offset',\n field=models.FloatField(default=0),\n ),\n ]\n" } ]
12
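In the home-temp-restapi record above, `get_records_for_topics` evaluates `datetime.now(pytz.UTC)` inside its default arguments, so the one-hour window is fixed once at import time instead of per call. A minimal sketch of the conventional `None`-sentinel fix; the signature mirrors the repository's helper, but the body is reduced to returning the resolved window rather than querying the Django models:

```python
# Sketch of a call-time default for the query window; assumes only the
# standard library plus pytz, and stands in for the Django-backed retriever.
from datetime import datetime, timedelta

import pytz


def get_records_for_topics(topic_names=None, from_time=None, to_time=None):
    # Resolve defaults on every call, not once at function definition.
    if to_time is None:
        to_time = datetime.now(pytz.UTC)
    if from_time is None:
        from_time = to_time - timedelta(hours=1)
    return topic_names, from_time, to_time


start, end = get_records_for_topics()[1:]
assert end - start == timedelta(hours=1)
```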
haonen/CAPP30254
https://github.com/haonen/CAPP30254
aa081e19e066869624beaf372863528b9f6e2a88
ebf918e3e4f4090193120ff8fee02fe9f4e2a7d1
2b2d094f8633ec84c2f9fba52eb5d1e81ec603a0
refs/heads/master
2020-05-05T09:08:54.919553
2019-05-29T19:55:26
2019-05-29T19:55:26
179,892,408
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5712401270866394, "alphanum_fraction": 0.579155683517456, "avg_line_length": 21.62686538696289, "blob_id": "45652ea0da2203b7023250e058d43b42f3f3114f", "content_id": "6a9ce0791741734acc15d12a5294c397bd3393a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 92, "num_lines": 67, "path": "/HW2/exploration.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW 2: Data Exploration\nYuwei Zhang\n\"\"\"\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef read_data(path, index_col):\n '''\n read the csv file\n Inputs:\n path: the path of the csv file\n index_col: the name of the index column\n Return:\n the data frame\n '''\n df = pd.read_csv(path, index_col=index_col)\n return df\n\n\ndef plot_count(df, colname, hue_col):\n '''\n Plot the counting plot of categorical variable\n Inputs:\n df: a data frame\n colname: the name of a column\n Output:\n plot the counting for different categories\n '''\n sns.set(style=\"darkgrid\")\n sns.countplot(x=colname, hue=hue_col, data=df).set_title(('Distribution of ' + colname))\n\n\ndef plot_pair(df):\n '''\n Plot the pairwise relationship in a dataset\n Inputs:\n df: a data frame\n Ouput:\n plot the pair plot\n '''\n sns.set(font_scale=1)\n sns.pairplot(df, height=3)\n plt.tight_layout()\n\n\ndef plot_heatmap(df):\n '''\n Plot the correlation matrix of features and outcome in a heat map\n Inputs:\n df: a data frame\n Output:\n a heatmap\n '''\n cm = df.corr(method='pearson')\n sns.set(font_scale=1.5)\n fig, ax = plt.subplots(figsize=(10, 10))\n sns.heatmap(cm, cbar=True,\n annot=True,\n square=True,\n fmt='.2f',\n annot_kws={'size': 10},\n yticklabels=df.columns,\n xticklabels=df.columns, ax=ax)\n" }, { "alpha_fraction": 0.5021271109580994, "alphanum_fraction": 0.5709252953529358, "avg_line_length": 36.1827392578125, "blob_id": "39f871ec003615b40c5b605f0d1332620ec3cb30", "content_id": "a11a64f71700a1502f4b8f2fe6350e675f9beefc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15046, "license_type": "no_license", "max_line_length": 93, "num_lines": 394, "path": "/HW1/HW1_code.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\r\nCAPP30254 HW1\r\n\r\nYUWEI ZHANG\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nfrom math import pi\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nfrom pandas.tools.plotting import table\r\nimport googlemaps\r\n\r\nCOLTYPE = {'Community Area': str}\r\n\r\n#############\r\n# Load Data #\r\n#############\r\ncrimes_2017 = pd.read_csv('Crimes_2017.csv',\r\n dtype=COLTYPE)\r\ncrimes_2017.head()\r\ncrimes_2018 = pd.read_csv('Crimes_2018.csv',\r\n dtype=COLTYPE)\r\ncrimes_2018.head()\r\ncrimes = crimes_2017.append(crimes_2018)\r\n\r\n##############\r\n# Problem 1 #\r\n##############\r\n\r\n#differences in reports length\r\nlen(crimes_2017)-len(crimes_2018)\r\n\r\n#Descriptive analysis for crime types\r\nfig, ax = plt.subplots()\r\nfig.set_size_inches(11.7, 8.27)\r\nwith sns.color_palette(\"BuGn_r\"):\r\n sns.countplot(y=\"Primary Type\", hue=\"Year\", ax=ax, data=crimes)\r\nfig.savefig('primary_type_count.png')\r\n\r\n#Descriptive analysis for communuity area\r\ncomm_2017 = crimes_2017['Community Area'].value_counts().to_frame()\r\ncomm_2017['proportion'] = round(comm_2017['Community Area'] /\r\n comm_2017['Community 
Area'].sum() * 100, 3)\r\nhead_comm_2017 = comm_2017.head()\r\nhead_comm_2017 = head_comm_2017.reset_index()\r\nhead_comm_2017 = head_comm_2017.rename(columns={'index': 'community area',\r\n 'Community Area': 'cnt'})\r\n\r\ncomm_2018 = crimes_2018['Community Area'].value_counts().to_frame()\r\ncomm_2018['proportion'] = round(comm_2018['Community Area'] /\r\n comm_2017['Community Area'].sum() * 100, 3)\r\nhead_comm_2018 = comm_2018.head()\r\nhead_comm_2018 = head_comm_2018.reset_index()\r\nhead_comm_2018 = head_comm_2018.rename(columns={'index': 'community area',\r\n 'Community Area': 'cnt'})\r\n\r\nhead_comm_2017['year'] = 2017\r\nhead_comm_2018['year'] = 2018\r\nhead_comm = head_comm_2017.append(head_comm_2018)\r\nfig1 = sns.barplot(x='community area', y='cnt', hue='year', data=head_comm)\r\nfig1 = fig1.get_figure()\r\nfig1.savefig('comm_area_cnt.png')\r\n\r\nfig2 = sns.barplot(x='community area', y='proportion', hue='year',\r\n data=head_comm)\r\nfig2 = fig2.get_figure()\r\nfig2.savefig('comm_area_pro.png')\r\n\r\n#Descriptive analysis for location description\r\nloc_des_2017 = crimes_2017['Location Description'].value_counts().to_frame()\r\nloc_des_2017['proportion'] = round(loc_des_2017['Location Description'] /\r\n loc_des_2017['Location Description'].sum()\r\n * 100, 3)\r\nhead_loc_2017 = loc_des_2017.head()\r\nhead_loc_2017 = head_loc_2017.reset_index()\r\nhead_loc_2017 = head_loc_2017.rename(columns={'index': 'location description',\r\n 'Location Description': 'cnt'})\r\n\r\nloc_des_2018 = crimes_2018['Location Description'].value_counts().to_frame()\r\nloc_des_2018['proportion'] = round(loc_des_2018['Location Description'] /\r\n loc_des_2017['Location Description'].sum()\r\n * 100, 3)\r\nhead_loc_2018 = loc_des_2018.head()\r\nhead_loc_2018 = head_loc_2018.reset_index()\r\nhead_loc_2018 = head_loc_2018.rename(columns={'index': 'location description',\r\n 'Location Description': 'cnt'})\r\n\r\nhead_loc_2017['year'] = 2017\r\nhead_loc_2018['year'] = 2018\r\nhead_loc = head_loc_2017.append(head_loc_2018)\r\nfig1 = sns.barplot(x='location description', y='cnt', hue='year',data=head_loc)\r\nfig1 = fig1.get_figure()\r\nfig1.savefig('loc_cnt.png')\r\n\r\nfig2 = sns.barplot(x='location description', y='proportion', hue='year',\r\n data=head_loc)\r\nfig2 = fig2.get_figure()\r\nfig2.savefig('loc_pro.png')\r\n\r\n##############\r\n# Problem 2 #\r\n##############\r\n# filter dataframe\r\ncoltype = {'unemployment rate': float, 'poverty rate': float}\r\nacs_2017 = pd.read_csv('acs_data.csv', dtype=coltype)\r\n\r\nbattery_2017 = crimes_2017[crimes_2017['Primary Type'] == 'BATTERY']\r\nbattery_2018 = crimes_2018[crimes_2018['Primary Type'] == 'BATTERY']\r\nhomicide_2017 = crimes_2017[crimes_2017['Primary Type'] == 'HOMICIDE']\r\nhomicide_2018 = crimes_2018[crimes_2018['Primary Type'] == 'HOMICIDE']\r\nDP_2017 = crimes_2017[crimes_2017['Primary Type'] == 'DECEPTIVE PRACTICE']\r\nSO_2017 = crimes_2017[crimes_2017['Primary Type'] == 'SEX OFFENSE']\r\n\r\n#get zipcode through latitude and longitude (using google map api)\r\ngmaps_key = googlemaps.Client(key=<Your GoogleMap API Key>)\r\n\r\ndef pick_zipcode(row):\r\n '''\r\n check whether the formated address in index 0 or index 1 has the zipcode\r\n Input:\r\n a row\r\n Return:\r\n zipcode(str)\r\n '''\r\n match = re.search(r\"(IL )([0-9]{5})\", row[0]['formatted_address'])\r\n if not match:\r\n match = re.search(r\"(IL )([0-9]{5})\", row[1]['formatted_address'])\r\n return match.group(2)\r\n\r\n\r\ndef get_zipcode(df):\r\n '''\r\n Get zipcode based 
on the geocode information\r\n Input:\r\n a dataframe\r\n Return:\r\n a new dataframe with zipcode column\r\n '''\r\n df['Location'] = df.apply(lambda x: (x['Latitude'], x['Longitude'])\r\n if not pd.isnull(x['Location'])\r\n else np.nan, axis=1)\r\n df['geocode_result'] = df.apply(lambda x: gmaps_key.reverse_geocode(\r\n x['Location']) if not pd.isnull(\r\n x['Location']) else None, axis = 1)\r\n df['zipcode'] = df.apply(lambda x: pick_zipcode(x['geocode_result'])\r\n if x['geocode_result'] else np.nan, axis=1)\r\n df['zipcode'] = df['zipcode'].fillna(0).astype(np.int64)\r\n return df\r\n\r\n\r\n#create median table\r\ndef create_median_table(df1, df2, var_list=['unemployment rate',\\\r\n 'median household income',\\\r\n 'poverty rate', 'white rate',\\\r\n 'black rate', 'hispanic rate',\\\r\n 'asian rate', 'average family size']):\r\n '''\r\n create a table for the median statistics for two years\r\n Inputs:\r\n two dataframes\r\n a variable list\r\n Return:\r\n the median data frame\r\n '''\r\n median_dict = {}\r\n for var in var_list:\r\n median_dict[var] = [df1[var].median(), df2[var].median()]\r\n new_df = pd.DataFrame(data=median_dict)\r\n new_df.insert(0, \"year\", [2017, 2018])\r\n return new_df\r\n\r\n\r\n#Plot radar charts\r\ndef create_radar(group1, group2, df):\r\n '''\r\n create radar chart\r\n Inpust:\r\n the name of two groups\r\n a data frame that contains the information about five variables\r\n Output:\r\n create and save the radar chart\r\n '''\r\n # PART 1: Create background\r\n categories=list(df)[1:]\r\n N = len(categories)\r\n angles = [n / float(N) * 2 * pi for n in range(N)]\r\n angles += angles[:1]\r\n ax = plt.subplot(111, polar=True)\r\n ax.set_theta_offset(pi / 2)\r\n ax.set_theta_direction(-1)\r\n plt.xticks(angles[:-1], categories)\r\n ax.set_rlabel_position(0)\r\n plt.yticks([10,20,30,40,50,60], [\"10\",\"20\",\"30\",\"40\",\"50\",\"60\"], color=\"grey\", size=7)\r\n plt.ylim(0,60)\r\n # PART 2: Add plots\r\n # group1\r\n values=df.loc[0].drop('group').values.flatten().tolist()\r\n values += values[:1]\r\n ax.plot(angles, values, linewidth=1, linestyle='solid', label=group1)\r\n ax.fill(angles, values, 'b', alpha=0.1)\r\n # group2\r\n values=df.loc[1].drop('group').values.flatten().tolist()\r\n values += values[:1]\r\n ax.plot(angles, values, linewidth=1, linestyle='solid', label=group2)\r\n ax.fill(angles, values, 'r', alpha=0.1)\r\n # Add legend\r\n plt.legend(loc='upper right', bbox_to_anchor=(-0.1, 0.1))\r\n \r\n plt.savefig('picture.png', bbox_inches='tight')\r\n \r\n# Part 1 & Part 3\r\ndef high_loc(df):\r\n '''\r\n filter the df with high frequency of crimes and save its geocode\r\n Input:\r\n a data frame\r\n Return:\r\n a new data frame only contains locations having high frequency of \r\n battery\r\n '''\r\n new_df = df['Location'].value_counts().to_frame()\r\n new_df = new_df[new_df['Location'] >= 10]\r\n new_df = new_df.reset_index()\r\n new_df = new_df.rename(columns={'index': 'Location', 'Location': 'cnt'})\r\n new_df['Latitude'] = new_df.apply(lambda x: re.search((r'(\\d\\d\\.[0-9]+)(\\,)'),\\\r\n x['Location']).group(1)\\\r\n if not (x['Location']\\\r\n is np.nan) else np.nan,\\\r\n axis=1)\r\n new_df['Longitude'] = new_df.apply(lambda x: re.search((r'(\\, )(-\\d\\d\\.[0-9]+)'),\\\r\n x['Location']).group(2)\\\r\n if not (x['Location']\\\r\n is np.nan) else np.nan,\\\r\n axis=1)\r\n return new_df\r\n\r\n\r\nhigh_loc_2017 = high_loc(battery_2017)\r\nhigh_loc_2018 = high_loc(battery_2018)\r\n\r\nzipcode_battery_2017 = 
get_zipcode(high_loc_2017)\r\nzipcode_battery_2018 = get_zipcode(high_loc_2018)\r\n\r\nbattery_2017_filtered = acs_2017[acs_2017['zipcode'].isin(zipcode_battery_2017['zipcode'])]\r\nbattery_2018_filtered = acs_2017[acs_2017['zipcode'].isin(zipcode_battery_2018['zipcode'])]\r\n\r\nmedian_df_battery = create_median_table(battery_2017_filtered, battery_2018_filtered)\r\nmedian_df_battery\r\n\r\ndf = pd.DataFrame({\r\n'group': ['battery_2017','battery_2018'],\r\n'unemployment rate': [5.8, 5.7],\r\n'poverty rate': [21.2, 20.5],\r\n'white rate': [31.3, 34.0],\r\n'black rate': [17.5, 17.8],\r\n'hispanic rate': [8.7, 11.8]\r\n})\r\n\r\ncreate_radar('battery_2017', 'battery_2018', df)\r\n\r\n# Part 2 & Part 3\r\nzipcode_homicide_2017 = get_zipcode(homicide_2017)\r\nzipcode_homicide_2018 = get_zipcode(homicide_2018)\r\n\r\nhomicide_2017_filtered = acs_2017[acs_2017['zipcode'].isin(zipcode_homicide_2017['zipcode'])]\r\nhomicide_2018_filtered = acs_2017[acs_2017['zipcode'].isin(zipcode_homicide_2018['zipcode'])]\r\n\r\nmedian_df_homicide = create_median_table(homicide_2017_filtered, homicide_2018_filtered)\r\nmedian_df_homicide.head()\r\n\r\ndf = pd.DataFrame({\r\n'group': ['homicide_2017','homicide_2018'],\r\n'unemployment rate': [6.3, 6.3],\r\n'poverty rate': [20.8, 20.7],\r\n'white rate': [25.5, 28.2],\r\n'black rate': [17.9, 17.8],\r\n'hispanic rate': [13.2, 13.3]\r\n})\r\n\r\ncreate_radar('homicide_2017', 'homicide_2018', df)\r\n\r\n# Part 4\r\nzipcode_SO_2017 = get_zipcode(SO_2017)\r\nSO_2017_filtered = acs_2017[acs_2017['zipcode'].isin(\r\n zipcode_SO_2017['zipcode'])]\r\n\r\nzipcode_DP_2017 = get_zipcode(DP_2017)\r\nDP_2017_filtered = acs_2017[acs_2017['zipcode'].isin(zipcode_DP_2017['zipcode'])]\r\n\r\ndf = pd.DataFrame({\r\n'group': ['Deceptive Practice','Sex Offense'],\r\n'unemployment rate': [4.9, 5.1],\r\n'poverty rate': [15.0, 18.8],\r\n'white rate': [51.3, 43.5],\r\n'black rate': [10.9, 15.2],\r\n'hispanic rate': [12.5, 11.9]\r\n})\r\n \r\ncreate_radar('Deceptive Practice', 'Sex Offense', df)\r\n\r\n\r\n##############\r\n# Problem 3 #\r\n##############\r\n\r\n# Part 2\r\ndef get_july_record(df, crime, pattern):\r\n '''\r\n get the records for a certain time period of a certain crime\r\n Inputs:\r\n a data frame\r\n a crime type(str)\r\n a re pattern(to specify the time frame)\r\n Return:\r\n a new data frame\r\n '''\r\n new_df = df[df['Primary Type'] == crime]\r\n new_df['match'] = new_df.apply(lambda x: bool(re.findall(pattern,\r\n x['Date'])), axis=1)\r\n july_df = new_df[new_df['match'] == True]\r\n return july_df\r\n\r\n\r\ndef compute_increase(df1, df2, crime, pattern):\r\n '''\r\n compute the percentage increase of proportion\r\n Inputs:\r\n two data frame for different years\r\n a crime type\r\n a re pattern\r\n Return:\r\n the percentage increase(float)\r\n '''\r\n july_df1 = get_july_record(df1, crime, pattern)\r\n july_df2 = get_july_record(df2, crime, pattern)\r\n return ((len(july_df2) / len(df2) - len(july_df1) / len(df1)) /\r\n (len(july_df1) / len(df1)) * 100)\r\n \r\n#Check the week before July 26th\r\npattern = r'07/25|07/24|07/23|07/22|07/21|07/20|07/19'\r\nprint(\"Robbery increase:{}%\".format(round(compute_increase(crimes_2017, \r\n crimes_2018, 'ROBBERY', pattern), 3)))\r\nprint(\"Battery increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BATTERY', pattern), 3)))\r\nprint(\"Burglary increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BURGLARY', pattern), 3)))\r\nprint(\"Motor vehicle theft 
increase:{}%\".format(round(compute_increase(\r\n crimes_2017, crimes_2018, 'MOTOR VEHICLE THEFT', pattern), 3)))\r\n\r\n#Check July\r\npattern = r'^07/'\r\nprint(\"Robbery increase:{}%\".format(round(compute_increase(crimes_2017, \r\n crimes_2018, 'ROBBERY', pattern), 3)))\r\nprint(\"Battery increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BATTERY', pattern), 3)))\r\nprint(\"Burglary increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BURGLARY', pattern), 3)))\r\nprint(\"Motor vehicle theft increase:{}%\".format(round(compute_increase(\r\n crimes_2017, crimes_2018, 'MOTOR VEHICLE THEFT', pattern), 3)))\r\n#check July 26th\r\npattern = r'07/26'\r\nprint(\"Robbery increase:{}%\".format(round(compute_increase(crimes_2017, \r\n crimes_2018, 'ROBBERY', pattern), 3)))\r\nprint(\"Battery increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BATTERY', pattern), 3)))\r\nprint(\"Burglary increase:{}%\".format(round(compute_increase(crimes_2017,\r\n crimes_2018, 'BURGLARY', pattern), 3)))\r\nprint(\"Motor vehicle theft increase:{}%\".format(round(compute_increase(\r\n crimes_2017, crimes_2018, 'MOTOR VEHICLE THEFT', pattern), 3)))\r\n\r\n\r\n##############\r\n# Problem 4 #\r\n##############\r\n\r\n#Part A\r\nNSS = crimes_2017[crimes_2017['Community Area'] == 33]\r\nround(NSS['Primary Type'].value_counts()/len(NSS) * 100, 3)\r\n\r\n#Part B\r\ntheft_2017 = crimes_2017[crimes_2017['Primary Type'] == 'THEFT']\r\nfilter_condition = ((theft_2017['Community Area'] == 3) |\r\n (theft_2017['Community Area'] == 26) |\r\n (theft_2017['Community Area'] == 27))\r\ntheft_2017[filter_condition]['Community Area'].value_counts()/len(theft_2017)\r\n\r\ntheft_2018 = crimes_2018[crimes_2018['Primary Type'] == 'THEFT']\r\nfilter_condition = ((theft_2018['Community Area'] == 3) |\r\n (theft_2018['Community Area'] == 26) |\r\n (theft_2018['Community Area'] == 27))\r\ntheft_2018[filter_condition]['Community Area'].value_counts()/len(theft_2018)\r\n" }, { "alpha_fraction": 0.7564575672149658, "alphanum_fraction": 0.7601476311683655, "avg_line_length": 89.33333587646484, "blob_id": "6bb22da5bc62a8bc8e0ef16f44c51c5afc7c415d", "content_id": "c9d7e1c6ea854d945536ad5f09e4d111eed67544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 813, "license_type": "no_license", "max_line_length": 141, "num_lines": 9, "path": "/HW3/README.md", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "# HW3\nAccording to the feedback of Assignment 2, I lack of a readme explaining my pipeline. So I include this file. \n* ```preprocess.py```: module about data imputation, discretization and hot encoding. \n* ```exploration.py```: module about data exploration. Including functions about plotting count plots and heatmap.\n* ```modeling.py```: module about building different models and predicting with test sets.\n* ```evaluation.py```:module about different evaluation metrics, relevant plots. Also includes a wrap up function to train and evaluate them.\n* ```analysis.py```: module about analysis of best models (plot precision-recall curves and find feature importance matrix)\n* ```HW3_output.ipynb```: jupyter notebook about how I implement these pipelines. 
\n* ```write_up.pdf```: written analysis about models\n" }, { "alpha_fraction": 0.6045918464660645, "alphanum_fraction": 0.6092687249183655, "avg_line_length": 33.52941131591797, "blob_id": "3106be5df71a5fb96b7e97d77f8d06bd5dc1732c", "content_id": "14dab1ac279cd8291f826fe006a144936b1a1560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4704, "license_type": "no_license", "max_line_length": 81, "num_lines": 136, "path": "/HW5/preprocess.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW5: preprocess\nYuwei Zhang\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\ndef create_outcome(df, post_col, funded_col, interval_col, outcome_col, period):\n '''\n Define the outcome column\n :param df: a data frame\n :param post_col: (str) the name of the posted date\n :param funded_col: (str) the name of the funded date\n :param interval_col: (str) the name of the interval\n :param outcome_col: (str) the name of the outcome column\n :param period: (timedelta object) the period that the project receive funding\n\n :return: a data frame with outcome\n '''\n\n df[post_col] = pd.to_datetime(df[post_col])\n df[funded_col] = pd.to_datetime(df[funded_col])\n df[interval_col] = df[funded_col] - df[post_col]\n df[outcome_col] = np.where(df[interval_col] <= period, 0, 1)\n return df.drop(columns=[interval_col])\n\n\ndef imputation(X_train, X_test, colnames, is_num=True):\n '''\n imputate the cells that are NaN with intended value. If it is numeric, then\n imputating value is the mean of training data, else it would be 'unknown'.\n :param X_train: a data frame of training set\n :param X_test: a data frame of test set\n :param colnames:(list) a list of colnames to be imputated\n :param is_num: (bool) check whether those columns are numeric or not\n\n :return: No returns.\n '''\n for colname in colnames:\n if is_num:\n impute_val = X_train[colname].mean()\n else:\n impute_val = 'unknown'\n\n X_train[colname] = X_train[colname].fillna(value=impute_val)\n X_test[colname] = X_test[colname].fillna(value=impute_val)\n\n\n\ndef discritize(X_train, X_test, colname, labels_list):\n '''\n Discritize the continuous variable based on training data\n Inputs:\n X_train: a data frame of training set\n X_test: a data frame of test set\n colname: the name of the column\n labels_list: the label of\n Output:\n add a new column to train ans test set respectively\n that are discritized from a continuous variable\n '''\n n = len(labels_list)\n quantile_list = []\n for i in range(0, n + 1):\n quantile_list.append(i / n)\n bins_list = list(X_train[colname].quantile(quantile_list).values)\n bins_list[0] = bins_list[0] - 1\n \n X_train[(colname + '_category')] = pd.cut(X_train[colname],\n bins=bins_list,\n labels=labels_list)\n X_test[(colname + '_category')] = pd.cut(X_test[colname],\n bins=bins_list,\n labels=labels_list)\n\n\ndef get_all_dummies(X_train, X_test, colname):\n '''\n Convert the categorical variable into dummies\n Inputs:\n X_train: a data frame of training set\n X_test: a data frame of test set\n colname: the name of the colname\n Return:\n the data frame with those dummies into data frame\n '''\n #Get the categories from training data set\n cat_list = list(X_train[colname].value_counts().index.values)\n #create dummies\n for cat in cat_list:\n X_test[cat] = np.where(X_test[colname] == cat, 1, 0)\n X_train[cat] = np.where(X_train[colname] == cat, 1, 0)\n\n \ndef get_top_k_dummies(X_train, X_test, colname, k):\n '''\n For columns 
with too many categories, only create dummies for \n    the top k categories\n    Inputs:\n        X_train: a data frame of training set\n        X_test: a data frame of test set\n        colname: the name of the column\n        k: (int) the value of k\n    Outputs:\n        Create dummies in both train and test set\n    '''\n    # get top k categories from train set\n    top_k = X_train[colname].value_counts()[:k].index\n    # create dummies\n    for cat in top_k:\n        X_train[cat] = np.where(X_train[colname] == cat, 1, 0)\n        X_test[cat] = np.where(X_test[colname] == cat, 1, 0)\n    X_train['{}_others'.format(colname)] = X_train.apply(\\\n        lambda x: 0 if x[colname] in top_k else 1, axis=1)\n    X_test['{}_others'.format(colname)] = X_test.apply(\\\n        lambda x: 0 if x[colname] in top_k else 1, axis=1)\n    \n\ndef get_dummies(X_train, X_test, colname, k):\n    '''\n    Wrap up get_all_dummies and get_top_k_dummies\n    Inputs:\n        X_train: a data frame of training set\n        X_test: a data frame of test set\n        colname: the name of the column\n        k: (int) the value of k\n    Outputs:\n        Create dummies in both train and test set\n    '''\n    # Decide whether to use all dummies or only the top k\n    if len(X_train[colname].value_counts()) > k:\n        get_top_k_dummies(X_train, X_test, colname, k)\n    else:\n        get_all_dummies(X_train, X_test, colname)\n    " }, { "alpha_fraction": 0.6960705518722534, "alphanum_fraction": 0.7351242899894714, "avg_line_length": 73.22618865966797, "blob_id": "0b1b9b3c6f26b7a8d34b0aafe6578eb77efecf4d", "content_id": "972aa82230650f79ef834bd3d39f086e875ab9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12480, "license_type": "no_license", "max_line_length": 772, "num_lines": 168, "path": "/HW1/write_up.md", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "# Problem 1\n* The number of recorded crimes in 2018 is less than that of 2017 (the difference is 1848)\n* According to the graph about the primary types of crime in Chicago below, we can see that in general, the main types of crime remain stable in 2017 and 2018. There is no sharp decrease or increase in any type of crime on this graph with a scale of 100000 in count. Theft is the crime type with the highest frequency in both years. Then comes battery.\n ![fig1](https://github.com/haonen/Markdown-Photos/blob/master/primary_type_count.png?raw=true)\n \n* The top 5 community areas with the highest crime frequency are the same in 2017 and 2018. In community area 25, the frequency of crimes and the proportion of crimes are both decreasing in 2018. But in the other 4 community areas, those two indicators are increasing in 2018. Community area 8 has the sharpest increase.\n\n![fig2](https://github.com/haonen/Markdown-Photos/blob/master/top%205%20community%20area.JPG?raw=true)\n![fig3](https://github.com/haonen/Markdown-Photos/blob/master/comm_area_cnt.png?raw=true)\n![fig4](https://github.com/haonen/Markdown-Photos/blob/master/comm_area_pro.png?raw=true)\n\n* The top 5 locations with the highest frequency of crime occurrence remain the same in both years. Street is the most dangerous place if we take the frequency of crimes as an indicator. 
For the changes in proportions of total crimes, apartments have a higher proportion in 2018 while the others decreased slightly.\n![fig5](https://github.com/haonen/Markdown-Photos/blob/master/loc_cnt.png?raw=true)\n![fig6](https://github.com/haonen/Markdown-Photos/blob/master/loc_pro.png?raw=true)\n\n# Problem 2\nFor this problem, in order to save the time of getting zipcodes from geocodes with the Google Maps API, I filter out the data frames for \"battery\" to locations that have more than 10 crime records. I then use the Census API (American FactFinder) and pick some zipcode-level variables from the 2013-2017 American Community Survey 5-Year Estimates: unemployment rate, median household income, poverty rate, rates for four main races (white, black, hispanic or latino, and asian), and family size. I filter the ACS data frame by checking whether these zipcodes appear in the crime reports. I then analyze the filtered data frames for different crime types and different years to see whether the characteristics of these locations vary across years or across crime types. \nTo avoid the influence of outliers, I choose the median as the statistic to describe block/zipcode characteristics. \nFor reference in the following analysis, I record the values of those variables at the Chicago City level from the same survey:\n\n<table>\n  <tr>\n    <th>unemployment rate</th>\n    <th>median household income</th>\n    <th>poverty rate</th>\n    <th>white rate</th>\n    <th>black rate</th>\n    <th>hispanic rate</th>\n    <th>asian rate</th>\n    <th>average family size</th> \n  </tr>\n  <tr>\n    <td>9.9</td>\n    <td>52,497</td>\n    <td>20.6</td>\n    <td>51.2</td>\n    <td>31.6</td>\n    <td>29.0</td>\n    <td>7.2</td>\n    <td>3.4</td>\n  </tr>\n</table>\n\n## 1. \n<img src=\"https://github.com/haonen/Markdown-Photos/blob/master/median_battery.JPG?raw=true\" alt=\"fig7\" width=\"800\"/> \n\nThe zipcodes/blocks that have a high frequency of battery have the following characteristics. The unemployment rate is around 5.7% and the poverty rate is around 20%. The median household income is around 50000 dollars and the average family size is around 3 people. The majority of their population is white, and then comes the black population. \n\n## 2.\n<img src=\"https://github.com/haonen/Markdown-Photos/blob/master/median_homicide.JPG?raw=true\" alt=\"fig7\" width=\"800\"/> \n\nThe zipcodes/blocks that have a high frequency of homicide incidents have the following characteristics. The unemployment rate is around 6.3% and the poverty rate is around 20%. The median household income is also around 50000 dollars and the average family size is around 3 people. The majority of their population is white, and then comes the black population. \n\nBoth types of crime happen in areas that have roughly the same poverty rate as Chicago city and a lower median household income. But their unemployment rates are lower than the citywide figure and their population composition is more diverse. \n\n## 3. \nFor battery, based on the tables above and the radar chart below, we can see that the most obvious variation is population composition. The rates of both the white and the hispanic or latino populations increase from 2017 to 2018. Besides that, the tables tell us that there is an increase in the median household income, but we cannot tell whether it is due to inflation. The poverty rate decreases slightly and the family size increases. \n\n![fig9](https://github.com/haonen/Markdown-Photos/blob/master/picture_battery.png?raw=true) \n\nThe report for homicide has a similar situation. 
The proportion of white people increases in these areas. The unemployment rate remains constant while the median household income also increases. Poverty rates decrease slightly and the average family size increases a little bit.\n\n![fig10](https://github.com/haonen/Markdown-Photos/blob/master/picture_homicide.png?raw=true) \n\n## 4.\nWe only use the data for 2017 here.\n<img src=\"https://github.com/haonen/Markdown-Photos/blob/master/compare_so_dp.JPG?raw=true\" alt=\"drawing\" width=\"800\"/> \n<img src=\"https://github.com/haonen/Markdown-Photos/blob/master/so_dp.JPG?raw=true\" alt=\"drawing\" width=\"400\"/> \nAccording to the radar chart above, the blocks that get “Deceptive Practice” have a higher proportion of white population, a lower proportion of black population and a slightly smaller proportion of hispanic or latino population than the blocks that get “Sex Offense”. Also, their poverty rate is lower. The average household income is higher in the blocks that get “Deceptive Practice”. \n\n# Problem 3 \n## 1.\nFrom 2017 to 2018, the main types of crime do not change much. Some of their numbers decrease but some of them increase. And we cannot say for sure whether this is a good or a bad change because, in general, the number of records in 2018 is less than the number of records in 2017. The community areas with a high frequency of crime also remain stable. \nThose \"crime blocks\" above mainly have a much lower median household income than the city level and a comparable poverty rate. But there is an increase in median household income and a decrease in poverty rate. However, their unemployment rates are much lower than the city level and their population composition is more diverse. We can also see an increase in the proportion of white people in those blocks.\n## 2.\n### A\nTo avoid the influence of random fluctuations in the number of reports in different years, I calculate the percentage change based on the proportion of each type of crime for this part. \nThese statistics are not correct. Here is my output: \n ```\nRobbery increase:-16.731%\nBattery increase:4.895%\nBurglary increase:-11.464%\nMotor vehicle theft increase:-11.461%\n ```\n I guess that they may have mistaken the formula for calculating the percentage increase. For example, if they take the number of records in 2018 as the denominator and mistake the sign, then the percentage increase would be roughly 21% (but actually it is decreasing). \n ### B\nThese results could be misleading and I do not agree with his conclusion, because the choice of time frame is arbitrary, and if we pick a different time frame, the result could be inconsistent. For instance, here is the output for July:\n```\nRobbery increase:-12.143%\nBattery increase:4.527%\nBurglary increase:-4.9%\nMotor vehicle theft increase:-12.109%\n```\nAnd the result for July 26th:\n```\nRobbery increase:26.8%\nBattery increase:0.694%\nBurglary increase:-10.251%\nMotor vehicle theft increase:-23.611%\n```\nFor the comparison of July, robbery decreases, but for the particular day, robbery actually increases a lot. And battery increases for the time frame of July but remains constant for the particular day. So, if we pick a different time frame, we might reach different conclusions. And we cannot ensure that the time frame we pick guarantees roughly the same environment or context for crimes in both years. \n## 3\n* There is an increase in the proportion of white population in those areas that have a high frequency of \"homicide\" and \"battery\". 
This might be a side effect of gentrification in some communities of Chicago, since those areas originally had diverse populations. The city should pay attention to gentrification. \n* Since those areas have diverse populations, I would suggest that the mayor hear more from minorities and check out their living environment to see what causes them to live in danger of crime. \n* The unemployment rate is much lower than the city level, but the median household income is also much lower than the city level. This might indicate that although people do get jobs in those areas, they earn little, and this could be a source of dissatisfaction that causes violence and crime. The City of Chicago should focus on low-income groups. \n* The poverty rate in those areas is roughly the same as the city level and it decreases slowly. Poverty can be a source of violence and crime, so the mayor should pay attention to reducing the poverty rate.\n* Theft is a severe type of crime in Chicago and its proportion has increased. I would suggest that the city government pay attention to this high frequency. \n## 4\nI would provide the caveat that, since all of my analysis above is based on descriptive statistics rather than statistical inference, most of my recommendations are educated guesses. The true correlations and causality still need further study. \n\n# Problem 4\n## A\n2111 S Michigan Ave is in the Near South Side (Community Area code is 33) of Chicago. I first filter out all the records from this community and then count the value of each crime type. Dividing the number of records for each type by the length of all the records from Near South Side, I can find the probabilities for each type of request. Theft is the one with the highest probability. \n```\nTHEFT 28.822\nDECEPTIVE PRACTICE 15.651\nBATTERY 15.496\nCRIMINAL DAMAGE 8.006\nOTHER OFFENSE 5.940\nASSAULT 4.855\nROBBERY 4.804\nMOTOR VEHICLE THEFT 4.649\nCRIMINAL TRESPASS 4.390\nBURGLARY 3.151\nNARCOTICS 0.930\nSEX OFFENSE 0.775\nOFFENSE INVOLVING CHILDREN 0.568\nCRIM SEXUAL ASSAULT 0.465\nWEAPONS VIOLATION 0.413\nPROSTITUTION 0.310\nPUBLIC PEACE VIOLATION 0.207\nINTERFERENCE WITH PUBLIC OFFICER 0.207\nSTALKING 0.103\nOBSCENITY 0.103\nNON-CRIMINAL 0.103\nARSON 0.052\n```\n\n## B\nThe community area code for Uptown is 3, and the codes for Garfield Park are 26 and 17. For each year's report, I first filter out the theft records and then filter out the records that happened in Uptown or Garfield Park. Dividing the value counts for each community area by the length of the theft records for each year, I can come up with the estimates of the probability. \n<table>\n  <tr>\n    <th>Community Area</th>\n    <th>Probability 2017</th>\n    <th>Probability 2018</th>\n  </tr>\n  <tr>\n    <td>Uptown</td>\n    <td>0.015044</td>\n    <td>0.015151</td>\n  </tr>\n  <tr>\n    <td>Garfield Park</td>\n    <td>0.009061 + 0.008983 = 0.018044</td>\n    <td>0.010802 + 0.009681 = 0.020483</td>\n  </tr>\n</table>\n\n\nIn 2017, it is more likely that this call comes from Garfield Park, and it has 0.018044 - 0.015044 = 0.003 higher probability. \nIn 2018, although theft rose in both communities, it is still more likely that this call comes from Garfield Park, and it has 0.020483 - 0.015151 = 0.005332 higher probability. 
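As a sanity check on the estimates in part B, here is a minimal pandas sketch of the computation described above. The file name and column names (`crimes_2018.csv`, `Primary Type`, `Community Area`) are assumptions for illustration, not values taken from the original notebook:

```python
import pandas as pd

# Hypothetical: a CSV export of the 2018 report from the Chicago data portal
df_2018 = pd.read_csv('crimes_2018.csv')

# P(area | theft) is estimated as the share of all theft records in each area
theft = df_2018[df_2018['Primary Type'] == 'THEFT']
p_uptown = (theft['Community Area'] == 3).mean()            # Uptown is area 3
p_garfield = theft['Community Area'].isin([26, 17]).mean()  # Garfield Park is areas 26 and 17
print(p_uptown, p_garfield)
```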
\n \n## C\nAccording to Bayes' theorem: \nP(Battery | Garfield Park) = P(Battery) * P(Garfield Park | Battery)/P(Garfield Park) = ((100+160)/1000 * (100/(100+160)))/(600/1000) = 1/6 \nP(Battery | Uptown) = P(Battery) * P(Uptown | Battery)/P(Uptown) = ((100+160)/1000 * (160/(100+160)))/(400/1000) = 2/5 \n2/5 - 1/6 ≈ 0.23 \nSo, the probability that this call comes from Uptown is roughly 23 percentage points higher than the probability of it coming from Garfield Park.\n" }, { "alpha_fraction": 0.6021109819412231, "alphanum_fraction": 0.6031783819198608, "avg_line_length": 36.82949447631836, "blob_id": "5e2238e4e4b9180b6b4a3c5f92681c7468aa2fe7", "content_id": "a72469e0388c09f3f7f69b56f6ad440ed95c95bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8432, "license_type": "no_license", "max_line_length": 111, "num_lines": 217, "path": "/HW3/modeling.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\r\nHW3: modeling\r\nYuwei Zhang\r\n\"\"\"\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import BaggingClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.svm import LinearSVC\r\n\r\n\r\ndef split_data(X, y, test_size, temporal, split_var, split_period_list, period):\r\n    '''\r\n    Split the data set\r\n    :param X: the feature set\r\n    :param y: the outcome set\r\n    :param test_size: (float) the size of test\r\n    :param temporal: (boolean) whether to use temporal validation\r\n    :param split_var: (str) split the data set based on this variable\r\n    :param split_period_list: (list) a list of datetime objects\r\n    :param period: (relativedelta object) the period\r\n\r\n    :return: a list of (X_train, X_test, y_train, y_test) tuples\r\n    '''\r\n\r\n    split_results = []\r\n    if temporal:\r\n        for i, date in enumerate(split_period_list):\r\n            next_date = date + period\r\n            X_train = X[X[split_var] <= date]\r\n            X_train = X_train.drop([split_var], axis=1)\r\n            y_train = y[X[split_var] <= date]\r\n            X_test = X[(X[split_var] > date) & (X[split_var] <= next_date)]\r\n            X_test = X_test.drop([split_var], axis=1)\r\n            y_test = y[(X[split_var] > date) & (X[split_var] <= next_date)]\r\n            split_results.append((X_train, X_test, y_train, y_test))\r\n    else:\r\n        # drop the temporal column before a simple random split\r\n        X = X.drop([split_var], axis=1)\r\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= \\\r\n            test_size, random_state=0)\r\n        split_results.append((X_train, X_test, y_train, y_test))\r\n    return split_results\r\n\r\n\r\ndef build_lr(param_dict, X_train, y_train):\r\n    '''\r\n    Build a Logistic Regression model based on different choices of parameters\r\n    :param X_train: feature set for training\r\n    :param y_train: outcome set for training\r\n    :param param_dict: (dict) a dictionary about parameters\r\n\r\n    :return: a dictionary of logistic regression models\r\n    '''\r\n    model_dict = {}\r\n    solver_list = param_dict['solver']\r\n    penalty_list = param_dict['penalty']\r\n    C_list = param_dict['C']\r\n    for solver_choice in solver_list:\r\n        for penalty_choice in penalty_list:\r\n            for c_val in C_list:\r\n                key = \"solver={}, penalty={}, C={}\".format(solver_choice, penalty_choice, c_val)\r\n                lr = LogisticRegression(solver=solver_choice, penalty=penalty_choice, C=c_val)\r\n                lr.fit(X_train, y_train)\r\n                model_dict[key] = lr\r\n\r\n    return model_dict\r\n\r\n\r\ndef build_knn(param_dict, X_train, y_train):\r\n    '''\r\n    Build a k nearest neighbors 
model based on different choices of parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: feature set for training\r\n :param y_train: outcome set for training\r\n\r\n :return: a dictionary of knn models\r\n '''\r\n model_dict = {}\r\n neighbor_list = param_dict['n_neighbors']\r\n p_list = param_dict['p']\r\n for num_of_neighbors in neighbor_list:\r\n for p_val in p_list:\r\n key = \"n_neighbors={}, p={}\".format(num_of_neighbors, p_val)\r\n knn = KNeighborsClassifier(n_neighbors=num_of_neighbors, p=p_val)\r\n knn.fit(X_train, y_train)\r\n model_dict[key] = knn\r\n return model_dict\r\n\r\n\r\ndef build_dt(param_dict, X_train, y_train):\r\n '''\r\n Build a decision tree model based on different choices of parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: the feature set for training\r\n :param y_train: the outcome set for training\r\n\r\n :return: a dictionary of decision tree models\r\n '''\r\n model_dict = {}\r\n criterion_list = param_dict['criterion']\r\n d_list = param_dict['max_depth']\r\n for criterion_choice in criterion_list:\r\n for d in d_list:\r\n key = \"criterion={}, max_depth={}\".format(criterion_choice, d)\r\n decision_tree = DecisionTreeClassifier(criterion=criterion_choice, max_depth=d)\r\n decision_tree.fit(X_train, y_train)\r\n model_dict[key] = decision_tree\r\n return model_dict\r\n\r\n\r\ndef build_svm(param_dict, X_train, y_train):\r\n '''\r\n Build a svm model based on different choices of parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: the feature set for training\r\n :param y_train: the outcome set for training\r\n\r\n :return: a dictionary of svm models\r\n '''\r\n model_dict = {}\r\n c_list = param_dict['C']\r\n for c_val in c_list:\r\n key = \"C={}\".format(c_val)\r\n svm = LinearSVC(random_state=0, tol=1e-5, C=c_val)\r\n svm.fit(X_train, y_train)\r\n model_dict[key] = svm\r\n return model_dict\r\n\r\n\r\ndef build_rf(param_dict, X_train, y_train):\r\n '''\r\n Build random forest models ensemble based on different choice\r\n of parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: the feature set for training\r\n :param y_train: the outcome set for training\r\n\r\n :return: a dictionary of random forest models\r\n '''\r\n model_dict = {}\r\n estimator_list = param_dict['n_estimators']\r\n d_list = param_dict['max_depth']\r\n criterion_list = param_dict['criterion']\r\n for num_estimators in estimator_list:\r\n for d in d_list:\r\n for criterion_choice in criterion_list:\r\n key = \"n_estimators={}, max_depth={}, criterion={}\".format(num_estimators, d, criterion_choice)\r\n rf = RandomForestClassifier(n_estimators=num_estimators,\r\n max_depth=d,\r\n criterion=criterion_choice)\r\n rf.fit(X_train, y_train)\r\n model_dict[key] = rf\r\n return model_dict\r\n\r\n\r\ndef build_bagging(param_dict, X_train, y_train):\r\n '''\r\n Build bagging models from different parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: the feature set for training\r\n :param y_train: the outcome set for training\r\n\r\n :return: a dictionary of bagging models\r\n '''\r\n model_dict = {}\r\n base_list = param_dict['base_estimator']\r\n estimator_list = param_dict['n_estimators']\r\n sample_list = param_dict['max_samples']\r\n for base in base_list:\r\n for num_estimators in estimator_list:\r\n for sample in sample_list:\r\n key = \"base_estimator={}, n_estimators={}, 
max_samples={}\".format(base, num_estimators, sample)\r\n bagging = BaggingClassifier(base_estimator=base,\r\n n_estimators= num_estimators,\r\n max_samples=sample)\r\n bagging.fit(X_train, y_train)\r\n model_dict[key] = bagging\r\n return model_dict\r\n\r\n\r\ndef build_boosting(param_dict,X_train, y_train):\r\n '''\r\n Build a boosting model based on different choices of parameters\r\n :param param_dict: (dict) a dictionary about parameters\r\n :param X_train: (np array) the feature set for training\r\n :param y_train: (np.array) the outcome set for training\r\n\r\n :return: a list of boosting models\r\n '''\r\n model_dict = {}\r\n base_list = param_dict['base_estimator']\r\n estimator_list = param_dict['n_estimators']\r\n rate_list = param_dict['learning_rate']\r\n for base in base_list:\r\n for num_estimators in estimator_list:\r\n for rate in rate_list:\r\n key = \"base_estimator={}, n_estimators={}, learning_rate={}\".format(base, num_estimators, rate)\r\n boosting = AdaBoostClassifier(base_estimator=base,\r\n n_estimators=num_estimators,\r\n learning_rate=rate)\r\n boosting.fit(X_train, y_train)\r\n model_dict[key] = boosting\r\n return model_dict\r\n\r\n\r\ndef predict_models(classifier, model_dict, X_test):\r\n pred_scores_dict = {}\r\n for params, model in model_dict.items():\r\n if classifier == 'Support Vector Machine':\r\n pred_scores = model.decision_function(X_test)\r\n else:\r\n pred_scores = model.predict_proba(X_test)[:, 1]\r\n pred_scores_dict[params] = pred_scores\r\n return pred_scores_dict\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6426049470901489, "alphanum_fraction": 0.6429972648620605, "avg_line_length": 41.20338821411133, "blob_id": "06cbc4b0b54a1fb968bb0bd14ef6da70d2a2e3fb", "content_id": "2b3bf76a63e8702c6651f9eec498231c1babea47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2549, "license_type": "no_license", "max_line_length": 99, "num_lines": 59, "path": "/HW3/analysis.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\r\nHW3: Analysis\r\nYuwei Zhang\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport modeling\r\nimport evaluation\r\n\r\n\r\ndef analysis(classifier_dict, model_name, X_train, y_train, X_test, y_test):\r\n '''\r\n Plot the precision-recall curve for best models\r\n :param classifier_dict: (dict) a dictionary for the best model\r\n :param model_name: (str) the name of model\r\n :param X_train: the train feature set\r\n :param y_train: the train outcome set\r\n :param X_test: the test feature set\r\n :param y_test: the test outcome set\r\n\r\n :return: save or show the precision recall curve\r\n '''\r\n for classifier, param_dict in classifier_dict.items():\r\n if classifier == 'Logistic Regression':\r\n model_dict = modeling.build_lr(param_dict, X_train, y_train)\r\n if classifier == 'K Nearest Neighbors':\r\n model_dict = modeling.build_knn(param_dict, X_train, y_train)\r\n if classifier == 'Decision Tree':\r\n model_dict = modeling.build_dt(param_dict, X_train, y_train)\r\n if classifier == 'Support Vector Machine':\r\n model_dict = modeling.build_svm(param_dict, X_train, y_train)\r\n if classifier == 'Random Forest':\r\n model_dict = modeling.build_rf(param_dict, X_train, y_train)\r\n if classifier == 'Boosting':\r\n model_dict = modeling.build_boosting(param_dict, X_train, y_train)\r\n if classifier == 'Bagging':\r\n model_dict = modeling.build_bagging(param_dict, X_train, y_train)\r\n\r\n pred_scores_dict = modeling.predict_models(classifier, model_dict, 
X_test)\r\n for params, pred_score in pred_scores_dict.items():\r\n evaluation.plot_precision_recall_n(y_test, pred_score, model_name, 'save')\r\n\r\n\r\ndef get_feature_importance(classifier_dict, X_train, y_train):\r\n '''\r\n Create the data frame for feature's importance of decision tree models\r\n :param classifier_dict: (dict) a dictionary for the best model\r\n :param X_train: the train feature set\r\n :param y_train: the train outcome set\r\n\r\n :return: a data frame about features' importance\r\n '''\r\n for classifier, param_dict in classifier_dict.items():\r\n model_dict = modeling.build_rf(param_dict, X_train, y_train)\r\n for _, model in model_dict.items():\r\n d = {'Features': X_train.columns, \"Importance\": model.feature_importances_}\r\n feature_importance = pd.DataFrame(data=d)\r\n feature_importance = feature_importance.sort_values(by=['Importance'], ascending=False)\r\n return feature_importance\r\n" }, { "alpha_fraction": 0.6060476899147034, "alphanum_fraction": 0.6077512502670288, "avg_line_length": 29.30666732788086, "blob_id": "053a8278e3f608c07117ced819f5ce37a1d99150", "content_id": "c7f123306170db36decddd855b155cec8ee70d8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2348, "license_type": "no_license", "max_line_length": 81, "num_lines": 75, "path": "/HW3/preprocess.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\r\nHW3: preprocess\r\nYuwei Zhang\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef create_outcome(df, post_col, funded_col, interval_col, outcome_col, period):\r\n '''\r\n Define the outcome column\r\n :param df: a data frame\r\n :param post_col: (str) the name of the posted date\r\n :param funded_col: (str) the name of the funded date\r\n :param interval_col: (str) the name of the interval\r\n :param outcome_col: (str) the name of the outcome column\r\n :param period: (timedelta object) the period that the project receive funding\r\n\r\n :return: No returns but add three two columns\r\n '''\r\n\r\n df[post_col] = pd.to_datetime(df[post_col])\r\n df[funded_col] = pd.to_datetime(df[funded_col])\r\n df[interval_col] = df[funded_col] - df[post_col]\r\n df[outcome_col] = np.where(df[interval_col] <= period, 1, 0)\r\n\r\n\r\ndef imputation(df, colnames, is_num=True):\r\n '''\r\n imputate the cells that are NaN with intended value. 
If it is numeric, then\r\n imputating value is its mean, else it would be 'unknown'.\r\n :param df: a data frame\r\n :param colnames:(list) a list of colnames to be imputated\r\n :param is_num: (bool) check whether those columns are numeric or not\r\n\r\n :return: No returns.\r\n '''\r\n\r\n for colname in colnames:\r\n if is_num:\r\n impute_val = df[colname].mean()\r\n else:\r\n impute_val = 'unknown'\r\n\r\n df[colname] = df[colname].fillna(value=impute_val)\r\n\r\n\r\ndef discritize(df, colname, bins_list, labels_list):\r\n '''\r\n Discritize the continuous variable\r\n Inputs:\r\n df: a data frame\r\n colname: the name of the column\r\n bins_list: the list of the boundaries to be cut\r\n labels_list: the label of\r\n Output:\r\n add a new column that are discritized from a continuous variable\r\n '''\r\n df[(colname + '_category')] = pd.cut(df[colname],\r\n bins=bins_list,\r\n labels=labels_list,\r\n include_lowest=True, right=False)\r\n\r\n\r\ndef get_dummies(df, colname):\r\n '''\r\n Convert the categorical variable into dummies\r\n Inputs:\r\n df: a data frame\r\n colname: the name of the colname\r\n Return:\r\n the data frame with those dummies into data frame\r\n '''\r\n return pd.concat([df,pd.get_dummies(df[colname])], axis=1)\r\n" }, { "alpha_fraction": 0.7588152289390564, "alphanum_fraction": 0.7616360783576965, "avg_line_length": 87.625, "blob_id": "644092caa5717c0b056c258e4b540a6f9bee90b8", "content_id": "4796a1c7397f287001436ec39be5444ca3d352d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 709, "license_type": "no_license", "max_line_length": 161, "num_lines": 8, "path": "/HW5/README.md", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "# Assignment 5\n* ```preprocess.py```: module about data imputation, discretization and hot encoding. \n* ```exploration.py```: module about data exploration. Including functions about plotting distribution plot, count plots and heatmap.\n* ```modeling.py```: module about building different models and predicting with test sets and evaluating each model. Include the function of temporal validation.\n* ```evaluation.py```:module about different evaluation metrics, relevant plots.\n* ```HW5_output.ipynb```: jupyter notebook about how I implement these pipelines. 
\n* ```write_up.pdf```: written analysis about this project\n* ```graphs```: a folder contains all the precision and recall curves, roc curves plots.\n" }, { "alpha_fraction": 0.6339671015739441, "alphanum_fraction": 0.6393327713012695, "avg_line_length": 33.87029266357422, "blob_id": "9f0290fd092b696ea2d5eecb92a30cd1c84f4bd2", "content_id": "9d07a6cc2d9a35e0ae74b3e0eba0bdf6594155ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8573, "license_type": "no_license", "max_line_length": 96, "num_lines": 239, "path": "/HW3/evaluation.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\r\nHW3: evaluation\r\nYuwei Zhang\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import auc\r\nfrom sklearn.metrics import precision_recall_curve\r\nimport matplotlib.pyplot as plt\r\nfrom modeling import *\r\n\r\n\r\ndef compute_acc(y_true, y_scores, k):\r\n '''\r\n Compute accuracy score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an accuracy score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return accuracy_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_f1(y_true, y_scores, k):\r\n '''\r\n Compute f1 score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an f1 score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return f1_score(y_true_sorted, preds_at_k)\r\n\r\ndef compute_auc_roc(y_true, y_scores, k):\r\n '''\r\n Compute area under Receiver Operator Characteristic Curve\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an auc_roc score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return roc_auc_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_auc(pred_scores, true_labels):\r\n '''\r\n Compute auc score\r\n :param pred_scores: an array of predicted scores\r\n :param true_labels: an array of true labels\r\n\r\n :return: area under curve score\r\n '''\r\n fpr, tpr, thresholds = roc_curve(true_labels, pred_scores, pos_label=2)\r\n return auc(fpr, tpr)\r\n\r\n\r\ndef wrap_up(classifier_dict, X_train, y_train, X_test, y_test, threshold, threshold_list):\r\n '''\r\n A wrap up function to train data on al the intended classifiers and\r\n calculate the corresponding metrics\r\n :param classifier_dict: (dict) a dictionary of mapping classifiers to their parameters\r\n :param X_train: the train feature set\r\n :param y_train: the train outcome set\r\n :param X_test: the test feature set\r\n :param 
y_test: the test outcome set\r\n :param threshold: (float) the threshold for computing accuracy, f1, auc_roc\r\n :param threshold_list: (list) a list of thresholds for computing precision and recall\r\n\r\n :return: a dictionary of mapping models to evaluation metrics.\r\n '''\r\n count = 0\r\n evaluation = {}\r\n baseline = y_test.mean()\r\n for classifier, param_dict in classifier_dict.items():\r\n if classifier == 'Logistic Regression':\r\n model_dict = build_lr(param_dict, X_train, y_train)\r\n if classifier == 'K Nearest Neighbors':\r\n model_dict = build_knn(param_dict, X_train, y_train)\r\n if classifier == 'Decision Tree':\r\n model_dict = build_dt(param_dict, X_train, y_train)\r\n if classifier == 'Support Vector Machine':\r\n model_dict = build_svm(param_dict, X_train, y_train)\r\n if classifier == 'Random Forest':\r\n model_dict = build_rf(param_dict, X_train, y_train)\r\n if classifier == 'Boosting':\r\n model_dict = build_boosting(param_dict, X_train, y_train)\r\n if classifier == 'Bagging':\r\n model_dict = build_bagging(param_dict, X_train, y_train)\r\n\r\n pred_scores_dict = predict_models(classifier, model_dict, X_test)\r\n\r\n for params, score in pred_scores_dict.items():\r\n evaluation[count] = [classifier]\r\n print(\"Running {} ...\".format(classifier))\r\n evaluation[count].append(params)\r\n evaluation[count].append(baseline)\r\n accuracy = compute_acc(y_test, score, threshold)\r\n evaluation[count].append(accuracy)\r\n f1 = compute_f1(y_test, score, threshold)\r\n evaluation[count].append(f1)\r\n auc_roc = compute_auc_roc(y_test, score, threshold)\r\n evaluation[count].append(auc_roc)\r\n for threshold in threshold_list:\r\n precision = precision_at_k(y_test, score, threshold)\r\n evaluation[count].append(precision)\r\n recall = recall_at_k(y_test, score, threshold)\r\n evaluation[count].append(recall)\r\n count += 1\r\n return evaluation\r\n\r\n\r\ndef write_as_df(dict, col_list):\r\n '''\r\n Write the evaluation result into a data frame\r\n :param dict: a dictionary mapping models to evaluation results\r\n :param col_list: (list) a list of the column names in data frame\r\n\r\n :return: a data frame\r\n '''\r\n return pd.DataFrame.from_dict(dict, orient='index', columns=col_list)\r\n\r\n\r\n# The following functions are referenced from:\r\n# https://github.com/rayidghani/magicloops/blob/master/mlfunctions.py\r\n\r\ndef joint_sort_descending(l1, l2):\r\n '''\r\n Sort two arrays together\r\n :param l1: numpy array\r\n :param l2: numpy array\r\n\r\n :return: two sorted arrays\r\n '''\r\n idx = np.argsort(l1)[::-1]\r\n return l1[idx], l2[idx]\r\n\r\n\r\ndef generate_binary_at_k(y_scores, k):\r\n '''\r\n predict labels based on thresholds\r\n :param y_scores: the predicted scores\r\n :param k: (int or float) threshold\r\n\r\n :return: predicted labels\r\n '''\r\n cutoff_index = int(len(y_scores) * (k / 100.0))\r\n predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\r\n return predictions_binary\r\n\r\n\r\ndef precision_at_k(y_true, y_scores, k):\r\n '''\r\n Compute precision based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) precision score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return precision_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef recall_at_k(y_true, y_scores, k):\r\n '''\r\n 
Compute recall based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) recall score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return recall_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef plot_precision_recall_n(y_true, y_prob, model_name, output_type):\r\n '''\r\n Plot precision and recall at different percent of population\r\n :param y_true: the true labels\r\n :param y_prob: the predicted labels\r\n :param model_name: the name of the model\r\n :param output_type: (str) 'save' or 'show'\r\n\r\n :return: No returns but a plot\r\n '''\r\n y_score = y_prob\r\n precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)\r\n precision_curve = precision_curve[:-1]\r\n recall_curve = recall_curve[:-1]\r\n pct_above_per_thresh = []\r\n number_scored = len(y_score)\r\n for value in pr_thresholds:\r\n num_above_thresh = len(y_score[y_score >= value])\r\n pct_above_thresh = num_above_thresh / float(number_scored)\r\n pct_above_per_thresh.append(pct_above_thresh)\r\n pct_above_per_thresh = np.array(pct_above_per_thresh)\r\n\r\n plt.clf()\r\n fig, ax1 = plt.subplots()\r\n ax1.plot(pct_above_per_thresh, precision_curve, 'b')\r\n ax1.set_xlabel('percent of population')\r\n ax1.set_ylabel('precision', color='b')\r\n ax2 = ax1.twinx()\r\n ax2.plot(pct_above_per_thresh, recall_curve, 'r')\r\n ax2.set_ylabel('recall', color='r')\r\n ax1.set_ylim([0, 1])\r\n ax1.set_ylim([0, 1])\r\n ax2.set_xlim([0, 1])\r\n\r\n name = model_name\r\n plt.title(name)\r\n if (output_type == 'save'):\r\n plt.savefig(name)\r\n elif (output_type == 'show'):\r\n plt.show()\r\n else:\r\n plt.show()\r\n" }, { "alpha_fraction": 0.6347922086715698, "alphanum_fraction": 0.6403089165687561, "avg_line_length": 30.61627960205078, "blob_id": "8ed9877cee2ab7c23c6a209b0168048155076431", "content_id": "421c3466a00916df9b6fb4d4a73a3050b15d1cd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2719, "license_type": "no_license", "max_line_length": 98, "num_lines": 86, "path": "/HW2/modeling_and_evaluation.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW 2: Modeling and evaluation\nYuwei Zhang\n\"\"\"\n\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.metrics import jaccard_similarity_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\n\ndef preprocess_feature(df, features):\n '''\n Choose features with selected columns and normalize them\n Inputs:\n df: a data frame\n features: a list of selected feature names\n Return:\n preprocessed features\n '''\n X = df[features]\n X = preprocessing.StandardScaler().fit(X).transform(X)\n return X\n\n\ndef split_data(X, y, test_size):\n '''\n Split data set into train set and test set. 
Print out their shape\n Inputs:\n X: feature array\n y: an numpy array\n test_size: the proportion of test set\n Returns:\n train set and test set\n '''\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=4)\n print('Train set:', X_train.shape, y_train.shape)\n print('Test set:', X_test.shape, y_test.shape)\n return X_train, X_test, y_train, y_test\n\n\ndef get_label(pred_score, threshold):\n '''\n Get the label of test data based on choosen threshold\n Inputs:\n pred_score(numpy array): an arrary of predicted probability\n threshold(float): the threshold to choose labels\n Returns:\n an array of predicted labels\n '''\n pred_label = np.array(list(map(lambda x: 1 if x > threshold else 0, pred_score)))\n return pred_label\n\n\ndef fit_and_evaluation(x_train, y_train, x_test, y_test):\n '''\n Create the decision tree model with max_depth from 3 to 6 and evaluate each model\n by jaccard index and F1 score report\n Inputs:\n train set and test set\n Output:\n print out differnt evaluation results for differnt models\n return the best decision tree model\n '''\n max_jaccard = float('-inf')\n best_tree = None\n for d in range(3, 7):\n creditTree = tree.DecisionTreeClassifier(criterion=\"entropy\", max_depth=d)\n creditTree.fit(x_train, y_train)\n # Predicting\n DT_score = creditTree.predict_proba(x_test)[:,1]\n DT_yhat = get_label(DT_score, 0.4)\n print('evaluation for max_depth = {}:'.format(d))\n print()\n # Jaccard Index\n j_index = jaccard_similarity_score(y_test, DT_yhat)\n print(\" jaccard index for DT: {}\".format(round(j_index, 3)))\n # F1_score\n print(\" F1 score report for DT:\" + \"\\n\", classification_report(y_test, DT_yhat))\n \n if j_index >= max_jaccard:\n max_jaccard = j_index\n best_tree = creditTree\n return best_tree\n" }, { "alpha_fraction": 0.530299186706543, "alphanum_fraction": 0.5401875972747803, "avg_line_length": 54.94326400756836, "blob_id": "939dfc60f0905faa829267778ad44bd9b6b43cfb", "content_id": "13e6709561ea7c5a519767e1052f4e91b07408f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7888, "license_type": "no_license", "max_line_length": 230, "num_lines": 141, "path": "/HW5/modeling.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW5: modeling\nYuwei Zhang\n\"\"\"\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import ParameterGrid\nfrom datetime import timedelta\nfrom datetime import datetime\nfrom evaluation import *\n\n\ndef split_data(X, y, test_size, temporal, split_var, start_date, end_date, delay_interval, period):\n '''\n Split the data set\n :param X: the feature set\n :param y: the outcome set\n :param test_size: (float) the size of test\n :param temporal: (boolean) whether to use temporal validation\n :param split_var: (str) split the data set based on this variable\n :param start_date: the start date of this data set\n :param end_date: the end date of this data set\n :param delay_interval: how many days the test set should be away from train set\n :param period: (relativedelta object) the period\n\n :return: a list of splited train and test sets\n '''\n 
one_day = timedelta(days=1)\n    split_results = []\n    if temporal:\n        count = 1\n        while True:\n            train_end = start_date + period * count - one_day - delay_interval\n            X_train = X[X[split_var] < train_end]\n            X_train = X_train.drop([split_var], axis=1)\n            y_train = y[X[split_var] < train_end]\n            test_start = start_date + period * count\n            test_end = test_start + period - one_day\n            X_test = X[(X[split_var] >= test_start) & (X[split_var] <= test_end)]\n            X_test = X_test.drop([split_var], axis=1)\n            y_test = y[(X[split_var] >= test_start) & (X[split_var] <= test_end)]\n            split_results.append((X_train, X_test, y_train, y_test))\n            count += 1\n            print('start date: {}'.format(start_date))\n            print('train_end_date: {}'.format(train_end))\n            print('test_start_date: {}'.format(test_start))\n            print('test_end_date: {}'.format(test_end))\n            print('end_date: {}'.format(end_date))\n            print()\n            # when the last project date is exhausted, stop the loop\n            if start_date + period * count - one_day >= end_date:\n                break\n    else:\n        # drop the temporal column before a simple random split\n        X = X.drop([split_var], axis=1)\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= \\\n            test_size, random_state=0)\n        split_results.append((X_train, X_test, y_train, y_test))\n    return split_results\n\n\ndef run_models(models_list, clfs, grid, X_train, X_test, y_train, y_test, threshold):\n    \"\"\"\n    Run all the models in models_list and adjust hyper-parameters based on grid, evaluate\n    them, save the results into a data frame and save the corresponding graphs\n    Inputs:\n        models_list:(list of str)a list of names of models to run\n        clfs:a dictionary of base models\n        grid:a dictionary of parameters\n        X_train:the train feature set\n        X_test: the test feature set\n        y_train: the train outcome set\n        y_test: the test outcome set\n        threshold: (int) threshold for evaluation metrics\n    Returns:\n        a data frame and a bunch of graphs\n    \"\"\"\n    # create the empty data frame\n    col_list = ['model_name', 'parameters', 'baseline', 'accuracy', 'f1', 'auc_roc',\n                'precision_1%', 'precision_2%', 'precision_5%',\n                'precision_10%', 'precision_20%', 'precision_30%',\n                'precision_50%', 'recall_1%', 'recall_2%',\n                'recall_5%', 'recall_10%', 'recall_20%','recall_30%', 'recall_50%' ]\n    results_df = pd.DataFrame(columns=col_list)\n    \n    for index,clf in enumerate([clfs[x] for x in models_list]):\n        parameter_values = grid[models_list[index]]\n        for params in ParameterGrid(parameter_values):\n            try:\n                print(\"Running {} ...\".format(models_list[index]))\n                clf.set_params(**params)\n                if models_list[index] == 'Support Vector Machine':\n                    y_pred_probs = clf.fit(X_train, y_train).decision_function(X_test)\n                else:\n                    y_pred_probs = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]\n                \n                if models_list[index] == \"Decision Tree\" or models_list[index] == \"Random Forest\":\n                    d = {'Features': X_train.columns, \"Importance\": clf.feature_importances_}\n                    feature_importance = pd.DataFrame(data=d)\n                    feature_importance = feature_importance.sort_values(by=['Importance'], ascending=False)\n                    print(feature_importance.head())\n                \n                # Sort true y labels and predicted scores at the same time \n                y_pred_probs_sorted, y_test_sorted = zip(*sorted(zip(y_pred_probs, y_test), reverse=True))\n                # Write the evaluation results into data frame\n                results_df.loc[len(results_df)] = [models_list[index], params,\n                                                   precision_at_k(y_test_sorted,y_pred_probs_sorted, 100),\n                                                   compute_acc(y_test_sorted, y_pred_probs_sorted, threshold), compute_f1(y_test_sorted, y_pred_probs_sorted, threshold),\n                                                   compute_auc_roc(y_test_sorted, y_pred_probs_sorted, threshold),\n                                                   
precision_at_k(y_test_sorted,y_pred_probs_sorted,1),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,2),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,5),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,10),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,20),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,30),\n precision_at_k(y_test_sorted,y_pred_probs_sorted,50),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,1),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,2),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,5),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,10),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,20),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,30),\n recall_at_k(y_test_sorted,y_pred_probs_sorted,50)]\n \n graph_name_pr = 'D:/UChicago/2019 spring/CAPP30254/assignments/HW5/grpahs/' + \\\n 'precision_recall_curve of ' + models_list[index] + \\\n datetime.now().strftime(\"%m-%d-%Y %H%M%S\")\n plot_precision_recall_n(y_test, y_pred_probs, clf, graph_name_pr, 'save')\n graph_name_roc = 'D:/UChicago/2019 spring/CAPP30254/assignments/HW5/grpahs/' + \\\n 'roc_curve of' + models_list[index] +\\\n datetime.now().strftime(\"%m-%d-%Y %H%M%S\")\n plot_roc(clf, graph_name_roc, y_pred_probs, y_test, 'save')\n except IndexError as e:\n print('Error:',e)\n continue\n return results_df\n" }, { "alpha_fraction": 0.6476494073867798, "alphanum_fraction": 0.6565737128257751, "avg_line_length": 29.91132926940918, "blob_id": "0d8ac90a3cb608baceee6b4da3a03cb0c148b4f6", "content_id": "15293479574a7c2684745c7209a50a2637e23f2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6275, "license_type": "no_license", "max_line_length": 96, "num_lines": 203, "path": "/HW5/evaluation.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW5: evaluation\nYuwei Zhang\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import precision_recall_curve\nimport matplotlib.pyplot as plt\n\n\ndef compute_acc(y_true, y_scores, k):\n '''\n Compute accuracy score based on threshold\n :param pred_scores: (np array) an array of predicted score\n :param threshold: (float) the threshold of labeling predicted results\n :param y_test: test set\n\n :return: (float) an accuracy score\n '''\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n\n return accuracy_score(y_true_sorted, preds_at_k)\n\n\ndef compute_f1(y_true, y_scores, k):\n '''\n Compute f1 score based on threshold\n :param pred_scores: (np array) an array of predicted score\n :param threshold: (float) the threshold of labeling predicted results\n :param y_test: test set\n\n :return: (float) an f1 score\n '''\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n\n return f1_score(y_true_sorted, preds_at_k)\n\ndef compute_auc_roc(y_true, y_scores, k):\n '''\n Compute area under Receiver Operator Characteristic Curve\n :param pred_scores: (np array) an array of predicted score\n :param threshold: (float) the threshold of labeling predicted results\n :param y_test: 
test set\n\n :return: (float) an auc_roc score\n '''\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n\n return roc_auc_score(y_true_sorted, preds_at_k)\n\n\ndef compute_auc(pred_scores, true_labels):\n '''\n Compute auc score\n :param pred_scores: an array of predicted scores\n :param true_labels: an array of true labels\n\n :return: area under curve score\n '''\n fpr, tpr, thresholds = roc_curve(true_labels, pred_scores, pos_label=2)\n return auc(fpr, tpr)\n\n\n\n\n# The following functions are referenced from:\n# https://github.com/rayidghani/magicloops/blob/master/mlfunctions.py\n\ndef joint_sort_descending(l1, l2):\n '''\n Sort two arrays together\n :param l1: numpy array\n :param l2: numpy array\n\n :return: two sorted arrays\n '''\n idx = np.argsort(l1)[::-1]\n return l1[idx], l2[idx]\n\n\ndef generate_binary_at_k(y_scores, k):\n '''\n predict labels based on thresholds\n :param y_scores: the predicted scores\n :param k: (int or float) threshold\n\n :return: predicted labels\n '''\n cutoff_index = int(len(y_scores) * (k / 100.0))\n predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\n return predictions_binary\n\n\ndef precision_at_k(y_true, y_scores, k):\n '''\n Compute precision based on threshold (percentage)\n :param y_true: the true labels\n :param y_scores: the predicted labels\n :param k: (int or float) the threshold\n\n :return: (float) precision score\n '''\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n return precision_score(y_true_sorted, preds_at_k)\n\n\ndef recall_at_k(y_true, y_scores, k):\n '''\n Compute recall based on threshold (percentage)\n :param y_true: the true labels\n :param y_scores: the predicted labels\n :param k: (int or float) the threshold\n\n :return: (float) recall score\n '''\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n return recall_score(y_true_sorted, preds_at_k)\n\n\ndef plot_precision_recall_n(y_true, y_prob, plot_name, save_name, output_type):\n '''\n Plot precision and recall at different percent of population\n :param y_true: the true labels\n :param y_prob: the predicted labels\n :param model_name: the name of the model\n :param output_type: (str) 'save' or 'show'\n\n :return: No returns but a plot\n '''\n y_score = y_prob\n precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)\n precision_curve = precision_curve[:-1]\n recall_curve = recall_curve[:-1]\n pct_above_per_thresh = []\n number_scored = len(y_score)\n for value in pr_thresholds:\n num_above_thresh = len(y_score[y_score >= value])\n pct_above_thresh = num_above_thresh / float(number_scored)\n pct_above_per_thresh.append(pct_above_thresh)\n pct_above_per_thresh = np.array(pct_above_per_thresh)\n\n plt.clf()\n fig, ax1 = plt.subplots()\n ax1.plot(pct_above_per_thresh, precision_curve, 'b')\n ax1.set_xlabel('percent of population')\n ax1.set_ylabel('precision', color='b')\n ax2 = ax1.twinx()\n ax2.plot(pct_above_per_thresh, recall_curve, 'r')\n ax2.set_ylabel('recall', color='r')\n ax1.set_ylim([0, 1])\n ax1.set_ylim([0, 1])\n ax2.set_xlim([0, 1])\n\n plt.title(plot_name)\n if (output_type == 'save'):\n plt.savefig(save_name)\n elif (output_type == 'show'):\n plt.show()\n else:\n plt.show()\n \n \ndef 
plot_roc(plot_name, save_name, probs, y_true, output_type):\n '''\n Plot the AUC-ROC curve\n :param name: the saved path\n :param probs: the predicted probability\n :param y_true: the true labels\n :param output_type: 'save' or 'show'\n\n :return: No returns\n '''\n fpr, tpr, thresholds = roc_curve(y_true, probs)\n roc_auc = auc(fpr, tpr)\n plt.clf()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.05])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(plot_name)\n plt.legend(loc=\"lower right\")\n if (output_type == 'save'):\n plt.savefig(save_name, close=True)\n plt.close()\n elif (output_type == 'show'):\n plt.show()\n else:\n plt.show()\n" }, { "alpha_fraction": 0.6015686392784119, "alphanum_fraction": 0.6031372547149658, "avg_line_length": 25.5625, "blob_id": "9cbccd389de1e0af949e0009aecd30e78bda7941", "content_id": "347d5e8bf0cf83286e8b38b9c3a81a234db894eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 72, "num_lines": 48, "path": "/HW2/preprocess.py", "repo_name": "haonen/CAPP30254", "src_encoding": "UTF-8", "text": "\"\"\"\nHW2: Data Preprocessing\nYuwei Zhang\n\"\"\"\n\nimport pandas as pd\n\n\ndef imputation(df, colname):\n '''\n imputate the cells that are NaN with the mean of this column\n Inputs:\n df: a data frame\n colname: the name of the colunmn that contains NA values\n Output:\n imputate NA cells with mean value\n '''\n avg = df[colname].mean()\n df[colname] = df[colname].fillna(value=avg)\n\n\ndef discretize(df, colname, bins_list, labels_list):\n '''\n Discretize the continuous variable\n Inputs:\n df: a data frame\n colname: the name of the column\n bins_list: the list of the boundaries to be cut\n labels_list: the label of\n Output:\n add a new column that are discritized from a continuous variable\n '''\n df[(colname + '_category')] = pd.cut(df[colname],\n bins=bins_list,\n labels=labels_list,\n include_lowest=True, right=False)\n\n\ndef get_dummies(df, colname):\n '''\n Convert the categorical variable into dummies\n Inputs:\n df: a data frame\n colname: the name of the colname\n Return:\n the data frame with those dummies into data frame\n '''\n return pd.concat([df,pd.get_dummies(df[colname])], axis=1)\n" } ]
"num_files": 14
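The temporal-validation loop in the HW5 `modeling.split_data` above is easiest to see with a small driver. The following is a hedged sketch: the toy data frame, column names, and date range are illustrative assumptions, not values from the repository:

```python
# Hypothetical driver for the HW5 split_data() shown in the record above.
import pandas as pd
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from modeling import split_data  # assumes HW5/modeling.py is on the path

df = pd.DataFrame({
    'date_posted': pd.date_range('2012-01-01', periods=200, freq='3D'),
    'total_price': range(200),
    'fully_funded': [i % 2 for i in range(200)],
})
X = df[['date_posted', 'total_price']]
y = df['fully_funded']
splits = split_data(X, y, test_size=0.2, temporal=True,
                    split_var='date_posted',
                    start_date=pd.Timestamp('2012-01-01'),
                    end_date=df['date_posted'].max(),
                    delay_interval=timedelta(days=60),  # gap so outcomes are observable
                    period=relativedelta(months=6))     # 6-month rolling windows
for X_train, X_test, y_train, y_test in splits:
    print(len(X_train), len(X_test))
```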
"repo_name": "dmitrya4/serverscripts"
"repo_url": "https://github.com/dmitrya4/serverscripts"
"snapshot_id": "97c99da844c4a26c187b730ee0c4d322cd58299f"
"revision_id": "9f31ba11ae6afcc967fe13224f81db874a108ee0"
"directory_id": "4bf47dc2c589e120ec7d3cb94afb51d0f079fbdb"
"branch_name": "refs/heads/master"
"visit_date": "2021-01-10T13:00:45.415829"
"revision_date": "2015-09-24T20:16:58"
"committer_date": "2015-09-24T20:16:58"
"github_id": 44920531
"star_events_count": 0
"fork_events_count": 0
"gha_license_id": null
"gha_created_at": null
"gha_updated_at": null
"gha_pushed_at": null
"gha_language": null
[ { "alpha_fraction": 0.5102040767669678, "alphanum_fraction": 0.5646258592605591, "avg_line_length": 12.454545021057129, "blob_id": "e847bac04517aefb8bd6ca53e643f9553a7011af", "content_id": "0fa122437875bc1b2721f2bae0fbc72ebf611da9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 147, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/rename933", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd /mnt/flash/prepare933\n\ni=0\nfor f in `ls -tr *.jpg 2>/dev/null`\ndo\n newf=`printf %06d $i`.jpg\n mv $f $newf\n i=$((i+1))\ndone" }, { "alpha_fraction": 0.629482090473175, "alphanum_fraction": 0.6434262990951538, "avg_line_length": 19.875, "blob_id": "fcfe5b8be38caac609644c6cc1bb4b709f107e39", "content_id": "1d8afd106936a3feb6f537783346aeecc87732fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 127, "num_lines": 48, "path": "/933avicreator.py", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport re\nimport os\nimport sys\nimport cmd\nimport subprocess\nimport fileinput\nimport glob\nimport logging\nfrom datetime import datetime, date, time\n\nlogging.basicConfig(format = '%(levelname)-8s [%(asctime)s] %(message)s', level = logging.INFO, filename = '933avicreator.log')\n\n#Current utc date & time\nname = datetime.utcnow()\n\n#Go to directory\npath = \"/mnt/flash/prepare933\"\nos.chdir(path)\n\n#Make & start command to convert jpg to avi\ncomname = \"avconv -r 5 -i %06d.jpg -r 5 -vcodec mjpeg -qscale 1 .avi\"\nresult = subprocess.getoutput(comname)\nlogging.info('AVI created!')\n\n#Rename .avi to current_utc_date_time.avi\nf = os.listdir(path)\n#vid = glob.glob('.avi')\nren = os.rename('.avi', str(name)+'.avi')\nlogging.info('___Exit from script___')\n\n\n#////////////////////////////\n#path = \"/home/pi/scripts\"\n#os.chdir(path)\n#f = open('index','r')\n#f.readline(i)\n#print(i)\n#f.close()\n\n#i=i+1\n\n#f = open('index','w')\n#f.write(str(i) + '\\n')\n#print(i)\n#f.close()\n#//////////////////////////\n\n\n" }, { "alpha_fraction": 0.7062146663665771, "alphanum_fraction": 0.7316384315490723, "avg_line_length": 19.882352828979492, "blob_id": "c2c283f4b1cdfeb48c584ed5c69867a0b9e68aa7", "content_id": "cf7f86c424671cbe2de2df9c5ca40746b53f5cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 354, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/processingroom1data", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash -xv\n\n#cd /mnt/flash/room1\n#mv -f *jpg /mnt/flash/prepareroom1\n\ncd /home/pi/scripts/\n./renameroom1\n./room1avicreator.py\n\ncd /mnt/flash/prepareroom1\n#cp -r *avi /mnt/flash/ftp/privat/room1\nmv -f *avi /mnt/flash/ftp/privat/room1\n#Temporary swithed off this operation\n#mv -f *avi /mnt/flash/dav/video/room1\n\ncd /mnt/flash/prepareroom1\nrm -rf *jpg" }, { "alpha_fraction": 0.6578947305679321, "alphanum_fraction": 0.7131578922271729, "avg_line_length": 18.049999237060547, "blob_id": "59f0012c34333ff546890eaf5207ed9747e5ee46", "content_id": "2c75c511a9399ccf85435978e0ca7341b2d0b71b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 380, "license_type": "no_license", "max_line_length": 38, "num_lines": 20, "path": 
"/processingipdata", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash -xv\n\n#cd /mnt/flash/ftp/933\n#mv -f *jpg /mnt/flash/prepare933\n\n#cd /mnt/flash/ftp/933\n#rm -rf *jpg\n\ncd /home/pi/scripts\n./rename933\n./933avicreator.py\n\ncd /mnt/flash/prepare933\n#cp -r *avi /mnt/flash/ftp/privat/holl\nmv -f *avi /mnt/flash/ftp/privat/holl\n#Temporary switched off this operation\n#mv -f *avi /mnt/flash/dav/video/holl\n\ncd /mnt/flash/prepare933\nrm -rf *jpg" }, { "alpha_fraction": 0.650602400302887, "alphanum_fraction": 0.6695352792739868, "avg_line_length": 47.5, "blob_id": "7491fe6cfc43517eba586670378d8f563b1cfc8d", "content_id": "4ca076e3668a27b0fb63462e8664aada90fcf6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 581, "license_type": "no_license", "max_line_length": 109, "num_lines": 12, "path": "/deltemp.php", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/php\n<?php\n$date = date(DATE_RFC2822);\nfile_put_contents(\"/home/pi/scripts/deltemp.log\", \"$date\".PHP_EOL, FILE_APPEND);\nmysql_connect(\"localhost\", \"root\", \"093833\");\nfile_put_contents(\"/home/pi/scripts/deltemp.log\", \"Connect to database OK!\".PHP_EOL, FILE_APPEND);\nmysql_select_db(\"home\");\nmysql_query(\"DELETE FROM tprocessor ORDER BY time LIMIT 8\");\nmysql_close();\nfile_put_contents(\"/home/pi/scripts/deltemp.log\", \"Old data deleted!\".PHP_EOL, FILE_APPEND);\nfile_put_contents(\"/home/pi/scripts/deltemp.log\", \"__________________________________\".PHP_EOL, FILE_APPEND);\n?>" }, { "alpha_fraction": 0.6702380776405334, "alphanum_fraction": 0.688095211982727, "avg_line_length": 41.04999923706055, "blob_id": "dfc18a03f4fea681846a99adccd56f27f4360d8f", "content_id": "2979bf8218b11f33cf905c945166f067f99d73b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 840, "license_type": "no_license", "max_line_length": 98, "num_lines": 20, "path": "/tproc.php", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/php\n<?php\n\n$date = date(DATE_RFC2822);\nfile_put_contents(\"/home/pi/scripts/tproc.log\", \"$date\".PHP_EOL, FILE_APPEND);\n$filename = \"/sys/class/thermal/thermal_zone0/temp\";\n$handle = fopen($filename, \"r\");\n$content = fread($handle, filesize($filename));\n$result = intval($content/1000);\necho $result;\nfile_put_contents(\"/home/pi/scripts/tproc.log\", \"$result\".PHP_EOL, FILE_APPEND);\nfclose($handle);\n\n$link = mysql_connect('localhost', 'root', '093833') or die (\"Could not connect:\" .mysql_error());\n//file_put_contents(\"/home/pi/scripts/tproc.log\", \"$link\".PHP_EOL, FILE_APPEND);\nmysql_select_db('home') or die (\"Could not select database\");\nmysql_query(\"INSERT INTO tprocessor(time, t) VALUES(NOW(), $result)\");\nmysql_close($link);\nfile_put_contents(\"/home/pi/scripts/tproc.log\", \"___Exit from script___\".PHP_EOL, FILE_APPEND);\n?>" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 17, "blob_id": "0af0ef58f780b27c8fca55782d70991e4caf6b5f", "content_id": "87becd7307ae67bb15b9ea8ec3f0006fec5c0dc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 72, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/moveroom1", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash -xv\n\ncd /mnt/flash/room1\nmv -f *jpg 
/mnt/flash/prepareroom1\n" }, { "alpha_fraction": 0.5977393388748169, "alphanum_fraction": 0.6303191781044006, "avg_line_length": 21.74242401123047, "blob_id": "085eff8640750b8d5eb8f634b57cb92b29e46830", "content_id": "cc8495c26dc11d4ec096c831dae4aa3a93556bbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 139, "num_lines": 66, "path": "/gsmalarm.py", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport sys\nimport RPi.GPIO as GPIO\nimport serial\nimport time\nimport subprocess\nimport logging\n\nch_in = 5\n\nlogging.basicConfig(format = '%(levelname)-8s [%(asctime)s] %(message)s', level = logging.INFO, filename = '/home/pi/scripts/gsmalarm.log')\n\nport = serial.Serial(\"/dev/ttyAMA0\", baudrate = 9600, timeout = 2)\n# Serial() opens the port on construction; close it so port.open() in the alarm loop below succeeds\nport.close()\nstart = \"AT\"\nechooff = \"ATE0\"\ncmgfset = \"AT+CMGF=1\"\ncscsset = \"AT+CSCS=\\\"GSM\\\"\"\ndial1 = \"ATD>1;\" #My number\ndial2 = \"ATD>2;\" #Tatyana number\nendcall = \"ATH\"\nsmsnumber1 = \"AT+CMGS=\\\"+79307040757\\\"\"\nsms = \"ALARM! GAS LEVEL IS OUT OF RANGE!\"\n\n#Send sms for me and Tatyana\n#def sendsms():\n# port.write(smsnumber1+\"\\r\\n\")\n# time.sleep(0.2)\n# port.write(sms+chr(26)) #Send text and ctrl-z\n# time.sleep(0.2)\n\n#Three times call me and Tatyana\n\n\n#read = port.read()\n#print read\n# port.close()\n\ndef main():\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(ch_in, GPIO.IN)\n GPIO.add_event_detect(ch_in, GPIO.RISING)\n\n\nif __name__=='__main__':\n main()\n\nwhile True:\n if GPIO.event_detected(ch_in):\n port.open()\n port.write(start+\"\\r\\n\")\n time.sleep(0.2)\n port.write(echooff+\"\\r\\n\")\n time.sleep(0.2)\n port.write(cmgfset+\"\\r\\n\")\n time.sleep(0.2)\n port.write(cscsset+\"\\r\\n\")\n time.sleep(0.2)\n logging.info('Neoway ready!')\n for i in range(3):\n port.write(dial1+\"\\r\\n\")\n time.sleep(20)\n port.write(endcall+\"\\r\\n\")\n time.sleep(10)\n logging.info('Call = OK, end Call = OK')\n port.close()\n\n\n\n" }, { "alpha_fraction": 0.5074074268341064, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 14, "blob_id": "dbbeca367262cff2017add916fd15ef924ddf271", "content_id": "b6bbfea646e9e0f0ee58b4ebc8686b78a00c6bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 270, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/renameroom1", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash -xv\n\ncd /mnt/flash/prepareroom1\n#num = 1\ni=0\nfor f in `ls -tr *.jpg 2>/dev/null`\ndo\n newf=`printf %06d $i`.jpg \n mv $f $newf\n i=$((i+1))\n\n#mv \"$f\" \"$num\".jpg\n#let \"num += 1\"\n\n\ndone\n\n#avconv -r 10 -i %06d.jpg -r 10 -vcodec mjpeg -qscale 1 video1.avi\n" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 17, "blob_id": "3b956620d3d5ab2bb31f74d74a35609a05588a92", "content_id": "54801494912d2278ac60811ac739d5fbfe62c33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 71, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/move933", "repo_name": "dmitrya4/serverscripts", "src_encoding": "UTF-8", "text": "#!/bin/bash -xv\n\ncd /mnt/flash/ftp/933\nmv -f *jpg /mnt/flash/prepare933" } ]
10
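The serverscripts row above implements a two-step timelapse pipeline: rename933 renumbers camera JPEG frames (oldest first, mirroring `ls -tr`) into a zero-padded %06d.jpg sequence, and 933avicreator.py shells out to avconv to encode that sequence as MJPEG. A minimal sketch of the same pipeline as one Python function — assuming avconv is on PATH and the directory layout above; the name encode_timelapse is illustrative, not part of the repo:

import os
import subprocess
from datetime import datetime

def encode_timelapse(frame_dir, fps=5):
    # Oldest-first ordering mirrors `ls -tr` in rename933.
    frames = sorted(
        (f for f in os.listdir(frame_dir) if f.endswith(".jpg")),
        key=lambda f: os.path.getmtime(os.path.join(frame_dir, f)),
    )
    # Zero-padded names let avconv consume the frames as an image sequence.
    for i, name in enumerate(frames):
        os.rename(os.path.join(frame_dir, name),
                  os.path.join(frame_dir, "%06d.jpg" % i))
    # Writing straight to a timestamped name avoids the intermediate
    # hidden '.avi' file that 933avicreator.py renames afterwards.
    out = os.path.join(frame_dir,
                       datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S") + ".avi")
    # Same flags as 933avicreator.py: input/output rate, MJPEG codec, quality 1.
    subprocess.check_call(["avconv", "-r", str(fps),
                           "-i", os.path.join(frame_dir, "%06d.jpg"),
                           "-r", str(fps), "-vcodec", "mjpeg",
                           "-qscale", "1", out])
    return out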
saumil-jain/python_utils
https://github.com/saumil-jain/python_utils
212e7c8b6537f5dee9bd6a4a32d806cad7f3f8e3
375858210a6e1dd04a0eba48224b0db249a586a0
b35eab674c88fa997031476a6357a4318e042e48
refs/heads/master
2021-01-22T08:05:51.215766
2018-04-04T16:48:41
2018-04-04T16:48:41
92,602,009
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6314433217048645, "alphanum_fraction": 0.6374570727348328, "avg_line_length": 40.57143020629883, "blob_id": "c8612745b958bcf80d7ca3d6655d4998b59da743", "content_id": "66dacc9c7d5988fbabfb8770f325e7d95e4897a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/random_utils/random_numbers.py", "repo_name": "saumil-jain/python_utils", "src_encoding": "UTF-8", "text": "import random\n\n\ndef generate_random_numbers(output_file_name=\"random_output\",\n limit=100,\n start=0,\n stop=100):\n \"\"\"Generate a list of random numbers between two numbers and save to file.\n\n The output file is saved in the current working directory.\n\n :param output_file_name: The file name where the output will be saved.\n :param limit: The total number of random numbers to be generated.\n :param start: The inclusive start number of the range of random numbers.\n :param stop: The exclusive end number of the range of random numbers.\n :return: None\n \"\"\"\n with open(output_file_name, 'w') as output_file:\n for i in range(limit):\n output_file.write(str(random.randrange(start, stop)) + '\\n')\n\n\nif __name__ == \"__main__\":\n output_file_name = input('Enter output file name:')\n limit = int(input('Enter the count of random numbers to be generated:'))\n start = int(input('Enter the start range (inclusive):'))\n stop = int(input('Enter the stop range (exclusive):'))\n generate_random_numbers(output_file_name, limit, start, stop)\n" }, { "alpha_fraction": 0.469453364610672, "alphanum_fraction": 0.4823151230812073, "avg_line_length": 21.214284896850586, "blob_id": "cf337556f98db540138c4f59513dd33673e8055a", "content_id": "a8d61717a11697f8243ac198215752919b66c52d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 65, "num_lines": 28, "path": "/general/fibo.py", "repo_name": "saumil-jain/python_utils", "src_encoding": "UTF-8", "text": "def fibo_generator(count):\n \"\"\"Creates a generator which generates the fibonacci sequence\n\n :param count: count of fibonacci numbers to be generated\n :return:\n \"\"\"\n try:\n if count <= 0:\n return\n a = 0\n b = 1\n yield a\n if count == 1:\n return\n yield b\n if count == 2:\n return\n for i in range(count - 2):\n c = a + b\n yield c\n a, b = b, c\n except TypeError:\n raise TypeError(\"Only integers allowed\")\n\n\nif __name__ == \"__main__\":\n for i in fibo_generator(10):\n print(i)\n" }, { "alpha_fraction": 0.6302682161331177, "alphanum_fraction": 0.6321839094161987, "avg_line_length": 30.625, "blob_id": "2e0f50c37fa52dd71e5484d2f303ee9892d65612", "content_id": "41f78fc9b8e3eecd7146e69e8e315bdcf830eaa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "no_license", "max_line_length": 107, "num_lines": 48, "path": "/dateutils/date_diff.py", "repo_name": "saumil-jain/python_utils", "src_encoding": "UTF-8", "text": "import datetime\r\n\r\n\r\ndef parse_date(date_str):\r\n \"\"\"Parses a date string\r\n\r\n The date string should be in the format dd/mm/yyyy\r\n\r\n :param date_str: The input date string\r\n :return: A datetime.datetime object representing the input date\r\n :raises: IndexError and ValueError if date not in the required format\r\n \"\"\"\r\n try:\r\n date_list = date_str.split(\"/\")\r\n day = 
int(date_list[0])\r\n month = int(date_list[1])\r\n year = int(date_list[2])\r\n date_object = datetime.date(year=year, month=month, day=day)\r\n\r\n return date_object\r\n except (IndexError, ValueError) as e:\r\n print(\"Invalid date format '{}'. It must be in the format dd/mm/yyyy\".format(date_str))\r\n raise\r\n\r\n\r\ndef calculate_date_diff_from_today(date_object):\r\n \"\"\"Calculates the difference between input date and today's date.\r\n\r\n :param date_object: The datetime.date object representing the input date\r\n :return: a tuple of (the input date, today's date, their difference as a datetime.timedelta object)\r\n \"\"\"\r\n today = datetime.date.today()\r\n difference = today - date_object\r\n return date_object, today, difference\r\n\r\n\r\ndef main():\r\n date_str = input(\"Enter date:\\n\")\r\n try:\r\n date_object, today, date_difference = calculate_date_diff_from_today(parse_date(date_str))\r\n print(\"The difference between {} and {} (today) is {}\".format(date_object, today, date_difference))\r\n except Exception as e:\r\n print(\"Not able to calculate difference\")\r\n print(e)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" }, { "alpha_fraction": 0.8139534592628479, "alphanum_fraction": 0.8139534592628479, "avg_line_length": 20.5, "blob_id": "299db222635ca472443417423b4cb2e734410e95", "content_id": "9c4299bf95207f2585bb19f21a3542507d124567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "saumil-jain/python_utils", "src_encoding": "UTF-8", "text": "# python_utils\nUtilities written in Python\n" }, { "alpha_fraction": 0.6119047403335571, "alphanum_fraction": 0.613095223903656, "avg_line_length": 31.30769157409668, "blob_id": "dbd56397967628783e9eb66f9561dd0a8b2b5efd", "content_id": "6a968a7661520ff3cbb9c3e57088a309b0b3226c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 96, "num_lines": 26, "path": "/os_utils/update_git_repo.py", "repo_name": "saumil-jain/python_utils", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\n\ndir_to_scan = input('Enter dir path:')\n\nif not os.path.isdir(dir_to_scan):\n print('not a valid dir')\n exit()\n\nfor sub_dir in os.listdir(dir_to_scan):\n print(os.path.join(dir_to_scan, sub_dir))\n sub_dir = os.path.join(dir_to_scan, sub_dir)\n if os.path.isdir(sub_dir) and '.git' in os.listdir(sub_dir):\n print('git repo found in ' + sub_dir)\n\n os.chdir(sub_dir)\n\n # run 'git pull origin master' inside the repo and capture combined stdout/stderr\n git_pull_command = 'git pull origin master'\n completed_process = subprocess.run(git_pull_command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n print('git pull command executed. Output is:')\n print(completed_process.stdout.decode('utf-8'))\n\n else:\n print('git repo not found in ' + sub_dir)\n" } ]
5
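The python_utils row above collects small importable helpers (generate_random_numbers, fibo_generator, parse_date, calculate_date_diff_from_today), each guarded by an if __name__ == "__main__" block. A short usage sketch, assuming the repo root is on sys.path so the modules import under the paths shown (general/fibo.py, dateutils/date_diff.py, relying on implicit namespace packages); the driver itself is illustrative:

# Import paths follow the repo layout shown above (assumption: repo root on sys.path).
from general.fibo import fibo_generator
from dateutils.date_diff import parse_date, calculate_date_diff_from_today

# fibo_generator is lazy; list() drains it.
print(list(fibo_generator(10)))  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]

# calculate_date_diff_from_today returns an (input date, today, timedelta) tuple.
date_object, today, diff = calculate_date_diff_from_today(parse_date("01/01/2020"))
print("{} days between {} and {}".format(diff.days, date_object, today))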
b-mu/backpack
https://github.com/b-mu/backpack
c4f884ee2c6efc49b1cbd0c91362f7dc987bd676
511107f52db93c9f196efbf5a64a39622132139a
fc648e07c02a47bfe00a993b45282e599d0aeb2d
refs/heads/master
2023-07-05T01:49:59.608445
2021-06-29T21:04:22
2021-06-29T21:04:22
376,047,275
0
0
MIT
2021-06-11T14:15:27
2021-06-29T21:04:28
2021-06-29T21:12:11
Python
[ { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.7784678936004639, "avg_line_length": 36.153846740722656, "blob_id": "a85f27c4a698df895df5d7c5897748e0cce15c03", "content_id": "5edd772abfd06c14871b663a321f3145645b14b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "permissive", "max_line_length": 84, "num_lines": 13, "path": "/backpack/extensions/secondorder/diag_ggn/pooling.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.avgpool2d import AvgPool2DDerivatives\nfrom backpack.core.derivatives.maxpool2d import MaxPool2DDerivatives\nfrom backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule\n\n\nclass DiagGGNMaxPool2d(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=MaxPool2DDerivatives())\n\n\nclass DiagGGNAvgPool2d(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=AvgPool2DDerivatives())\n" }, { "alpha_fraction": 0.6594090461730957, "alphanum_fraction": 0.6625194549560547, "avg_line_length": 43.379310607910156, "blob_id": "727fd7ae4891f5982dad1e43b0a687980d064581", "content_id": "8f8ddb33e58046843c93082b5f6e42c084cf606c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 115, "num_lines": 29, "path": "/backpack/extensions/secondorder/mngd/batchnorm1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import backpack.utils.linear as LinUtils\nfrom backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\nfrom torch import einsum\nimport torch\n\nclass MNGDBatchNorm1d(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=BatchNorm1dDerivatives(), params=[\"bias\", \"weight\"])\n\n # TODO: FIX these functions for NGD\n def weight(self, ext, module, grad_inp, grad_out, backproped):\n # dgamma = self.derivatives._weight_jac_t_mat_prod(module, grad_inp, grad_out, backproped, sum_batch=False)\n \n # # fake\n # new_bp = self.derivatives._my_jac_t_mat_prod(module, grad_inp, grad_out, backproped)\n # print('new_bp :\\n', new_bp)\n\n # return einsum(\"vni,zqi->vnzq\", (dgamma, dgamma))\n return None\n\n def bias(self, ext, module, grad_inp, grad_out, backproped):\n # dbeta = self.derivatives._bias_jac_t_mat_prod(module, grad_inp, grad_out, backproped, sum_batch=False)\n # print(torch.norm(dbeta))\n # fake\n # new_bp = self.derivatives._my_jac_t_mat_prod(module, grad_inp, grad_out, backproped)\n # print('new_bp bias:\\n', new_bp)\n # return einsum(\"vni,zqi->vnzq\", (dbeta, dbeta))\n return None" }, { "alpha_fraction": 0.5644769072532654, "alphanum_fraction": 0.5888077616691589, "avg_line_length": 22.485713958740234, "blob_id": "4edf1018c290efa158ba9ac260bd4299b5c176bb", "content_id": "e796414853b9bc500312131fcd7fef66199f3855", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 822, "license_type": "permissive", "max_line_length": 72, "num_lines": 35, "path": "/backpack/extensions/firstorder/fisher/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n Conv1d,\n Conv2d,\n Linear,\n BatchNorm1d,\n BatchNorm2d\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n\nfrom . 
import (\n conv1d,\n conv2d,\n linear,\n batchnorm1d,\n batchnorm2d\n)\n\n\nclass Fisher(BackpropExtension):\n \n\n def __init__(self, silent=False):\n self.silent = silent\n super().__init__(\n savefield=\"fisher\",\n fail_mode=\"WARNING\",\n module_exts={\n Linear: linear.FisherLinear(self.silent),\n Conv1d: conv1d.FisherConv1d(self.silent),\n Conv2d: conv2d.FisherConv2d(self.silent),\n BatchNorm1d: batchnorm1d.FisherBatchNorm1d(self.silent),\n BatchNorm2d: batchnorm2d.FisherBatchNorm2d(self.silent),\n },\n )\n" }, { "alpha_fraction": 0.619442343711853, "alphanum_fraction": 0.6232102513313293, "avg_line_length": 27.23404312133789, "blob_id": "32385d132ea4b1f74e8f51487606a0022b1b518e", "content_id": "d6ffa7388daed2d0600e3280dfda8ce1ac03e8ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "permissive", "max_line_length": 79, "num_lines": 47, "path": "/setup.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from os import path\n\nfrom setuptools import find_packages, setup\n\n# META\n##############################################################################\nAUTHORS = \"F. Dangel, F. Kunstner\"\nNAME = \"backpack-for-pytorch\"\nPACKAGES = find_packages()\n\nDESCRIPTION = \"BackPACK: Packing more into backprop\"\nLONG_DESCR = \"\"\"\n BackPACK is built on top of PyTorch.\n It efficiently computes quantities other than the gradient.\n\n Website: https://backpack.pt\n Code: https://github.com/f-dangel/backpack\n Documentation: https://readthedocs.org/projects/backpack/\n Bug reports & feature requests: https://github.com/f-dangel/backpack/issues\n \"\"\"\n\nVERSION = \"1.2.0\"\nURL = \"https://github.com/f-dangel/backpack\"\nLICENSE = \"MIT\"\n\n# DEPENDENCIES\n##############################################################################\nREQUIREMENTS_FILE = \"requirements.txt\"\nREQUIREMENTS_PATH = path.join(path.abspath(__file__), REQUIREMENTS_FILE)\n\nwith open(REQUIREMENTS_FILE) as f:\n requirements = f.read().splitlines()\n\nsetup(\n author=AUTHORS,\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCR,\n long_description_content_type=\"text/markdown\",\n install_requires=requirements,\n url=URL,\n license=LICENSE,\n packages=PACKAGES,\n zip_safe=False,\n python_requires=\">=3.6\",\n)\n" }, { "alpha_fraction": 0.6536661386489868, "alphanum_fraction": 0.7090483903884888, "avg_line_length": 31.049999237060547, "blob_id": "57e9e75a1592893722e9274b64c25ef10aa950b5", "content_id": "6b377e5a5b75100a280089498b77faaba7c468c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1282, "license_type": "permissive", "max_line_length": 70, "num_lines": 40, "path": "/.flake8", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "[flake8]\nselect = B,C,E,F,P,W,B9\nmax-line-length = 80\nmax-complexity = 10\nignore =\n\t# replaced by B950 (max-line-length + 10%)\n\tE501, # max-line-length\n\t# ignored because pytorch uses dict\n\tC408, # use {} instead of dict()\n\t# Not Black-compatible\n\tE203, # whitespace before :\n\tE231, # missing whitespace after ','\n\tW291, # trailing whitespace\n\tW503, # line break before binary operator\n\tW504, # line break after binary operator\nexclude = docs, docs_src, build, .git\n\n\n# Differences with pytorch\n#\n# \tSmaller max-line-length\n# \tEnabled max-complexity\n#\tNo flake8-mypy (T4 range)\n#\n# Set of rules ignore by pytorch, probably to get 
around the C\n#\n#\tF401 (import unused in __init__.py) not ignored\n# F403 'from module import *' used; unable to detect undefined names\n# F405 Name may be undefined, or defined from star imports: module\n# F821 Undefined name name\n# F841 Local variable name is assigned to but never used\n#\n# Pytorch ignored rules that I don't see a reason to ignore (yet?):\n#\n# E305 Expected 2 blank lines after end of function or class\n# E402 Module level import not at top of file\n# E721 Do not compare types, use 'isinstance()'\n# E741 Do not use variables named 'l', 'o', or 'i'\n# E302 Expected 2 blank lines, found 0\n#\tE303 Too many blank lines (3)\n" }, { "alpha_fraction": 0.6920328736305237, "alphanum_fraction": 0.6991782784461975, "avg_line_length": 22.325000762939453, "blob_id": "67475bfd64e576d0ee5607b04056150cee533169", "content_id": "be2cff51f9f20be51522323400bd9dbd9cefacd3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2799, "license_type": "permissive", "max_line_length": 88, "num_lines": 120, "path": "/makefile", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": ".PHONY: help\n.PHONY: black black-check flake8\n.PHONY: install install-dev install-devtools install-test install-lint install-docs\n.PHONY: test\n.PHONY: conda-env\n.PHONY: black isort format\n.PHONY: black-check isort-check format-check\n.PHONY: flake8\n.PHONY: pydocstyle-check\n.PHONY: darglint-check\n.PHONY: build-docs\n\n.DEFAULT: help\nhelp:\n\t@echo \"test\"\n\t@echo \" Run pytest on the project and report coverage\"\n\t@echo \"black\"\n\t@echo \" Run black on the project\"\n\t@echo \"black-check\"\n\t@echo \" Check if black would change files\"\n\t@echo \"flake8\"\n\t@echo \" Run flake8 on the project\"\n\t@echo \"pydocstyle-check\"\n\t@echo \" Run pydocstyle on the project\"\n\t@echo \"darglint-check\"\n\t@echo \" Run darglint on the project\"\n\t@echo \"install\"\n\t@echo \" Install backpack and dependencies\"\n\t@echo \"install-dev\"\n\t@echo \" Install all development tools\"\n\t@echo \"install-lint\"\n\t@echo \" Install only the linter tools (included in install-dev)\"\n\t@echo \"install-test\"\n\t@echo \" Install only the testing tools (included in install-dev)\"\n\t@echo \"install-docs\"\n\t@echo \" Install only the tools to build/view the docs (included in install-dev)\"\n\t@echo \"conda-env\"\n\t@echo \" Create conda environment 'backpack' with dev setup\"\n\t@echo \"build-docs\"\n\t@echo \" Build the docs\"\n###\n# Test coverage\ntest:\n\t@pytest -vx --cov=backpack .\n\n###\n# Linter and autoformatter\n\n# Uses black.toml config instead of pyproject.toml to avoid pip issues. See\n# - https://github.com/psf/black/issues/683\n# - https://github.com/pypa/pip/pull/6370\n# - https://pip.pypa.io/en/stable/reference/pip/#pep-517-and-518-support\nblack:\n\t@black . --config=black.toml\n\nblack-check:\n\t@black . 
--config=black.toml --check\n\nflake8:\n\t@flake8 .\n\npydocstyle-check:\n\t@pydocstyle --count .\n\ndarglint-check:\n\t@darglint --verbosity 2 .\n\nisort:\n\t@isort --apply\n\nisort-check:\n\t@isort --check\n\nformat:\n\t@make black\n\t@make isort\n\t@make black-check\n\nformat-check: black-check isort-check pydocstyle-check darglint-check\n\n\n###\n# Installation\n\ninstall:\n\t@pip install -r requirements.txt\n\t@pip install .\n\ninstall-lint:\n\t@pip install -r requirements/lint.txt\n\ninstall-test:\n\t@pip install -r requirements/test.txt\n\ninstall-docs:\n\t@pip install -r requirements/docs.txt\n\ninstall-devtools:\n\t@echo \"Install dev tools...\"\n\t@pip install -r requirements-dev.txt\n\ninstall-dev: install-devtools\n\t@echo \"Install dependencies...\"\n\t@pip install -r requirements.txt\n\t@echo \"Uninstall existing version of backpack...\"\n\t@pip uninstall backpack-for-pytorch\n\t@echo \"Install backpack in editable mode...\"\n\t@pip install -e .\n\t@echo \"Install pre-commit hooks...\"\n\t@pre-commit install\n\n###\n# Conda environment\nconda-env:\n\t@conda env create --file .conda_env.yml\n\n###\n# Documentation\nbuild-docs:\n\t@cd docs_src/rtd && make html\n" }, { "alpha_fraction": 0.29629629850387573, "alphanum_fraction": 0.5185185074806213, "avg_line_length": 26, "blob_id": "da10fd87c002f4b868b01f7a35ee25353e31cc59", "content_id": "9553d73db1ef82af1952ff26292a2d6d8156ada0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, "license_type": "permissive", "max_line_length": 29, "num_lines": 2, "path": "/requirements.txt", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "torch >= 1.6.0, < 2.0.0\ntorchvision >= 0.7.0, < 1.0.0\n" }, { "alpha_fraction": 0.7342857122421265, "alphanum_fraction": 0.7514285445213318, "avg_line_length": 42.75, "blob_id": "94080c6d3d611922283165b54417337fe7211f2c", "content_id": "3526a61864c40b3acc26169b9286ec7fd93415d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "permissive", "max_line_length": 89, "num_lines": 8, "path": "/backpack/extensions/firstorder/fisher_block/conv1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv1d import Conv1DDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\n\n\nclass FisherBlockConv1d(FisherBlockBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n super().__init__(derivatives=Conv1DDerivatives(), params=[\"bias\", \"weight\"])\n" }, { "alpha_fraction": 0.4558853507041931, "alphanum_fraction": 0.4696008265018463, "avg_line_length": 35.63909912109375, "blob_id": "d1dd7725a0227aa76a640db473df029ec7ede5af", "content_id": "5fa610db450495fd6e84adf10e001044464f111f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4885, "license_type": "permissive", "max_line_length": 92, "num_lines": 133, "path": "/backpack/extensions/firstorder/fisher_block/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\nfrom torch import einsum, matmul, sum, numel, sqrt, norm, eye, randint, cumsum, diag\nfrom torch.nn import Unfold, MaxPool2d, AvgPool2d\nfrom torch.nn.functional import conv1d, conv2d, conv3d\nfrom 
backpack.utils.ein import eingroup\nfrom backpack.utils.conv import unfold_func\n\nfrom torch.linalg import inv, svd\n\n\n# import numpy as np\n# import seaborn as sns\n# import matplotlib.pylab as plt\nMODE = 0\nclass FisherBlockConv2d(FisherBlockBase):\n def __init__(self, damping=1.0, low_rank='false', gamma=0.95, memory_efficient='false'):\n self.damping = damping\n self.low_rank = low_rank\n self.gamma = gamma\n self.memory_efficient = memory_efficient\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, bpQuantities):\n if MODE == 0: # my implementation\n\n\n grad = module.weight.grad\n # print(grad.shape)\n grad_reshape = grad.reshape(grad.shape[0], -1)\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n \n\n input = unfold_func(module)(module.input0)\n I = input\n grad_output_viewed = g_out_sc.reshape(g_out_sc.shape[0], g_out_sc.shape[1], -1)\n G = grad_output_viewed\n\n N = I.shape[0]\n K = I.shape[1]\n L = I.shape[2]\n M = G.shape[1]\n # print(N,K,L,M)\n if (L*L) * (K + M) < K * M :\n II = einsum(\"nkl,qkp->nqlp\", (I, I))\n GG = einsum(\"nml,qmp->nqlp\", (G, G))\n out = einsum('nqlp->nq', II * GG) \n x1 = einsum(\"nkl,mk->nml\", (I, grad_reshape))\n grad_prod = einsum(\"nml,nml->n\", (x1, G)) \n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"n,nml->nml\", (v, G))\n gv = einsum(\"nml,nkl->mk\", (gv, I))\n gv = gv.view_as(grad)\n gv = gv / n\n\n module.NGD_inv = NGD_inv\n if self.memory_efficient == 'true':\n module.I = module.input0\n else:\n module.I = I\n module.G = G\n \n else:\n AX = einsum(\"nkl,nml->nkm\", (I, G))\n AX_ = AX.reshape(n , -1)\n out = matmul(AX_, AX_.t()) \n grad_prod = einsum(\"nkm,mk->n\", (AX, grad_reshape))\n\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"nkm,n->mk\", (AX, v))\n gv = gv.view_as(grad)\n gv = gv / n\n\n module.NGD_inv = NGD_inv\n module.AX = AX\n\n ### testing low-rank\n if self.low_rank == 'true':\n V, S, U = svd(AX_.T, compute_uv=True, full_matrices=False)\n U = U.t()\n V = V.t()\n \n cs = cumsum(S, dim = 0)\n sum_s = sum(S)\n index = ((cs - self.gamma * sum_s) <= 0).sum()\n U = U[:, 0:index]\n S = S[0:index]\n V = V[0:index, :]\n \n module.U = U\n module.S = S\n module.V = V\n \n update = (grad - gv)/self.damping\n return (out, grad_prod, update)\n elif MODE == 2:\n # st = time.time()\n\n A = module.input0\n n = A.shape[0]\n p = 1\n M = g_out[0]\n\n M = M.reshape( M.shape[1] * M.shape[0], M.shape[2], M.shape[3]).unsqueeze(1)\n A = A.permute(1 ,0, 2, 3)\n output = conv2d(A, M, groups = n, padding = (p,p))\n output = output.permute(1, 0, 2, 3)\n output = output.reshape(n, -1)\n K_torch = matmul(output, output.t())\n # en = time.time()\n # print('Elapsed Time Conv2d Mode 2:', en - st)\n\n return K_torch\n\n \n def bias(self, ext, module, g_inp, g_out, bpQuantities):\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n # grad_prod = einsum(\"nchw,c->n\", (g_out_sc, grad))\n\n # out = einsum(\"nchw,lchw->nl\", g_out_sc, g_out_sc)\n out = 0\n grad_prod = 0\n update = grad\n return (out, grad_prod, update)\n \n\n\n\n" }, { "alpha_fraction": 0.8195121884346008, "alphanum_fraction": 0.8195121884346008, "avg_line_length": 40, "blob_id": 
"3fb7fbc2ff707535b5d8b9aac0e6d503510f3726", "content_id": "478d7ace80d8bfe8b2ac418531fbb1c735078813", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 205, "license_type": "permissive", "max_line_length": 80, "num_lines": 5, "path": "/test/pytest.ini", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "[pytest]\noptional_tests:\n montecarlo: slow tests using low-precision allclose after Monte-Carlo sampling\nfilterwarnings = \n ignore:cannot collect test class 'TestProblem':pytest.PytestCollectionWarning:\n" }, { "alpha_fraction": 0.5664539337158203, "alphanum_fraction": 0.587402880191803, "avg_line_length": 29.533897399902344, "blob_id": "9943d546bac35654ea2def2c469a2aa1b24254cc", "content_id": "6ce2dfd98363406530cc3ae811a8df23e45350e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7208, "license_type": "permissive", "max_line_length": 111, "num_lines": 236, "path": "/mngd_2ndorder.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision\nfrom backpack import backpack, extend\nimport torch.optim as optim\nfrom backpack.extensions import MNGD\nfrom torchsummary import summary\nimport time\nimport math\n\n# fixing HTTPS issue on Colab\nfrom six.moves import urllib\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nurllib.request.install_opener(opener)\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pylab as plt\n\n# torch.set_default_dtype(torch.float64)\n\n# Hyperparameters\nBATCH_SIZE = 128\nEPOCHS = 1\nPLOT = False\nnum_classes = 10\nSTEP_SIZE = 0.1\nalpha_lm = 10\ntaw = 0.01\nMAX_ITER = 60000//BATCH_SIZE\ntorch.manual_seed(0)\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('Selected Device:', device)\nprint('BATCH_SIZE:', BATCH_SIZE)\n\nmnist_loader = torch.utils.data.dataloader.DataLoader(\n torchvision.datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,)\n )\n ])),\n batch_size=BATCH_SIZE,\n shuffle=True\n)\n\n\n\n\n# ##### base model from backpack website:\n# model = torch.nn.Sequential(\n# torch.nn.Conv2d(1, 20, 3, 1, padding = (1,1)),\n# # torch.nn.BatchNorm2d(2),\n# torch.nn.ReLU(),\n# torch.nn.Conv2d(20, 20, 3, 1, padding = (1,1)),\n# torch.nn.ReLU(),\n# torch.nn.Conv2d(20, 20, 3, 1, padding = (1,1)),\n# torch.nn.ReLU(),\n# torch.nn.Conv2d(20, 20, 3, 1, padding = (1,1)),\n# torch.nn.ReLU(),\n# torch.nn.Conv2d(20, 2, 3, 1, padding = (1,1)),\n# torch.nn.ReLU(),\n# torch.nn.Flatten(), \n# torch.nn.Linear(28*28*2, 10),\n# ).to(device)\n\n\n\n##### fully connected network. 
Test for linear timings.\nmodel = torch.nn.Sequential(\n torch.nn.Flatten(), \n torch.nn.Linear(28*28, 100),\n # torch.nn.ReLU(),\n # torch.nn.Linear(100, 100),\n # torch.nn.ReLU(),\n # torch.nn.Linear(100, 100),\n # torch.nn.ReLU(),\n # torch.nn.Linear(100, 100),\n torch.nn.ReLU(),\n torch.nn.Linear(100, 10)\n).to(device)\n\nsummary(model, ( 1, 28, 28))\n\nloss_function = torch.nn.CrossEntropyLoss()\nloss_function_none = torch.nn.CrossEntropyLoss(reduction='none')\n\ndef get_accuracy(output, targets):\n \"\"\"Helper function to print the accuracy\"\"\"\n predictions = output.argmax(dim=1, keepdim=True).view_as(targets)\n return predictions.eq(targets).float().mean().item()\n\n\n\nextend(model)\nextend(loss_function)\nextend(loss_function_none)\n\n\noptimizer = optim.SGD(model.parameters(), lr=STEP_SIZE)\n\n\ndef get_diff(A, B):\n ''' returns relative error between A and B\n '''\n # return torch.norm(A - B)/torch.norm(A)\n return torch.norm(A - B)/torch.norm(A)\n\n\ndef optimal_JJT(outputs, targets):\n jac_list = 0\n vjp = 0\n grads = []\n\n \n with backpack(MNGD()):\n loss = loss_function(outputs, targets)\n loss.backward(retain_graph=True)\n\n for name, param in model.named_parameters():\n mngd_vals = param.mngd\n grads.append(param.grad.reshape(1, -1))\n # print(mngd_vals)\n # print(param.grad) \n grads = torch.cat(grads, 1) \n\n return loss, grads\n\n\nacc_list = []\ntime_list = []\nloss_list = []\nepoch_time_list = []\nstart_time= time.time()\n# loss_prev = 0.\n# taylor_appx_prev = 0.\n\nfor epoch in range(EPOCHS):\n start_time_epoch = time.time()\n for batch_idx, (inputs, targets) in enumerate(mnist_loader):\n\n # print(model._backward_hooks)\n\n # for child in model.children():\n # d = child._backward_hooks\n # for item in d:\n # print(d[item])\n # print('CCC')\n\n DAMPING = alpha_lm + taw\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = model(inputs)\n accuracy = get_accuracy(outputs, targets)\n\n ######## calling individual function for JJT computation\n ### Our extension\n\n # first compute the original gradient\n optimizer.zero_grad()\n # loss = loss_function(outputs, targets)\n # loss.backward(retain_graph=True)\n # loss_org = loss.item()\n\n # grad_org = []\n # grad_dict = {}\n # for name, param in model.named_parameters():\n # grad_org.append(param.grad.reshape(1, -1))\n # grad_dict[name] = param.grad.clone()\n\n # grad_org = torch.cat(grad_org, 1)\n ###### now we have to compute the true fisher\n # with torch.no_grad():\n # sampled_y = torch.multinomial(torch.nn.functional.softmax(outputs, dim=1),1).squeeze().to(device)\n \n loss_org, grad_ = optimal_JJT(outputs, targets)\n # NGD_kernel, vjp = optimal_JJT(outputs, sampled_y, grad_org, acc_test, acc_hard_test)\n # NGD_inv = torch.linalg.inv(NGD_kernel + DAMPING * torch.eye(BATCH_SIZE))\n # v = torch.matmul(NGD_inv, vjp.unsqueeze(1))\n\n ####### rescale v:\n # v_sc = v/(BATCH_SIZE * DAMPING)\n\n\n\n # last part of SMW formula\n # grad_new = []\n # for name, param in model.named_parameters():\n # param.grad = grad_dict[name] \n # grad_new.append(param.grad.reshape(1, -1))\n # grad_new = torch.cat(grad_new, 1) \n optimizer.step()\n \n \n if batch_idx % 10 == 0:\n # print('real %f appx %f first order %f' % (loss_org, taylor_appx, loss_org + STEP_SIZE * gp))\n # print('damping:', DAMPING)\n # if batch_idx > 0:\n # print('ro:', ro)\n acc_list.append(accuracy)\n time_list.append(time.time() - start_time)\n loss_list.append(loss_org)\n \n # print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap))\n # 
print('opt vs backpack error:', get_diff(JJT_backpack, JJT_opt))\n # print('opt vs linear error:', get_diff(JJT_opt, JJT_linear))\n # print('opt vs conv error:', get_diff(JJT_opt, JJT_conv))\n # print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk))\n # print('opt vs fused error:', get_diff(JJT_opt, JJT_fused))\n # print(torch.allclose(JJT_naive_seq, JJT_opt) )\n # print('Jacobian Computation Time [Sequential]:', time_seq)\n # print('Jacobian Computation Time [Optimal]:', time_opt)\n # print('Jacobian Computation Time [VMAP]:', time_vmap)\n # print('Speedup over sequential:', time_seq/ time_opt)\n print('Elapsed time:', time.time() - start_time_epoch)\n print(\n \"Iteration %3.d/%d \" % (batch_idx, MAX_ITER) +\n \"Minibatch Loss %.3f \" % (loss_org) +\n \"Accuracy %.0f\" % (accuracy * 100) + \"%\"\n )\n\n if batch_idx >= MAX_ITER:\n break\n epoch_time = time.time() - start_time_epoch\n epoch_time_list.append(epoch_time)\n print('Elapsed time for epoch %d time: %.3f' % (epoch , epoch_time))\n\nprint('Epoch times : ', epoch_time_list)\nprint('Time(s) ACC. LOSS')\nfor i in range(len(time_list)):\n print('%.3f, %.3f, %.3f' %(time_list[i], acc_list[i], loss_list[i].item()))\n\n\n" }, { "alpha_fraction": 0.5952169895172119, "alphanum_fraction": 0.6200177073478699, "avg_line_length": 27.94871711730957, "blob_id": "718a6d009a2c36b75cfbed490810dd0770f4b852", "content_id": "aac17cd81c0a4543d8e78522a7ac2a5f804cc089", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1129, "license_type": "permissive", "max_line_length": 113, "num_lines": 39, "path": "/backpack/extensions/firstorder/fisher_block/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n Conv1d,\n Conv2d,\n Linear,\n BatchNorm1d,\n BatchNorm2d\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n\nfrom . 
import (\n conv1d,\n conv2d,\n linear,\n batchnorm1d,\n batchnorm2d\n)\n\n\nclass FisherBlock(BackpropExtension):\n \n\n def __init__(self, damping=1.0, alpha=0.95, low_rank='false', gamma=0.95, memory_efficient='false'):\n self.gamma = gamma\n self.damping = damping\n self.alpha =alpha\n self.low_rank = low_rank\n self.memory_efficient = memory_efficient\n super().__init__(\n savefield=\"fisher_block\",\n fail_mode=\"WARNING\",\n module_exts={\n Linear: linear.FisherBlockLinear(self.damping, self.alpha),\n Conv1d: conv1d.FisherBlockConv1d(self.damping),\n Conv2d: conv2d.FisherBlockConv2d(self.damping, self.low_rank, self.gamma, self.memory_efficient),\n BatchNorm1d: batchnorm1d.FisherBlockBatchNorm1d(self.damping),\n BatchNorm2d: batchnorm2d.FisherBlockBatchNorm2d(self.damping),\n },\n )\n" }, { "alpha_fraction": 0.5790554285049438, "alphanum_fraction": 0.5927447080612183, "avg_line_length": 30.042552947998047, "blob_id": "c0244d3201e5e9c39a4b425d720ff1a3b10a21d9", "content_id": "20ffacfabaf7aacc299de513c3bf1ad43ae91545", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1461, "license_type": "permissive", "max_line_length": 89, "num_lines": 47, "path": "/backpack/extensions/secondorder/mngd/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n AvgPool2d,\n Conv2d,\n CrossEntropyLoss,\n Dropout,\n Flatten,\n Linear,\n MaxPool2d,\n MSELoss,\n ReLU,\n Sigmoid,\n Tanh,\n ZeroPad2d,\n BatchNorm1d\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n# from backpack.extensions.secondorder.hbp import LossHessianStrategy\n\n# from . import activations, conv2d, dropout, flatten, linear, losses, padding, pooling\nfrom . import activations, linear, losses, conv2d, flatten, pooling, dropout, batchnorm1d\n\n\nclass MNGD(BackpropExtension):\n def __init__(self, savefield=None):\n if savefield is None:\n savefield = \"mngd\"\n\n super().__init__(\n savefield=savefield,\n fail_mode=\"ERROR\",\n module_exts={\n # MSELoss: losses.DiagGGNMSELoss(),\n CrossEntropyLoss: losses.MNGDLoss(),\n Linear: linear.MNGDLinear(),\n MaxPool2d: pooling.MNGDMaxPool2d(),\n AvgPool2d: pooling.MNGDAvgPool2d(),\n # ZeroPad2d: padding.DiagGGNZeroPad2d(),\n Conv2d: conv2d.MNGDConv2d(),\n Dropout: dropout.MNGDDropout(),\n Flatten: flatten.MNGDFlatten(),\n ReLU: activations.MNGDReLU(),\n Sigmoid: activations.MNGDSigmoid(),\n BatchNorm1d: batchnorm1d.MNGDBatchNorm1d()\n # Tanh: activations.DiagGGNTanh(),\n },\n )\n\n\n" }, { "alpha_fraction": 0.7604690194129944, "alphanum_fraction": 0.7705192565917969, "avg_line_length": 31.2702693939209, "blob_id": "6d382ce9e7729f365a53c9f34fa0fc5ebbb1fd27", "content_id": "d2a928fab399ba526449eebee4d5505464611c9d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1194, "license_type": "permissive", "max_line_length": 75, "num_lines": 37, "path": "/test/extensions/firstorder/batch_l2_grad/test_batchl2grad.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"Test class for module Batch_l2_grad (L2 norm of batch gradients) \nfrom `backpack.core.extensions.firstorder`\n\nTest individual gradients for the following layers:\n- L2 norm of batch gradients of linear layers\n- L2 norm of batch gradients of convolutional layers\n\n\"\"\"\nfrom test.automated_test import check_sizes_and_values\nfrom test.extensions.problem import make_test_problems\nfrom 
test.extensions.implementation.autograd import AutogradExtensions\nfrom test.extensions.implementation.backpack import BackpackExtensions\nfrom test.extensions.firstorder.batch_l2_grad.batchl2grad_settings import (\n BATCHl2GRAD_SETTINGS,\n)\n\nimport pytest\n\n\nPROBLEMS = make_test_problems(BATCHl2GRAD_SETTINGS)\nIDS = [problem.make_id() for problem in PROBLEMS]\n\n\[email protected](\"problem\", PROBLEMS, ids=IDS)\ndef test_batch_l2_grad(problem):\n \"\"\"Test l2 norm of individual gradients\n\n Args:\n problem (ExtensionsTestProblem): Problem for extension test.\n \"\"\"\n problem.set_up()\n\n backpack_res = BackpackExtensions(problem).batch_l2_grad()\n autograd_res = AutogradExtensions(problem).batch_l2_grad()\n\n check_sizes_and_values(autograd_res, backpack_res)\n problem.tear_down()\n" }, { "alpha_fraction": 0.6526586413383484, "alphanum_fraction": 0.6526586413383484, "avg_line_length": 37.86666488647461, "blob_id": "d418856f21324f4b924257ff4ee4293b0bcab0d0", "content_id": "9b6a823ebba43206f4428b579d64c5ea23d83e9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "permissive", "max_line_length": 84, "num_lines": 30, "path": "/backpack/extensions/secondorder/diag_hessian/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\n\nimport backpack.utils.linear as LinUtils\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.secondorder.diag_hessian.diag_h_base import DiagHBaseModule\n\n\nclass DiagHLinear(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n sqrt_h_outs = backproped[\"matrices\"]\n sqrt_h_outs_signs = backproped[\"signs\"]\n h_diag = torch.zeros_like(module.bias)\n\n for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):\n h_diag_curr = LinUtils.extract_bias_diagonal(module, h_sqrt)\n h_diag.add_(sign * h_diag_curr)\n return h_diag\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n sqrt_h_outs = backproped[\"matrices\"]\n sqrt_h_outs_signs = backproped[\"signs\"]\n h_diag = torch.zeros_like(module.weight)\n\n for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):\n h_diag_curr = LinUtils.extract_weight_diagonal(module, h_sqrt)\n h_diag.add_(sign * h_diag_curr)\n return h_diag\n" }, { "alpha_fraction": 0.7275985479354858, "alphanum_fraction": 0.7275985479354858, "avg_line_length": 30, "blob_id": "387f73e71deb9a343d8b08118dc7d0606dad50db", "content_id": "1d27984f9ae46f1614b0eebef42e382d6a1678e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "permissive", "max_line_length": 73, "num_lines": 18, "path": "/backpack/extensions/secondorder/mngd/activations.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.relu import ReLUDerivatives\nfrom backpack.core.derivatives.sigmoid import SigmoidDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\n\n\nclass MNGDReLU(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=ReLUDerivatives())\n\n\nclass MNGDSigmoid(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=SigmoidDerivatives())\n\n\n# class DiagGGNTanh(DiagGGNBaseModule):\n# def __init__(self):\n# super().__init__(derivatives=TanhDerivatives())\n" }, { "alpha_fraction": 0.5625, 
"alphanum_fraction": 0.572265625, "avg_line_length": 35.35503005981445, "blob_id": "c864bed078b6759ae6bb82b89f476d74dcc538b9", "content_id": "0a261b880de5ba8cdf8aedee728c1d1da9c62bfe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6144, "license_type": "permissive", "max_line_length": 84, "num_lines": 169, "path": "/backpack/core/derivatives/convnd.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from numpy import prod\nfrom torch import einsum\nfrom torch.nn import Conv1d, Conv2d, Conv3d\nfrom torch.nn.grad import _grad_input_padding\nfrom torch.nn.functional import conv1d, conv2d, conv3d\nfrom torch.nn.functional import conv_transpose1d, conv_transpose2d, conv_transpose3d\n\nfrom backpack.core.derivatives.basederivatives import BaseParameterDerivatives\nfrom backpack.utils import conv as convUtils\nfrom backpack.utils.ein import eingroup\n\n\nclass ConvNDDerivatives(BaseParameterDerivatives):\n def __init__(self, N):\n if N == 1:\n self.module = Conv1d\n self.dim_text = \"x\"\n self.conv_func = conv1d\n self.conv_transpose_func = conv_transpose1d\n elif N == 2:\n self.module = Conv2d\n self.dim_text = \"x,y\"\n self.conv_func = conv2d\n self.conv_transpose_func = conv_transpose2d\n elif N == 3:\n self.module = Conv3d\n self.dim_text = \"x,y,z\"\n self.conv_func = conv3d\n self.conv_transpose_func = conv_transpose3d\n else:\n raise ValueError(\"{}-dimensional Conv. is not implemented.\".format(N))\n self.conv_dims = N\n\n def hessian_is_zero(self):\n return True\n\n def get_unfolded_input(self, module):\n return convUtils.unfold_by_conv(module.input0, module)\n\n def _jac_mat_prod(self, module, g_inp, g_out, mat):\n dims = self.dim_text\n mat_as_conv = eingroup(\"v,n,c,{}->vn,c,{}\".format(dims, dims), mat)\n jmp_as_conv = self.conv_func(\n mat_as_conv,\n module.weight.data,\n stride=module.stride,\n padding=module.padding,\n dilation=module.dilation,\n groups=module.groups,\n )\n return self.reshape_like_output(jmp_as_conv, module)\n\n def _jac_t_mat_prod(self, module, g_inp, g_out, mat):\n dims = self.dim_text\n mat_as_conv = eingroup(\"v,n,c,{}->vn,c,{}\".format(dims, dims), mat)\n jmp_as_conv = self.__jac_t(module, mat_as_conv)\n return self.reshape_like_input(jmp_as_conv, module)\n\n def __jac_t(self, module, mat):\n input_size = list(module.input0.size())\n input_size[0] = mat.size(0)\n\n grad_padding = _grad_input_padding(\n grad_output=mat,\n input_size=input_size,\n stride=module.stride,\n padding=module.padding,\n kernel_size=module.kernel_size,\n dilation=module.dilation,\n )\n\n jac_t_mat = self.conv_transpose_func(\n input=mat,\n weight=module.weight,\n bias=None,\n stride=module.stride,\n padding=module.padding,\n output_padding=grad_padding,\n groups=module.groups,\n dilation=module.dilation,\n )\n return jac_t_mat\n\n def _bias_jac_mat_prod(self, module, g_inp, g_out, mat):\n \"\"\"mat has shape [V, C_out]\"\"\"\n # Expand batch dimension\n jac_mat = mat.unsqueeze(1)\n # Expand data dimensions\n for i in range(3, len(module.output_shape) + 1):\n jac_mat = jac_mat.unsqueeze(i)\n\n expand_shape = [-1, module.output_shape[0], -1, *module.output_shape[2:]]\n\n return jac_mat.expand(*expand_shape)\n\n def _bias_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):\n axes = list(range(3, len(module.output_shape) + 1))\n if sum_batch:\n axes = [1] + axes\n return mat.sum(axes)\n\n def _weight_jac_mat_prod(self, module, g_inp, g_out, mat):\n if module.groups != 1:\n raise 
NotImplementedError(\"Groups greater than 1 are not supported yet\")\n\n dims = self.dim_text\n dims_joined = dims.replace(\",\", \"\")\n\n jac_mat = eingroup(\"v,o,i,{}->v,o,i{}\".format(dims, dims_joined), mat)\n X = self.get_unfolded_input(module)\n jac_mat = einsum(\"nij,vki->vnkj\", X, jac_mat)\n return self.reshape_like_output(jac_mat, module)\n\n def _weight_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):\n if module.groups != 1:\n raise NotImplementedError(\"Groups greater than 1 are not supported yet\")\n\n V = mat.shape[0]\n N, C_out = module.output_shape[0], module.output_shape[1]\n C_in = module.input0_shape[1]\n C_in_axis = 1\n N_axis = 0\n dims = self.dim_text\n\n repeat_pattern = [1, C_in] + [1 for _ in range(self.conv_dims)]\n mat = eingroup(\"v,n,c,{}->vn,c,{}\".format(dims, dims), mat)\n mat = mat.repeat(*repeat_pattern)\n mat = eingroup(\"a,b,{}->ab,{}\".format(dims, dims), mat)\n mat = mat.unsqueeze(C_in_axis)\n\n repeat_pattern = [1, V] + [1 for _ in range(self.conv_dims)]\n input = eingroup(\"n,c,{}->nc,{}\".format(dims, dims), module.input0)\n input = input.unsqueeze(N_axis)\n input = input.repeat(*repeat_pattern)\n\n grad_weight = self.conv_func(\n input,\n mat,\n bias=None,\n stride=module.dilation,\n padding=module.padding,\n dilation=module.stride,\n groups=C_in * N * V,\n ).squeeze(0)\n\n for dim in range(self.conv_dims):\n axis = dim + 1\n size = module.weight.shape[2 + dim]\n grad_weight = grad_weight.narrow(axis, 0, size)\n\n sum_dim = \"\" if sum_batch else \"n,\"\n eingroup_eq = \"vnio,{}->v,{}o,i,{}\".format(dims, sum_dim, dims)\n\n return eingroup(\n eingroup_eq, grad_weight, dim={\"v\": V, \"n\": N, \"i\": C_in, \"o\": C_out}\n )\n\n def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):\n in_features = int(prod(module.input0.size()[1:]))\n out_features = int(prod(module.output.size()[1:]))\n\n mat = mat.reshape(out_features, *module.output.size()[1:])\n jac_t_mat = self.__jac_t(module, mat).reshape(out_features, in_features)\n\n mat_t_jac = jac_t_mat.t().reshape(in_features, *module.output.size()[1:])\n jac_t_mat_t_jac = self.__jac_t(module, mat_t_jac)\n jac_t_mat_t_jac = jac_t_mat_t_jac.reshape(in_features, in_features)\n\n return jac_t_mat_t_jac.t()\n" }, { "alpha_fraction": 0.5878918170928955, "alphanum_fraction": 0.6023355722427368, "avg_line_length": 36.82558059692383, "blob_id": "71c75160545b7cb0ef0a6d67827cac4fdcc2dc00", "content_id": "7aa42ba0f0bcf84a950c225bf51b21f6a3c948a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3254, "license_type": "permissive", "max_line_length": 98, "num_lines": 86, "path": "/backpack/utils/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum\n# TODO: remove\nfrom torch import rand\nimport opt_einsum as oe\nfrom torch import zeros\nfrom torch.nn.functional import dropout\n\nimport torch\nimport numpy as np\ndef extract_weight_diagonal(module, backproped):\n return einsum(\"vno,ni->oi\", (backproped ** 2, module.input0 ** 2))\n\n\ndef extract_bias_diagonal(module, backproped):\n return einsum(\"vno->o\", backproped ** 2)\n\n# TODO: Add support for NGD here\ndef extract_weight_ngd(module, backproped, MODE):\n\t#### exact methods ####\n\t# test: naive method plus\n # A = einsum(\"vno,ni->vnoi\", (backproped, module.input0))\n # return einsum(\"vnoi,kloi->vnkl\", (A, A))\n\n # test: me plus [GOLD]\n if MODE == -1: # silent mode to avoid doing any extra work here, only return 0\n v = 
backproped.shape[0]\n n = backproped.shape[1]\n return zeros(v*n,v*n).to(module.input0.device)\n elif MODE == 7: # test the order\n B = einsum(\"ni,li->nl\", (module.input0, module.input0)) \n # print('B', B) \n A = einsum(\"vno,klo->vnkl\", (backproped, backproped))\n # print('A', A)\n return einsum(\"vnkl,nl->vnkl\", (A, B))\n elif MODE == 17: # add dropout in backward pass for large linear layers\n # this is a sampling technique\n inp = module.input0\n l = inp.shape[1]\n prob = 0.1\n l_new = int(np.floor(prob * l))\n\n # print('input to linear layer before droput:', inp.shape)\n Borg = einsum(\"ni,li->nl\", (inp, inp)) \n\n if inp.shape[1] > 7000:\n inp = inp[:, torch.randint(l, (l_new,))] \n\n B = einsum(\"ni,li->nl\", (inp, inp)) / ( prob)\n # print(torch.norm(B - Borg)/torch.norm(Borg))\n\n A = einsum(\"vno,klo->vnkl\", (backproped, backproped))\n return einsum(\"vnkl,nl->vnkl\", (A, B))\n elif MODE == 13: # testing block diagonal version\n B = einsum(\"ni,li->nl\", (module.input0, module.input0)) \n A = einsum(\"vno,vlo->vnl\", (backproped, backproped))\n return einsum(\"vnl,nl->vnl\", (A, B))\n else:\n B = einsum(\"ni,li->nl\", (module.input0, module.input0))\t\n A = einsum(\"vno,klo->vnkl\", (backproped, backproped))\n return einsum(\"vnkl,nl->vnkl\", (A, B))\n\n # test: me plus plus [SILVER]\n # A = einsum(\"ni,li,vno,klo->vnkl\", (module.input0, module.input0, backproped, backproped))\n # return A\n\n # test: opt_einsum\n # A = oe.contract(\"ni,li,vno,klo->vnkl\", module.input0, module.input0, backproped, backproped)\n # return A\n\n #### extra approximations ####\n # test: only diagonals:\n # A = einsum(\"vno,ni->vnoi\", (backproped ** 2, module.input0 ** 2))\n # return einsum(\"vnoi->vn\", A)\n\ndef extract_bias_ngd(module, backproped, MODE):\n if MODE == -1: # silent mode, only backpropagating Jacobians\n v = backproped.shape[0]\n n = backproped.shape[1]\n return zeros(v*n,v*n).to(module.input0.device)\n elif MODE == 7 or MODE == 17: # test the order\n return einsum(\"vno,klo->vnkl\", backproped, backproped)\n elif MODE == 13: # test the block version\n return einsum(\"vno,vlo->vnl\", backproped, backproped)\n else: # normal mode\n return einsum(\"vno,klo->vnkl\", backproped, backproped)\n # return einsum(\"vno->vn\", backproped ** 2)\n\n" }, { "alpha_fraction": 0.6207471489906311, "alphanum_fraction": 0.6257504820823669, "avg_line_length": 29.59183692932129, "blob_id": "ab6b7bfc56350e697b52d42b14597e60fcbe763e", "content_id": "4f31f93367f8cb6188de504ad1f1452d65312e4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2998, "license_type": "permissive", "max_line_length": 88, "num_lines": 98, "path": "/backpack/extensions/secondorder/diag_ggn/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n AvgPool2d,\n Conv2d,\n CrossEntropyLoss,\n Dropout,\n Flatten,\n Linear,\n MaxPool2d,\n MSELoss,\n ReLU,\n Sigmoid,\n Tanh,\n ZeroPad2d,\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\nfrom backpack.extensions.secondorder.hbp import LossHessianStrategy\n\nfrom . 
import activations, conv2d, dropout, flatten, linear, losses, padding, pooling\n\n\nclass DiagGGN(BackpropExtension):\n VALID_LOSS_HESSIAN_STRATEGIES = [\n LossHessianStrategy.EXACT,\n LossHessianStrategy.SAMPLING,\n ]\n\n def __init__(self, loss_hessian_strategy=LossHessianStrategy.EXACT, savefield=None):\n if savefield is None:\n savefield = \"diag_ggn\"\n if loss_hessian_strategy not in self.VALID_LOSS_HESSIAN_STRATEGIES:\n raise ValueError(\n \"Unknown hessian strategy: {}\".format(loss_hessian_strategy)\n + \"Valid strategies: [{}]\".format(self.VALID_LOSS_HESSIAN_STRATEGIES)\n )\n\n self.loss_hessian_strategy = loss_hessian_strategy\n super().__init__(\n savefield=savefield,\n fail_mode=\"ERROR\",\n module_exts={\n MSELoss: losses.DiagGGNMSELoss(),\n CrossEntropyLoss: losses.DiagGGNCrossEntropyLoss(),\n Linear: linear.DiagGGNLinear(),\n MaxPool2d: pooling.DiagGGNMaxPool2d(),\n AvgPool2d: pooling.DiagGGNAvgPool2d(),\n ZeroPad2d: padding.DiagGGNZeroPad2d(),\n Conv2d: conv2d.DiagGGNConv2d(),\n Dropout: dropout.DiagGGNDropout(),\n Flatten: flatten.DiagGGNFlatten(),\n ReLU: activations.DiagGGNReLU(),\n Sigmoid: activations.DiagGGNSigmoid(),\n Tanh: activations.DiagGGNTanh(),\n },\n )\n\n\nclass DiagGGNExact(DiagGGN):\n \"\"\"\n Diagonal of the Generalized Gauss-Newton/Fisher.\n Uses the exact Hessian of the loss w.r.t. the model output.\n\n Stores the output in :code:`diag_ggn_exact`,\n has the same dimensions as the gradient.\n\n For a faster but less precise alternative,\n see :py:meth:`backpack.extensions.DiagGGNMC`.\n\n \"\"\"\n\n def __init__(self):\n super().__init__(\n loss_hessian_strategy=LossHessianStrategy.EXACT, savefield=\"diag_ggn_exact\"\n )\n\n\nclass DiagGGNMC(DiagGGN):\n \"\"\"\n Diagonal of the Generalized Gauss-Newton/Fisher.\n Uses a Monte-Carlo approximation of\n the Hessian of the loss w.r.t. 
the model output.\n\n Stores the output in :code:`diag_ggn_mc`,\n has the same dimensions as the gradient.\n\n For a more precise but slower alternative,\n see :py:meth:`backpack.extensions.DiagGGNExact`.\n\n \"\"\"\n\n def __init__(self, mc_samples=1):\n self._mc_samples = mc_samples\n super().__init__(\n loss_hessian_strategy=LossHessianStrategy.SAMPLING, savefield=\"diag_ggn_mc\"\n )\n\n def get_num_mc_samples(self):\n return self._mc_samples\n" }, { "alpha_fraction": 0.7411444187164307, "alphanum_fraction": 0.7574931979179382, "avg_line_length": 44.875, "blob_id": "b9b02610a63fc1c69de3e443a23bd091ed6dcc2e", "content_id": "0dda3c65443b6c1cc5547efee9980e695d9543b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "permissive", "max_line_length": 100, "num_lines": 8, "path": "/backpack/extensions/firstorder/fisher_block_eff/conv1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv1d import Conv1DDerivatives\nfrom backpack.extensions.firstorder.fisher_block_eff.fisher_block_eff_base import FisherBlockEffBase\n\n\nclass FisherBlockEffConv1d(FisherBlockEffBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n super().__init__(derivatives=Conv1DDerivatives(), params=[\"bias\", \"weight\"])\n" }, { "alpha_fraction": 0.584664523601532, "alphanum_fraction": 0.5974441170692444, "avg_line_length": 30.897958755493164, "blob_id": "77d5c606d627b9b2ef9d62f038465b6c1180dc28", "content_id": "badc0a9d6fc77b0199c2f893ae26aa4c24d9c577", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1565, "license_type": "permissive", "max_line_length": 89, "num_lines": 49, "path": "/backpack/extensions/secondorder/trial/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n AvgPool2d,\n Conv2d,\n CrossEntropyLoss,\n Dropout,\n Flatten,\n Linear,\n MaxPool2d,\n MSELoss,\n ReLU,\n Sigmoid,\n Tanh,\n ZeroPad2d,\n BatchNorm1d\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n# from backpack.extensions.secondorder.hbp import LossHessianStrategy\n\n# from . import activations, conv2d, dropout, flatten, linear, losses, padding, pooling\nfrom . 
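# --- sketch (assumed usage, mirroring the context-manager pattern documented
# in backpack/extensions/backprop_extension.py further below): DiagGGNExact
# stores into `p.diag_ggn_exact` and DiagGGNMC into `p.diag_ggn_mc`, each with
# the same shape as the gradient. Assumes the usual top-level re-exports
# `backpack`, `extend`, and `DiagGGNExact`:
import torch
from backpack import backpack, extend
from backpack.extensions import DiagGGNExact

model = extend(torch.nn.Linear(10, 3))
lossfunc = extend(torch.nn.CrossEntropyLoss())
X, y = torch.randn(8, 10), torch.randint(0, 3, (8,))

with backpack(DiagGGNExact()):
    lossfunc(model(X), y).backward()

for p in model.parameters():
    print(p.grad.shape == p.diag_ggn_exact.shape)  # True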
import activations, linear, losses, conv2d, flatten, pooling, dropout, batchnorm1d\n\n\nclass TRIAL(BackpropExtension):\n def __init__(self, MODE, savefield=None):\n # print('MODE:', MODE)\n self.MODE = MODE\n if savefield is None:\n savefield = \"trial\"\n\n super().__init__(\n savefield=savefield,\n fail_mode=\"ERROR\",\n module_exts={\n # MSELoss: losses.DiagGGNMSELoss(),\n CrossEntropyLoss: losses.TRIALCrossEntropyLoss(),\n Linear: linear.TRIALLinear(self.MODE),\n MaxPool2d: pooling.TRIALMaxPool2d(),\n AvgPool2d: pooling.TRIALAvgPool2d(),\n # ZeroPad2d: padding.DiagGGNZeroPad2d(),\n Conv2d: conv2d.TRIALConv2d(self.MODE),\n Dropout: dropout.TRIALDropout(),\n Flatten: flatten.TRIALFlatten(),\n ReLU: activations.TRIALReLU(),\n Sigmoid: activations.TRIALSigmoid(),\n BatchNorm1d: batchnorm1d.TRIALBatchNorm1d()\n # Tanh: activations.DiagGGNTanh(),\n },\n )\n\n\n" }, { "alpha_fraction": 0.570911705493927, "alphanum_fraction": 0.5810419917106628, "avg_line_length": 26.098039627075195, "blob_id": "47c83a6358756f9afa2600a852c296faa69796f3", "content_id": "0a010a9e219b64a2fbc29b76517f1a9600b5f6f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1382, "license_type": "permissive", "max_line_length": 85, "num_lines": 51, "path": "/backpack/extensions/secondorder/diag_hessian/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n AvgPool2d,\n Conv2d,\n CrossEntropyLoss,\n Dropout,\n Flatten,\n Linear,\n MaxPool2d,\n MSELoss,\n ReLU,\n Sigmoid,\n Tanh,\n ZeroPad2d,\n)\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n\nfrom . import activations, conv2d, dropout, flatten, linear, losses, padding, pooling\n\n\nclass DiagHessian(BackpropExtension):\n \"\"\"\n Diagonal of the Hessian.\n\n Stores the output in :code:`diag_h`, has the same dimensions as the gradient.\n\n .. 
warning::\n\n Very expensive on networks with non-piecewise linear activations.\n\n \"\"\"\n\n def __init__(self):\n super().__init__(\n savefield=\"diag_h\",\n fail_mode=\"ERROR\",\n module_exts={\n MSELoss: losses.DiagHMSELoss(),\n CrossEntropyLoss: losses.DiagHCrossEntropyLoss(),\n Linear: linear.DiagHLinear(),\n MaxPool2d: pooling.DiagHMaxPool2d(),\n AvgPool2d: pooling.DiagHAvgPool2d(),\n ZeroPad2d: padding.DiagHZeroPad2d(),\n Conv2d: conv2d.DiagHConv2d(),\n Dropout: dropout.DiagHDropout(),\n Flatten: flatten.DiagHFlatten(),\n ReLU: activations.DiagHReLU(),\n Sigmoid: activations.DiagHSigmoid(),\n Tanh: activations.DiagHTanh(),\n },\n )\n" }, { "alpha_fraction": 0.6626871228218079, "alphanum_fraction": 0.7077690958976746, "avg_line_length": 38.80141830444336, "blob_id": "9de9715e3fc414888a4c3657a8b9f0740f1d8330", "content_id": "66e9fbe5ef900eb6a884fe10e6c0987776d6209d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5612, "license_type": "permissive", "max_line_length": 98, "num_lines": 141, "path": "/changelog.md", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [Unreleased]\n\n\n## [1.2.0] - 2020-10-26\n\nThanks to [@sbharadwajj](https://github.com/sbharadwajj) for\nco-authoring many PRs shipped in this release.\n\n### Added\n- Deprecated `python3.5`, tested compatibility with PyTorch 1.6.0\n [[PR](https://github.com/f-dangel/backpack/pull/88)]\n- Support first-order extensions for `Conv1d`, `Conv3d`,\n `ConvTranspose1d`, `ConvTranspose2d`, `ConvTranspose3d`\n - `extensions.BatchGrad`\n [[PR](https://github.com/f-dangel/backpack/pull/92)]\n - `extensions.BatchL2Grad`\n [[PR](https://github.com/f-dangel/backpack/pull/100)]\n - `extensions.SumGradSquared` and `extensions.Variance`\n [[PR](https://github.com/f-dangel/backpack/pull/105)]\n - Raise exceptions for unsupported exotic hyperparameters\n [[PR1](https://github.com/f-dangel/backpack/pull/108),\n [PR2](https://github.com/f-dangel/backpack/pull/109)]\n- New example: Backpropagating through BackPACK quantities\n [[commit](https://github.com/f-dangel/backpack/commit/8ef33a42badded9a1d9b5013f8686bfa7feec6e7)]\n- New extensions in API: Block-diagonal curvature products\n - Exposed via `extensions.HMP`, `extensions.GGNMP`,\n `extensions.PCHMP`\n [[PR](https://github.com/f-dangel/backpack/pull/73)]\n - Examples: Hutchinson trace estimation\n [[PR](https://github.com/f-dangel/backpack/pull/98)]\n and Hessian-free optimization with CG\n [[PR](https://github.com/f-dangel/backpack/pull/99)]\n### Fixed\n\n- Add missing `zero_grad` in the diagonal GGN second-order\n optimization example\n [[PR](https://github.com/f-dangel/backpack/pull/101)]\n\n### Internal\n- Increased test coverage\n - New test suite for `backpack.extensions`\n [[PR](https://github.com/f-dangel/backpack/pull/90)]\n - New test suite for `backpack.core`\n [[PR](https://github.com/f-dangel/backpack/pull/75)]\n- Implemented derivatives of the following operations in\n `backpack.core`\n - More activation functions\n [[PR](https://github.com/f-dangel/backpack/pull/76)]\n - `Conv1d`, `Conv3d`\n [[PR](https://github.com/f-dangel/backpack/pull/79)]\n - `ConvTranspose1d`, `ConvTranspose2d`, `ConvTranspose3d`\n 
[[PR](https://github.com/f-dangel/backpack/pull/84)]\n- Refactor `firstorder` extensions to share more code\n  [[PR1](https://github.com/f-dangel/backpack/pull/105),\n  [PR2](https://github.com/f-dangel/backpack/pull/105)]\n- Removed `detach`s to support differentiating through\n  quantities\n  [[PR](https://github.com/f-dangel/backpack/pull/70)]\n\n\n## [1.1.1] - 2020-04-29\n\n### Added\n- Improved documentation, moved to [ReadTheDocs](https://docs.backpack.pt) \n  [[PR1](https://github.com/f-dangel/backpack/pull/57), \n  [PR2](https://github.com/f-dangel/backpack/pull/58),\n  [PR3](https://github.com/f-dangel/backpack/pull/66)]\n- Tested compatibility with PyTorch 1.5.0.\n- Support 2nd-order backprop for vectors in `MSELoss` \n  [[PR](https://github.com/f-dangel/backpack/pull/61)]\n- Sanity checks to raise warnings if the following are used.\n  `inplace` modification \n  [[PR](https://github.com/f-dangel/backpack/pull/59)],\n  unsupported loss parameters \n  [[PR](https://github.com/f-dangel/backpack/pull/60)],\n  custom losses in 2nd-order backpropagation \n  [[PR](https://github.com/f-dangel/backpack/pull/60)]\n\n### Fixed\n- Removed `opt_einsum` dependency \n  [[PR](https://github.com/f-dangel/backpack/pull/54)]\n- Missing implementations and wrong backpropagation of KFRA \n  for `Conv2d`, `MaxPool2d`, and `AvgPool2d` \n  [[PR](https://github.com/f-dangel/backpack/pull/53)]\n- Remove `try_view` and use `reshape` to use PyTorch 1.4.0 improvements \n  [[PR](https://github.com/f-dangel/backpack/pull/50)]\n\n### Internal\n- Docstring style [[PR](https://github.com/f-dangel/backpack/pull/52)]\n\n\n## [1.1.0] - 2020-02-11\n\n### Added\n- Support MC sampling \n  [[Issue](https://github.com/f-dangel/backpack/issues/21),\n  [PR](https://github.com/f-dangel/backpack/pull/36)]\n- Utilities to handle Kronecker factors \n  [[PR](https://github.com/f-dangel/backpack/pull/17)]\n- Examples \n  [[PR](https://github.com/f-dangel/backpack/pull/34)]\n  \n### Fixed\n- Fixed documentation issue in `Batch l2` \n  [[PR](https://github.com/f-dangel/backpack/pull/33)]\n- Added support for stride parameter in Conv2d \n  [[Issue](https://github.com/f-dangel/backpack/issues/30), \n  [PR](https://github.com/f-dangel/backpack/pull/31)]\n- Pytorch `1.3.0` compatibility \n  [[PR](https://github.com/f-dangel/backpack/pull/8), \n  [PR](https://github.com/f-dangel/backpack/pull/9)]\n  \n### Internal\n- Added \n  continuous integration [[PR](https://github.com/f-dangel/backpack/pull/19)],\n  test coverage [[PR](https://github.com/f-dangel/backpack/pull/25)],\n  style guide enforcement [[PR](https://github.com/f-dangel/backpack/pull/27)]\n- Changed internal shape conventions of backpropagated quantities for performance improvements \n  [[PR](https://github.com/f-dangel/backpack/pull/37)]\n\n## [1.0.1] - 2019-09-05\n\n### Fixed\n- Fixed PyPI installation \n\n## [1.0.0] - 2019-10-03 \n\nInitial release\n\n[Unreleased]: https://github.com/f-dangel/backpack/compare/1.2.0...HEAD\n[1.2.0]: https://github.com/f-dangel/backpack/compare/1.1.1...1.2.0\n[1.1.1]: https://github.com/f-dangel/backpack/compare/1.1.0...1.1.1\n[1.1.0]: https://github.com/f-dangel/backpack/compare/1.0.1...1.1.0\n[1.0.1]: https://github.com/f-dangel/backpack/compare/1.0.0...1.0.1\n[1.0.0]: https://github.com/f-dangel/backpack/releases/tag/1.0.0\n" }, { "alpha_fraction": 0.6419214010238647, "alphanum_fraction": 0.6535662412643433, "avg_line_length": 35.157894134521484, "blob_id": "f85579baf9097490a63af0fd5b482e5a2831727b", "content_id": "73e3f913e0c55bbc927941937ccac0423178e4e7",
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "permissive", "max_line_length": 73, "num_lines": 19, "path": "/backpack/extensions/firstorder/batch_l2_grad/convtranspose1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum\n\nfrom backpack.extensions.firstorder.base import FirstOrderModuleExtension\nfrom backpack.utils import conv_transpose as convUtils\n\n\nclass BatchL2ConvTranspose1d(FirstOrderModuleExtension):\n def __init__(self):\n super().__init__(params=[\"bias\", \"weight\"])\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n C_axis = 1\n return (einsum(\"ncl->nc\", g_out[0]) ** 2).sum(C_axis)\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n X, dE_dY = convUtils.get_convtranspose1d_weight_gradient_factors(\n module.input0, g_out[0], module\n )\n return einsum(\"nmi,nki,nmj,nkj->n\", (dE_dY, X, dE_dY, X))\n" }, { "alpha_fraction": 0.7260677218437195, "alphanum_fraction": 0.7260677218437195, "avg_line_length": 41.4375, "blob_id": "6e2b8f020a32c7b7adf374ba92692145d831ddee", "content_id": "35a464fc2b4afe443410cb7ba8df2fff0e6bef09", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "permissive", "max_line_length": 84, "num_lines": 16, "path": "/backpack/extensions/secondorder/trial/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import backpack.utils.linear as LinUtils\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\n\n\nclass TRIALLinear(TRIALBaseModule):\n def __init__(self, MODE):\n self.MODE = MODE\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n # TODO: FIX these functions for NGD\n def bias(self, ext, module, grad_inp, grad_out, backproped):\n return LinUtils.extract_bias_ngd(module, backproped, self.MODE)\n\n def weight(self, ext, module, grad_inp, grad_out, backproped):\n return LinUtils.extract_weight_ngd(module, backproped, self.MODE)\n" }, { "alpha_fraction": 0.5655539631843567, "alphanum_fraction": 0.5737784504890442, "avg_line_length": 31.634920120239258, "blob_id": "8a79b1e5673533c84ac8b1464ecedd951a1cb1bc", "content_id": "da6f9baadcd0782cf276c030c94fa9e43e177529", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2067, "license_type": "permissive", "max_line_length": 89, "num_lines": 63, "path": "/backpack/extensions/firstorder/fisher_block/batchnorm1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\n\nfrom torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nclass FisherBlockBatchNorm1d(FisherBlockBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n super().__init__(derivatives=BatchNorm1dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n G = g_out_sc\n\n I = module.input0\n mean = I.mean(dim=0)\n var = I.var(dim=0, unbiased=False)\n xhat = (I - mean) / (var + module.eps).sqrt()\n dw = g_out_sc * xhat\n\n # compute vector jacobian product in optimization method\n grad = 
module.weight.grad\n grad_prod = einsum(\"nk,k->n\", (dw, grad))\n\n out = matmul(dw, dw.t())\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n\n # gv = einsum(\"n,nk->k\", (v, G))\n ### multiply with Jacobian\n gv = einsum(\"n,nk->k\", (v, dw))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n\n module.dw = dw\n module.NGD_inv = NGD_inv\n\n return (out, grad_prod, update)\n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"n,no->o\", (v, g_out_sc))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n\n return (out, grad_prod, update)\n \n\n\n" }, { "alpha_fraction": 0.5120689868927002, "alphanum_fraction": 0.5887930989265442, "avg_line_length": 29.526315689086914, "blob_id": "59add7a4a0e4cb366d91b96b98f3e9cae70042e7", "content_id": "572de495aa86459d39f50a545f38552c5d54bd12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1160, "license_type": "permissive", "max_line_length": 87, "num_lines": 38, "path": "/backpack/core/derivatives/selu.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import gt, exp\nfrom torch.nn import SELU\n\nfrom backpack.core.derivatives.elementwise import ElementwiseDerivatives\n\n\nclass SELUDerivatives(ElementwiseDerivatives):\n \"\"\"Alpha and scale are not input_kwargs\"\"\"\n\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n\n def get_module(self):\n return SELU\n\n def hessian_is_zero(self):\n \"\"\"`SELU''(x) != 0`.\"\"\"\n return False\n\n def df(self, module, g_inp, g_out):\n \"\"\"First SELU derivative: `SELU'(x) = scale if x < 0 else scale*alpha*e^x`. \"\"\"\n\n df_SELU = gt(module.input0, 0).float()\n df_SELU[df_SELU == 1] = self.scale\n df_SELU[df_SELU == 0] = (\n self.scale * self.alpha * exp(module.input0[df_SELU == 0])\n )\n return df_SELU\n\n def d2f(self, module, g_inp, g_out):\n \"\"\"Second SELU derivative: `SELU''(x) = 0 if x < 0 else scale*alpha*e^x`. 
\"\"\"\n\n d2f_SELU = gt(module.input0, 0).float()\n d2f_SELU[d2f_SELU == 1] = 0\n d2f_SELU[d2f_SELU == 0] = (\n self.scale * self.alpha * exp(module.input0[d2f_SELU == 0])\n )\n return d2f_SELU\n" }, { "alpha_fraction": 0.758695662021637, "alphanum_fraction": 0.758695662021637, "avg_line_length": 28.677419662475586, "blob_id": "bde3cc89695b5c78b4bbac0d6f88658f83344e72", "content_id": "fa9d616eb59d38920b209d709e80722eb5eb6a92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "permissive", "max_line_length": 75, "num_lines": 31, "path": "/test/core/derivatives/settings.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"Test configurations for `backpack.core.derivatives`.\n\nRequired entries:\n The tests for individual categories are\n written in respective files and imported here.\n Tests:\n Activation layers\n Convolutional layers\n Linear Layers\n Loss functions\n Pooling layers\n Padding layers\n\"\"\"\n\nfrom test.core.derivatives.activation_settings import ACTIVATION_SETTINGS\nfrom test.core.derivatives.convolution_settings import CONVOLUTION_SETTINGS\nfrom test.core.derivatives.linear_settings import LINEAR_SETTINGS\nfrom test.core.derivatives.loss_settings import LOSS_SETTINGS\nfrom test.core.derivatives.padding_settings import PADDING_SETTINGS\nfrom test.core.derivatives.pooling_settings import POOLING_SETTINGS\n\nSETTINGS = []\n\nSETTINGS.extend(\n ACTIVATION_SETTINGS\n + CONVOLUTION_SETTINGS\n + LINEAR_SETTINGS\n + LOSS_SETTINGS\n + PADDING_SETTINGS\n + POOLING_SETTINGS\n)\n" }, { "alpha_fraction": 0.7007211446762085, "alphanum_fraction": 0.7055288553237915, "avg_line_length": 36.727272033691406, "blob_id": "85efbfd722b659aad58fb6e0b306011eb866bc0d", "content_id": "d27c877cc525c19477d838c5cbd0a17f88f66c41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "permissive", "max_line_length": 84, "num_lines": 22, "path": "/backpack/extensions/secondorder/mngd/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\nfrom backpack.utils import conv as convUtils\nfrom torch import sqrt, zeros\nimport torch\n\nfrom torch import einsum\n\nclass MNGDConv2d(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n \n # TODO: FIX these functions for NGD\n def bias(self, ext, module, grad_inp, grad_out, backproped):\n # sqrt_ggn = backproped\n # return convUtils.extract_bias_ngd(module, sqrt_ggn, self.MODE)\n return None\n\n def weight(self, ext, module, grad_inp, grad_out, backproped):\n # weight_diag= convUtils.extract_weight_ngd(module, backproped, self.MODE)\n # return weight_diag\n return None\n\n\n" }, { "alpha_fraction": 0.507964015007019, "alphanum_fraction": 0.5169667601585388, "avg_line_length": 28.97916603088379, "blob_id": "f05166f82f033c566c93bd019f15cc918a52b2ad", "content_id": "4f9a2e4de325187ec134c0e8e52f4afea5850002", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2888, "license_type": "permissive", "max_line_length": 89, "num_lines": 96, "path": "/backpack/extensions/firstorder/fisher_block/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum, eye, 
matmul, ones_like, norm\nfrom torch.linalg import inv\n\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\n\n\nclass FisherBlockLinear(FisherBlockBase):\n def __init__(self, damping=1.0, alpha=0.95):\n self.damping = damping\n self.alpha = alpha\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n # print(g_out)\n\n # check if there are stored variables:\n # if hasattr(module, \"I\"):\n # this is a sampling technique\n # inp = module.I\n # l = inp.shape[0]\n # prob = 0.1\n # l_new = int(np.floor(prob * l))\n\n # # print('input to linear layer before droput:', inp.shape)\n # Borg = einsum(\"ni,li->nl\", (inp, inp)) \n\n # if inp.shape[1] > 7000:\n # inp = inp[:, torch.randint(l, (l_new,))] \n\n # B = einsum(\"ni,li->nl\", (inp, inp)) / ( prob)\n \n\n\n\n I = module.input0\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n G = g_out_sc\n grad = module.weight.grad\n \n \n B = einsum(\"ni,li->nl\", (I, I)) \n A = einsum(\"no,lo->nl\", (G, G))\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"ni,oi->no\", (I, grad))\n grad_prod = einsum(\"no,no->n\", (grad_prod, G))\n # grad_prod = 0\n out = A * B \n # out = 0\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n\n gv = einsum(\"n,no->no\", (v, G))\n gv = einsum(\"no,ni->oi\", (gv, I))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n # update = grad\n\n # store for later use:\n # module.A = A\n # module.B = B\n # module.out = out\n module.I = I\n module.G = G\n module.NGD_inv = NGD_inv\n return (out, grad_prod, update)\n \n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n\n grad = module.bias.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n # grad_prod = 0\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n # out = 0\n\n\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"n,no->o\", (v, g_out_sc))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n # update = grad\n\n return (out, grad_prod, update)\n \n\n" }, { "alpha_fraction": 0.5917431116104126, "alphanum_fraction": 0.5983690023422241, "avg_line_length": 31.1639347076416, "blob_id": "351ce50c45d55702209136666d6e02414f998c8a", "content_id": "4c7cf06f6615026f91a04b38e7f6379eff09c2c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1962, "license_type": "permissive", "max_line_length": 78, "num_lines": 61, "path": "/backpack/core/derivatives/batchnorm2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from warnings import warn\n\nfrom torch import einsum\nfrom torch.nn import BatchNorm2d\nimport torch\nfrom backpack.core.derivatives.basederivatives import BaseParameterDerivatives\n\n\nclass BatchNorm2dDerivatives(BaseParameterDerivatives):\n def get_module(self):\n return BatchNorm2d\n\n def hessian_is_zero(self):\n return False\n\n def hessian_is_diagonal(self):\n return False\n\n def _jac_mat_prod(self, module, g_inp, g_out, mat):\n return self._jac_t_mat_prod(module, g_inp, g_out, mat)\n\n def _jac_t_mat_prod(self, module, g_inp, g_out, 
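# --- sketch (not part of the original files): the FisherBlock weight()/bias()
# methods above implement a Sherman-Morrison-Woodbury step. With J the (n x p)
# matrix of n-scaled per-sample gradients, Fisher F = J^T J / n and damping d,
#   (F + d*I_p)^{-1} g  ==  (g - J^T (J J^T / n + d*I_n)^{-1} (J g) / n) / d,
# so only the n x n kernel (`NGD_kernel`) is ever inverted. Numeric check:
import torch

torch.manual_seed(0)
n, p, d = 5, 12, 1.0
J, g = torch.randn(n, p), torch.randn(p)

direct = torch.linalg.solve(J.t() @ J / n + d * torch.eye(p), g.unsqueeze(1)).squeeze(1)
v = torch.linalg.solve(J @ J.t() / n + d * torch.eye(n), (J @ g).unsqueeze(1)).squeeze(1)
woodbury = (g - J.t() @ v / n) / d
print(torch.allclose(direct, woodbury, atol=1e-4))  # True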
mat):\n        return None\n\n    def _weight_jac_mat_prod(self, module, g_inp, g_out, mat):\n        return None\n\n    def get_normalized_input_and_var(self, module):\n        input = module.input0\n        mean = input.mean(dim=(0, 2, 3), keepdim=True)\n        var = input.var(dim=(0, 2, 3), unbiased=False, keepdim=True)\n        return (input - mean) / (var + module.eps).sqrt(), var\n\n    def _weight_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch):\n        # TODO: complete this function\n        if not sum_batch:\n            warn(\n                \"BatchNorm batch summation disabled. \"\n                \"This may not compute meaningful quantities\"\n            )\n\n        x_hat, _ = self.get_normalized_input_and_var(module)\n        equation = \"vnihw,nihw->v{}i\".format(\"\" if sum_batch is True else \"n\")\n        operands = [mat, x_hat]\n        return einsum(equation, operands)\n\n    def _bias_jac_mat_prod(self, module, g_inp, g_out, mat):\n        return None\n\n    def _bias_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):\n        # TODO: complete this function\n        if not sum_batch:\n            warn(\n                \"BatchNorm batch summation disabled. \"\n                \"This may not compute meaningful quantities\"\n            )\n            N_axis = (3, 4)  # sum the spatial axes, keep (v, n, c)\n            return mat.sum(N_axis)\n        else:\n            N_axis = (1, 3, 4)  # sum batch and spatial axes, keep (v, c)\n            return mat.sum(N_axis)\n" }, { "alpha_fraction": 0.7672332525253296, "alphanum_fraction": 0.7932011485099792, "avg_line_length": 28.41666603088379, "blob_id": "b47ee9a81d7516beb88a831053275d4bc7bed58d", "content_id": "b9d2f1f716cb8eef7801501b63fe89aa7560b17a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2118, "license_type": "permissive", "max_line_length": 57, "num_lines": 72, "path": "/backpack/core/derivatives/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch.nn import (\n    ELU,\n    SELU,\n    AvgPool2d,\n    Conv1d,\n    Conv2d,\n    Conv3d,\n    ConvTranspose1d,\n    ConvTranspose2d,\n    ConvTranspose3d,\n    CrossEntropyLoss,\n    Dropout,\n    LeakyReLU,\n    Linear,\n    LogSigmoid,\n    MaxPool2d,\n    MSELoss,\n    ReLU,\n    Sigmoid,\n    Tanh,\n    ZeroPad2d,\n    BatchNorm1d,\n    BatchNorm2d\n)\n\nfrom .avgpool2d import AvgPool2DDerivatives\nfrom .conv1d import Conv1DDerivatives\nfrom .conv_transpose1d import ConvTranspose1DDerivatives\nfrom .conv2d import Conv2DDerivatives\nfrom .conv_transpose2d import ConvTranspose2DDerivatives\nfrom .conv3d import Conv3DDerivatives\nfrom .conv_transpose3d import ConvTranspose3DDerivatives\nfrom .crossentropyloss import CrossEntropyLossDerivatives\nfrom .dropout import DropoutDerivatives\nfrom .elu import ELUDerivatives\nfrom .leakyrelu import LeakyReLUDerivatives\nfrom .linear import LinearDerivatives\nfrom .logsigmoid import LogSigmoidDerivatives\nfrom .maxpool2d import MaxPool2DDerivatives\nfrom .mseloss import MSELossDerivatives\nfrom .relu import ReLUDerivatives\nfrom .selu import SELUDerivatives\nfrom .sigmoid import SigmoidDerivatives\nfrom .tanh import TanhDerivatives\nfrom .zeropad2d import ZeroPad2dDerivatives\nfrom .batchnorm1d import BatchNorm1dDerivatives\nfrom .batchnorm2d import BatchNorm2dDerivatives\n\nderivatives_for = {\n    Linear: LinearDerivatives,\n    Conv1d: Conv1DDerivatives,\n    Conv2d: Conv2DDerivatives,\n    Conv3d: Conv3DDerivatives,\n    AvgPool2d: AvgPool2DDerivatives,\n    MaxPool2d: MaxPool2DDerivatives,\n    ZeroPad2d: ZeroPad2dDerivatives,\n    Dropout: DropoutDerivatives,\n    ReLU: ReLUDerivatives,\n    Tanh: TanhDerivatives,\n    Sigmoid: SigmoidDerivatives,\n    ConvTranspose1d: ConvTranspose1DDerivatives,\n    ConvTranspose2d: ConvTranspose2DDerivatives,\n    ConvTranspose3d: ConvTranspose3DDerivatives,\n    LeakyReLU: LeakyReLUDerivatives,\n    LogSigmoid: 
LogSigmoidDerivatives,\n    ELU: ELUDerivatives,\n    SELU: SELUDerivatives,\n    CrossEntropyLoss: CrossEntropyLossDerivatives,\n    MSELoss: MSELossDerivatives,\n    BatchNorm1d: BatchNorm1dDerivatives,\n    BatchNorm2d: BatchNorm2dDerivatives,\n}\n" }, { "alpha_fraction": 0.5688111782073975, "alphanum_fraction": 0.582760751247406, "avg_line_length": 33.80434799194336, "blob_id": "e730623b7607260ad33765a0f6b347284c62660a", "content_id": "8cfb1e61b7aff9b338978aa3119c7f501a8aff5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4803, "license_type": "permissive", "max_line_length": 85, "num_lines": 138, "path": "/backpack/utils/ein.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\" Einsum utility functions. \"\"\"\n\nimport torch\n\n\ndef eingroup(equation, operand, dim=None):\n    \"\"\"Use einsum-like notation for (un-)grouping dimensions.\n\n    Dimensions that cannot be inferred can be handed in via a mapping `dim`.\n\n    Arguments:\n        equation (str): Equation specifying the (un-)grouping of axes.\n        operand (torch.Tensor): The tensor that `equation` will be applied to.\n        dim (dict, optional): A mapping from letters in `equation` to\n            dimensions. Only required if `eingroup` cannot infer the dimension.\n            For instance, consider you want to interpret a vector with 10\n            elements as a 5x2 matrix. The equation `\"ij->i,j\"` is not\n            sufficient, you need to specify `dim = {\"i\": 5, \"j\": 2}`.\n\n    Note:\n        Many operations in `backpack` require that certain axes of a tensor\n        be treated identically, and will therefore be grouped into a single\n        dimension. One way to do that is using `view`s or `reshape`s.\n        `eingroup` helps facilitate this process. It can be used in roughly\n        the same way as `einsum`, but acts only on a single tensor at\n        a time (although this could be fixed with an improved syntax and\n        equation analysis).\n\n    Idea:\n        * `\"a,b,c->ab,c\"`: group dimensions `a` and `b` into a single one.\n        * `\"a,b,c->ba,c\"` to transpose, then group `b` and `a` dimension.\n\n    Examples:\n        Different reshapes of a [2 x 2 x 2] tensor:\n        >>> t = torch.Tensor([0, 1, 2, 3, 4, 5, 6, 7]).reshape(2, 2, 2)\n        >>> t_flat = t.reshape(-1)\n        >>> # group all dimensions\n        >>> eingroup(\"i,j,k->ijk\", t)\n        torch.Tensor([0, 1, 2, 3, 4, 5, 6, 7])\n        >>> # interpret as 2 x 4 matrix\n        >>> eingroup(\"i,j,k->i,jk\", t)\n        torch.Tensor([[0, 1, 2, 3], [4, 5, 6, 7]])\n        >>> # un-grouping (the sizes of the new axes must be specified)\n        >>> eingroup(\"ijk->i,j,k\", t_flat, dim={\"i\": 2, \"j\": 2, \"k\": 2})\n        torch.Tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])\n        >>> # grouping with additional contraction over the first axis\n        >>> eingroup(\"ijk->j,k\", t_flat, dim={\"i\": 2, \"j\": 2, \"k\": 2})\n        torch.Tensor([[4, 6], [8, 10]])\n\n    Returns:\n        torch.Tensor: Result of the (un-)grouping operation.\n\n    Raises:\n        KeyError: If information about a dimension in `dim` is missing,\n            or if `dim` specifies an axis whose size can already be inferred. 
# noqa: DAR402\n RuntimeError: If the groups inferred from `equation` do not match\n the number of axes of `operand` # noqa: DAR402\n\n \"\"\"\n dim = {} if dim is None else dim\n in_shape, out_shape, einsum_eq = _eingroup_preprocess(equation, operand, dim=dim)\n\n operand_in = operand.reshape(in_shape)\n result = torch.einsum(einsum_eq, operand_in)\n return result.reshape(out_shape)\n\n\ndef _eingroup_preprocess(equation, operand, dim):\n \"\"\"Process `eingroup` equation.\n\n Return the `reshape`s and `einsum` equations that have to\n be performed.\n \"\"\"\n split, sep = \"->\", \",\"\n\n def groups(string):\n return string.split(sep)\n\n lhs, rhs = equation.split(split)\n in_groups, out_groups = groups(lhs), groups(rhs)\n\n dim = __eingroup_infer(in_groups, operand, dim)\n in_shape_flat, out_shape = __eingroup_shapes(in_groups, out_groups, dim)\n\n return in_shape_flat, out_shape, equation.replace(sep, \"\")\n\n\ndef __eingroup_shapes(in_groups, out_groups, dim):\n \"\"\"Return shape the input needs to be reshaped, and the output shape.\"\"\"\n\n def shape(groups, dim):\n return [group_dim(group, dim) for group in groups]\n\n def product(nums):\n assert len(nums) > 0\n\n result = 1\n for num in nums:\n result *= num\n return result\n\n def group_dim(group, dim):\n try:\n return product([dim[g] for g in group])\n except KeyError as e:\n raise KeyError(\"Unknown dimension for an axis {}\".format(e))\n\n out_shape = shape(out_groups, dim)\n\n in_groups_flat = []\n for group in in_groups:\n for letter in group:\n in_groups_flat.append(letter)\n in_shape_flat = shape(in_groups_flat, dim)\n\n return in_shape_flat, out_shape\n\n\ndef __eingroup_infer(in_groups, operand, dim):\n \"\"\"Infer the size of each axis.\"\"\"\n if not len(in_groups) == len(operand.shape):\n raise RuntimeError(\n \"Got {} input groups {}, but tensor has {} axes.\".format(\n len(in_groups), in_groups, len(operand.shape)\n )\n )\n\n for group, size in zip(in_groups, operand.shape):\n if len(group) == 1:\n axis = group[0]\n if axis in dim.keys():\n raise KeyError(\n \"Can infer dimension of axis {}.\".format(axis),\n \"Remove from dim = {}.\".format(dim),\n )\n dim[axis] = size\n\n return dim\n" }, { "alpha_fraction": 0.6867815852165222, "alphanum_fraction": 0.6954023241996765, "avg_line_length": 35.105262756347656, "blob_id": "eeba592dff66a42ba433bbb6eac1f80005fd3b9f", "content_id": "0daf659a45e78e9d16e81fdc3c3a18adaae72847", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "permissive", "max_line_length": 100, "num_lines": 19, "path": "/backpack/extensions/firstorder/fisher_block_eff/batchnorm2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.batchnorm2d import BatchNorm2dDerivatives\nfrom backpack.extensions.firstorder.fisher_block_eff.fisher_block_eff_base import FisherBlockEffBase\n\nfrom torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nclass FisherBlockEffBatchNorm2d(FisherBlockEffBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n super().__init__(derivatives=BatchNorm2dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n \n return module.weight.grad\n \n\n def bias(self, ext, module, g_inp, g_out, backproped):\n \n return module.bias.grad\n \n\n" }, { "alpha_fraction": 0.715976357460022, "alphanum_fraction": 0.7278106212615967, "avg_line_length": 36.55555725097656, 
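# --- sketch (not part of the original files): when the right-hand side of an
# `eingroup` equation keeps the axis order, the call is a pure reshape, which
# gives a cheap sanity check of the helper above:
import torch
from backpack.utils.ein import eingroup

t = torch.randn(2, 3, 4, 5, 6)
grouped = eingroup("v,n,c,w,h->vnc,w,h", t)
print(torch.equal(grouped, t.reshape(2 * 3 * 4, 5, 6)))  # True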
"blob_id": "f393c500e35ca6613b5dc852b56d963873b43fdc", "content_id": "e8e45c9e506b68468c1b321153f29e8446ddf504", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "permissive", "max_line_length": 83, "num_lines": 9, "path": "/backpack/extensions/firstorder/batch_grad/batchnorm2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.batchnorm2d import BatchNorm2dDerivatives\nfrom backpack.extensions.firstorder.batch_grad.batch_grad_base import BatchGradBase\n\n\nclass BatchGradBatchNorm2d(BatchGradBase):\n def __init__(self):\n super().__init__(\n derivatives=BatchNorm2dDerivatives(), params=[\"bias\", \"weight\"]\n )\n" }, { "alpha_fraction": 0.5819672346115112, "alphanum_fraction": 0.5969945192337036, "avg_line_length": 30.826086044311523, "blob_id": "ea3ce74ad447c1ac5ad2b2fe5028aa65ad13d299", "content_id": "044bc97aadd663901a048e5aa4a626abafebb063", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 734, "license_type": "permissive", "max_line_length": 79, "num_lines": 23, "path": "/backpack/core/derivatives/elu.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import gt, exp\nfrom torch.nn import ELU\n\nfrom backpack.core.derivatives.elementwise import ElementwiseDerivatives\n\n\nclass ELUDerivatives(ElementwiseDerivatives):\n def get_module(self):\n return ELU\n\n def hessian_is_zero(self):\n \"\"\"`ELU''(x) ≠ 0`.\"\"\"\n return False\n\n def df(self, module, g_inp, g_out):\n \"\"\"First ELU derivative: `ELU'(x) = alpha * e^x if x < 0 else 1`. \"\"\"\n df_ELU = gt(module.input0, 0).float()\n df_ELU[df_ELU == 0] = module.alpha * exp(module.input0[df_ELU == 0])\n return df_ELU\n\n def d2f(self, module, g_inp, g_out):\n \"\"\"Second ELU derivative: `ELU''(x) = alpha * e^x if x < 0 else 1`. 
\"\"\"\n return self.df(module, g_inp, g_out)\n" }, { "alpha_fraction": 0.5809054374694824, "alphanum_fraction": 0.5853877067565918, "avg_line_length": 33.859375, "blob_id": "e98e5d689b6039c4bde5b75a8f98abffff13a9e8", "content_id": "fab25f8f1cb8d8d81f7ddde53f640ecd16ed0b59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2231, "license_type": "permissive", "max_line_length": 83, "num_lines": 64, "path": "/test/extensions/implementation/autograd.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from test.extensions.implementation.base import ExtensionsImplementation\n\nimport torch\n\n\nclass AutogradExtensions(ExtensionsImplementation):\n \"\"\"Extension implementations with autograd.\"\"\"\n\n def batch_grad(self):\n N = self.problem.input.shape[0]\n batch_grads = [\n torch.zeros(N, *p.size()).to(self.problem.device)\n for p in self.problem.model.parameters()\n ]\n\n loss_list = torch.zeros((N))\n gradients_list = []\n for b in range(N):\n _, _, loss = self.problem.forward_pass(sample_idx=b)\n gradients = torch.autograd.grad(loss, self.problem.model.parameters())\n gradients_list.append(gradients)\n loss_list[b] = loss\n\n _, _, batch_loss = self.problem.forward_pass()\n factor = self.problem.get_reduction_factor(batch_loss, loss_list)\n\n for b, gradients in zip(range(N), gradients_list):\n for idx, g in enumerate(gradients):\n batch_grads[idx][b, :] = g.detach() * factor\n\n return batch_grads\n\n def batch_l2_grad(self):\n batch_grad = self.batch_grad()\n batch_l2_grads = [(g ** 2).flatten(start_dim=1).sum(1) for g in batch_grad]\n return batch_l2_grads\n\n def sgs(self):\n N = self.problem.input.shape[0]\n sgs = [\n torch.zeros(*p.size()).to(self.problem.device)\n for p in self.problem.model.parameters()\n ]\n\n loss_list = torch.zeros((N))\n gradients_list = []\n for b in range(N):\n _, _, loss = self.problem.forward_pass(sample_idx=b)\n gradients = torch.autograd.grad(loss, self.problem.model.parameters())\n loss_list[b] = loss\n gradients_list.append(gradients)\n\n _, _, batch_loss = self.problem.forward_pass()\n factor = self.problem.get_reduction_factor(batch_loss, loss_list)\n\n for _, gradients in zip(range(N), gradients_list):\n for idx, g in enumerate(gradients):\n sgs[idx] += (g.detach() * factor) ** 2\n return sgs\n\n def variance(self):\n batch_grad = self.batch_grad()\n variances = [torch.var(g, dim=0, unbiased=False) for g in batch_grad]\n return variances\n" }, { "alpha_fraction": 0.7982062697410583, "alphanum_fraction": 0.8071748614311218, "avg_line_length": 33.30769348144531, "blob_id": "e9b676b9034db7702a4309c2dd0ecdfe51d2aaca", "content_id": "5f526877b723d5dc07cc4799c71a809af1609a55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "permissive", "max_line_length": 82, "num_lines": 13, "path": "/backpack/extensions/secondorder/mngd/losses.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from functools import partial\n\nfrom backpack.core.derivatives.crossentropyloss import CrossEntropyLossDerivatives\nfrom backpack.core.derivatives.mseloss import MSELossDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\nfrom torch import softmax\nfrom torch import rand\n\n\nclass MNGDLoss(MNGDBaseModule):\n def backpropagate(self, ext, module, grad_inp, grad_out, backproped):\n \n return rand(10,10)\n" }, { "alpha_fraction": 0.6207243204116821, 
"alphanum_fraction": 0.6297786831855774, "avg_line_length": 35.814815521240234, "blob_id": "03db9ff844b145f61f8b4ca3cf5c93f7849010af", "content_id": "27064e60afd168f5f21c2465a7f2958fa041690d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "permissive", "max_line_length": 73, "num_lines": 27, "path": "/backpack/extensions/firstorder/fisher_block/fisher_block_base.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.extensions.firstorder.base import FirstOrderModuleExtension\nfrom torch import matmul\n\nclass FisherBlockBase(FirstOrderModuleExtension):\n def __init__(self, derivatives, params=None):\n self.derivatives = derivatives\n self.N_axis = 0\n super().__init__(params=params)\n\n def bias(self, ext, module, g_inp, g_out, bpQuantities):\n grad_batch = self.derivatives.bias_jac_t_mat_prod(\n module, g_inp, g_out, g_out[0], sum_batch=False\n )\n n = grad_batch.shape[0]\n grad_batch = n * grad_batch.reshape(grad_batch.shape[0], -1)\n\n return matmul(grad_batch, grad_batch.t())\n\n\n def weight(self, ext, module, g_inp, g_out, bpQuantities):\n grad_batch = self.derivatives.weight_jac_t_mat_prod(\n module, g_inp, g_out, g_out[0], sum_batch=False\n )\n n = grad_batch.shape[0]\n grad_batch = n * grad_batch.reshape(grad_batch.shape[0], -1)\n\n return matmul(grad_batch, grad_batch.t())\n" }, { "alpha_fraction": 0.7160326242446899, "alphanum_fraction": 0.7228260636329651, "avg_line_length": 42.29411697387695, "blob_id": "695dce67bdbb6af3af255bb3d4155e54ce53e055", "content_id": "9f4bd29fef2f06d12d14f0f59f80af0e41fd9db8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "permissive", "max_line_length": 84, "num_lines": 17, "path": "/backpack/extensions/secondorder/diag_ggn/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule\nfrom backpack.utils import conv as convUtils\n\n\nclass DiagGGNConv2d(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n\n def bias(self, ext, module, grad_inp, grad_out, backproped):\n sqrt_ggn = backproped\n return convUtils.extract_bias_diagonal(module, sqrt_ggn)\n\n def weight(self, ext, module, grad_inp, grad_out, backproped):\n X = convUtils.unfold_func(module)(module.input0)\n weight_diag = convUtils.extract_weight_diagonal(module, X, backproped)\n return weight_diag\n" }, { "alpha_fraction": 0.5910329222679138, "alphanum_fraction": 0.5999504327774048, "avg_line_length": 34.72566223144531, "blob_id": "9d6fe73cedab9278704743fab81c0b1c96a6146f", "content_id": "69424b7fd03a26d557ba2a34f4d292fb21c6ac76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4037, "license_type": "permissive", "max_line_length": 80, "num_lines": 113, "path": "/backpack/core/derivatives/avgpool2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"The code relies on the insight that average pooling can be understood as\nconvolution over single channels with a constant kernel.\"\"\"\n\nimport torch.nn\nfrom torch.nn import AvgPool2d, Conv2d, ConvTranspose2d\n\nfrom backpack.core.derivatives.basederivatives import BaseDerivatives\nfrom backpack.utils.ein import 
eingroup\n\n\nclass AvgPool2DDerivatives(BaseDerivatives):\n def get_module(self):\n return AvgPool2d\n\n def hessian_is_zero(self):\n return True\n\n def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):\n \"\"\"Use fact that average pooling can be implemented as conv.\"\"\"\n _, C, H_in, W_in = module.input0.size()\n in_features = C * H_in * W_in\n _, _, H_out, W_out = module.output.size()\n out_features = C * H_out * W_out\n\n mat = mat.reshape(out_features * C, 1, H_out, W_out)\n jac_t_mat = self.__apply_jacobian_t_of(module, mat).reshape(\n out_features, in_features\n )\n mat_t_jac = jac_t_mat.t().reshape(in_features * C, 1, H_out, W_out)\n jac_t_mat_t_jac = self.__apply_jacobian_t_of(module, mat_t_jac).reshape(\n in_features, in_features\n )\n\n return jac_t_mat_t_jac.t()\n\n def check_exotic_parameters(self, module):\n assert module.count_include_pad, (\n \"Might not work for exotic hyperparameters of AvgPool2d, \"\n + \"like count_include_pad=False\"\n )\n\n def _jac_mat_prod(self, module, g_inp, g_out, mat):\n self.check_exotic_parameters(module)\n\n mat_as_pool = self.__make_single_channel(mat, module)\n jmp_as_pool = self.__apply_jacobian_of(module, mat_as_pool)\n self.__check_jmp_out_as_pool(mat, jmp_as_pool, module)\n\n return self.reshape_like_output(jmp_as_pool, module)\n\n def __make_single_channel(self, mat, module):\n \"\"\"Create fake single-channel images, grouping batch,\n class and channel dimension.\"\"\"\n result = eingroup(\"v,n,c,w,h->vnc,w,h\", mat)\n C_axis = 1\n return result.unsqueeze(C_axis)\n\n def __apply_jacobian_of(self, module, mat):\n conv2d = Conv2d(\n in_channels=1,\n out_channels=1,\n kernel_size=module.kernel_size,\n stride=module.stride,\n padding=module.padding,\n bias=False,\n ).to(module.input0.device)\n\n conv2d.weight.requires_grad = False\n avg_kernel = torch.ones_like(conv2d.weight) / conv2d.weight.numel()\n conv2d.weight.data = avg_kernel\n\n return conv2d(mat)\n\n def __check_jmp_out_as_pool(self, mat, jmp_as_pool, module):\n V = mat.size(0)\n N, C_out, H_out, W_out = module.output_shape\n assert jmp_as_pool.shape == (V * N * C_out, 1, H_out, W_out)\n\n def _jac_t_mat_prod(self, module, g_inp, g_out, mat):\n self.check_exotic_parameters(module)\n\n mat_as_pool = self.__make_single_channel(mat, module)\n jmp_as_pool = self.__apply_jacobian_t_of(module, mat_as_pool)\n self.__check_jmp_in_as_pool(mat, jmp_as_pool, module)\n\n return self.reshape_like_input(jmp_as_pool, module)\n\n def __apply_jacobian_t_of(self, module, mat):\n C_for_conv_t = 1\n\n conv2d_t = ConvTranspose2d(\n in_channels=C_for_conv_t,\n out_channels=C_for_conv_t,\n kernel_size=module.kernel_size,\n stride=module.stride,\n padding=module.padding,\n bias=False,\n ).to(module.input0.device)\n\n conv2d_t.weight.requires_grad = False\n avg_kernel = torch.ones_like(conv2d_t.weight) / conv2d_t.weight.numel()\n conv2d_t.weight.data = avg_kernel\n\n V_N_C_in = mat.size(0)\n _, _, H_in, W_in = module.input0.size()\n output_size = (V_N_C_in, C_for_conv_t, H_in, W_in)\n\n return conv2d_t(mat, output_size=output_size)\n\n def __check_jmp_in_as_pool(self, mat, jmp_as_pool, module):\n V = mat.size(0)\n N, C_in, H_in, W_in = module.input0_shape\n assert jmp_as_pool.shape == (V * N * C_in, 1, H_in, W_in)\n" }, { "alpha_fraction": 0.5640928149223328, "alphanum_fraction": 0.5640928149223328, "avg_line_length": 29.21839141845703, "blob_id": "cb848627a84d45e6d83eb67a22c4e81d8807ca19", "content_id": "71f45adbd7f794b98f9ccc27fe1af8a2bbfece03", "detected_licenses": [ "MIT" ], "is_generated": 
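# --- sketch (not part of the original files): the premise of avgpool2d.py
# above is that average pooling equals a single-channel convolution with a
# constant kernel (see __apply_jacobian_of). Functional check with toy sizes:
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 8, 8)
pooled = F.avg_pool2d(x, kernel_size=2, stride=2)
conved = F.conv2d(x, torch.full((1, 1, 2, 2), 0.25), stride=2)
print(torch.allclose(pooled, conved, atol=1e-6))  # True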
false, "is_vendor": false, "language": "Python", "length_bytes": 2629, "license_type": "permissive", "max_line_length": 76, "num_lines": 87, "path": "/backpack/extensions/backprop_extension.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import warnings\n\nfrom torch.nn import Sequential\n\nFAIL_ERROR = \"ERROR\"\nFAIL_WARN = \"WARN\"\nFAIL_SILENT = \"SILENT\"\n\n\nclass BackpropExtension:\n \"\"\"\n Base class for the BackPACK extensions.\n\n Descendants of this class need to\n - define in what field to save results\n - provide a mapping from Module classes to ModuleExtension instances.\n\n They can then be passed to the Backpack context manager, i.e.,\n ```\n with backpack(NewPackpropExtension(\"myfield\", module_to_extensions)):\n loss(model(X), Y).backward()\n\n for p in model.parameters():\n print(p.myfield)\n ```\n \"\"\"\n\n __external_module_extensions = {}\n\n def __init__(self, savefield, module_exts, fail_mode=FAIL_ERROR):\n \"\"\"\n Parameters\n ----------\n savefield : str\n Where to save results\n module_exts : dict\n Dictionary mapping module classes to `ModuleExtension` instances\n fail_mode : str, optional\n Behavior when encountering an unknown layer.\n Can be\n - \"ERROR\": raise a NotImplementedError\n - \"WARN\": raise a UserWarning\n - \"SILENT\": skip the module silently\n \"\"\"\n self.savefield = savefield\n self.__module_extensions = {\n **module_exts,\n **self.__class__.__external_module_extensions,\n }\n\n self.__fail_mode = fail_mode\n\n @classmethod\n def add_module_extension(cls, module, extension):\n cls.__external_module_extensions[module] = extension\n\n def __get_module_extension(self, module):\n module_extension = self.__module_extensions.get(module.__class__)\n\n def no_op(*args):\n return None\n\n if module_extension is None:\n\n if isinstance(module, Sequential):\n return no_op\n\n if self.__fail_mode is FAIL_ERROR:\n raise NotImplementedError(\n \"Extension saving to {} \".format(self.savefield)\n + \"does not have an extension for \"\n + \"Module {}\".format(module.__class__)\n )\n elif self.__fail_mode == FAIL_WARN:\n warnings.warn(\n \"Extension saving to {} \".format(self.savefield)\n + \"does not have an extension for \"\n + \"Module {}\".format(module.__class__)\n )\n\n return no_op\n\n return module_extension.apply\n\n def apply(self, module, g_inp, g_out):\n module_extension = self.__get_module_extension(module)\n module_extension(self, module, g_inp, g_out)\n" }, { "alpha_fraction": 0.5369904637336731, "alphanum_fraction": 0.5450427532196045, "avg_line_length": 34.46428680419922, "blob_id": "cfe32d6ae35bbf7dee11cc58d3160285502c89a4", "content_id": "aff93a020a16e8a12325741b40ab54c16674e080", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1987, "license_type": "permissive", "max_line_length": 84, "num_lines": 56, "path": "/backpack/extensions/firstorder/fisher/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum\n\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.firstorder.fisher.fisher_base import FisherBase\n\n\nclass FisherLinear(FisherBase):\n def __init__(self, silent=False):\n self.silent = silent\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n\n if not self.silent:\n grad = module.weight.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n B = 
einsum(\"ni,li->nl\", (module.input0, module.input0)) \n A = einsum(\"no,lo->nl\", (g_out_sc, g_out_sc))\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"ni,oi->no\", (module.input0, grad))\n grad_prod = einsum(\"no,no->n\", (grad_prod, g_out_sc))\n\n return (A * B, grad_prod)\n else:\n grad = module.weight.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"ni,oi->no\", (module.input0, grad))\n grad_prod = einsum(\"no,no->n\", (grad_prod, g_out_sc))\n\n return grad_prod\n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n\n if not self.silent:\n grad = module.bias.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n return (out, grad_prod)\n else:\n \n grad = module.bias.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n return grad_prod\n\n" }, { "alpha_fraction": 0.7555205225944519, "alphanum_fraction": 0.7555205225944519, "avg_line_length": 32.3684196472168, "blob_id": "d1c154e92dadfc26703fdf349c6b57f86471f0a8", "content_id": "649b63482abc5b1d75bc7f334a7a709cdc635e72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "permissive", "max_line_length": 84, "num_lines": 19, "path": "/backpack/extensions/secondorder/diag_ggn/activations.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.relu import ReLUDerivatives\nfrom backpack.core.derivatives.sigmoid import SigmoidDerivatives\nfrom backpack.core.derivatives.tanh import TanhDerivatives\nfrom backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule\n\n\nclass DiagGGNReLU(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=ReLUDerivatives())\n\n\nclass DiagGGNSigmoid(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=SigmoidDerivatives())\n\n\nclass DiagGGNTanh(DiagGGNBaseModule):\n def __init__(self):\n super().__init__(derivatives=TanhDerivatives())\n" }, { "alpha_fraction": 0.5722684860229492, "alphanum_fraction": 0.5846807360649109, "avg_line_length": 35.664669036865234, "blob_id": "078e2a8df19c7d8c595a77bf5b4274508e530e33", "content_id": "cb7d185510c09b23e3da7a56e53bae660bbfc6e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6123, "license_type": "permissive", "max_line_length": 84, "num_lines": 167, "path": "/backpack/core/derivatives/conv_transposend.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from numpy import prod\nfrom torch import einsum\nfrom torch.nn import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d\nfrom torch.nn.grad import _grad_input_padding\nfrom torch.nn.functional import conv1d, conv2d, conv3d\nfrom torch.nn.functional import conv_transpose1d, conv_transpose2d, conv_transpose3d\n\nfrom backpack.core.derivatives.basederivatives import BaseParameterDerivatives\nfrom backpack.utils.conv_transpose import unfold_by_conv_transpose\nfrom backpack.utils.ein import eingroup\n\n\nclass ConvTransposeNDDerivatives(BaseParameterDerivatives):\n def __init__(self, N):\n if N == 1:\n self.module = ConvTranspose1d\n self.dim_text = 
\"x\"\n self.conv_func = conv1d\n self.conv_transpose_func = conv_transpose1d\n elif N == 2:\n self.module = ConvTranspose2d\n self.dim_text = \"x,y\"\n self.conv_func = conv2d\n self.conv_transpose_func = conv_transpose2d\n elif N == 3:\n self.module = ConvTranspose3d\n self.dim_text = \"x,y,z\"\n self.conv_func = conv3d\n self.conv_transpose_func = conv_transpose3d\n else:\n raise ValueError(\"{}-dimensional Conv. is not implemented.\".format(N))\n self.conv_dims = N\n\n def hessian_is_zero(self):\n return True\n\n def _bias_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):\n axes = list(range(3, len(module.output_shape) + 1))\n if sum_batch:\n axes = [1] + axes\n return mat.sum(axes)\n\n def _bias_jac_mat_prod(self, module, g_inp, g_out, mat):\n # Expand batch dimension\n jac_mat = mat.unsqueeze(1)\n # Expand data dimensions\n for i in range(3, len(module.output_shape) + 1):\n jac_mat = jac_mat.unsqueeze(i)\n\n expand_shape = [-1, module.output_shape[0], -1, *module.output_shape[2:]]\n\n return jac_mat.expand(*expand_shape)\n\n def _weight_jac_mat_prod(self, module, g_inp, g_out, mat):\n if module.groups != 1:\n raise NotImplementedError(\"Groups greater than 1 are not supported yet\")\n\n V = mat.shape[0]\n G = module.groups\n C_in = module.input0.shape[1]\n N = module.output.shape[0]\n C_out = module.output.shape[1]\n\n mat_reshape = mat.reshape(V, C_in, G, C_out // G, *module.weight.shape[2:])\n u = unfold_by_conv_transpose(module.input0, module).reshape(\n N, C_in // G, G, *module.weight.shape[2:], *module.output.shape[2:]\n )\n\n dims_kern = \"xyz\"[: self.conv_dims]\n dims_data = \"abc\"[: self.conv_dims]\n einstr = \"nig{0}{1},vigo{0}->vngo{1}\".format(dims_kern, dims_data)\n jac_mat = einsum(einstr, u, mat_reshape)\n\n return self.reshape_like_output(jac_mat, module)\n\n def _weight_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):\n if module.groups != 1:\n raise NotImplementedError(\"Groups greater than 1 are not supported yet\")\n\n V = mat.shape[0]\n G = module.groups\n C_in = module.input0.shape[1]\n N = module.output.shape[0]\n C_out = module.output.shape[1]\n\n mat_reshape = mat.reshape(V, N, G, C_out // G, *module.output.shape[2:])\n\n u = unfold_by_conv_transpose(module.input0, module).reshape(\n N, C_in // G, G, *module.weight.shape[2:], *module.output.shape[2:]\n )\n\n dims_kern = \"xyz\"[: self.conv_dims]\n dims_data = \"abc\"[: self.conv_dims]\n result_str = (\"vigo\" if sum_batch else \"vnigo\") + dims_kern\n equation = \"nig{0}{1},vngo{1}->{2}\".format(dims_kern, dims_data, result_str)\n\n final_shape = (\n (V, *module.weight.shape) if sum_batch else (V, N, *module.weight.shape)\n )\n\n return einsum(equation, u, mat_reshape).reshape(final_shape)\n\n def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):\n in_features = int(prod(module.input0.size()[1:]))\n out_features = int(prod(module.output.size()[1:]))\n\n mat = mat.reshape(out_features, *module.output.size()[1:])\n jac_t_mat = self.__jac_t(module, mat).reshape(out_features, in_features)\n\n mat_t_jac = jac_t_mat.t().reshape(in_features, *module.output.size()[1:])\n jac_t_mat_t_jac = self.__jac_t(module, mat_t_jac)\n jac_t_mat_t_jac = jac_t_mat_t_jac.reshape(in_features, in_features)\n\n return jac_t_mat_t_jac.t()\n\n def _jac_mat_prod(self, module, g_inp, g_out, mat):\n mat_as_conv = eingroup(\"v,n,c,{0}->vn,c,{0}\".format(self.dim_text), mat)\n jmp_as_conv = self.__jac(module, mat_as_conv)\n return self.reshape_like_output(jmp_as_conv, module)\n\n def __jac(self, module, 
mat):\n        input_size = list(module.output.size())\n        input_size[0] = mat.size(0)\n\n        grad_padding = _grad_input_padding(\n            grad_output=mat,\n            input_size=input_size,\n            stride=module.stride,\n            padding=module.padding,\n            kernel_size=module.kernel_size,\n            dilation=module.dilation,\n        )\n\n        # dispatch to the transposed convolution matching self.conv_dims;\n        # a hard-coded conv_transpose1d here would fail for the 2d/3d modules\n        jac_mat = self.conv_transpose_func(\n            input=mat,\n            weight=module.weight,\n            bias=None,\n            stride=module.stride,\n            padding=module.padding,\n            output_padding=grad_padding,\n            groups=module.groups,\n            dilation=module.dilation,\n        )\n        return jac_mat\n\n    def _jac_t_mat_prod(self, module, g_inp, g_out, mat):\n        mat_as_conv = eingroup(\"v,n,c,{0}->vn,c,{0}\".format(self.dim_text), mat)\n        jmp_as_conv = self.__jac_t(module, mat_as_conv)\n        return self.reshape_like_input(jmp_as_conv, module)\n\n    def __jac_t(self, module, mat):\n        jac_t = self.conv_func(\n            mat,\n            module.weight,\n            bias=None,\n            stride=module.stride,\n            padding=module.padding,\n            dilation=module.dilation,\n            groups=module.groups,\n        )\n\n        for dim in range(self.conv_dims):\n            axis = dim + 1\n            size = module.input0.shape[axis]\n            jac_t = jac_t.narrow(axis, 0, size)\n\n        return jac_t\n"}, {"alpha_fraction": 0.7193877696990967, "alphanum_fraction": 0.7193877696990967, "avg_line_length": 33.35293960571289, "blob_id": "fb6db7936681a26252ea066f54acac2a95df4a42", "content_id": "6d6da0bd537bcfc687ebee87a4b86e69dfe71989", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1176, "license_type": "permissive", "max_line_length": 82, "num_lines": 34, "path": "/backpack/extensions/secondorder/trial/losses.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from functools import partial\n\nfrom backpack.core.derivatives.crossentropyloss import CrossEntropyLossDerivatives\nfrom backpack.core.derivatives.mseloss import MSELossDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\nfrom torch import softmax\nfrom torch import rand\n\n\nclass TRIALLoss(TRIALBaseModule):\n    def backpropagate(self, ext, module, grad_inp, grad_out, backproped):\n        hess_func = self.make_loss_hessian_func(ext)\n        # print(hess_func(module, grad_inp, grad_out))\n        return hess_func(module, grad_inp, grad_out)\n\n    def make_loss_hessian_func(self, ext):\n        \"\"\"Get function that produces the backpropagated quantity.\"\"\"\n        return self.derivatives.sqrt_hessian\n\n\n# class DiagGGNMSELoss(DiagGGNLoss):\n#     def __init__(self):\n#         super().__init__(derivatives=MSELossDerivatives())\n\n\n# class TRIALCrossEntropyLoss(TRIALLoss):\n#     def __init__(self):\n#         super().__init__(derivatives=CrossEntropyLossDerivatives())\n\n\nclass TRIALCrossEntropyLoss(TRIALLoss):\n    def __init__(self):\n        # set normalized to True for NGD\n        super().__init__(derivatives=MSELossDerivatives(True)) \n"}, {"alpha_fraction": 0.5446577668190002, "alphanum_fraction": 0.5607679486274719, "avg_line_length": 31.856382369995117, "blob_id": "373599a2da4054c68b5cee8aa5cab8bba2ece6f5", "content_id": "d49443a7e9ac12266537c2862b998e6ae0d674de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11980, "license_type": "permissive", "max_line_length": 88, "num_lines": 376, "path": "/example_ngd.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision\n# The main BackPACK functionalities\nfrom backpack import backpack, extend\n# The diagonal GGN extension\n# from backpack.extensions import DiagGGNMC\nimport torch.optim as optim\nfrom backpack.extensions 
import TRIAL\nfrom torchsummary import summary\nimport time\n\n# fixing HTTPS issue on Colab\nfrom six.moves import urllib\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nurllib.request.install_opener(opener)\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pylab as plt\n\n# torch.set_default_dtype(torch.float64)\n# This layer did not exist in PyTorch 1.0\n\n# Hyperparameters\n# 0: matmul\n# 1: fft\n# 2: conv2d\n# -1: silent mode [only backpropagation]\n# [7]: ordering test [v, n, v, n]\n# 13: blocked version [v, n, n]\n# 17: adding dropout in backward pass for large linear layers\n# 666: using backpack for conv2d [not good, because of repeating]\nMODE = 7\n\nprint('Convolution mode is:')\nif MODE == 0:\n    print('MATMUL')\nelif MODE == 1:\n    print('FFT')\nelif MODE == 2:\n    print('CONV2D')\nelif MODE == -1:\n    print('Silent mode: no computation done in backward pass.')\n\nBATCH_SIZE = 64\nEPOCHS = 1\nPLOT = False\nnum_classes = 10\nSTEP_SIZE = 0.01\nDAMPING = 1.0\nMAX_ITER = 60000//BATCH_SIZE\ntorch.manual_seed(0)\nbc = BATCH_SIZE * num_classes\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('Selected Device:', device)\nprint('BATCH_SIZE:', BATCH_SIZE)\n\nmnist_loader = torch.utils.data.dataloader.DataLoader(\n    torchvision.datasets.MNIST(\n        './data',\n        train=True,\n        download=True,\n        transform=torchvision.transforms.Compose([\n            torchvision.transforms.ToTensor(),\n            torchvision.transforms.Normalize(\n                (0.1307,), (0.3081,)\n            )\n        ])),\n    batch_size=BATCH_SIZE,\n    shuffle=True\n)\n\n\n\n\n##### base model from backpack website:\nmodel = torch.nn.Sequential(\n    torch.nn.Conv2d(1, 50, 3, 1, padding = (1,1)),\n    torch.nn.ReLU(),\n\n    torch.nn.Conv2d(50, 5, 3, 1, padding = (1,1)),\n    torch.nn.ReLU(),\n\n    torch.nn.Flatten(), \n    torch.nn.Linear(28*28*5, 20),\n\n    torch.nn.ReLU(),\n\n    torch.nn.Linear(20, 100),\n\n    torch.nn.ReLU(),\n\n    torch.nn.Linear(100, 10),\n\n\n).to(device)\n\n\n\n##### fully connected network. 
Test for linear timings.\n# model = torch.nn.Sequential(\n# torch.nn.Flatten(),\n# torch.nn.Linear(784, 1000),\n# torch.nn.ReLU(),\n# torch.nn.Linear(1000, 1000),\n# torch.nn.ReLU(),\n# torch.nn.Linear(1000, 500),\n# torch.nn.ReLU(),\n# torch.nn.Linear(500, 10),\n# ).to(device)\n\nsummary(model, ( 1, 28, 28))\n\nloss_function = torch.nn.CrossEntropyLoss()\n\ndef get_accuracy(output, targets):\n \"\"\"Helper function to print the accuracy\"\"\"\n predictions = output.argmax(dim=1, keepdim=True).view_as(targets)\n return predictions.eq(targets).float().mean().item()\n\n\n# class TrialOptimizer(torch.optim.Optimizer):\n# def __init__(self, parameters, step_size, damping):\n# super().__init__(\n# parameters, \n# dict(step_size=step_size, damping=damping)\n# )\n\n# def step(self):\n# for group in self.param_groups:\n# for p in group[\"params\"]:\n# step_direction = p.grad / (p.trial + group[\"damping\"])\n# p.data.add_(-group[\"step_size\"], step_direction)\n# return loss\nextend(model)\nextend(loss_function)\n\n# optimizer = TrialOptimizer(\n# model.parameters(), \n# step_size=STEP_SIZE, \n# damping=DAMPING\n# )\n\noptimizer = optim.SGD(model.parameters(), lr=STEP_SIZE)\n\n\ndef get_diff(A, B):\n ''' returns relative error between A and B\n '''\n # return torch.norm(A - B)/torch.norm(A)\n return torch.norm(A - B)/torch.norm(A)\n\n\ndef naive_seq():\n jac_list = []\n for j in range(num_classes):\n for i in range(BATCH_SIZE):\n output[i,j].backward(retain_graph=True)\n L = []\n for name, param in model.named_parameters():\n L.append(param.grad.view(1, -1))\n param.grad = None\n jac_list.append(torch.cat(L, 1))\n jac = torch.cat(jac_list, 0)\n JJT = torch.matmul(jac, jac.permute(1,0))/BATCH_SIZE\n return JJT\n\n\ndef naive_vmap():\n I_N = torch.eye(num_classes)\n # torch._C._debug_only_display_vmap_fallback_warnings(True)\n L = []\n def get_jacobian(v):\n j = torch.autograd.grad(output[i,:], model.parameters(), v, retain_graph = True)\n jac_persample = []\n for j_ in j:\n jac_persample.append(j_.view( -1))\n for name, param in model.named_parameters():\n param.grad = None\n return torch.cat(jac_persample, 0)\n\n for i in range(BATCH_SIZE):\n jacobian = torch.vmap(get_jacobian)(I_N)\n L.append(jacobian)\n\n jac = torch.cat(L, 0)\n jac = jac.reshape(BATCH_SIZE, num_classes, -1)\n jac = jac.permute(1, 0 , 2)\n jac = jac.reshape(BATCH_SIZE * num_classes, -1)\n JJT = torch.matmul(jac, jac.permute(1,0))/BATCH_SIZE\n return JJT\n\ndef optimal_JJT(RESHAPE):\n\n jac_list = 0\n jac_list_linear = 0\n jac_list_conv = 0\n bc = BATCH_SIZE * num_classes\n L = []\n\n with backpack(TRIAL(MODE)):\n loss = loss_function(output, y)\n loss.backward(retain_graph=True)\n for name, param in model.named_parameters():\n trial_vals = param.trial\n # print('var name and shape:', name,' ', param.shape)\n if RESHAPE: # not useful\n # trial_vals = trial_vals.permute(1, 0, 3, 2) # reshaping to [n, v, n, v]\n vs = [2, 1, 0, 7,6,5,4,3, 9, 8]\n # print(trial_vals.shape)\n trial_vals = trial_vals[vs, :, :, :]\n trial_vals = trial_vals[:, :, vs, :]\n # print(trial_vals.shape)\n\n L.append([trial_vals / BATCH_SIZE, name]) \n # if '0' not in name and '2' not in name and '4' not in name :\n # jac_list_linear += trial_vals.reshape(bc, bc)\n # else:\n # jac_list_conv += trial_vals.reshape(bc, bc)\n\n jac_list += trial_vals.reshape(bc, bc)\n param.trial = None\n # param.grad = None\n JJT = jac_list / BATCH_SIZE\n JJT_linear = jac_list_linear / BATCH_SIZE\n JJT_conv = jac_list_conv / BATCH_SIZE\n # if torch.allclose(JJT, JJT_conv + 
JJT_linear) == False:\n    #     print('JJT:', JJT)\n    #     print('JJT_conv:', JJT_conv)\n    #     print('JJT_linear:', JJT_linear)\n    return JJT, L, JJT_linear, JJT_conv\n\ndef optimal_JJT_blk():\n    jac_list = 0\n    bc = BATCH_SIZE * num_classes\n    # L = []\n\n    with backpack(TRIAL(MODE)):\n        loss = loss_function(output, y)\n        loss.backward(retain_graph=True)\n        for name, param in model.named_parameters():\n            trial_vals = param.trial\n            # L.append([trial_vals / BATCH_SIZE, name])\n            jac_list += torch.block_diag(*trial_vals)\n            param.trial = None\n    JJT = jac_list / BATCH_SIZE\n    return JJT\n\nacc_list = []\ntime_list = []\nloss_list = []\nepoch_time_list = []\nstart_time = time.time()\nfor epoch in range(EPOCHS):\n    start_time_epoch = time.time()\n    for batch_idx, (x, y) in enumerate(mnist_loader):\n        # y, indices = torch.sort(y)\n        # x = x[indices, :, :, :]\n        x, y = x.to(device), y.to(device)\n        output = model(x)\n        accuracy = get_accuracy(output, y)\n\n        ######## calling individual function for JJT computation\n        ### Our extension\n        JJT_opt, L, JJT_linear, JJT_conv = optimal_JJT(False)\n        # x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)\n        # x = x.repeat(num_classes, 1, 1)\n        # eye_blk = torch.block_diag(*x)\n        # JJT_opt_blk = JJT_opt * eye_blk\n        # JJT_conv_blk = JJT_conv * eye_blk\n        # JJT_fused = JJT_conv_blk + JJT_linear\n\n        ### Blocked NGD version\n        # start_time = time.time()\n        # JJT_opt_blk = optimal_JJT_blk()\n        # print(torch.norm(JJT_opt))\n        # print(JJT_opt)\n        # time_opt = time.time() - start_time\n\n        # plotting NGD kernel for some iterations\n        if PLOT and batch_idx in [2, 10, 50, 600]:\n            # JJT_opt_blk = optimal_JJT_blk()\n\n            JJT_opt, L, _, _ = optimal_JJT(True)\n            x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)\n            x = x.repeat(num_classes, 1, 1)\n            eye_blk = torch.block_diag(*x)\n            diff = JJT_opt - JJT_opt*eye_blk\n            # u, s, vh = torch.linalg.svd(diff)\n            # s_normal = torch.cumsum(s, dim = 0)/torch.sum(s)\n            # print(s_normal.numpy())\n            # fig, ax = plt.subplots()\n            # im = ax.plot(s_normal)\n            # print(s)\n            # fig.colorbar(im, orientation='horizontal')\n            # plt.show()\n            \n            fig, ax = plt.subplots()\n            im = ax.imshow(JJT_opt - JJT_opt*eye_blk, cmap='viridis')\n            fig.colorbar(im, orientation='horizontal')\n\n            plt.show()\n\n            # fig.suptitle('NGD Kernel')\n            if(1==1):\n                bc = BATCH_SIZE * num_classes\n                for i in range(6):\n                    c = i * 2\n                    fig, axs = plt.subplots(1, 2)\n                    for row in range(2):\n                        ax = axs[row]\n                        data = L[row + c][0].reshape(bc, bc)\n                        print('name:', L[row + c][1])\n                        print('max data:', torch.max(data))\n                        print('min data:', torch.min(data))\n                        print('average data:', torch.mean(data))\n                        print('norm data:', torch.norm(data))\n\n                        ax.set_title(L[row + c][1])\n                        pcm = ax.imshow(data, cmap='viridis')\n                        fig.colorbar(pcm, ax=ax)\n                    plt.show()\n        \n        ### naive loop, which is the current PyTorch approach\n        # start_time = time.time()\n        # JJT_naive_seq = naive_seq()\n        # print(torch.norm(JJT_naive_seq - JJT_opt)/(bc*bc))\n        # time_seq = time.time() - start_time\n        # print('naive:', JJT_naive_seq )\n\n        ### vmap is slow and not worth it\n        # start_time = time.time()\n        # JJT_naive_vmap = naive_vmap()\n        # time_vmap = time.time() - start_time\n        \n        # applying one step for optimization\n        loss = loss_function(output, y)\n        loss.backward()\n        optimizer.step()\n        optimizer.zero_grad()\n\n        if batch_idx % 50 == 0:\n            acc_list.append(accuracy)\n            time_list.append(time.time() - start_time)\n            loss_list.append(loss)\n            # print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap))\n            # print('opt vs seq error:', get_diff(JJT_naive_seq, JJT_opt))\n            # print('opt vs linear error:', 
get_diff(JJT_opt, JJT_linear))\n # print('opt vs conv error:', get_diff(JJT_opt, JJT_conv))\n # print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk))\n # print('opt vs fused error:', get_diff(JJT_opt, JJT_fused))\n # print(torch.allclose(JJT_naive_seq, JJT_opt) )\n # print('Jacobian Computation Time [Sequential]:', time_seq)\n # print('Jacobian Computation Time [Optimal]:', time_opt)\n # print('Jacobian Computation Time [VMAP]:', time_vmap)\n # print('Speedup over sequential:', time_seq/ time_opt)\n print('Elapsed time:', time.time() - start_time_epoch)\n print(\n \"Iteration %3.d/%d \" % (batch_idx, MAX_ITER) +\n \"Minibatch Loss %.3f \" % (loss) +\n \"Accuracy %.0f\" % (accuracy * 100) + \"%\"\n )\n\n if batch_idx >= MAX_ITER:\n break\n epoch_time = time.time() - start_time_epoch\n epoch_time_list.append(epoch_time)\n print('Elapsed time for epoch %d time: %.3f' % (epoch , epoch_time))\n\nprint('Epoch times : ', epoch_time_list)\nprint('Time(s) ACC. LOSS')\nfor i in range(len(time_list)):\n print('%.3f, %.3f, %.3f' %(time_list[i], acc_list[i], loss_list[i].item()))\n\n\n" }, { "alpha_fraction": 0.5346807241439819, "alphanum_fraction": 0.5429490208625793, "avg_line_length": 29.521127700805664, "blob_id": "80b5218dc25866043428cc27977e40816aa080b4", "content_id": "c5164a4518bd8f13a0223de76e6af2ce5355e3b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2177, "license_type": "permissive", "max_line_length": 100, "num_lines": 71, "path": "/backpack/extensions/firstorder/fisher_block_eff/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.firstorder.fisher_block_eff.fisher_block_eff_base import FisherBlockEffBase\n\n\nclass FisherBlockEffLinear(FisherBlockEffBase):\n def __init__(self, damping=1.0, alpha=0.95):\n self.damping = damping\n self.alpha = alpha\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n \n I = module.input0\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n G = g_out_sc\n grad = module.weight.grad\n \n \n B = einsum(\"ni,li->nl\", (I, I)) \n A = einsum(\"no,lo->nl\", (G, G))\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"ni,oi->no\", (I, grad))\n grad_prod = einsum(\"no,no->n\", (grad_prod, G))\n # grad_prod = 0\n out = A * B \n # out = 0\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n\n gv = einsum(\"n,no->no\", (v, G))\n gv = einsum(\"no,ni->oi\", (gv, I))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n \n module.I = I\n module.G = G\n module.NGD_inv = NGD_inv\n return update\n \n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n\n grad = module.bias.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n # grad_prod = 0\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n # out = 0\n\n\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"n,no->o\", (v, g_out_sc))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n # update = grad\n\n 
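# note: (grad - gv) / damping is the Sherman-Morrison-Woodbury form of\n        # (J^T J / n + damping * I)^{-1} grad, evaluated through the n x n\n        # kernel NGD_inv instead of a parameter-space inverse\n        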
return update\n \n\n" }, { "alpha_fraction": 0.524848461151123, "alphanum_fraction": 0.5385858416557312, "avg_line_length": 34.840579986572266, "blob_id": "ceaa7986acd15d58a6459fb96e811edc820a361b", "content_id": "498e71c7d8ce29c45d90d1450d6660873cbc8173", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "permissive", "max_line_length": 89, "num_lines": 69, "path": "/backpack/extensions/firstorder/fisher/batchnorm2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum\nfrom torch import matmul\nfrom backpack.core.derivatives.batchnorm2d import BatchNorm2dDerivatives\nfrom backpack.extensions.firstorder.fisher.fisher_base import FisherBase\n# import time\n\nclass FisherBatchNorm2d(FisherBase):\n def __init__(self, silent):\n self.silent = silent\n super().__init__(derivatives=BatchNorm2dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n if not self.silent:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n input = module.input0\n mean = input.mean(dim=(0, 2, 3), keepdim=True)\n var = input.var(dim=(0, 2, 3), unbiased=False, keepdim=True)\n xhat = (input - mean) / (var + module.eps).sqrt()\n dw = g_out_sc * xhat\n out = einsum(\"nihw,lihw->nl\", dw, dw)\n\n # compute vector jacobian product in optimization method\n grad = module.weight.grad\n grad_prod = einsum(\"nihw,i->n\", (dw, grad))\n\n # en = time.time()\n # print('Elapsed Time in BatchNorm2d:', en - st)\n return (out, grad_prod)\n else:\n # st = time.time()\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n input = module.input0\n mean = input.mean(dim=(0, 2, 3), keepdim=True)\n var = input.var(dim=(0, 2, 3), unbiased=False, keepdim=True)\n xhat = (input - mean) / (var + module.eps).sqrt()\n dw = g_out_sc * xhat\n\n # compute vector jacobian product in optimization method\n grad = module.weight.grad\n grad_prod = einsum(\"nihw,i->n\", (dw, grad))\n return grad_prod\n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n if not self.silent:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"nihw,i->n\", (g_out_sc, grad))\n\n out = einsum(\"nihw,lihw->nl\", g_out_sc, g_out_sc)\n return (out, grad_prod)\n else:\n # print('x'*100)\n\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"nihw,i->n\", (g_out_sc, grad))\n\n return grad_prod\n\n\n" }, { "alpha_fraction": 0.5829244256019592, "alphanum_fraction": 0.6002616882324219, "avg_line_length": 33.15642547607422, "blob_id": "ff3a644fdf95c41b5174da1885e357aa7163214c", "content_id": "c95f18d26c6ecc66d07d589a3e2d76b872348fa1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6114, "license_type": "permissive", "max_line_length": 96, "num_lines": 179, "path": "/backpack/utils/conv.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import einsum\nfrom torch.nn import Unfold\nfrom torch.nn.functional import conv1d, conv2d, conv3d\nfrom backpack.utils.fft_conv import fft_conv\n\nfrom backpack.utils.ein import eingroup\nimport opt_einsum as oe\nimport time\nimport torch.nn.functional as F\n\n# TODO: add computations based on backpack weight_t_jac... 
and compare it to MODE == 0\n# _weight_jac_t_mat_prod: that's the function\n\n\n# 0: matmul\n# 1: fft\n# 2: conv2d\ndef extract_weight_ngd(module, backproped, MODE):\n # test: naive method plus [Gold so far]\n if MODE == 0:\n input = unfold_func(module)(module.input0)\n grad_output_viewed = separate_channels_and_pixels(module, backproped)\n AX = einsum(\"nkl,vnml->vnkm\", (input, grad_output_viewed))\n v = AX.shape[0]\n n = AX.shape[1]\n AX = AX.reshape(n * v, -1)\n # return einsum(\"vnkm,zqkm->vnzq\", (AX, AX))\n return torch.matmul(AX, AX.permute(1,0))\n elif MODE == 7 or MODE == 17: # test ordering \n input = unfold_func(module)(module.input0)\n grad_output_viewed = separate_channels_and_pixels(module, backproped)\n AX = einsum(\"nkl,vnml->vnkm\", (input, grad_output_viewed))\n return einsum(\"vnkm,zqkm->vnzq\", (AX, AX))\n elif MODE == 13: # test blocked NGD\n input = unfold_func(module)(module.input0)\n grad_output_viewed = separate_channels_and_pixels(module, backproped)\n AX = einsum(\"nkl,vnml->vnkm\", (input, grad_output_viewed))\n return einsum(\"vnkm,vqkm->vnq\", (AX, AX))\n elif MODE == -1: # test silent mode\n v = backproped.shape[0]\n n = backproped.shape[1]\n return torch.zeros(v*n,v*n).to(module.input0.device)\n elif MODE == 1:\n A = module.input0\n n = A.shape[0]\n p = 2\n M = backproped\n v = M.shape[0]\n kernel_ = M.reshape(M.shape[1] * M.shape[0], M.shape[2], M.shape[3], M.shape[4])\n out = fft_conv(A, kernel_, padding = (p,p))\n K = out.reshape(v * n,-1)\n K_fft = torch.matmul(K, K.permute(1,0)) \n return K_fft\n elif MODE == 2:\n A = module.input0\n n = A.shape[0]\n p = 1\n M = backproped\n v = M.shape[0]\n M = M.permute(1, 0, 2, 3, 4)\n M = M.reshape(M.shape[2] * M.shape[1] * M.shape[0], M.shape[3], M.shape[4]).unsqueeze(1)\n A = A.permute(1 ,0, 2, 3)\n output = conv2d(A, M, groups = n, padding = (p,p))\n output = output.permute(1, 0, 2, 3)\n output = output.reshape(n, v, -1)\n output = output.permute(1, 0 , 2)\n K = output.reshape(n * v, -1)\n K_torch = torch.matmul(K, K.permute(1,0))\n return K_torch\n else:\n raise NotImplementedError(\n \"Extension SUSPENDED\")\n return 0\n \ndef extract_bias_ngd(module, backproped, MODE):\n if MODE == -1: # testing silent mode\n v = backproped.shape[0]\n n = backproped.shape[1]\n return torch.zeros(v*n,v*n).to(module.input0.device)\n elif MODE == 7 or MODE == 17: # testing the order\n return einsum(\"vnchw,klchw->vnkl\", backproped, backproped)\n elif MODE == 13: # testing the blocked NGD\n return einsum(\"vnchw,vlchw->vnl\", backproped, backproped)\n else:\n return einsum(\"vnchw,klchw->vnkl\", backproped, backproped)\n\n\ndef unfold_func(module):\n return Unfold(\n kernel_size=module.kernel_size,\n dilation=module.dilation,\n padding=module.padding,\n stride=module.stride,\n )\n\n\ndef get_conv1d_weight_gradient_factors(input, grad_out, module):\n # shape [N, C_in * K_x, L_out]\n X = unfold_by_conv(input, module)\n return X, grad_out\n\n\ndef get_weight_gradient_factors(input, grad_out, module):\n # shape [N, C_in * K_x * K_y, H_out * W_out]\n X = unfold_func(module)(input)\n dE_dY = eingroup(\"n,c,h,w->n,c,hw\", grad_out)\n return X, dE_dY\n\n\ndef get_conv3d_weight_gradient_factors(input, grad_out, module):\n # shape [N, C_in * K_x * K_y * K_z, D_out * H_out * W_out]\n X = unfold_by_conv(input, module)\n dE_dY = eingroup(\"n,c,d,h,w->n,c,dhw\", grad_out)\n return X, dE_dY\n\n\ndef separate_channels_and_pixels(module, tensor):\n \"\"\"Reshape (V, N, C, H, W) into (V, N, C, H * W).\"\"\"\n return 
eingroup(\"v,n,c,h,w->v,n,c,hw\", tensor)\n\n\ndef extract_weight_diagonal(module, input, grad_output):\n \"\"\"\n input must be the unfolded input to the convolution (see unfold_func)\n and grad_output the backpropagated gradient\n \"\"\"\n grad_output_viewed = separate_channels_and_pixels(module, grad_output)\n AX = einsum(\"nkl,vnml->vnkm\", (input, grad_output_viewed))\n weight_diagonal = (AX ** 2).sum([0, 1]).transpose(0, 1)\n return weight_diagonal.view_as(module.weight)\n\n\ndef extract_bias_diagonal(module, sqrt):\n \"\"\"\n `sqrt` must be the backpropagated quantity for DiagH or DiagGGN(MC)\n \"\"\"\n V_axis, N_axis = 0, 1\n bias_diagonal = (einsum(\"vnchw->vnc\", sqrt) ** 2).sum([V_axis, N_axis])\n return bias_diagonal\n\n\ndef unfold_by_conv(input, module):\n \"\"\"Return the unfolded input using convolution\"\"\"\n N, C_in = input.shape[0], input.shape[1]\n kernel_size = module.kernel_size\n kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))\n\n def make_weight():\n weight = torch.zeros(kernel_size_numel, 1, *kernel_size)\n\n for i in range(kernel_size_numel):\n extraction = torch.zeros(kernel_size_numel)\n extraction[i] = 1.0\n weight[i] = extraction.reshape(1, *kernel_size)\n\n repeat = [C_in, 1] + [1 for _ in kernel_size]\n return weight.repeat(*repeat)\n\n def get_conv():\n functional_for_module_cls = {\n torch.nn.Conv1d: conv1d,\n torch.nn.Conv2d: conv2d,\n torch.nn.Conv3d: conv3d,\n }\n return functional_for_module_cls[module.__class__]\n\n conv = get_conv()\n unfold = conv(\n input,\n make_weight().to(input.device),\n bias=None,\n stride=module.stride,\n padding=module.padding,\n dilation=module.dilation,\n groups=C_in,\n )\n\n return unfold.reshape(N, C_in * kernel_size_numel, -1)\n" }, { "alpha_fraction": 0.7024539709091187, "alphanum_fraction": 0.7024539709091187, "avg_line_length": 31.399999618530273, "blob_id": "7ba3471e355ae062a32868b4affc94dcb2f703dc", "content_id": "5e3ca1b5f11cb68d975dfa8364fa1f46fe637656", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "permissive", "max_line_length": 63, "num_lines": 10, "path": "/backpack/extensions/secondorder/mngd/mngd_base.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.extensions.mat_to_mat_jac_base import MatToJacMat\n\n\nclass MNGDBaseModule(MatToJacMat):\n def __init__(self, derivatives, params=None):\n super().__init__(derivatives, params=params)\n\n # TODO: Saeed, add backprop for NGD\n # it should be normal backprop to get jacobians\n # maybe even no change\n\n\n" }, { "alpha_fraction": 0.6967930197715759, "alphanum_fraction": 0.705539345741272, "avg_line_length": 32.75, "blob_id": "cb9b666602d2cf71dc842562f6e82aacb4fcd814", "content_id": "28b42c820b3d02151fe81c30e0b0851e4a92aeaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "permissive", "max_line_length": 100, "num_lines": 20, "path": "/backpack/extensions/firstorder/fisher_block_eff/batchnorm1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives\nfrom backpack.extensions.firstorder.fisher_block_eff.fisher_block_eff_base import FisherBlockEffBase\n\nfrom torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nclass FisherBlockEffBatchNorm1d(FisherBlockEffBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n 
super().__init__(derivatives=BatchNorm1dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n \n return module.weight.grad\n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n \n\n return module.bias.grad\n \n\n\n" }, { "alpha_fraction": 0.47710517048835754, "alphanum_fraction": 0.4928210973739624, "avg_line_length": 40.20800018310547, "blob_id": "bfa6db53eddf49e2e03eda38bdbce8e4b882e26f", "content_id": "9e06afd851b4b1f4fc6ca244309f3c6094a2c5f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5154, "license_type": "permissive", "max_line_length": 116, "num_lines": 125, "path": "/backpack/extensions/firstorder/fisher/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.firstorder.fisher.fisher_base import FisherBase\nfrom torch import einsum, matmul, sum, numel, sqrt, norm\nfrom torch.nn import Unfold\nfrom torch.nn.functional import conv1d, conv2d, conv3d\nfrom backpack.utils.ein import eingroup\nfrom backpack.utils.conv import unfold_func\n\nMODE = 0\nclass FisherConv2d(FisherBase):\n def __init__(self, silent=False):\n self.silent = silent\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, bpQuantities):\n if not self.silent:\n if MODE == 0: # my implementation\n grad = module.weight.grad\n grad_reshape = grad.reshape(grad.shape[0], -1)\n # print(grad_reshape.shape)\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n input = unfold_func(module)(module.input0)\n grad_output_viewed = g_out_sc.reshape(g_out_sc.shape[0], g_out_sc.shape[1], -1)\n \n N = input.shape[0]\n K = input.shape[1]\n L = input.shape[2]\n M = grad_output_viewed.shape[1]\n\n # extra optimization for some networks such as VGG16\n if (L*L) * (K + M) < K * M :\n II = einsum(\"nkl,qkp->nqlp\", (input, input))\n GG = einsum(\"nml,qmp->nqlp\", (grad_output_viewed, grad_output_viewed))\n out = einsum('nqlp->nq', II * GG) \n x1 = einsum(\"nkl,mk->nml\", (input, grad_reshape))\n grad_prod = einsum(\"nml,nml->n\", (x1, grad_output_viewed))\n else:\n AX = einsum(\"nkl,nml->nkm\", (input, grad_output_viewed))\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"nkm,mk->n\", (AX, grad_reshape))\n\n AX = AX.reshape(n , -1)\n out = matmul(AX, AX.t())\n\n return (out, grad_prod)\n elif MODE == 2:\n # st = time.time()\n\n A = module.input0\n n = A.shape[0]\n p = 1\n M = g_out[0]\n\n M = M.reshape( M.shape[1] * M.shape[0], M.shape[2], M.shape[3]).unsqueeze(1)\n A = A.permute(1 ,0, 2, 3)\n output = conv2d(A, M, groups = n, padding = (p,p))\n output = output.permute(1, 0, 2, 3)\n output = output.reshape(n, -1)\n K_torch = matmul(output, output.t())\n # en = time.time()\n # print('Elapsed Time Conv2d Mode 2:', en - st)\n\n return K_torch\n\n elif MODE ==4: # using backpack class\n # st = time.time()\n\n grad_batch = self.derivatives.weight_jac_t_mat_prod(module, g_inp, g_out, g_out[0], sum_batch=False)\n grad_batch = grad_batch.reshape(grad_batch.shape[0], -1)\n out = matmul(grad_batch, grad_batch.t())\n # en = time.time()\n # print('Elapsed Time Conv2d Mode 4:', en - st)\n\n return out\n\n elif MODE == 6:\n return 0.\n else:\n raise NotImplementedError(\n \"Extension SUSPENDED\")\n return 0\n else:\n\n grad = module.weight.grad\n grad_reshape = grad.reshape(grad.shape[0], -1)\n n = 
g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n input = unfold_func(module)(module.input0)\n grad_output_viewed = g_out_sc.reshape(g_out_sc.shape[0], g_out_sc.shape[1], -1)\n \n N = input.shape[0]\n K = input.shape[1]\n L = input.shape[2]\n M = grad_output_viewed.shape[1]\n\n # extra optimization for some networks such as VGG16\n if (L*L) * (K + M) < K * M : \n x1 = einsum(\"nkl,mk->nml\", (input, grad_reshape))\n grad_prod = einsum(\"nml,nml->n\", (x1, grad_output_viewed))\n else:\n AX = einsum(\"nkl,nml->nkm\", (input, grad_output_viewed))\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"nkm,mk->n\", (AX, grad_reshape))\n return grad_prod\n \n def bias(self, ext, module, g_inp, g_out, bpQuantities):\n if not self.silent:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"nchw,c->n\", (g_out_sc, grad))\n\n out = einsum(\"nchw,lchw->nl\", g_out_sc, g_out_sc)\n return (out, grad_prod)\n else:\n\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"nchw,c->n\", (g_out_sc, grad))\n return grad_prod\n\n\n\n" }, { "alpha_fraction": 0.6436975002288818, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 27.33333396911621, "blob_id": "76e0e63911cee803253051d7cefde7de54e4f1fd", "content_id": "5c4a8c97aa439b86c3ae9a0a175309dc88a9fcf7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "permissive", "max_line_length": 77, "num_lines": 21, "path": "/test/extensions/implementation/base.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "class ExtensionsImplementation:\n \"\"\"Base class for autograd and BackPACK implementations of extensions.\"\"\"\n\n def __init__(self, problem):\n self.problem = problem\n\n def batch_grad(self):\n \"\"\"Individual gradients.\"\"\"\n raise NotImplementedError\n\n def batch_l2_grad(self):\n \"\"\"L2 norm of Individual gradients.\"\"\"\n raise NotImplementedError\n\n def sgs(self):\n \"\"\"Sum of Square of Individual gradients\"\"\"\n raise NotImplementedError\n\n def variance(self):\n \"\"\"Variance of Individual gradients\"\"\"\n raise NotImplementedError\n" }, { "alpha_fraction": 0.5518313050270081, "alphanum_fraction": 0.5651881694793701, "avg_line_length": 32.716712951660156, "blob_id": "3131ccef8285ce0bab28e9e7310d9ac79f3d375e", "content_id": "666cdfc76b1efab2300fc37b40b835f83b8bd017", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11904, "license_type": "permissive", "max_line_length": 109, "num_lines": 353, "path": "/example_ngd_1storder.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision\nfrom backpack import backpack, extend\nimport torch.optim as optim\nfrom backpack.extensions import Fisher, BatchGrad\nfrom torchsummary import summary\nimport time\nimport math\n\n# fixing HTTPS issue on Colab\nfrom six.moves import urllib\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nurllib.request.install_opener(opener)\n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pylab as plt\n\n# torch.set_default_dtype(torch.float64)\n\n# Hyperparameters\nBATCH_SIZE = 64\nEPOCHS = 1\nPLOT = False\nnum_classes = 10\nSTEP_SIZE = 0.1\nalpha_lm = 10\ntaw = 
0.01\nMAX_ITER = 60000//BATCH_SIZE\ntorch.manual_seed(0)\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('Selected Device:', device)\nprint('BATCH_SIZE:', BATCH_SIZE)\n\nmnist_loader = torch.utils.data.dataloader.DataLoader(\n torchvision.datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,)\n )\n ])),\n batch_size=BATCH_SIZE,\n shuffle=True\n)\n\n\n\n\n##### base model from backpack website:\nmodel = torch.nn.Sequential(\n torch.nn.Conv2d(1, 2, 3, 1, padding = (1,1)),\n # torch.nn.BatchNorm2d(2),\n torch.nn.ReLU(),\n torch.nn.Flatten(), \n torch.nn.Linear(28*28*2, 10),\n ).to(device)\n\n\n\n##### fully connected network. Test for linear timings.\n# model = torch.nn.Sequential(\n# torch.nn.Flatten(), \n# torch.nn.Linear(28*28, 100),\n# torch.nn.ReLU(),\n# torch.nn.Linear(100, 100),\n# torch.nn.ReLU(),\n# torch.nn.Linear(100, 100),\n# torch.nn.ReLU(),\n# torch.nn.Linear(100, 100),\n# torch.nn.ReLU(),\n# torch.nn.Linear(100, 10)\n# ).to(device)\n\nsummary(model, ( 1, 28, 28))\n\nloss_function = torch.nn.CrossEntropyLoss()\nloss_function_none = torch.nn.CrossEntropyLoss(reduction='none')\n\ndef get_accuracy(output, targets):\n \"\"\"Helper function to print the accuracy\"\"\"\n predictions = output.argmax(dim=1, keepdim=True).view_as(targets)\n return predictions.eq(targets).float().mean().item()\n\n\n# class FisherOptimizer(torch.optim.Optimizer):\n# def __init__(self, parameters, step_size, damping):\n# super().__init__(\n# parameters, \n# dict(step_size=step_size, damping=damping)\n# )\n\n# def step(self):\n# for group in self.param_groups:\n# print(len(group))\n# for p in group[\"params\"]:\n# print('p shape:', p.shape)\n# print('p grad shape:', p.grad.shape)\n# print('p fisher:', p.fisher.shape)\n# step_direction = p.grad / (p.fisher+ group[\"damping\"])\n# p.data.add_(-group[\"step_size\"], step_direction)\n# return loss\n\nextend(model)\nextend(loss_function)\nextend(loss_function_none)\n\n# optimizer = FisherOptimizer(\n# model.parameters(), \n# step_size=STEP_SIZE, \n# damping=DAMPING\n# )\n\noptimizer = optim.SGD(model.parameters(), lr=STEP_SIZE)\n\n\ndef get_diff(A, B):\n ''' returns relative error between A and B\n '''\n # return torch.norm(A - B)/torch.norm(A)\n return torch.norm(A - B)/torch.norm(A)\n\n\ndef optimal_JJT(outputs, targets, grad_org, acc_test=False, acc_hard_test=False):\n jac_list = 0\n batch_grad_kernel = 0\n batch_grad_list = []\n vjp = 0\n loop_grad_kernel = 0\n loop_grad_list = []\n # note acc_test is useful when we don't have batchnorm\n # in case of batchnorm backpack fails and we need a for loop for individual grads\n \n if acc_test:\n with backpack(Fisher(), BatchGrad()):\n loss = loss_function(outputs, targets)\n loss.backward(retain_graph=True)\n else:\n with backpack(Fisher()):\n loss = loss_function(outputs, targets)\n loss.backward(retain_graph=True)\n\n for name, param in model.named_parameters():\n fisher_vals = param.fisher\n jac_list += fisher_vals[0]\n vjp += fisher_vals[1]\n if acc_test:\n batch_grad = BATCH_SIZE * param.grad_batch.reshape(BATCH_SIZE, -1)\n batch_grad_list.append(batch_grad)\n batch_grad_kernel += torch.matmul(batch_grad, batch_grad.t())\n param.grad_batch = None\n param.fisher = None\n # param.grad = None\n\n for name, param in model.named_parameters():\n param.grad = None\n\n if acc_hard_test:\n for i in range(BATCH_SIZE):\n 
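# reference path: one backward pass per sample gives the exact per-sample\n            # gradients; O(BATCH_SIZE) passes, used only to validate the kernel estimate\n            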
loop_grad_inner_list = []\n\n loss = loss_function(outputs[i, :].unsqueeze(0), targets[i].unsqueeze(0))\n loss.backward(retain_graph=True)\n for name, param in model.named_parameters():\n loop_grad = param.grad.reshape(1, -1)\n loop_grad_inner_list.append(loop_grad)\n param.grad = None\n # print(loop_grad_list)\n loop_grad = torch.cat(loop_grad_inner_list, 1)\n loop_grad_list.append(loop_grad)\n loop_grad_all = torch.cat(loop_grad_list, 0)\n loop_grad_kernel += torch.matmul(loop_grad_all, loop_grad_all.t())\n \n\n JJT_backpack = batch_grad_kernel / BATCH_SIZE\n JJT = jac_list / BATCH_SIZE\n JJT_loop = loop_grad_kernel / BATCH_SIZE\n if acc_test:\n all_grad = torch.cat(batch_grad_list, 1)\n backpack_vjp = torch.matmul(all_grad, grad_org.t()).view_as(vjp)\n print('NGD kernel estimation error:', get_diff(JJT_backpack, JJT))\n\n if get_diff(JJT_backpack, JJT) > 0.2:\n print('JJT_backpack:\\n', JJT_backpack)\n print('JJT:\\n', JJT)\n print('Vector Jacobian error:', get_diff(backpack_vjp, vjp))\n\n if acc_hard_test:\n print('NGD kernel estimation error with loop:', get_diff(JJT_loop, JJT))\n\n return JJT, vjp\n\n\nacc_list = []\ntime_list = []\nloss_list = []\nepoch_time_list = []\nstart_time= time.time()\nloss_prev = 0.\ntaylor_appx_prev = 0.\nfor epoch in range(EPOCHS):\n start_time_epoch = time.time()\n for batch_idx, (inputs, targets) in enumerate(mnist_loader):\n\n DAMPING = alpha_lm + taw\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = model(inputs)\n accuracy = get_accuracy(outputs, targets)\n\n ######## calling individual function for JJT computation\n ### Our extension\n\n # first compute the original gradient\n acc_test = True\n acc_hard_test = True\n optimizer.zero_grad()\n loss = loss_function(outputs, targets)\n loss.backward(retain_graph=True)\n loss_org = loss.item()\n\n grad_org = []\n grad_dict = {}\n for name, param in model.named_parameters():\n grad_org.append(param.grad.reshape(1, -1))\n grad_dict[name] = param.grad.clone()\n\n grad_org = torch.cat(grad_org, 1)\n ###### now we have to compute the true fisher\n with torch.no_grad():\n sampled_y = torch.multinomial(torch.nn.functional.softmax(outputs, dim=1),1).squeeze().to(device)\n \n NGD_kernel, vjp = optimal_JJT(outputs, sampled_y, grad_org, acc_test, acc_hard_test)\n NGD_inv = torch.linalg.inv(NGD_kernel + DAMPING * torch.eye(BATCH_SIZE))\n v = torch.matmul(NGD_inv, vjp.unsqueeze(1))\n\n ####### rescale v:\n v_sc = v/(BATCH_SIZE * DAMPING)\n\n # plotting NGD kernel for some iterations\n if PLOT and batch_idx in [2, 10, 50, 500] :\n\n JJT_opt, JJT_linear, JJT_conv = optimal_JJT() \n \n fig, ax = plt.subplots()\n im = ax.imshow(JJT_opt , cmap='viridis')\n fig.colorbar(im, orientation='horizontal')\n plt.show()\n\n fig, ax = plt.subplots()\n im = ax.imshow(JJT_linear , cmap='viridis')\n fig.colorbar(im, orientation='horizontal')\n plt.show()\n\n fig, ax = plt.subplots()\n im = ax.imshow(JJT_conv , cmap='viridis')\n fig.colorbar(im, orientation='horizontal')\n plt.show()\n\n # fig.suptitle('NGD Kernel')\n if(1==2):\n bc = BATCH_SIZE * num_classes\n for i in range(6):\n c = i * 2\n fig, axs = plt.subplots(1, 2)\n for row in range(2):\n ax = axs[row]\n data = L[row + c][0].reshape(bc, bc)\n ax.set_title(L[row + c][1])\n pcm = ax.imshow(data, cmap='viridis')\n fig.colorbar(pcm, ax=ax)\n plt.show()\n \n ###### applying one step for optimization\n optimizer.zero_grad()\n loss = loss_function_none(outputs, sampled_y)\n loss = torch.sum(loss * v_sc)\n loss.backward()\n\n # last part of SMW formula\n grad_new = 
[]\n for name, param in model.named_parameters():\n param.grad = grad_dict[name] / DAMPING - param.grad\n # param.grad = grad_dict[name] \n grad_new.append(param.grad.reshape(1, -1))\n grad_new = torch.cat(grad_new, 1) \n optimizer.step()\n \n\n gp = torch.sum( -grad_new * grad_org)\n x = (vjp.unsqueeze(1) - torch.matmul(NGD_kernel, v) )/ math.sqrt(BATCH_SIZE)\n x = x / DAMPING\n pBp = 0.5 * torch.sum(x * x)\n taylor_appx = loss_org + STEP_SIZE * gp + STEP_SIZE * STEP_SIZE * pBp\n # taylor_appx = loss_org + gp + pBp\n eps = 0.25\n if batch_idx > 0 or epoch > 0:\n ro = (loss_org - loss_prev)/ (loss_org - taylor_appx_prev)\n # print(ro)\n if ro > eps:\n alpha_lm = alpha_lm * 0.99\n else:\n alpha_lm = alpha_lm * 1.01\n # # print(ro)\n loss_prev = loss_org\n taylor_appx_prev = taylor_appx\n # print(descent)\n\n # print(get_diff(grad_new, grad_org))\n # if batch_idx > 100:\n # break\n if batch_idx % 10 == 0:\n # print('real %f appx %f first order %f' % (loss_org, taylor_appx, loss_org + STEP_SIZE * gp))\n # print('damping:', DAMPING)\n # if batch_idx > 0:\n # print('ro:', ro)\n acc_list.append(accuracy)\n time_list.append(time.time() - start_time)\n loss_list.append(loss_org)\n \n # print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap))\n # print('opt vs backpack error:', get_diff(JJT_backpack, JJT_opt))\n # print('opt vs linear error:', get_diff(JJT_opt, JJT_linear))\n # print('opt vs conv error:', get_diff(JJT_opt, JJT_conv))\n # print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk))\n # print('opt vs fused error:', get_diff(JJT_opt, JJT_fused))\n # print(torch.allclose(JJT_naive_seq, JJT_opt) )\n # print('Jacobian Computation Time [Sequential]:', time_seq)\n # print('Jacobian Computation Time [Optimal]:', time_opt)\n # print('Jacobian Computation Time [VMAP]:', time_vmap)\n # print('Speedup over sequential:', time_seq/ time_opt)\n print('Elapsed time:', time.time() - start_time_epoch)\n print(\n \"Iteration %3.d/%d \" % (batch_idx, MAX_ITER) +\n \"Minibatch Loss %.3f \" % (loss_org) +\n \"Accuracy %.0f\" % (accuracy * 100) + \"%\"\n )\n\n if batch_idx >= MAX_ITER:\n break\n epoch_time = time.time() - start_time_epoch\n epoch_time_list.append(epoch_time)\n print('Elapsed time for epoch %d time: %.3f' % (epoch , epoch_time))\n\nprint('Epoch times : ', epoch_time_list)\nprint('Time(s) ACC. 
LOSS')\nfor i in range(len(time_list)):\n print('%.3f, %.3f, %.3f' %(time_list[i], acc_list[i], loss_list[i].item()))\n\n\n" }, { "alpha_fraction": 0.7543478012084961, "alphanum_fraction": 0.77173912525177, "avg_line_length": 34.38461685180664, "blob_id": "73ee21115ff3bb63842e4bfeab330a54fc48df6b", "content_id": "7ee124d5e7258195600e4c627789c47d42384b8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "permissive", "max_line_length": 73, "num_lines": 13, "path": "/backpack/extensions/secondorder/mngd/pooling.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.avgpool2d import AvgPool2DDerivatives\nfrom backpack.core.derivatives.maxpool2d import MaxPool2DDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\n\n\nclass MNGDMaxPool2d(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=MaxPool2DDerivatives())\n\n\nclass MNGDAvgPool2d(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=AvgPool2DDerivatives())\n" }, { "alpha_fraction": 0.75789475440979, "alphanum_fraction": 0.7747368216514587, "avg_line_length": 35.53845977783203, "blob_id": "496a5060220a776d555ec7fbcb665dd5a91d1987", "content_id": "f9beea606502142618ad8435fd9bc226609eb2d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "permissive", "max_line_length": 84, "num_lines": 13, "path": "/backpack/extensions/secondorder/diag_hessian/pooling.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.avgpool2d import AvgPool2DDerivatives\nfrom backpack.core.derivatives.maxpool2d import MaxPool2DDerivatives\nfrom backpack.extensions.secondorder.diag_hessian.diag_h_base import DiagHBaseModule\n\n\nclass DiagHAvgPool2d(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=AvgPool2DDerivatives())\n\n\nclass DiagHMaxPool2d(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=MaxPool2DDerivatives())\n" }, { "alpha_fraction": 0.7840909361839294, "alphanum_fraction": 0.7840909361839294, "avg_line_length": 36.71428680419922, "blob_id": "9087a7f166bb6c8b3b0dd8d214571879bfb93de5", "content_id": "27315a1ff68628a4e9da493d7a450f43df2a6557", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "permissive", "max_line_length": 76, "num_lines": 7, "path": "/backpack/extensions/secondorder/trial/dropout.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.dropout import DropoutDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\n\n\nclass TRIALDropout(TRIALBaseModule):\n def __init__(self):\n super().__init__(derivatives=DropoutDerivatives())\n" }, { "alpha_fraction": 0.7428115010261536, "alphanum_fraction": 0.7428115010261536, "avg_line_length": 31.947368621826172, "blob_id": "ad61a06c0a8e09d43569b4a5a9ff313805472271", "content_id": "16a59c6b0ed750f9d4d22a27a5d047b94ce8feef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "permissive", "max_line_length": 76, "num_lines": 19, "path": "/backpack/extensions/secondorder/trial/activations.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.relu import 
ReLUDerivatives\nfrom backpack.core.derivatives.sigmoid import SigmoidDerivatives\n# from backpack.core.derivatives.tanh import TanhDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\n\n\nclass TRIALReLU(TRIALBaseModule):\n def __init__(self):\n super().__init__(derivatives=ReLUDerivatives())\n\n\nclass TRIALSigmoid(TRIALBaseModule):\n def __init__(self):\n super().__init__(derivatives=SigmoidDerivatives())\n\n\n# class DiagGGNTanh(DiagGGNBaseModule):\n# def __init__(self):\n# super().__init__(derivatives=TanhDerivatives())\n" }, { "alpha_fraction": 0.594115674495697, "alphanum_fraction": 0.5951187014579773, "avg_line_length": 31.335134506225586, "blob_id": "4478639cea5364bd29f4d6db7793e9c69a1e7ca3", "content_id": "4f0deb64df9ae9ab9d48b764ffe96e18a12db395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5982, "license_type": "permissive", "max_line_length": 92, "num_lines": 185, "path": "/backpack/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"BackPACK.\"\"\"\nimport inspect\n\nimport torch\n\nfrom backpack.extensions.backprop_extension import BackpropExtension\n\nfrom . import extensions\nfrom .context import CTX\n\nimport torch.nn as nn\nclass backpack:\n \"\"\"Activates Backpack Extensions.\n\n Activates the BackPACK extensions passed as arguments for the\n :code:`backward` calls in the current :code:`with` block.\n \"\"\"\n\n def __init__(self, *exts: BackpropExtension, debug=False, mem_clean_up = True):\n \"\"\"Activate the Backpack extensions.\n\n Example usage:\n ```\n X, Y, model, lossfunc = get_problem()\n\n backpack.extend(model)\n backpack.extend(lossfunc)\n\n with backpack.backpack(backpack.extensions.Variance()):\n lossfunc(model(X), Y).backward()\n\n for p in model.parameters():\n print(p.grad)\n print(p.variance)\n ```\n\n .. warning ::\n\n The quantities computed by backPACK may be garbage collected when\n exiting the `with` clause. 
Use them within the `with` clause or\n            assign them to another variable.\n\n        Attributes:\n            args: [BackpropExtension]\n                The extensions to activate for the backward pass.\n            debug: Bool, optional (default: False)\n                If true, will print debug messages during the backward pass.\n        \"\"\"\n        for ext in exts:\n            if not isinstance(ext, BackpropExtension):\n                if inspect.isclass(ext) and issubclass(ext, BackpropExtension):\n                    raise ValueError(\n                        \"backpack expects instances of BackpropExtension,\"\n                        + \" but received a class instead [{}].\".format(ext)\n                        + \" Instantiate it before passing it to backpack.\"\n                    )\n                else:\n                    raise ValueError(\n                        \"backpack expects instances of BackpropExtension,\"\n                        + \" but received [{}].\".format(ext)\n                    )\n\n        self.exts = exts\n        self.debug = debug\n        self.mem_clean_up = mem_clean_up\n\n    def __enter__(self):\n        self.old_CTX = CTX.get_active_exts()\n        self.old_debug = CTX.get_debug()\n        CTX.set_active_exts(self.exts)\n        CTX.set_debug(self.debug)\n\n    def __exit__(self, type, value, traceback):\n        CTX.set_active_exts(self.old_CTX)\n        CTX.set_debug(self.old_debug)\n\n\ndef hook_store_io(module, input, output):\n    \"\"\"Saves the input and output as attributes of the module.\n\n    Args:\n        module: module\n        input: List of input tensors\n        output: output tensor\n    \"\"\"\n    if module.training and (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear)):\n        for i in range(len(input)):\n            setattr(module, \"input{}\".format(i), input[i])\n        module.output = output\n\ndef hook_param(grad):\n    \"\"\"\n    Send the gradient to the backward hook.\n    \"\"\"\n    return grad \n\n\ndef hook_store_shapes(module, input, output):\n    \"\"\"Store dimensionality of output as buffer.\n\n    Args:\n        module: module\n        input: List of input tensors shapes\n        output: output tensor shape\n    \"\"\"\n    for i in range(len(input)):\n        module.register_buffer(\n            \"input{}_shape\".format(i), torch.IntTensor([*input[i].size()])\n        )\n    module.register_buffer(\"output_shape\", torch.IntTensor([*output.size()]))\n\n\ndef memory_cleanup(module):\n    \"\"\"Remove I/O stored by backpack during the forward pass.\n\n    Deletes the attributes created by `hook_store_io` and `hook_store_shapes`.\n    \"\"\"\n    # if self.mem_clean_up:\n    if hasattr(module, \"output\"):\n        delattr(module, \"output\")\n    if hasattr(module, \"output_shape\"):\n        delattr(module, \"output_shape\")\n    i = 0\n    while hasattr(module, \"input{}\".format(i)):\n        delattr(module, \"input{}\".format(i))\n        i += 1\n    i = 0\n    while hasattr(module, \"input{}_shape\".format(i)):\n        delattr(module, \"input{}_shape\".format(i))\n        i += 1\n\n\ndef hook_run_extensions(module, g_inp, g_out):\n    for backpack_extension in CTX.get_active_exts():\n        if CTX.get_debug():\n            print(\"[DEBUG] Running extension\", backpack_extension, \"on\", module)\n        if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n            backpack_extension.apply(module, g_inp, g_out)\n\n    if not (\n        CTX.is_extension_active(\n            extensions.curvmatprod.HMP,\n            extensions.curvmatprod.GGNMP,\n            extensions.curvmatprod.PCHMP,\n\n        )\n    ):\n        if CTX.get_active_exts() and module._keep_memory == False:\n            # print('module cleaned' , module)\n            memory_cleanup(module)\n        # else:\n        #     print('[WARN] No clean up...')\n        \n\n\ndef extend(module: torch.nn.Module, debug=False, keep_memory=False):\n    \"\"\"Extends the ``module`` to make it backPACK-ready.\n\n    If the ``module`` has children, e.g. 
for a ``torch.nn.Sequential``,\n they will also be extended.\n\n Args:\n module: torch.nn.Module\n The module to extend\n debug: Bool, optional (default: False)\n If true, will print debug messages during the extension.\n \"\"\"\n if debug:\n print(\"[DEBUG] Extending\", module)\n\n for child in module.children():\n extend(child, debug=debug, keep_memory=keep_memory)\n\n module_was_already_extended = getattr(module, \"_backpack_extend\", False)\n if not module_was_already_extended:\n CTX.add_hook_handle(module.register_forward_hook(hook_store_io))\n CTX.add_hook_handle(module.register_forward_hook(hook_store_shapes))\n # if hasattr(module, \"weight\"):\n # if module.weight is not None:\n # CTX.add_hook_handle(module.weight.register_hook(hook_param))\n CTX.add_hook_handle(module.register_backward_hook(hook_run_extensions))\n module._backpack_extend = True\n module._keep_memory = keep_memory\n\n return module\n" }, { "alpha_fraction": 0.7799227833747864, "alphanum_fraction": 0.7799227833747864, "avg_line_length": 36, "blob_id": "4067ff33d3a4949a40dcf39237cf10d637c451cf", "content_id": "63992ee506850476d3867eb2db38e3f8a1b40004", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "permissive", "max_line_length": 73, "num_lines": 7, "path": "/backpack/extensions/secondorder/mngd/dropout.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.dropout import DropoutDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\n\n\nclass MNGDDropout(MNGDBaseModule):\n def __init__(self):\n super().__init__(derivatives=DropoutDerivatives())\n" }, { "alpha_fraction": 0.5304069519042969, "alphanum_fraction": 0.5404663681983948, "avg_line_length": 32.61538314819336, "blob_id": "1183d7bbfa62c652748deac659d062bb1d73b3e3", "content_id": "fc7aedf6b55b5e9bde0b79f101c852e92e7c50f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "permissive", "max_line_length": 89, "num_lines": 65, "path": "/backpack/extensions/firstorder/fisher/batchnorm1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import einsum\nfrom torch import matmul\nfrom backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives\nfrom backpack.extensions.firstorder.fisher.fisher_base import FisherBase\n\n\nclass FisherBatchNorm1d(FisherBase):\n def __init__(self, silent):\n self.silent = silent\n super().__init__(derivatives=BatchNorm1dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n if not self.silent:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n input = module.input0\n mean = input.mean(dim=0)\n var = input.var(dim=0, unbiased=False)\n xhat = (input - mean) / (var + module.eps).sqrt()\n dw = g_out_sc * xhat\n\n # compute vector jacobian product in optimization method\n grad = module.weight.grad\n grad_prod = einsum(\"nk,k->n\", (dw, grad))\n\n return (matmul(dw, dw.t()), grad_prod)\n else:\n \n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n input = module.input0\n mean = input.mean(dim=0)\n var = input.var(dim=0, unbiased=False)\n xhat = (input - mean) / (var + module.eps).sqrt()\n dw = g_out_sc * xhat\n\n # compute vector jacobian product in optimization method\n grad = module.weight.grad\n grad_prod = einsum(\"nk,k->n\", (dw, grad))\n\n return grad_prod\n\n\n def bias(self, ext, module, g_inp, 
g_out, backproped):\n if not self.silent:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n return (out, grad_prod)\n else:\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad = module.bias.grad\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n\n return grad_prod\n\n\n" }, { "alpha_fraction": 0.651442289352417, "alphanum_fraction": 0.6554487347602844, "avg_line_length": 38, "blob_id": "8b80a33f7ea2fd785fdc575abebaf5eaf6f9d47d", "content_id": "d0ecea92a8be91b3d134f9f75b3bde77823e7fd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1248, "license_type": "permissive", "max_line_length": 84, "num_lines": 32, "path": "/backpack/extensions/secondorder/diag_hessian/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn\n\nfrom backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.secondorder.diag_hessian.diag_h_base import DiagHBaseModule\nfrom backpack.utils import conv as convUtils\n\n\nclass DiagHConv2d(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n sqrt_h_outs = backproped[\"matrices\"]\n sqrt_h_outs_signs = backproped[\"signs\"]\n h_diag = torch.zeros_like(module.bias)\n\n for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):\n h_diag_curr = convUtils.extract_bias_diagonal(module, h_sqrt)\n h_diag.add_(sign * h_diag_curr)\n return h_diag\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n sqrt_h_outs = backproped[\"matrices\"]\n sqrt_h_outs_signs = backproped[\"signs\"]\n X = convUtils.unfold_func(module)(module.input0)\n h_diag = torch.zeros_like(module.weight)\n\n for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):\n h_diag_curr = convUtils.extract_weight_diagonal(module, X, h_sqrt)\n h_diag.add_(sign * h_diag_curr)\n return h_diag\n" }, { "alpha_fraction": 0.5687606334686279, "alphanum_fraction": 0.5738539695739746, "avg_line_length": 28.450000762939453, "blob_id": "0638c5e0c399feac0f476f753c0fd87fe5cc7cd0", "content_id": "21a5a16b08ebc0132c8f044fb6077871c3bd9782", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "permissive", "max_line_length": 82, "num_lines": 60, "path": "/backpack/utils/unsqueeze.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import functools\n\n\ndef jmp_unsqueeze_if_missing_dim(mat_dim):\n \"\"\"Allow Jacobian-matrix routines to do Jacobian-vector products.\"\"\"\n\n def jmp_wrapper(jmp):\n @functools.wraps(jmp)\n def wrapped_jmp_support_jvp(self, module, g_inp, g_out, mat, **kwargs):\n is_vec = len(mat.shape) == mat_dim - 1\n mat_used = mat.unsqueeze(-1) if is_vec else mat\n result = jmp(self, module, g_inp, g_out, mat_used, **kwargs)\n if is_vec:\n return result.squeeze(-1)\n else:\n return result\n\n return wrapped_jmp_support_jvp\n\n return jmp_wrapper\n\n\ndef hmp_unsqueeze_if_missing_dim(mat_dim):\n \"\"\"Allow Hessian-matrix routines to do Hessian-vector products.\"\"\"\n\n def hmp_wrapper(hmp):\n @functools.wraps(hmp)\n def wrapped_hmp_support_hvp(mat):\n is_vec = len(mat.shape) == mat_dim - 1\n 
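# a tensor one dimension short of mat_dim is treated as a vector: promote\n            # it to a matrix for the product, then squeeze the result back afterwards\n            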
mat_used = mat.unsqueeze(-1) if is_vec else mat\n result = hmp(mat_used)\n if is_vec:\n return result.squeeze(-1)\n else:\n return result\n\n return wrapped_hmp_support_hvp\n\n return hmp_wrapper\n\n\ndef kfacmp_unsqueeze_if_missing_dim(mat_dim):\n \"\"\"\n Allows Kronecker-factored matrix-matrix routines to do matrix-vector products.\n \"\"\"\n\n def kfacmp_wrapper(kfacmp):\n @functools.wraps(kfacmp)\n def wrapped_kfacmp_support_kfacvp(mat):\n is_vec = len(mat.shape) == mat_dim - 1\n mat_used = mat.unsqueeze(-1) if is_vec else mat\n result = kfacmp(mat_used)\n if is_vec:\n return result.squeeze(-1)\n else:\n return result\n\n return wrapped_kfacmp_support_kfacvp\n\n return kfacmp_wrapper\n" }, { "alpha_fraction": 0.7580299973487854, "alphanum_fraction": 0.7751606106758118, "avg_line_length": 34.92307662963867, "blob_id": "00e6a0f3e8ce2916db45e338cd06b9f287dc8328", "content_id": "1af8e7f6bd6683a126d93a3eeface40a3b091a6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "permissive", "max_line_length": 76, "num_lines": 13, "path": "/backpack/extensions/secondorder/trial/pooling.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.avgpool2d import AvgPool2DDerivatives\nfrom backpack.core.derivatives.maxpool2d import MaxPool2DDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\n\n\nclass TRIALMaxPool2d(TRIALBaseModule):\n def __init__(self):\n super().__init__(derivatives=MaxPool2DDerivatives())\n\n\nclass TRIALAvgPool2d(TRIALBaseModule):\n def __init__(self):\n super().__init__(derivatives=AvgPool2DDerivatives())\n" }, { "alpha_fraction": 0.5617918372154236, "alphanum_fraction": 0.5675889253616333, "avg_line_length": 32.58407211303711, "blob_id": "1a95981dafd1ed1fdd84d88484626c34dce229b9", "content_id": "0b5de95c32a23f84b8f506ce4b178ccac96ea9ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3796, "license_type": "permissive", "max_line_length": 74, "num_lines": 113, "path": "/backpack/core/derivatives/maxpool2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from torch import zeros\nfrom torch.nn import MaxPool2d\nfrom torch.nn.functional import max_pool2d\n\nfrom backpack.core.derivatives.basederivatives import BaseDerivatives\nfrom backpack.utils.ein import eingroup\n\n\nclass MaxPool2DDerivatives(BaseDerivatives):\n def get_module(self):\n return MaxPool2d\n\n # TODO: Do not recompute but get from forward pass of module\n def get_pooling_idx(self, module):\n _, pool_idx = max_pool2d(\n module.input0,\n kernel_size=module.kernel_size,\n stride=module.stride,\n padding=module.padding,\n dilation=module.dilation,\n return_indices=True,\n ceil_mode=module.ceil_mode,\n )\n return pool_idx\n\n def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):\n \"\"\"\n\n Note: It is highly questionable whether this makes sense both\n in terms of the approximation and memory costs.\n\n Note:\n Need to loop over the samples, as dealing with all at once\n requires memory for `N * C² * H_in * W_in * H_out * W_out`\n elements\n \"\"\"\n device = mat.device\n N, C, H_in, W_in = module.input0.size()\n _, _, H_out, W_out = module.output.size()\n\n in_pixels = H_in * W_in\n in_features = C * in_pixels\n\n pool_idx = self.get_pooling_idx(module).view(N, C, H_out * W_out)\n\n def sample_ea_jac_t_mat_jac_prod(n, mat):\n jac_t_mat = sample_jac_t_mat_prod(n, 
mat)\n mat_t_jac = jac_t_mat.t()\n jac_t_mat_t_jac = sample_jac_t_mat_prod(n, mat_t_jac)\n return jac_t_mat_t_jac.t()\n\n def sample_jac_t_mat_prod(n, mat):\n num_cols = mat.size(1)\n idx = pool_idx[n, :, :].unsqueeze(-1).expand(-1, -1, num_cols)\n\n jac_t_mat = zeros(C, H_in * W_in, num_cols, device=device)\n mat = mat.reshape(C, H_out * W_out, num_cols)\n\n jac_t_mat.scatter_add_(1, idx, mat)\n\n return jac_t_mat.reshape(in_features, num_cols)\n\n result = zeros(in_features, in_features, device=device)\n\n for n in range(N):\n result += sample_ea_jac_t_mat_jac_prod(n, mat)\n\n return result / N\n\n def hessian_is_zero(self):\n return True\n\n def _jac_mat_prod(self, module, g_inp, g_out, mat):\n mat_as_pool = eingroup(\"v,n,c,h,w->v,n,c,hw\", mat)\n jmp_as_pool = self.__apply_jacobian_of(module, mat_as_pool)\n return self.reshape_like_output(jmp_as_pool, module)\n\n def __apply_jacobian_of(self, module, mat):\n V, HW_axis = mat.shape[0], 3\n pool_idx = self.__pool_idx_for_jac(module, V)\n return mat.gather(HW_axis, pool_idx)\n\n def __pool_idx_for_jac(self, module, V):\n \"\"\"Manipulated pooling indices ready-to-use in jac(t).\"\"\"\n\n pool_idx = self.get_pooling_idx(module)\n V_axis = 0\n return (\n eingroup(\"n,c,h,w->n,c,hw\", pool_idx)\n .unsqueeze(V_axis)\n .expand(V, -1, -1, -1)\n )\n\n def _jac_t_mat_prod(self, module, g_inp, g_out, mat):\n mat_as_pool = eingroup(\"v,n,c,h,w->v,n,c,hw\", mat)\n jmp_as_pool = self.__apply_jacobian_t_of(module, mat_as_pool)\n return self.reshape_like_input(jmp_as_pool, module)\n\n def __apply_jacobian_t_of(self, module, mat):\n V = mat.shape[0]\n result = self.__zero_for_jac_t(module, V, mat.device)\n pool_idx = self.__pool_idx_for_jac(module, V)\n\n HW_axis = 3\n result.scatter_add_(HW_axis, pool_idx, mat)\n return result\n\n def __zero_for_jac_t(self, module, V, device):\n N, C_out, _, _ = module.output_shape\n _, _, H_in, W_in = module.input0.size()\n\n shape = (V, N, C_out, H_in * W_in)\n return zeros(shape, device=device)\n" }, { "alpha_fraction": 0.6503496766090393, "alphanum_fraction": 0.6629370450973511, "avg_line_length": 45.064517974853516, "blob_id": "27d71e017bf62bfa22fd2b7fcebcbe30ba20a14e", "content_id": "0a1227032ac15154d55413e4e2280aa51810780b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1430, "license_type": "permissive", "max_line_length": 112, "num_lines": 31, "path": "/backpack/extensions/secondorder/trial/conv2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv2d import Conv2DDerivatives\nfrom backpack.extensions.secondorder.trial.trial_base import TRIALBaseModule\nfrom backpack.utils import conv as convUtils\nfrom torch import sqrt, zeros\nimport torch\n\nfrom torch import einsum\n\nclass TRIALConv2d(TRIALBaseModule):\n def __init__(self, MODE):\n self.MODE = MODE\n super().__init__(derivatives=Conv2DDerivatives(), params=[\"bias\", \"weight\"])\n \n # TODO: FIX these functions for NGD\n def bias(self, ext, module, grad_inp, grad_out, backproped):\n sqrt_ggn = backproped\n return convUtils.extract_bias_ngd(module, sqrt_ggn, self.MODE)\n\n def weight(self, ext, module, grad_inp, grad_out, backproped):\n # mask_shape = module.input0.shape\n # mask = self.create_mask_conv2d(module, mask_shape)\n # X = convUtils.unfold_func(module)(module.input0)\n # weight_diag = convUtils.extract_weight_ngd(module, X, backproped, mask)\n if self.MODE == 666: # not good because of repeating\n dw = 
self.derivatives.weight_jac_t_mat_prod(module, grad_inp, grad_out, backproped, sum_batch=False)\n            dw = dw.reshape(dw.shape[0], dw.shape[1], dw.shape[2], -1)\n            res_ = dw.permute(0,1,3,2)\n            return einsum(\"vnkm,zqkm->vnzq\", (res_, res_))\n        else:\n            weight_diag = convUtils.extract_weight_ngd(module, backproped, self.MODE)\n            return weight_diag\n\n\n" }, { "alpha_fraction": 0.5526584982872009, "alphanum_fraction": 0.557259738445282, "avg_line_length": 33.31578826904297, "blob_id": "26d7dddb2372ffd913d63cca0a8027186336f81d", "content_id": "54dd09c35056def77674a031a1ce56fbac54aa89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1956, "license_type": "permissive", "max_line_length": 84, "num_lines": 57, "path": "/backpack/extensions/secondorder/mngd/linear.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import backpack.utils.linear as LinUtils\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule\nfrom torch import einsum, eye, matmul\nfrom torch.linalg import inv\n\n\nclass MNGDLinear(MNGDBaseModule):\n    def __init__(self, damping=1.0):\n        # damping follows the convention of the fisher_block extensions\n        self.damping = damping\n        super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n    # TODO: FIX these functions for NGD\n    def bias(self, ext, module, grad_inp, grad_out, backproped):\n        grad = module.bias.grad\n        n = grad_out[0].shape[0]\n        g_out_sc = n * grad_out[0]\n\n        # compute vector jacobian product in optimization method\n        grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n        out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n\n        NGD_kernel = out / n\n        NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n        v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n        gv = einsum(\"n,no->o\", (v, g_out_sc))\n        gv = gv / n\n\n        update = (grad - gv)/self.damping\n        return (out, grad_prod, update)\n\n    def weight(self, ext, module, grad_inp, grad_out, backproped):\n        I = module.input0\n        n = grad_out[0].shape[0]\n        g_out_sc = n * grad_out[0]\n        G = g_out_sc\n        grad = module.weight.grad\n\n        B = einsum(\"ni,li->nl\", (I, I))\n        A = einsum(\"no,lo->nl\", (G, G))\n\n        # compute vector jacobian product in optimization method\n        grad_prod = einsum(\"ni,oi->no\", (I, grad))\n        grad_prod = einsum(\"no,no->n\", (grad_prod, G))\n        out = A * B\n        NGD_kernel = out / n\n        NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n        v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n\n        gv = einsum(\"n,no->no\", (v, G))\n        gv = einsum(\"no,ni->oi\", (gv, I))\n        gv = gv / n\n\n        update = (grad - gv)/self.damping\n\n        module.I = I\n        module.G = G\n        module.NGD_inv = NGD_inv\n        return (out, grad_prod, update)\n" }, { "alpha_fraction": 0.6042087078094482, "alphanum_fraction": 0.6163159608840942, "avg_line_length": 37.325965881347656, "blob_id": "4ad4e4e30286737daafc6f796cafa952978d982b", "content_id": "bd769a2b1d248434037d39ae83dd6c7c4db0a93b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6938, "license_type": "permissive", "max_line_length": 126, "num_lines": 181, "path": "/backpack/utils/fft_conv.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from functools import partial\nfrom typing import Tuple, Union, Iterable\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.fft import rfftn, irfftn, fftn, ifftn\nimport torch.nn.functional as f\n\nimport time\n\ndef complex_matmul(a: Tensor, b: Tensor, groups: int = 1) -> Tensor:\n    # n = a[0]\n    n = a.shape[0]\n    v = b.shape[0] // n\n    b = b.reshape(v, n, b.shape[1], 
b.shape[2], b.shape[3])\n a = a.unsqueeze(1).unsqueeze(0)\n b = b.unsqueeze(3)\n # a = a.real\n # b = b.real\n # print('a,b,c:', a.shape, b.shape)\n c = a * b\n return c\n\ndef my_complex_matmul(a: Tensor, b: Tensor, groups: int = 1) -> Tensor:\n \"\"\"Multiplies two complex-valued tensors.\"\"\"\n scalar_matmul = partial(torch.einsum, \"na...,nb...-> nab...\")\n # a = a.view(a.size(0), -1, *a.shape[2:])\n # b = b.view( -1, *b.shape[1:])\n\n # Compute the real and imaginary parts independently, then manually insert them\n # into the output Tensor. This is fairly hacky but necessary for PyTorch 1.7.0,\n # because Autograd is not enabled for complex matrix operations yet. Not exactly\n # idiomatic PyTorch code, but it should work for all future versions (>= 1.7.0).\n # print('Line24: a shape, b shape:', a.shape, b.shape)\n real = scalar_matmul(a.real, b.real) - scalar_matmul(a.imag, b.imag)\n imag = scalar_matmul(a.imag, b.real) + scalar_matmul(a.real, b.imag)\n c = torch.zeros(real.shape, dtype=torch.complex64, device=a.device)\n c.real, c.imag = real, imag\n return c\n\ndef to_ntuple(val: Union[int, Iterable[int]], n: int) -> Tuple[int, ...]:\n \"\"\"Casts to a tuple with length 'n'. Useful for automatically computing the\n padding and stride for convolutions, where users may only provide an integer.\n\n Args:\n val: (Union[int, Iterable[int]]) Value to cast into a tuple.\n n: (int) Desired length of the tuple\n\n Returns:\n (Tuple[int, ...]) Tuple of length 'n'\n \"\"\"\n if isinstance(val, Iterable):\n out = tuple(val)\n if len(out) == n:\n return out\n else:\n raise ValueError(f\"Cannot cast tuple of length {len(out)} to length {n}.\")\n else:\n return n * (val,)\n\n\ndef fft_conv(\n signal: Tensor,\n kernel: Tensor,\n bias: Tensor = None,\n padding: Union[int, Iterable[int]] = 0,\n stride: Union[int, Iterable[int]] = 1,\n groups: int = 1,\n) -> Tensor:\n \"\"\"Performs N-d convolution of Tensors using a fast fourier transform, which\n is very fast for large kernel sizes. Also, optionally adds a bias Tensor after\n the convolution (in order ot mimic the PyTorch direct convolution).\n\n Args:\n signal: (Tensor) Input tensor to be convolved with the kernel.\n kernel: (Tensor) Convolution kernel.\n bias: (Tensor) Bias tensor to add to the output.\n padding: (Union[int, Iterable[int]) Number of zero samples to pad the\n input on the last dimension.\n stride: (Union[int, Iterable[int]) Stride size for computing output values.\n\n Returns:\n (Tensor) Convolved tensor\n \"\"\"\n # Cast padding & stride to tuples.\n # st = time.time()\n # padding_ = to_ntuple(padding, n=signal.ndim - 2)\n # stride_ = to_ntuple(stride, n=signal.ndim - 2)\n\n padding_ = padding\n\n stride_ = (1, 1)\n # print('padding_:', padding_)\n # print('stride_:', stride_)\n # padding_time = time.time() - st\n # print('padding_time:', padding_time)\n # Pad the input signal & kernel tensors\n signal_padding = [p for p in padding_[::-1] for _ in range(2)]\n signal = f.pad(signal, signal_padding)\n \n # Because PyTorch computes a *one-sided* FFT, we need the final dimension to\n # have *even* length. 
Just pad with one more zero if the final dimension is odd.\n if signal.size(-1) % 2 != 0:\n signal_ = f.pad(signal, [0, 1])\n else:\n signal_ = signal\n\n \n\n # st = time.time()\n kernel_padding = [\n pad\n for i in reversed(range(2, signal_.ndim))\n for pad in [0, signal_.size(i) - kernel.size(i)]\n ]\n # print(kernel_padding)\n # print(kernel.shape)\n padded_kernel = f.pad(kernel, kernel_padding)\n # padding_time_kernel = time.time() - st\n # print('padding_time_kernel:', padding_time_kernel)\n # Perform fourier convolution -- FFT, matrix multiply, then IFFT\n # signal_ = signal_.reshape(signal_.size(0), groups, -1, *signal_.shape[2:])\n \n # st = time.time()\n # signal_fr = rfftn(signal_, dim=tuple(range(2, signal.ndim)))\n # kernel_fr = rfftn(padded_kernel, dim=tuple(range(2, signal.ndim)))\n\n signal_fr = rfftn(signal_, dim=tuple(range(2, signal.ndim)))\n kernel_fr = rfftn(padded_kernel, dim=tuple(range(2, signal.ndim)))\n # rfft_time = time.time() - st\n # print('rfft_time:', rfft_time)\n # print('Line: padded signal shape:', signal_.shape)\n # print('Line: signal_ shape:', signal_.shape)\n # print('Line: padded_kernel shape:', padded_kernel.shape)\n # print('Line: kernel_fr shape:', kernel_fr.shape)\n\n # st = time.time()\n kernel_fr.imag *= -1\n\n # output_fr = complex_matmul(signal_fr, kernel_fr, groups=groups)/torch.numel(signal_fr[0,0,0,:])\n # print('KOOOOME:', output_fr.shape)\n # print('KOOOOME:', output_fr)\n # x = signal_fr[0,:]\n # print(x)\n # output_fr = my_complex_matmul(signal_fr, kernel_fr, groups=groups)/torch.numel(signal_fr[0,0,0,:])\n # output_fr = my_complex_matmul(signal_fr, kernel_fr, groups=groups)\n output_fr = complex_matmul(signal_fr, kernel_fr, groups=groups)\n output = output_fr\n # print('output.shape:', output.shape)\n\n # matmul_time = time.time() - st\n # print('matmul_time:', matmul_time)\n \n # st = time.time()\n output = irfftn(output_fr, dim=tuple(range(4, signal.ndim + 2)))\n # print('output irfftn .shape:', output.shape)\n # output = ifftn(output_fr, dim=tuple(range(3, signal.ndim+1)))\n # inverse_time = time.time() - st\n # print('inverse_time:', inverse_time)\n # st = time.time()\n # Remove extra padded values\n # print('signal, kernel, padding', signal.shape, kernel.shape, padding)\n crop_slices = [slice(0, output.size(0)), slice(0, output.size(1)), slice(0, output.size(2)), slice(0, output.size(3))] + [\n slice(padding_[i-3] - 1 , (signal.size(i - 1) - kernel.size(i - 1) - padding_[i-3] +2 ), stride_[i - 3])\n for i in range(3, signal.ndim + 1)\n ]\n # crop_slices = \n # print('crop_slices:', crop_slices)\n # print('my output before croping:', output.shape)\n output = output[crop_slices].contiguous()\n # output = output[:,:,:,1:].contiguous()\n # print('output after crop:', output.shape)\n # print('output norm 2:', torch.norm(output))\n # crop_time = time.time() - st\n # print('crop_time:', crop_time)\n # Optionally, add a bias term before returning.\n if bias is not None:\n bias_shape = tuple([1, -1] + (signal.ndim - 2) * [1])\n output += bias.view(bias_shape)\n\n return output\n\n" }, { "alpha_fraction": 0.48845791816711426, "alphanum_fraction": 0.5179101228713989, "avg_line_length": 37.174034118652344, "blob_id": "dd0aea82e4617389fc7641529e4250edc93e2fbf", "content_id": "a029122050f4a94d3b61d10d11d507f2560b9ab8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13819, "license_type": "permissive", "max_line_length": 82, "num_lines": 362, "path": 
"/test/extensions/firstorder/firstorder_settings.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"Test configurations for `backpack.core.extensions.firstorder`\nthat is shared among the following firstorder methods:\n- batch_grad\n- batch_l2_grad\n- sum_grad_sqaured\n- variance\n\n\nRequired entries:\n \"module_fn\" (callable): Contains a model constructed from `torch.nn` layers\n \"input_fn\" (callable): Used for specifying input function\n \"target_fn\" (callable): Fetches the groundtruth/target classes \n of regression/classification task\n \"loss_function_fn\" (callable): Loss function used in the model\n\nOptional entries:\n \"device\" [list(torch.device)]: List of devices to run the test on.\n \"id_prefix\" (str): Prefix to be included in the test name.\n \"seed\" (int): seed for the random number for torch.rand\n\"\"\"\n\n\nimport torch\nfrom test.core.derivatives.utils import classification_targets, regression_targets\n\nFIRSTORDER_SETTINGS = []\n\n###############################################################################\n# examples #\n###############################################################################\n\nexample = {\n \"input_fn\": lambda: torch.rand(3, 10),\n \"module_fn\": lambda: torch.nn.Sequential(torch.nn.Linear(10, 5)),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n \"device\": [torch.device(\"cpu\")],\n \"seed\": 0,\n \"id_prefix\": \"example\",\n}\nFIRSTORDER_SETTINGS.append(example)\n\n###############################################################################\n# test setting: Linear Layers #\n###############################################################################\n\nFIRSTORDER_SETTINGS += [\n # classification\n {\n \"input_fn\": lambda: torch.rand(3, 10),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Linear(10, 7), torch.nn.Linear(7, 5)\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 10),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Linear(10, 7), torch.nn.ReLU(), torch.nn.Linear(7, 5)\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n # Regression\n {\n \"input_fn\": lambda: torch.rand(3, 10),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Linear(10, 7), torch.nn.Sigmoid(), torch.nn.Linear(7, 5)\n ),\n \"loss_function_fn\": lambda: torch.nn.MSELoss(reduction=\"mean\"),\n \"target_fn\": lambda: regression_targets((3, 5)),\n },\n]\n\n###############################################################################\n# test setting: Convolutional Layers #\n###############################################################################\n\nFIRSTORDER_SETTINGS += [\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv1d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(12, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv1d(3, 2, 2, bias=False),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(12, 5),\n ),\n \"loss_function_fn\": lambda: 
torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 8),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv1d(\n 3, 6, 2, stride=4, padding=2, padding_mode=\"zeros\", dilation=3\n ),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(18, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv1d(3, 2, 2, padding=2, dilation=1, stride=2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(10, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 2, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv1d(2, 3, 2, padding=0, dilation=2, groups=1),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(15, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv2d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(72, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv2d(3, 2, 2, bias=False),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(72, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 8, 8),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv2d(\n 3, 6, 2, stride=4, padding=2, padding_mode=\"zeros\", dilation=3\n ),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(54, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv2d(3, 2, 2, padding=0, stride=2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(18, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv2d(2, 3, 2, padding=0, dilation=2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(75, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv3d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(72, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv3d(3, 2, 2, bias=False),\n 
torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(72, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 4, 8, 8),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv3d(\n 3, 6, 2, padding=2, stride=4, dilation=3, padding_mode=\"zeros\"\n ),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(108, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv3d(3, 2, 2, dilation=1, padding=2, stride=3),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(64, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 2, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.Conv3d(2, 3, 2, dilation=2, padding=0),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(75, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose1d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(16, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose1d(3, 2, 2, bias=False),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(16, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose1d(3, 2, 2, padding=2, dilation=1, stride=2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(20, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 2, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose1d(2, 3, 2, padding=0, dilation=5, stride=3),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(72, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"sum\"),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose2d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(128, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 3, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose2d(3, 2, 2, bias=False),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(128, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(3, 2, 9, 9),\n 
\"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose2d(2, 4, 2, padding=0, dilation=2, groups=1),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(484, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(),\n \"target_fn\": lambda: classification_targets((3,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(2, 3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose3d(3, 2, 2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(384, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((2,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(2, 3, 2, 7, 7),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose3d(3, 2, 2, bias=False),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(384, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((2,), 5),\n },\n {\n \"input_fn\": lambda: torch.rand(2, 3, 5, 5, 5),\n \"module_fn\": lambda: torch.nn.Sequential(\n torch.nn.ConvTranspose3d(3, 2, 2, padding=2, dilation=2, stride=2),\n torch.nn.ReLU(),\n torch.nn.Flatten(),\n torch.nn.Linear(686, 5),\n ),\n \"loss_function_fn\": lambda: torch.nn.CrossEntropyLoss(reduction=\"mean\"),\n \"target_fn\": lambda: classification_targets((2,), 5),\n },\n]\n" }, { "alpha_fraction": 0.7508038878440857, "alphanum_fraction": 0.7508038878440857, "avg_line_length": 31.736841201782227, "blob_id": "1f489842dc1396253b8e6dd87dc8105588c5e40d", "content_id": "aa476e3af486711d081c0965022b8e80cba9a8e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "permissive", "max_line_length": 84, "num_lines": 19, "path": "/backpack/extensions/secondorder/diag_hessian/activations.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.relu import ReLUDerivatives\nfrom backpack.core.derivatives.sigmoid import SigmoidDerivatives\nfrom backpack.core.derivatives.tanh import TanhDerivatives\nfrom backpack.extensions.secondorder.diag_hessian.diag_h_base import DiagHBaseModule\n\n\nclass DiagHReLU(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=ReLUDerivatives())\n\n\nclass DiagHSigmoid(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=SigmoidDerivatives())\n\n\nclass DiagHTanh(DiagHBaseModule):\n def __init__(self):\n super().__init__(derivatives=TanhDerivatives())\n" }, { "alpha_fraction": 0.7267080545425415, "alphanum_fraction": 0.739130437374115, "avg_line_length": 39.25, "blob_id": "0d2a722edf51936abbc5ccbcb72b9bcefcabcbaa", "content_id": "5c6137b7317a0f86e91cf9e46ef174ad1b3cc18b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "permissive", "max_line_length": 84, "num_lines": 8, "path": "/backpack/extensions/firstorder/fisher/conv1d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.conv1d import Conv1DDerivatives\nfrom backpack.extensions.firstorder.fisher.fisher_base import FisherBase\n\n\nclass FisherConv1d(FisherBase):\n def __init__(self, silent=False):\n self.silent = silent\n super().__init__(derivatives=Conv1DDerivatives(), params=[\"bias\", \"weight\"])\n" }, { "alpha_fraction": 0.5759878158569336, "alphanum_fraction": 0.5790273547172546, "avg_line_length": 
15.04878044128418, "blob_id": "d967d67761d51d7f65351bf460e98221e0fe5684", "content_id": "d6549ea586ab3d55df5e8ed3dc900971862a40ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "permissive", "max_line_length": 109, "num_lines": 41, "path": "/backpack/extensions/__init__.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "\"\"\"\nBackPACK Extensions\n\"\"\"\n\nfrom .curvmatprod import GGNMP, HMP, PCHMP\nfrom .firstorder import BatchGrad, BatchL2Grad, SumGradSquared, Variance, Fisher, FisherBlock, FisherBlockEff\nfrom .secondorder import (\n TRIAL,\n HBP,\n KFAC,\n KFLR,\n KFRA,\n DiagGGN,\n DiagGGNExact,\n DiagGGNMC,\n DiagHessian,\n MNGD,\n)\n\n__all__ = [\n \"MNGD\",\n \"Fisher\",\n \"FisherBlock\",\n \"FisherBlockEff\",\n \"TRIAL\",\n \"PCHMP\",\n \"GGNMP\",\n \"HMP\",\n \"BatchL2Grad\",\n \"BatchGrad\",\n \"SumGradSquared\",\n \"Variance\",\n \"KFAC\",\n \"KFLR\",\n \"KFRA\",\n \"HBP\",\n \"DiagGGNExact\",\n \"DiagGGNMC\",\n \"DiagGGN\",\n \"DiagHessian\",\n]\n" }, { "alpha_fraction": 0.6435797810554504, "alphanum_fraction": 0.6552529335021973, "avg_line_length": 34.20547866821289, "blob_id": "f1eb9c043e98256f54c402a48d16c86f98d73329", "content_id": "b08edf85fd9f4419d67993c0f7fb002da754a0d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2570, "license_type": "permissive", "max_line_length": 88, "num_lines": 73, "path": "/backpack/utils/conv_transpose.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.nn.functional import conv_transpose1d, conv_transpose2d, conv_transpose3d\n\nfrom backpack.utils.ein import eingroup\n\n\ndef get_convtranspose1d_weight_gradient_factors(input, grad_out, module):\n N, C_in = input.shape[0], input.shape[1]\n kernel_size = module.kernel_size\n kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))\n\n X = unfold_by_conv_transpose(input, module).reshape(N, C_in * kernel_size_numel, -1)\n return X, grad_out\n\n\ndef get_convtranspose2d_weight_gradient_factors(input, grad_out, module):\n N, C_in = input.shape[0], input.shape[1]\n kernel_size = module.kernel_size\n kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))\n\n X = unfold_by_conv_transpose(input, module).reshape(N, C_in * kernel_size_numel, -1)\n dE_dY = eingroup(\"n,c,h,w->n,c,hw\", grad_out)\n return X, dE_dY\n\n\ndef get_convtranspose3d_weight_gradient_factors(input, grad_out, module):\n N, C_in = input.shape[0], input.shape[1]\n kernel_size = module.kernel_size\n kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))\n\n X = unfold_by_conv_transpose(input, module).reshape(N, C_in * kernel_size_numel, -1)\n dE_dY = eingroup(\"n,c,d,h,w->n,c,dhw\", grad_out)\n return X, dE_dY\n\n\ndef unfold_by_conv_transpose(input, module):\n \"\"\"Return the unfolded input using transpose convolution.\"\"\"\n N, C_in = input.shape[0], input.shape[1]\n kernel_size = module.kernel_size\n kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))\n\n def make_weight():\n weight = torch.zeros(1, kernel_size_numel, *kernel_size)\n\n for i in range(kernel_size_numel):\n extraction = torch.zeros(kernel_size_numel)\n extraction[i] = 1.0\n weight[0, i] = extraction.reshape(*kernel_size)\n\n repeat = [C_in, 1] + [1 for _ in kernel_size]\n weight = weight.repeat(*repeat)\n return weight.to(module.weight.device)\n\n def get_conv_transpose():\n functional_for_module_cls = {\n 
torch.nn.ConvTranspose1d: conv_transpose1d,\n torch.nn.ConvTranspose2d: conv_transpose2d,\n torch.nn.ConvTranspose3d: conv_transpose3d,\n }\n return functional_for_module_cls[module.__class__]\n\n conv_transpose = get_conv_transpose()\n unfold = conv_transpose(\n input,\n make_weight().to(module.weight.device),\n bias=None,\n stride=module.stride,\n padding=module.padding,\n dilation=module.dilation,\n groups=C_in,\n )\n\n return unfold.reshape(N, -1, kernel_size_numel)\n" }, { "alpha_fraction": 0.6087591052055359, "alphanum_fraction": 0.6124087572097778, "avg_line_length": 33.25, "blob_id": "eb39c26cb6d23ab12e8d9cb59b4506d505ebca99", "content_id": "e474680f36e3c751c96f0b2462f3a6b6f240fc51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1370, "license_type": "permissive", "max_line_length": 81, "num_lines": 40, "path": "/test/extensions/implementation/backpack.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from test.extensions.implementation.base import ExtensionsImplementation\n\nimport backpack.extensions as new_ext\nfrom backpack import backpack\n\n\nclass BackpackExtensions(ExtensionsImplementation):\n \"\"\"Extension implementations with BackPACK.\"\"\"\n\n def __init__(self, problem):\n problem.extend()\n super().__init__(problem)\n\n def batch_grad(self):\n with backpack(new_ext.BatchGrad()):\n _, _, loss = self.problem.forward_pass()\n loss.backward()\n batch_grads = [p.grad_batch for p in self.problem.model.parameters()]\n return batch_grads\n\n def batch_l2_grad(self):\n with backpack(new_ext.BatchL2Grad()):\n _, _, loss = self.problem.forward_pass()\n loss.backward()\n batch_l2_grad = [p.batch_l2 for p in self.problem.model.parameters()]\n return batch_l2_grad\n\n def sgs(self):\n with backpack(new_ext.SumGradSquared()):\n _, _, loss = self.problem.forward_pass()\n loss.backward()\n sgs = [p.sum_grad_squared for p in self.problem.model.parameters()]\n return sgs\n\n def variance(self):\n with backpack(new_ext.Variance()):\n _, _, loss = self.problem.forward_pass()\n loss.backward()\n variances = [p.variance for p in self.problem.model.parameters()]\n return variances\n" }, { "alpha_fraction": 0.6818851232528687, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 34.21052551269531, "blob_id": "ad0fd58813aaba8236a6b025257feecd6355f6c4", "content_id": "b087fe2ef2939c46a754ed2904a03978e11f0e7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "permissive", "max_line_length": 89, "num_lines": 19, "path": "/backpack/extensions/firstorder/fisher_block/batchnorm2d.py", "repo_name": "b-mu/backpack", "src_encoding": "UTF-8", "text": "from backpack.core.derivatives.batchnorm2d import BatchNorm2dDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\n\nfrom torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nclass FisherBlockBatchNorm2d(FisherBlockBase):\n def __init__(self, damping=1.0):\n self.damping = damping\n super().__init__(derivatives=BatchNorm2dDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n \n return module.weight.grad\n \n\n def bias(self, ext, module, g_inp, g_out, backproped):\n \n return module.bias.grad\n \n\n" } ]
76
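The mngd and fisher extensions listed above all reduce to the same n x n per-sample kernel built from two einsum Gram matrices (see mngd/linear.py). Below is a minimal, self-contained sketch of that computation for a single linear layer, assuming only PyTorch; the function name ngd_update_linear and the toy shapes are illustrative and not part of the repository. It uses torch.linalg.solve instead of the explicit inverse in the source, which is numerically preferable but otherwise equivalent.

import torch

def ngd_update_linear(I, G, grad, damping=1.0):
    # I: (n, d_in) layer inputs; G: (n, d_out) batch-scaled output gradients;
    # grad: (d_out, d_in) averaged weight gradient. Names follow mngd/linear.py.
    n = G.shape[0]
    B = torch.einsum("ni,li->nl", I, I)  # Gram matrix of the inputs
    A = torch.einsum("no,lo->nl", G, G)  # Gram matrix of the output gradients
    NGD_kernel = A * B / n               # n x n kernel instead of a full Fisher matrix
    grad_prod = torch.einsum("no,no->n", torch.einsum("ni,oi->no", I, grad), G)
    v = torch.linalg.solve(NGD_kernel + damping * torch.eye(n), grad_prod)
    gv = torch.einsum("no,ni->oi", torch.einsum("n,no->no", v, G), I) / n
    return (grad - gv) / damping         # damped natural-gradient update

I, G = torch.randn(8, 5), torch.randn(8, 3)
print(ngd_update_linear(I, G, G.t() @ I / 8).shape)  # torch.Size([3, 5])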
cpchristensen/crud-application
https://github.com/cpchristensen/crud-application
e306d26eccf927cd909aa7f8c03561cb8ba7f223
f36467cdcf61102674a0b074b42738b45ca555ce
a618a3962d283eae86d589c85c9ff6b0d89325c0
refs/heads/master
2021-01-07T18:20:03.497125
2020-02-20T03:10:55
2020-02-20T03:10:55
241,781,022
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5828693509101868, "alphanum_fraction": 0.5980728268623352, "avg_line_length": 30.554054260253906, "blob_id": "7847efb051bc5d7d5279068bf4c2ade6a0ad04ad", "content_id": "8381ec2546a0aa86a6d3ad85a5b2264e446e33e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4670, "license_type": "no_license", "max_line_length": 194, "num_lines": 148, "path": "/adv-prog-proj.py", "repo_name": "cpchristensen/crud-application", "src_encoding": "UTF-8", "text": "import json\nfrom bson import json_util\nfrom pymongo import MongoClient\nimport bottle\n\nconnection = MongoClient('localhost', 27017)\ndb = connection['market']\ncollection = db['stocks']\n\nhtml_template = \"<!DOCTYPE html><html><head><title>CRUD App</title><head/><body><h1>JSON Response</h1><p>REPLACE_HERE</p><p><a href=\\\"http://localhost:8080/index\\\">Go back</a></p><body/></html>\"\n\nBASE_URL = \"/stocks/api/v1.0/\"\n\[email protected](BASE_URL + \"createStock/\", method=\"POST\")\ndef createStock():\n ticker = bottle.request.forms.get(\"ticker\")\n document = json_util.loads(bottle.request.forms.get(\"json-payload\"))\n \n # Sets the correct Ticker for the new stock.\n document[\"Ticker\"] = ticker\n \n try:\n collection.insert_one(document)\n bottle.response.status = 200\n with open(\"./success.html\", \"r\") as f:\n data = f.read()\n return data\n except Exception as e:\n bottle.response.status = 500\n with open(\"./failure.html\", \"r\") as f:\n data = f.read()\n return data\n\n\[email protected](BASE_URL + \"getStock/\", method=\"POST\")\ndef getStock():\n ticker = bottle.request.forms.get(\"ticker\")\n\n try:\n res = collection.find_one({\"Ticker\": ticker})\n bottle.response.status = 200\n return html_template.replace(\"REPLACE_HERE\", json_util.dumps(res))\n except Exception as e:\n bottle.response.status = 404\n with open(\"./failure.html\", \"r\") as f:\n data = f.read()\n return data\n\[email protected](BASE_URL + \"updateStock/\", method=\"POST\")\ndef updateStock():\n ticker = bottle.request.forms.get(\"ticker\")\n document = json_util.loads(bottle.request.forms.get(\"json-payload\"))\n\n try:\n res = collection.update_one({\"Ticker\" : ticker}, {\"$set\" : document}, upsert=False)\n except:\n bottle.response.status = 404\n with open(\"./failure.html\", \"r\") as f:\n data = f.read()\n return data\n\n bottle.response.status = 200\n with open(\"./success.html\", \"r\") as f:\n data = f.read()\n return data\n\[email protected](BASE_URL + \"deleteStock/\", method=\"POST\")\ndef deleteStock():\n ticker = bottle.request.forms.get(\"ticker\")\n\n try:\n res = collection.delete_one({\"Ticker\": ticker})\n bottle.response.status = 200\n with open(\"./success.html\", \"r\") as f:\n data = f.read()\n return data\n except:\n bottle.response.status = 400\n with open(\"./failure.html\", \"r\") as f:\n data = f.read()\n return data\n \[email protected](BASE_URL + \"stockReport/\", method=\"POST\")\ndef stockReport():\n tickers = bottle.request.forms.get(\"tickers\").split(\",\")\n print(tickers)\n\n res = {}\n try:\n for t in tickers:\n res[t] = collection.find_one({\"Ticker\": t})\n bottle.response.status = 200\n return html_template.replace(\"REPLACE_HERE\", json_util.dumps(res))\n except Exception as e:\n bottle.response.status = 404\n with open(\"./failure.html\", \"r\") as f:\n data = f.read()\n return data\n \[email protected](BASE_URL + \"industryReport/\", method=\"POST\")\ndef industryReport():\n industry = bottle.request.forms.get(\"industry\")\n 
industry = industry.replace(\"%20\", \" \")\n    industry = industry.replace(\"%26\", \"&\")\n    industry = industry.replace(\"%2E\", \".\")\n\n    try:\n        res = collection.find({\"Industry\": industry})\n        res = res.sort([(\"Performance (Year)\", -1)]).limit(5)\n        bottle.response.status = 200\n        # materialize the cursor so json_util can serialize it\n        return html_template.replace(\"REPLACE_HERE\", json_util.dumps(list(res)))\n    except Exception as e:\n        print(e)\n        bottle.response.status = 404\n        with open(\"./failure.html\", \"r\") as f:\n            data = f.read()\n        return data\n\[email protected](BASE_URL + \"portfolio/\", method=\"POST\")\ndef portfolio():\n    company_name = bottle.request.forms.get(\"company\")\n    company_name = company_name.replace(\"%20\", \" \")\n    company_name = company_name.replace(\"%26\", \"&\")\n    company_name = company_name.replace(\"%2E\", \".\")\n\n    try:\n        res = collection.find_one({\"Company\": company_name})\n        industry = res[\"Industry\"]\n        res = collection.find({\"Industry\": industry})\n        bottle.response.status = 200\n        # materialize the cursor so json_util can serialize it\n        return html_template.replace(\"REPLACE_HERE\", json_util.dumps(list(res)))\n    except Exception as e:\n        print(e)\n        bottle.response.status = 404\n        with open(\"./failure.html\", \"r\") as f:\n            data = f.read()\n        return data\n\[email protected](\"/index\", method=\"GET\")\ndef index():\n    with open(\"./index.html\", \"r\") as f:\n        data = f.read()\n    return data\n\ndef main():\n    bottle.run(host=\"localhost\", port=8080)\n\nmain()\n" } ]
1
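The bottle routes in adv-prog-proj.py above all take form-encoded POSTs (fields ticker and json-payload) and return HTML with the JSON embedded. A hypothetical client-side smoke test for them, assuming the app is running on localhost:8080 and the requests package is installed; the ticker ABC and the payload fields are made up for illustration:

import json
import requests

BASE_URL = "http://localhost:8080/stocks/api/v1.0/"

# create a document, then read it back through the same form-encoded endpoints
payload = {"Ticker": "ABC", "Industry": "Software", "Performance (Year)": 12.5}
requests.post(BASE_URL + "createStock/",
              data={"ticker": "ABC", "json-payload": json.dumps(payload)})

resp = requests.post(BASE_URL + "getStock/", data={"ticker": "ABC"})
print(resp.status_code)  # 200 if the document was found
print(resp.text)         # HTML page with the JSON response embedded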
suiyidajiangyou/machineLearningPython
https://github.com/suiyidajiangyou/machineLearningPython
bc5b867f69d50a09f30581fbb11b040021d8d336
01970c477908ecfce61a52028e993e6afcec5ce9
536299a3da4c77bb5d6bd4da8c592061b0079fbf
refs/heads/master
2021-01-01T17:33:56.589786
2017-07-23T13:21:45
2017-07-23T13:21:45
98,097,494
2
0
null
2017-07-23T12:49:29
2017-07-23T12:54:06
2017-07-23T13:21:55
Python
[ { "alpha_fraction": 0.650602400302887, "alphanum_fraction": 0.674960732460022, "avg_line_length": 36.36190414428711, "blob_id": "7f85f9d3d38bc9b47afab1a1994cd8b9e3588a33", "content_id": "3d6f4ca19cfc6618fb0ae00564274ac1e6a36ac6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3928, "license_type": "no_license", "max_line_length": 113, "num_lines": 105, "path": "/code/knn.py", "repo_name": "suiyidajiangyou/machineLearningPython", "src_encoding": "UTF-8", "text": "from numpy import *\nimport operator\n\ndef creatDataSet():\n    group = array([[1.0 , 1.1],[1.0 , 1.0],[0 , 0],[0,0.1]])\n    labels = ['A','A','B','B']\n    return group,labels\n# group,labels = creatDataSet()\n\n#print(group,'\\n',labels)\n\ndef classify0(inX,dataSet,labels,k):\n    dataSetSize = dataSet.shape[0] # shape[0] is the length of the first dimension of the matrix\n    diffMat = tile(inX,(dataSetSize,1)) - dataSet\n    sqDiffMat = diffMat**2\n    sqDistances = sqDiffMat.sum(axis=1) # sum the entries of each row vector\n    distances = sqDistances**0.5\n    sortedDistIndicies = distances.argsort() # sort ascending; returns the indices\n    classCount={}\n    for i in range(k):\n        voteIlabel = labels[sortedDistIndicies[i]]\n        classCount[voteIlabel] = classCount.get(voteIlabel,0)+1 # count the votes for each class\n    sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n    return sortedClassCount[0][0]\n#print (classify0([0,0.5],group,labels,3))\n\ndef file2matrix(filename):\n\n    fr = open(filename)\n    arrayOLines = fr.readlines()\n    numberOfLines = len(arrayOLines)\n    returnMat = zeros((numberOfLines,3))\n    classLabelVector = []\n    index = 0\n    for line in arrayOLines:\n        line = line.strip()\n        listFromLine = line.split('\\t')\n        returnMat[index,:] = listFromLine[0:3]\n        classLabelVector.append(int(listFromLine[-1]))\n        index=index+1\n    return returnMat,classLabelVector\n\n# datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')\n#print(datingDataMat,datingLabels)\n\n# import matplotlib\n# import matplotlib.pyplot as plt\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n#\n# #print(datingDataMat[:,0])\n#\n# ax.scatter(datingDataMat[:,0],datingDataMat[:,1],15.0*array(datingLabels),15.0*array(datingLabels)) # use the labels to vary marker size and color\n#\n# plt.show()\ndef autoNorm(dataSet):\n    minVals = dataSet.min(0)\n    maxVals = dataSet.max(0)\n    ranges = maxVals-minVals\n    normDataSet = zeros(shape(dataSet))\n    m = dataSet.shape[0]\n    normDataSet = dataSet - tile(minVals,(m,1))\n    normDataSet = normDataSet/tile(ranges,(m,1))\n    return normDataSet,ranges,minVals\n# normMat,ranges,minVals = autoNorm(datingDataMat)\n\n# print(normMat)\n# print(ranges)\n\ndef datingClassTest():\n    hoRatio = 0.10\n    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')\n    normMat,ranges,minVals = autoNorm(datingDataMat)\n    m = normMat.shape[0]\n    numTestVecs = int(m*hoRatio)\n    errorCount = 0.0\n    for i in range(numTestVecs):\n        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)\n        print (\"the classifier came back with :%d,the real answer is : %d\"%(classifierResult,datingLabels[i]))\n        if (classifierResult != datingLabels[i]):\n            errorCount +=1.0\n    print(\"the total error rate is : %f\"%(errorCount/float(numTestVecs)))\n# datingClassTest()\n\ndef classifyPerson():\n    resultList = ['not at all','in small doses','in large doses']\n    percentTals = float(input(\"percentage of time spent playing video games?\"))\n    ffMiles = float(input(\"frequent flier miles earned per year?\"))\n    iceCream = float(input(\"liters of ice cream consumed per year?\"))\n    datingDataMat, datingLabels = 
file2matrix('datingTestSet2.txt')\n    normMat, ranges, minVals = autoNorm(datingDataMat)\n    inArr = array([ffMiles, percentTals,iceCream])\n    classifierResult = classify0((inArr-minVals)/ranges,normMat,datingLabels,3)\n    print(\"You will probably like this person: \",resultList[classifierResult - 1])\n#classifyPerson()\n\ndef img2vector(filename):\n    returnVect = zeros((1,1024))  # zeros expects a shape tuple\n    fr = open(filename)\n    for i in range (32):\n        lineStr = fr.readline()\n        for j in range (32):\n            returnVect[0,32*i+j] = int(lineStr[j])\n    return returnVect\nimg2vector(r'C:\\lcf\\coding\\machinlearning\\2.3\\trainingDigits\\0_12.txt')\n" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6448979377746582, "avg_line_length": 40.60377502441406, "blob_id": "507d7bf1db045f1f1275819ac3fb29a02b62cd04", "content_id": "3c6d7cf589e9f85333ef7f254dc50181f67b41b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2299, "license_type": "no_license", "max_line_length": 104, "num_lines": 53, "path": "/2.3/shouxieshuzi.py", "repo_name": "suiyidajiangyou/machineLearningPython", "src_encoding": "UTF-8", "text": "from numpy import *\nimport operator\nfrom os import listdir\ndef img2vector(filename):\n    returnVect = zeros((1,1024))\n    fr = open(filename)\n    for i in range (32):\n        lineStr = fr.readline()\n        for j in range (32):\n            returnVect[0,32*i+j] = int(lineStr[j])\n    return returnVect\n#print (img2vector(r'trainingDigits\\0_12.txt')[0,0:31])\n\ndef classify0(inX,dataSet,labels,k):\n    dataSetSize = dataSet.shape[0] # shape[0] is the length of the first dimension of the matrix\n    diffMat = tile(inX,(dataSetSize,1)) - dataSet\n    sqDiffMat = diffMat**2\n    sqDistances = sqDiffMat.sum(axis=1) # sum the entries of each row vector\n    distances = sqDistances**0.5\n    sortedDistIndicies = distances.argsort() # sort ascending; returns the indices\n    classCount={}\n    for i in range(k):\n        voteIlabel = labels[sortedDistIndicies[i]]\n        classCount[voteIlabel] = classCount.get(voteIlabel,0)+1 # count the votes for each class\n    sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n    return sortedClassCount[0][0]\n\ndef handwritingClassTest():\n    hwLabels = []\n    trainingFlieList = listdir('trainingDigits')\n    m = len(trainingFlieList)\n    trainingMat = zeros((m,1024))\n    for i in range(m):\n        fileNameStr = trainingFlieList[i]\n        fileStr = fileNameStr.split('.')[0]\n        classNumStr = int(fileStr.split('_')[0])\n        hwLabels.append(classNumStr)\n        trainingMat[i,:] = img2vector('trainingDigits/%s'%fileNameStr)\n    testFileList = listdir('testDigits')\n    errorCount = 0.0\n    mTest = len(testFileList)\n    for i in range(mTest):\n        fileNameStr = testFileList[i]\n        fileStr = fileNameStr.split('.')[0] # strip the file extension\n        classNumStr = int(fileStr.split('_')[0])\n        vectorUnderTest = img2vector('testDigits/%s' %fileNameStr)\n        classifierResult = classify0(vectorUnderTest,trainingMat,hwLabels,3)\n        print(\"the classifier came back with: %d,the real answer is :%d\"%(classifierResult,classNumStr))\n        if (classifierResult != classNumStr):\n            errorCount +=1.0\n    print (\"\\n the total number of errors is :%d\"%errorCount)\n    print (\"\\nthe total error rate is : %f\"% (errorCount/float(mTest)))\nhandwritingClassTest()\n" } ]
2
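classify0 in code/knn.py (duplicated in 2.3/shouxieshuzi.py) is a plain distance-sort-vote k-nearest-neighbour classifier. A short, self-contained demo of the same vote on the toy dataset from creatDataSet; the query point [0, 0.2] is made up for illustration, and 'B' is the expected answer because two of its three nearest neighbours are labelled B:

import operator
from numpy import array, tile

def classify0(inX, dataSet, labels, k):
    diffMat = tile(inX, (dataSet.shape[0], 1)) - dataSet
    distances = (diffMat ** 2).sum(axis=1) ** 0.5  # Euclidean distance to every sample
    classCount = {}
    for idx in distances.argsort()[:k]:            # indices of the k nearest samples
        classCount[labels[idx]] = classCount.get(labels[idx], 0) + 1
    return sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)[0][0]

group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
print(classify0([0, 0.2], group, labels, 3))       # -> 'B'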
ptillemans/pas.plugins.ldap
https://github.com/ptillemans/pas.plugins.ldap
714b726658cec8cf11ff92ad2ea48106cb631eeb
760c8a114924c3e7d56f0bf131a7584a16f30b58
1f841fb4027d01d603460f89f573017a73bfe6c1
refs/heads/master
2021-01-16T18:15:01.892986
2013-09-11T15:09:03
2013-09-11T15:09:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.698305070400238, "alphanum_fraction": 0.698305070400238, "avg_line_length": 26.65625, "blob_id": "1915539383a907940dd4fe96ad42614635cd5f94", "content_id": "6944455dfff600a7362f5e008c1915209294dc10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 72, "num_lines": 32, "path": "/src/pas/plugins/ldap/plonecontrolpanel/cache.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "from zope.interface import implementer\nfrom zope.component import queryUtility\nfrom persistent import Persistent\nfrom plone.registry import (\n field,\n Record,\n)\nfrom plone.registry.interfaces import IRegistry\nfrom pas.plugins.ldap.interfaces import ICacheSettingsRecordProvider\n\n\nREGKEY = 'pas.plugins.ldap.memcached'\n\n\nclass NullRecord(object):\n value = ''\n\n\n@implementer(ICacheSettingsRecordProvider)\nclass CacheSettingsRecordProvider(Persistent):\n\n def __call__(self):\n registry = queryUtility(IRegistry)\n if not registry:\n # XXX must not happen, be gentle anyway\n return NullRecord()\n records = registry.records\n if REGKEY not in records:\n # init if not exist\n value = field.TextLine(title=u'servers, delimited by space')\n records[REGKEY] = Record(value)\n return records[REGKEY]\n" }, { "alpha_fraction": 0.7191011309623718, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 25.700000762939453, "blob_id": "2dc92aa0b20ac4b74829ec017fa9fa493322b310", "content_id": "9d3de1e5bb3d4dded6f7a0fc0ebb8c9faa61d9a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/src/pas/plugins/ldap/zmi/manage_plugin.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "from pas.plugins.ldap.properties import BasePropertiesForm\n\nclass ManageLDAPPlugin(BasePropertiesForm):\n\n @property\n def plugin(self):\n return self.context\n\n def next(self, request):\n return '%s/manage_ldapplugin' % self.context.absolute_url()\n" }, { "alpha_fraction": 0.768750011920929, "alphanum_fraction": 0.7718750238418579, "avg_line_length": 28.090909957885742, "blob_id": "e2bb0e41a0cf1af66e8473a2736a6f0c931be9fb", "content_id": "7df346b2a9d7f96d03bba90bdc382067215982a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/src/pas/plugins/ldap/plonecontrolpanel/__init__.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "from zope.interface import implementer\nfrom Products.CMFQuickInstallerTool.interfaces import INonInstallable\n\n\n@implementer(INonInstallable)\nclass HiddenProfiles(object):\n \"\"\"This hides zope2 profile from the quick installer tool.\n \"\"\"\n\n def getNonInstallableProducts(self):\n return ['pas.plugins.ldap']\n" }, { "alpha_fraction": 0.5881748199462891, "alphanum_fraction": 0.5897172093391418, "avg_line_length": 28.923076629638672, "blob_id": "b2f05ce7bb9e49886cd9f68a9488db5d69167ad0", "content_id": "ddbb3710b9ac35c31d941fb6877cd01db9d589a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1945, "license_type": "no_license", "max_line_length": 79, "num_lines": 65, "path": "/src/pas/plugins/ldap/plonecontrolpanel/inspector.py", "repo_name": 
"ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport json\nfrom node.utils import encode\nfrom node.ext.ldap import LDAPNode\nfrom node.ext.ldap.interfaces import (\n ILDAPProps,\n ILDAPUsersConfig,\n ILDAPGroupsConfig,\n)\nfrom zope.component import getUtility\nfrom Products.Five import BrowserView\nfrom Products.CMFCore.interfaces import ISiteRoot\n\n\nclass LDAPInspector(BrowserView):\n\n @property\n def plugin(self):\n portal = getUtility(ISiteRoot)\n aclu = portal.acl_users\n plugin = aclu.pasldap\n return plugin\n\n @property\n def props(self):\n return ILDAPProps(self.plugin)\n\n def users_children(self):\n users = ILDAPUsersConfig(self.plugin)\n return self.children(users.baseDN)\n\n def groups_children(self):\n groups = ILDAPGroupsConfig(self.plugin)\n return self.children(groups.baseDN)\n\n def node_attributes(self):\n rdn = self.request['rdn']\n base = self.request['base']\n if base == 'users':\n users = ILDAPUsersConfig(self.plugin)\n baseDN = users.baseDN\n else:\n groups = ILDAPGroupsConfig(self.plugin)\n baseDN = groups.baseDN\n root = LDAPNode(baseDN, self.props)\n node = root[rdn]\n ret = dict()\n for key, val in node.attrs.items():\n try:\n if not node.attrs.is_binary(key):\n ret[encode(key)] = encode(val)\n else:\n ret[encode(key)] = \"(Binary Data with %d Bytes)\" % len(val)\n except UnicodeDecodeError, e:\n ret[key.encode('utf-8')] = '! (UnicodeDecodeError)'\n except Exception, e:\n ret[key.encode('utf-8')] = '! (Unknown Exception)'\n return json.dumps(ret)\n\n def children(self, baseDN):\n node = LDAPNode(baseDN, self.props)\n ret = list()\n for key in node:\n ret.append({'rdn': key})\n return json.dumps(ret)\n" }, { "alpha_fraction": 0.7266514897346497, "alphanum_fraction": 0.7266514897346497, "avg_line_length": 24.823530197143555, "blob_id": "26263eb5844c175d7f41abccd18dd147424a8077", "content_id": "6341da7a2124ee9f00c27654d14c5ce441c4a5a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/src/pas/plugins/ldap/cache.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "from zope.component import queryUtility\nfrom bda.cache import (\n Memcached,\n NullCache,\n)\nfrom .interfaces import ICacheSettingsRecordProvider\n\n\ndef cacheProviderFactory():\n recordProvider = queryUtility(ICacheSettingsRecordProvider)\n if not recordProvider:\n return NullCache()\n value = recordProvider().value or ''\n servers = value.split()\n if servers:\n return Memcached(servers)\n return NullCache()\n" }, { "alpha_fraction": 0.6059120297431946, "alphanum_fraction": 0.6064407825469971, "avg_line_length": 32.181819915771484, "blob_id": "84bbad0b0d477b0ba2db7b7720e3b43845a02e49", "content_id": "c1f11aac9323ca6d74a15c64f3c708393fa0b793", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20805, "license_type": "no_license", "max_line_length": 80, "num_lines": 627, "path": "/src/pas/plugins/ldap/plugin.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport ldap\nimport logging\nfrom zope.interface import implementer\nfrom zope.globalrequest import getRequest\nfrom node.ext.ldap.interfaces import (\n ILDAPProps,\n ILDAPUsersConfig,\n ILDAPGroupsConfig,\n)\nfrom node.ext.ldap.base import (\n encode_utf8,\n decode_utf8,\n)\nfrom node.ext.ldap.ugm import 
Ugm\nfrom BTrees import OOBTree\nfrom App.class_init import InitializeClass\nfrom AccessControl import ClassSecurityInfo\nfrom Products.PageTemplates.PageTemplateFile import PageTemplateFile\nfrom Products.PluggableAuthService.permissions import (\n ManageUsers,\n ManageGroups,\n)\nfrom Products.PluggableAuthService.plugins.BasePlugin import BasePlugin\nfrom Products.PluggableAuthService.interfaces import plugins as pas_interfaces\nfrom Products.PlonePAS import interfaces as plonepas_interfaces\nfrom Products.PlonePAS.plugins.group import PloneGroup\nfrom .sheet import LDAPUserPropertySheet\nfrom .interfaces import ILDAPPlugin\n\n\nlogger = logging.getLogger('pas.plugins.ldap')\nzmidir = os.path.join(os.path.dirname(__file__), 'zmi')\n\n\ndef manage_addLDAPPlugin(dispatcher, id, title='', RESPONSE=None, **kw):\n \"\"\"Create an instance of a LDAP Plugin.\n \"\"\"\n ldapplugin = LDAPPlugin(id, title, **kw)\n dispatcher._setObject(ldapplugin.getId(), ldapplugin)\n if RESPONSE is not None:\n RESPONSE.redirect('manage_workspace')\n\n\nmanage_addLDAPPluginForm = PageTemplateFile(\n os.path.join(zmidir, 'add_plugin.pt'),\n globals(),\n __name__='addLDAPPlugin'\n)\n\n\n@implementer(\n ILDAPPlugin,\n pas_interfaces.IAuthenticationPlugin,\n pas_interfaces.IGroupEnumerationPlugin,\n pas_interfaces.IGroupsPlugin,\n pas_interfaces.IPropertiesPlugin,\n pas_interfaces.IUserEnumerationPlugin,\n plonepas_interfaces.capabilities.IDeleteCapability,\n plonepas_interfaces.capabilities.IGroupCapability,\n plonepas_interfaces.capabilities.IPasswordSetCapability,\n plonepas_interfaces.group.IGroupManagement,\n plonepas_interfaces.group.IGroupIntrospection,\n plonepas_interfaces.plugins.IMutablePropertiesPlugin,\n plonepas_interfaces.plugins.IUserManagement)\nclass LDAPPlugin(BasePlugin):\n \"\"\"Glue layer for making node.ext.ldap available to PAS.\n \"\"\"\n security = ClassSecurityInfo()\n meta_type = 'LDAP Plugin'\n manage_options = (\n { 'label' : 'LDAP Settings',\n 'action' : 'manage_ldapplugin'\n },) + BasePlugin.manage_options\n\n #XXX: turn this to False when going productive, just in case\n _dont_swallow_my_exceptions = False # Tell PAS not to swallow our exceptions\n\n def __init__(self, id, title=None, **kw):\n self._setId(id)\n self.title = title\n self.settings = OOBTree.OOBTree()\n\n security.declarePrivate('groups_enabled')\n @property\n def groups_enabled(self):\n return self.groups is not None\n\n security.declarePrivate('users_enabled')\n @property\n def users_enabled(self):\n return self.users is not None\n\n def _ugm(self):\n request = getRequest()\n rcachekey = '_ldap_ugm_%s_' % self.getId()\n if request and rcachekey in request.keys():\n return request[rcachekey]\n props = ILDAPProps(self)\n ucfg = ILDAPUsersConfig(self)\n gcfg = ILDAPGroupsConfig(self)\n ugm = Ugm(props=props, ucfg=ucfg, gcfg=gcfg, rcfg=None)\n if request:\n request[rcachekey] = ugm\n return ugm\n\n security.declarePrivate('groups')\n @property\n def groups(self):\n request = getRequest()\n rcachekey = '_ldap_ugm_groups_%s_' % self.getId()\n if request and rcachekey in request.keys():\n return request[rcachekey]\n try:\n self._v_ldaperror = False\n groups = self._ugm().groups\n if request:\n request[rcachekey] = groups\n return groups\n except ldap.LDAPError, e:\n self._v_ldaperror = str(e)\n logger.warn('groups -> %s' % self._v_ldaperror)\n return None\n except Exception, e:\n self._v_ldaperror = str(e)\n logger.exception('groups -> %s' % self._v_ldaperror)\n return None\n\n security.declarePrivate('users')\n @property\n 
def users(self):\n        request = getRequest()\n        rcachekey = '_ldap_ugm_users_%s_' % self.getId()\n        if request and rcachekey in request.keys():\n            return request[rcachekey]\n        try:\n            self._v_ldaperror = False\n            users = self._ugm().users\n            if request:\n                request[rcachekey] = users\n            return users\n        except ldap.LDAPError, e:\n            self._v_ldaperror = str(e)\n            logger.warn('users -> %s' % self._v_ldaperror)\n            return None\n        except Exception, e:\n            self._v_ldaperror = str(e)\n            logger.exception('users -> %s' % self._v_ldaperror)\n            return None\n\n    security.declareProtected(ManageUsers, 'ldaperror')\n    @property\n    def ldaperror(self):\n        if hasattr(self, '_v_ldaperror') and self._v_ldaperror:\n            return self._v_ldaperror\n        return False\n\n    security.declarePublic('reset')\n    def reset(self):\n        # XXX flush caches\n        pass\n\n    ###\n    # pas_interfaces.IAuthenticationPlugin\n    #\n    # Map credentials to a user ID.\n    #\n    security.declarePublic('authenticateCredentials')\n    def authenticateCredentials(self, credentials):\n        \"\"\"credentials -> (userid, login)\n\n        o 'credentials' will be a mapping, as returned by IExtractionPlugin.\n\n        o Return a tuple consisting of user ID (which may be different\n          from the login name) and login\n\n        o If the credentials cannot be authenticated, return None.\n        \"\"\"\n        login = credentials.get('login')\n        pw = credentials.get('password')\n        if not (login and pw):\n            return None\n        logger.debug('credentials: %s' % credentials)\n        users = self.users\n        if not users:\n            return\n        userid = users.authenticate(login, pw)\n        if userid:\n            logger.info('logged in %s' % userid)\n            return (userid, login)\n\n    ###\n    # pas_interfaces.IGroupEnumerationPlugin\n    #\n    # Allow querying groups by ID, and searching for groups.\n    #\n    security.declarePrivate('enumerateGroups')\n    def enumerateGroups(self, id=None, exact_match=False, sort_by=None,\n                        max_results=None, **kw):\n        \"\"\" -> ( group_info_1, ... group_info_N )\n\n        o Return mappings for groups matching the given criteria.\n\n        o 'id' in combination with 'exact_match' true, will\n          return at most one mapping per supplied ID ('id' and 'login'\n          may be sequences).\n\n        o If 'exact_match' is False, then 'id' may be treated by\n          the plugin as \"contains\" searches (more complicated searches\n          may be supported by some plugins using other keyword arguments).\n\n        o If 'sort_by' is passed, the results will be sorted accordingly.\n          known valid values are 'id' (some plugins may support others).\n\n        o If 'max_results' is specified, it must be a positive integer,\n          limiting the number of returned mappings.
If unspecified, the\n          plugin should return mappings for all groups satisfying the\n          criteria.\n\n        o Minimal keys in the returned mappings:\n\n          'id' -- (required) the group ID\n\n          'pluginid' -- (required) the plugin ID (as returned by getId())\n\n          'properties_url' -- (optional) the URL to a page for updating the\n                              group's properties.\n\n          'members_url' -- (optional) the URL to a page for updating the\n                           principals who belong to the group.\n\n        o Plugin *must* ignore unknown criteria.\n\n        o Plugin may raise ValueError for invalid criteria.\n\n        o Insufficiently-specified criteria may have catastrophic\n          scaling issues for some implementations.\n        \"\"\"\n        groups = self.groups\n        if not groups:\n            logger.warn(self._v_ldaperror)\n            return ()\n        if id:\n            kw['id'] = id\n        if not kw: # show all\n            matches = groups.ids\n        else:\n            try:\n                matches = groups.search(criteria=kw, exact_match=exact_match)\n            except ValueError:\n                return ()\n        if sort_by == 'id':\n            matches = sorted(matches)\n        pluginid = self.getId()\n        ret = [dict(id=encode_utf8(id), pluginid=pluginid) for id in matches]\n        if max_results and len(ret) > max_results:\n            ret = ret[:max_results]\n        return ret\n\n    ###\n    # pas_interfaces.IGroupsPlugin\n    #\n    # Determine the groups to which a user belongs.\n    security.declarePrivate('getGroupsForPrincipal')\n    def getGroupsForPrincipal(self, principal, request=None):\n        \"\"\"principal -> ( group_1, ... group_N )\n\n        o Return a sequence of group names to which the principal\n          (either a user or another group) belongs.\n\n        o May assign groups based on values in the REQUEST object, if present\n        \"\"\"\n        users = self.users\n        if not users:\n            return tuple()\n        try:\n            _principal = users[principal.getId()]\n        except KeyError:\n            # XXX: that's where group in group will happen, but so far\n            #      group nodes do not provide membership info so we just\n            #      return if there is no user\n            return tuple()\n        if self.groups:\n            # XXX: provide group_ids function in UGM! Way too calculation-heavy\n            #      now\n            return [_.id for _ in _principal.groups]\n        return tuple()\n\n    ###\n    # pas_interfaces.IUserEnumerationPlugin\n    #\n    # Allow querying users by ID, and searching for users.\n    #\n    security.declarePrivate('enumerateUsers')\n    def enumerateUsers(self, id=None, login=None, exact_match=False,\n                       sort_by=None, max_results=None, **kw):\n        \"\"\"-> ( user_info_1, ... user_info_N )\n\n        o Return mappings for users matching the given criteria.\n\n        o 'id' or 'login', in combination with 'exact_match' true, will\n          return at most one mapping per supplied ID ('id' and 'login'\n          may be sequences).\n\n        o If 'exact_match' is False, then 'id' and / or login may be\n          treated by the plugin as \"contains\" searches (more complicated\n          searches may be supported by some plugins using other keyword\n          arguments).\n\n        o If 'sort_by' is passed, the results will be sorted accordingly.\n          known valid values are 'id' and 'login' (some plugins may support\n          others).\n\n        o If 'max_results' is specified, it must be a positive integer,\n          limiting the number of returned mappings.
If unspecified, the\n          plugin should return mappings for all users satisfying the criteria.\n\n        o Minimal keys in the returned mappings:\n\n          'id' -- (required) the user ID, which may be different than\n                  the login name\n\n          'login' -- (required) the login name\n\n          'pluginid' -- (required) the plugin ID (as returned by getId())\n\n          'editurl' -- (optional) the URL to a page for updating the\n                       mapping's user\n\n        o Plugin *must* ignore unknown criteria.\n\n        o Plugin may raise ValueError for invalid criteria.\n\n        o Insufficiently-specified criteria may have catastrophic\n          scaling issues for some implementations.\n        \"\"\"\n        # TODO: sort_by in node.ext.ldap\n        if login:\n            if not isinstance(login, basestring):\n                # XXX TODO\n                raise NotImplementedError('sequence is not supported yet.')\n            kw['login'] = login\n        \n        # pas search users gives both login and name if login is meant \n        if \"login\" in kw and \"name\" in kw:\n            del kw[\"name\"]\n        \n        if id:\n            if not isinstance(id, basestring):\n                # XXX TODO\n                raise NotImplementedError('sequence is not supported yet.')\n            kw['id'] = id\n        users = self.users\n        if not users:\n            return tuple()\n        try:\n            matches = users.search(\n                criteria=kw,\n                attrlist=('login',),\n                exact_match=exact_match)\n        except ValueError:\n            return tuple()\n        pluginid = self.getId()\n        ret = list()\n        for id, attrs in matches:\n            ret.append({\n                'id': encode_utf8(id),\n                'login': attrs['login'][0],\n                'pluginid': pluginid})\n        if max_results and len(ret) > max_results:\n            ret = ret[:max_results]\n        return ret\n\n    ###\n    # plonepas_interfaces.group.IGroupManagement\n    #\n    security.declarePrivate('addGroup')\n    def addGroup(self, id, **kw):\n        \"\"\"\n        Create a group with the supplied id, roles, and groups.\n        return True if the operation succeeded\n        \"\"\"\n        #XXX\n        return False\n\n    security.declareProtected(ManageGroups, 'addPrincipalToGroup')\n    def addPrincipalToGroup(self, principal_id, group_id):\n        \"\"\"\n        Add a given principal to the group.\n        return True on success\n        \"\"\"\n        #XXX\n        return False\n\n    security.declarePrivate('updateGroup')\n    def updateGroup(self, id, **kw):\n        \"\"\"\n        Edit the given group. plugin specific\n        return True on success\n        \"\"\"\n        #XXX\n        return False\n\n    security.declarePrivate('setRolesForGroup')\n    def setRolesForGroup(self, group_id, roles=()):\n        \"\"\"\n        set roles for group\n        return True on success\n        \"\"\"\n        # even Products.PlonePAS.plugins.GroupAwareRoleManager does not\n        # implement this. We're safe to ignore it too for now. But at least\n        # we do implement it.\n        return False\n\n    security.declarePrivate('removeGroup')\n    def removeGroup(self, group_id):\n        \"\"\"\n        Remove the given group\n        return True on success\n        \"\"\"\n        #XXX\n        return False\n\n    security.declareProtected(ManageGroups, 'removePrincipalFromGroup')\n    def removePrincipalFromGroup(self, principal_id, group_id):\n        \"\"\"\n        remove the given principal from the group\n        return True on success\n        \"\"\"\n        #XXX\n        return False\n\n    ###\n    # plonepas_interfaces.plugins.IMutablePropertiesPlugin\n    # (including signature of pas_interfaces.IPropertiesPlugin)\n    #\n    # Return a property set for a user. Property set can either be an object\n    # conforming to the IMutable property sheet interface or a dictionary (in\n    # which case the properties are not persistently mutable).\n    #\n    security.declarePrivate('getPropertiesForUser')\n    def getPropertiesForUser(self, user_or_group, request=None):\n        \"\"\"User -> IMutablePropertySheet || {}\n\n        o User will implement IPropertiedUser.
???\n\n        o Plugin may scribble on the user, if needed (but must still\n          return a mapping, even if empty).\n\n        o May assign properties based on values in the REQUEST object, if\n          present\n        \"\"\"\n        ugid = user_or_group.getId()\n        try:\n            if self.enumerateUsers(id=ugid) or self.enumerateGroups(id=ugid):\n                return LDAPUserPropertySheet(user_or_group, self)\n        except KeyError:\n            pass\n        return {}\n\n    security.declarePrivate('setPropertiesForUser')\n    def setPropertiesForUser(self, user, propertysheet):\n        \"\"\"Set modified properties on the user persistently.\n\n        Does nothing, it is called by MutablePropertySheet in\n        setProperty and setProperties. This should not affect us at\n        all as we handle setting of properties via our own\n        LDAPPropertySheet\n        \"\"\"\n        pass\n\n    security.declarePrivate('deleteUser')\n    def deleteUser(self, user_id):\n        \"\"\"Remove properties stored for a user.\n\n        Does nothing; if a user is deleted by ``doDeleteUser``, all its\n        properties are gone as well.\n        \"\"\"\n        pass\n\n    ###\n    # plonepas_interfaces.plugins.IUserManagement\n    # (including signature of pas_interfaces.IUserAdderPlugin)\n    #\n    security.declarePrivate('doAddUser')\n    def doAddUser(self, login, password):\n        \"\"\" Add a user record to a User Manager, with the given login\n            and password\n\n        o Return a Boolean indicating whether a user was added or not\n        \"\"\"\n        # XXX\n        return False\n\n    security.declarePrivate('doChangeUser')\n    def doChangeUser(self, user_id, password, **kw):\n        \"\"\"Change a user's password (differs from roles; roles are set in\n        the pas engine api for the same, but via a role\n        manager).\n        \"\"\"\n        users = self.users\n        if users:\n            users.passwd(user_id, None, password)\n\n    security.declarePrivate('doDeleteUser')\n    def doDeleteUser(self, login):\n        \"\"\"Remove a user record from a User Manager, with the given login\n            and password\n\n        o Return a Boolean indicating whether a user was removed or\n          not\n        \"\"\"\n        # XXX\n        return False\n\n    ###\n    # plonepas_interfaces.capabilities.IDeleteCapability\n    # (plone ui specific)\n    #\n    security.declarePublic('allowDeletePrincipal')\n    def allowDeletePrincipal(self, id):\n        \"\"\"True if this plugin can delete a certain user/group.\n        \"\"\"\n        # XXX\n        return False\n\n    ###\n    # plonepas_interfaces.capabilities.IGroupCapability\n    # (plone ui specific)\n    #\n    security.declarePublic('allowGroupAdd')\n    def allowGroupAdd(self, principal_id, group_id):\n        \"\"\"\n        True if this plugin will allow adding a certain principal to\n        a certain group.\n        \"\"\"\n        # XXX\n        return False\n\n    security.declarePublic('allowGroupRemove')\n    def allowGroupRemove(self, principal_id, group_id):\n        \"\"\"\n        True if this plugin will allow removing a certain principal\n        from a certain group.\n        \"\"\"\n        # XXX\n        return False\n\n    ###\n    # plonepas_interfaces.capabilities.IGroupIntrospection\n    # (plone ui specific)\n\n    def getGroupById(self, group_id):\n        \"\"\"\n        Returns the portal_groupdata-ish object for a group\n        corresponding to this id.
None if group does not exist here!\n \"\"\"\n group_id = decode_utf8(group_id)\n groups = self.groups\n if not groups or group_id not in groups.keys():\n return None\n ugmgroup = self.groups[group_id]\n title = ugmgroup.attrs.get('title', None)\n group = PloneGroup(ugmgroup.id, title).__of__(self)\n pas = self._getPAS()\n plugins = pas.plugins\n # add properties\n for propfinder_id, propfinder in \\\n plugins.listPlugins(pas_interfaces.IPropertiesPlugin):\n data = propfinder.getPropertiesForUser(group, None)\n if not data:\n continue\n group.addPropertysheet(propfinder_id, data)\n # add subgroups\n group._addGroups(pas._getGroupsForPrincipal(group, None,\n plugins=plugins))\n # add roles\n for rolemaker_id, rolemaker in \\\n plugins.listPlugins(pas_interfaces.IRolesPlugin):\n roles = rolemaker.getRolesForPrincipal(group, None)\n if not roles:\n continue\n group._addRoles(roles)\n return group\n\n def getGroups(self):\n \"\"\"\n Returns an iteration of the available groups\n \"\"\"\n return map(self.getGroupById, self.getGroupIds())\n\n def getGroupIds(self):\n \"\"\"\n Returns a list of the available groups (ids)\n \"\"\"\n return self.groups and self.groups.ids or []\n\n def getGroupMembers(self, group_id):\n \"\"\"\n return the members of the given group\n \"\"\"\n try:\n group = self.groups[group_id]\n except (KeyError, TypeError):\n return ()\n return tuple(group.member_ids)\n\n ###\n # plonepas_interfaces.capabilities.IPasswordSetCapability\n # (plone ui specific)\n #\n security.declarePublic('allowPasswordSet')\n def allowPasswordSet(self, id):\n \"\"\"True if this plugin can set the password of a certain user.\n \"\"\"\n users = self.users\n if not users:\n return False\n try:\n return len(self.users.search(criteria={'id': id},\n attrlist=(),\n exact_match=True)) > 0\n except ValueError:\n return False\n\nInitializeClass(LDAPPlugin)\n" }, { "alpha_fraction": 0.5671996474266052, "alphanum_fraction": 0.5671996474266052, "avg_line_length": 33.796993255615234, "blob_id": "9d6a49c1766bf161e2d6ca20fdeab4f9114e0b3f", "content_id": "5df2dbf27c72e44ab7d02e61399e9914440ff6f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4628, "license_type": "no_license", "max_line_length": 80, "num_lines": 133, "path": "/src/pas/plugins/ldap/plonecontrolpanel/exportimport.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "import types\nfrom zope.interface import implementer\nfrom zope.component import queryMultiAdapter\nfrom BTrees.OOBTree import OOBTree\nfrom Products.GenericSetup.utils import XMLAdapterBase\nfrom Products.GenericSetup.interfaces import IBody\n\n\ndef _get_import_export_handler(context):\n aclu = context.getSite().acl_users\n logger = context.getLogger('pas.plugins.ldap')\n if 'pasldap' not in aclu.objectIds():\n return\n pasldap = aclu.pasldap\n handler = queryMultiAdapter((pasldap, context), IBody)\n if handler is not None:\n handler.filename = '%s%s' % (handler.name, handler.suffix)\n return handler\n logger.warning(\"Can't find handler for ldap settings\")\n\n\ndef import_settings(context):\n logger = context.getLogger('pas.plugins.ldap')\n handler = _get_import_export_handler(context)\n if not handler:\n return\n body = context.readDataFile(handler.filename)\n if body is None:\n return\n handler.body = body\n logger.info(\"Imported ldap settings.\")\n\n\ndef export_settings(context):\n handler = _get_import_export_handler(context)\n if not handler:\n return\n body = handler.body\n if body is 
None:\n logger = context.getLogger('pas.plugins.ldap')\n logger.warning(\"Problem to get ldap settings.\")\n return\n context.writeDataFile(handler.filename, body, handler.mime_type)\n\n\n@implementer(IBody)\nclass LDAPPluginXMLAdapter(XMLAdapterBase):\n \"\"\"import pas groups from ldap config.\n \"\"\"\n name = 'ldapsettings'\n\n def _exportNode(self):\n node = self._getObjectNode('object')\n self._setDataAndType(self.context.settings, node)\n return node\n\n def _importNode(self, node):\n data = self._getDataByType(node)\n if not data:\n self._logger.error('data is empty')\n return\n for key in data:\n self.context.settings[key] = data[key]\n\n def _setDataAndType(self, data, node):\n if isinstance(data, (tuple, list)):\n node.setAttribute('type', 'list')\n for value in data:\n element = self._doc.createElement('element')\n self._setDataAndType(value, element)\n node.appendChild(element)\n return\n if isinstance(data, (dict, OOBTree)):\n node.setAttribute('type', 'dict')\n for key in sorted(data.keys()):\n element = self._doc.createElement('element')\n element.setAttribute('key', key)\n self._setDataAndType(data[key], element)\n node.appendChild(element)\n return\n if type(data) is types.BooleanType:\n node.setAttribute('type', 'bool')\n data = str(data)\n elif type(data) is types.IntType:\n node.setAttribute('type', 'int')\n data = str(data)\n elif type(data) is types.FloatType:\n node.setAttribute('type', 'float')\n data = str(data)\n elif type(data) in types.StringTypes:\n node.setAttribute('type', 'string')\n else:\n self._logger.warning('Invalid type %s found for key %s on export, '\\\n 'skipped.' % (type(data), data))\n return\n child = self._doc.createTextNode(data)\n node.appendChild(child)\n\n def _getDataByType(self, node):\n vtype = node.getAttribute('type')\n if vtype == 'list':\n data = list()\n for element in node.childNodes:\n if element.nodeName != 'element':\n continue\n data.append(self._getDataByType(element))\n return data\n if vtype == 'dict':\n data = dict()\n for element in node.childNodes:\n if element.nodeName != 'element':\n continue\n key = element.getAttribute('key')\n if key is None:\n self._logger.warning('No key found for dict on import, '\\\n 'skipped.')\n continue\n data.update({key: self._getDataByType(element)})\n return data\n data = self._getNodeText(node)\n if vtype == 'bool':\n data = data.lower() == 'true'\n elif vtype == 'int':\n data = int(data)\n elif vtype == 'float':\n data = float(data)\n elif vtype == 'string':\n data = str(data)\n else:\n self._logger.warning('Invalid type %s found on import, skipped.' 
%\\\n vtype)\n data = None\n return data\n" }, { "alpha_fraction": 0.7266314029693604, "alphanum_fraction": 0.7266314029693604, "avg_line_length": 22.625, "blob_id": "e6ebefcebe918d88b025bc7af21511627e33a1de", "content_id": "b1cc2707adc478280181bd87af3cab95b4fbcbb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 70, "num_lines": 24, "path": "/src/pas/plugins/ldap/__init__.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "import os\nfrom AccessControl.Permissions import add_user_folders\nfrom Products.PluggableAuthService import registerMultiPlugin\nfrom .plugin import (\n LDAPPlugin,\n manage_addLDAPPlugin,\n manage_addLDAPPluginForm,\n zmidir,\n)\n\n\n# XXX temp\nimport monkey\n\n\ndef initialize(context):\n registerMultiPlugin(LDAPPlugin.meta_type)\n context.registerClass(\n LDAPPlugin,\n permission=add_user_folders,\n icon=os.path.join(zmidir, \"ldap.png\"),\n constructors=(manage_addLDAPPluginForm, manage_addLDAPPlugin),\n visibility=None\n )\n" }, { "alpha_fraction": 0.7028985619544983, "alphanum_fraction": 0.7060041427612305, "avg_line_length": 28.272727966308594, "blob_id": "7f394350960f87d8ba5a3b371a2f9ff7d2737037", "content_id": "21f6ae2cc35fc639dc937c9507b830a7948271ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 966, "license_type": "no_license", "max_line_length": 74, "num_lines": 33, "path": "/src/pas/plugins/ldap/plonecontrolpanel/controlpanel.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom zope.component import getUtility\nfrom zope.i18nmessageid import MessageFactory\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom pas.plugins.ldap.properties import BasePropertiesForm\n\n\n_ = MessageFactory('pas.plugins.ldap')\n\n\ndef getPortal():\n return getUtility(ISiteRoot)\n\n\nclass LDAPControlPanel(BasePropertiesForm):\n\n def next(self, request):\n return '%s/plone_ldapcontrolpanel' % self.context.absolute_url()\n\n @property\n def plugin(self):\n \"\"\"ControlPanel config is only for GS installed 'pasldap' plugin\n \"\"\"\n portal = getPortal()\n aclu = portal.acl_users\n plugin = aclu.pasldap\n return plugin\n\n def save(self, widget, data):\n BasePropertiesForm.save(self, widget, data)\n messages = IStatusMessage(self.request)\n messages.addStatusMessage(_(u'LDAP Settings saved.'), type=\"info\")\n" }, { "alpha_fraction": 0.7026239037513733, "alphanum_fraction": 0.704081654548645, "avg_line_length": 26.717172622680664, "blob_id": "ffbf1656f1dcd4919619a40f9e5aa72f4d79bd3b", "content_id": "72b86fe9c07fd0041a6e43ea1c0ff0d853f41a10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2744, "license_type": "no_license", "max_line_length": 102, "num_lines": 99, "path": "/README.rst", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "This is a LDAP Plugin for the `Zope2 <http://zope2.zope.org>`_\n`Pluggable Authentication Service (PAS) <http://pypi.python.org/pypi/Products.PluggableAuthService>`_.\n\nIt provides users and/or groups from an LDAP Directory. 
It works in a plain\nZope2 even though it depends on\n`PlonePAS <http://pypi.python.org/pypi/Products.PlonePAS>`_.\nIf `Plone <http://plone.org>`_ is installed, an\nintegration layer with a setup-profile and a plone-controlpanel page is\navailable.\n\n``pas.plugins.ldap`` is **not** related to the old LDAPUserFolder/\nLDAPMultiPlugins and the packages stacked on top of it in any way.\n\nIt is based on **node.ext.ldap**, an almost framework independent ldap stack.\n\nFor now users and groups can't be added or deleted. But properties on both are\nread/write. See section *TODO*.\n\n\nInstallation\n============\n\n\nZope2\n-----\n\nAdd to the instance section of your buildout::\n\n    eggs = \n        ...\n        pas.plugins.ldap\n    \n    zcml = \n        ...\n        pas.plugins.ldap\n    \nRun buildout. Restart Zope.\n\nThen go to your acl_users folder and add an LDAP-Plugin. Configure it using the\nsettings form and activate its features with the ``activate`` tab.\n\n\nPlone\n-----\n\nAdd to the instance section of your buildout::\n\n    eggs = \n        ...\n        pas.plugins.ldap\n\nRun buildout. Restart Plone.\n\nThen go to the Plone control-panel, select ``extensions`` and install the LDAP\nPlugin. A new LDAP Settings icon appears on the left. Click it and configure the\nplugin there.\n\nTo use your own integration profile, just add to the profile's\n``metadata.xml`` file::\n\n    ...\n    <dependencies>\n        ...\n        <dependency>profile-pas.plugins.ldap.plonecontrolpanel:default</dependency>\n    </dependencies>\n    ...\n\nAdditionally ldap settings can be exported and imported with ``portal_setup``.\nYou can place the exported ``ldapsettings.xml`` in your integration profile, so\nthat it is imported again on the next install. Attention: The **ldap-password\nis in there in plain text!**\n\n\nSource Code\n===========\n\nIf you want to help with the development (improvement, update, bug-fixing, ...)\nof ``pas.plugins.ldap`` this is a great idea!\n\nThe code is located in the\n`github collective <http://github.com/bluedynamics/pas.plugins.ldap>`_.\n\nYou can clone it or `get access to the github-collective\n<http://collective.github.com/>`_ and work directly on the project.\n\nMaintainers are Robert Niederreiter, Jens Klein and the BlueDynamics Alliance\ndeveloper team. We appreciate any contribution and if a release needs\nto be done on pypi, please just contact one of us\n`dev@bluedynamics dot com <mailto:[email protected]>`_\n\n\nContributors\n============\n\n- Jens W.
Klein <jens [at] bluedynamics [dot] com>\n\n- Robert Niederreiter <rnix [at] squarewave [dot] at>\n\n- Florian Friesdorf <flo [at] chaoflow [dot] net>\n" }, { "alpha_fraction": 0.6547884345054626, "alphanum_fraction": 0.6904231905937195, "avg_line_length": 18.478260040283203, "blob_id": "fa7c36e658e6ff284c01ba85663f3eb2d4fd3550", "content_id": "20d56d57cd63e625d736f10c7d6a4fddbed3dcfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 449, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/HISTORY.rst", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "\nHistory\n=======\n\n1.0.2\n-----\n\n- sometimes ldap returns an empty string as portrait.
treat this as no portrait.\n  [jensens, 2013-09-11]\n\n1.0.1\n-----\n\n- because of a password-reset problem we figured out that pas searchUsers calls\n  the plugin's search with both login and name, which was passed to ugm and\n  always returned an empty result\n  [benniboy]\n\n1.0\n---\n\n- make it work.\n\n- base work done so far in ``bda.pasldap`` and ``bda.plone.ldap`` was merged.\n" }, { "alpha_fraction": 0.5196850299835205, "alphanum_fraction": 0.523059606552124, "avg_line_length": 22.394737243652344, "blob_id": "1a43bca7bd81038be310404b3cf00efadf99a13f", "content_id": "88751fcba5ea9e729a6562abc52627363d0d1fe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 61, "num_lines": 38, "path": "/src/pas/plugins/ldap/tests.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "import unittest\nimport doctest\nimport pprint\nfrom interlude import interact\nfrom plone.testing import (\n    layered,\n    z2,\n)\nfrom .testing import PASLDAPLayer\n\n\noptionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\noptionflags |= doctest.REPORT_ONLY_FIRST_FAILURE\n\n\nTESTFILES = [\n    ('properties.rst', PASLDAPLayer),\n    ('plugin.rst', PASLDAPLayer)\n]\n\n\ndef test_suite():\n    suite = unittest.TestSuite()\n    suite.addTests([\n        layered(\n            doctest.DocFileSuite(\n                docfile,\n                globs={'interact': interact,\n                       'pprint': pprint.pprint,\n                       'z2': z2,\n                       },\n                optionflags=optionflags,\n            ),\n            layer=layer(),\n        )\n        for docfile, layer in TESTFILES\n    ])\n    return suite\n" }, { "alpha_fraction": 0.5821963548660278, "alphanum_fraction": 0.5864459872245789, "avg_line_length": 29.414966583251953, "blob_id": "716904c11928230ac4dfb41fc9b8aac576e3a981", "content_id": "ee835ac5158fa8e40c7f80126fdfd304c929a8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4471, "license_type": "no_license", "max_line_length": 80, "num_lines": 147, "path": "/src/pas/plugins/ldap/testing.py", "repo_name": "ptillemans/pas.plugins.ldap", "src_encoding": "UTF-8", "text": "from zope.interface import (\n    Interface,\n    implementer,\n)\nfrom zope.component import (\n    adapter,\n    provideAdapter,\n    provideUtility,\n)\nfrom plone.testing import (\n    Layer,\n    zodb,\n    zca,\n    z2,\n)\nimport Zope2\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom node.ext.ldap.interfaces import (\n    ILDAPProps,\n    ILDAPUsersConfig,\n    ILDAPGroupsConfig,\n)\nfrom node.ext.ldap import testing as ldaptesting\n\n\nSITE_OWNER_NAME = SITE_OWNER_PASSWORD = 'admin'\n\n\n@implementer(ILDAPProps)\n@adapter(Interface)\ndef ldapprops(context):\n    return ldaptesting.props\n\n\n@implementer(ILDAPUsersConfig)\n@adapter(Interface)\ndef usersconfig(context):\n    return ldaptesting.LDIF_groupOfNames_10_10.ucfg\n\n\n@implementer(ILDAPGroupsConfig)\n@adapter(Interface)\ndef groupsconfig(context):\n    return ldaptesting.LDIF_groupOfNames_10_10.gcfg\n\n\nclass PASLDAPLayer(Layer):\n    # big parts copied from p.a.testing!\n\n    defaultBases = (ldaptesting.LDIF_groupOfNames_10_10, z2.STARTUP)\n\n    # Products that will be installed, plus options\n    products = (\n        ('Products.GenericSetup' , {'loadZCML': True}, ),\n        ('Products.CMFCore' , {'loadZCML': True}, ),\n        ('Products.PluggableAuthService' , {'loadZCML': True}, ),\n        ('Products.PluginRegistry' , {'loadZCML': True}, ),\n        ('Products.PlonePAS' , {'loadZCML': True}, ),\n    )\n\n    def setUp(self):\n        self['zodbDB'] = zodb.stackDemoStorage(self.get('zodbDB'),\n                                               name='PASLDAPLayer')\n        self['app'] = z2.addRequestContainer(Zope2.app(self['zodbDB'].open()),\n                                             environ=None)\n        self.setUpZCML()\n        self.setUpProducts(self['app'])\n        self.setUpDefaultContent(self['app'])\n\n    def tearDown(self):\n        self.tearDownProducts(self['app'])\n        self.tearDownZCML()\n        del self['app']\n        self['zodbDB'].close()\n        del self['zodbDB']\n\n\n    def setUpZCML(self):\n        \"\"\"Stack a new global registry and load ZCML configuration of Plone\n        and the core set of add-on products into it.\n        \"\"\"\n        # Create a new global registry\n        zca.pushGlobalRegistry()\n\n        from zope.configuration import xmlconfig\n        self['configurationContext'] = context = zca.stackConfigurationContext(\n            self.get('configurationContext'))\n\n        # Load dependent products' ZCML\n        from zope.dottedname.resolve import resolve\n\n        def loadAll(filename):\n            for p, config in self.products:\n                if not config['loadZCML']:\n                    continue\n                try:\n                    package = resolve(p)\n                except ImportError:\n                    continue\n                try:\n                    xmlconfig.file(filename, package, context=context)\n                except IOError:\n                    pass\n\n        loadAll('meta.zcml')\n        loadAll('configure.zcml')\n        loadAll('overrides.zcml')\n        provideAdapter(ldapprops)\n        provideAdapter(usersconfig)\n        provideAdapter(groupsconfig)\n        
provideUtility(self['app'], provides=ISiteRoot)\n\n    def tearDownZCML(self):\n        \"\"\"Pop the global component registry stack, effectively unregistering\n        all global components registered during layer setup.\n        \"\"\"\n        # Pop the global registry\n        zca.popGlobalRegistry()\n\n        # Zap the stacked configuration context\n        del self['configurationContext']\n\n    def setUpProducts(self, app):\n        \"\"\"Install all old-style products listed in the ``products`` tuple\n        of this class.\n        \"\"\"\n        for p, config in self.products:\n            z2.installProduct(app, p)\n\n    def tearDownProducts(self, app):\n        \"\"\"Uninstall all old-style products listed in the ``products``\n        tuple of this class.\n        \"\"\"\n        for p, config in reversed(self.products):\n            z2.uninstallProduct(app, p)\n\n    def setUpDefaultContent(self, app):\n        \"\"\"Add the site owner user to the root user folder.\"\"\"\n\n        # Create the owner user and \"log in\" so that the site object gets\n        # the right ownership information\n        app['acl_users'].userFolderAddUser(\n            SITE_OWNER_NAME,\n            SITE_OWNER_PASSWORD,\n            ['Manager'],\n            []\n        )\n" } ]
14
djcrush44/Quake
https://github.com/djcrush44/Quake
a7cab4401d945b82ec082ee14619e419e26e27e3
9c694b3723b3701157560e9bb18c2046440b2fae
23958bc9afe10fb8cbf7ff22766e93f3d7462a95
refs/heads/master
2021-05-16T00:40:09.317623
2017-10-14T22:07:46
2017-10-14T22:07:46
106,964,997
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6837545037269592, "alphanum_fraction": 0.6931408047676086, "avg_line_length": 28.457447052001953, "blob_id": "e34260c7f8757841ea0380b5162b6340a38b7661", "content_id": "e6eccdcc6f2875087a7f0830f260433d2c408b7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2770, "license_type": "no_license", "max_line_length": 114, "num_lines": 94, "path": "/quake.py", "repo_name": "djcrush44/Quake", "src_encoding": "UTF-8", "text": "import urllib.request\nimport urllib.parse\nimport time\nimport math\n\n# inputs responses until valid response is given\ndef field_input(l):\n\ttest = input(\"Input %s: \" %l)\n\twhile (not is_number(test)):\n\t\ttest = input(\"Not a valid number. Try again: \")\n\treturn test\n\n# inputs responses until valid positive integer is given\t\ndef field_integer(i):\n\ttest = input(\"Input %s: \" %i)\n\twhile (not str.isdecimal(test) or int(test)<=0):\n\t\ttest = input(\"Not a valid positive integer. Try again: \")\n\treturn test\n\n# returns probability of a quake in the next x years as a percentage\t\ndef get_prob(lam,x):\n\treturn (1-math.pow(math.e,-lam*x)) * 100\n\t\n# checks that input is a valid decimal number\t\ndef is_number(s):\n    try:\n        complex(s)\n        return True\n    except ValueError:\n        return False\n\n\n\t\t\n\t\t\n# calculate starting year from user input\nuserYears = field_integer('number of years to search')\ndate = time.gmtime(time.time())\nyear = date[0] - int(userYears)\n\n# create parameters for web query\ndata = {}\ndata['starttime'] = str(year) + time.strftime('-%m-%d')\ndata['latitude'] = field_input('latitude')\ndata['longitude'] = field_input('longitude')\ndata['maxradiuskm'] = field_input('search radius (km)')\n\n# create complete url \nurl_values = urllib.parse.urlencode(data)\nurl = 'https://earthquake.usgs.gov/fdsnws/event/1/count?format=quakeml&' + url_values\n\n# fetch query from database\nwith urllib.request.urlopen(url) as response:\n\t\tquery = response.read()\n\n\t\t\n# compute total quake count and lambda\ncount = int(query)\navg = count/int(userYears)\n\n# compute mean, median, and starting probabilities from exponential distribution\nif avg == 0:\n\tbeta = 0\nelse:\n\tbeta = 1/avg\nmedian = beta * math.log(2)\nprob = {}\n\nprob['day'] = get_prob(avg, 1/365)\nprob['month'] = get_prob(avg,1/12)\nprob[1] = get_prob(avg,1)\nprob[5] = get_prob(avg,5)\nprob[10] = get_prob(avg,10)\n\n# print results\nprint(\"\\nRESULTS\\nNumber of recorded quakes: %d\" % count)\nprint(\"Median time until next quake: %f years\" % median)\nprint(\"Mean time until next quake: %f years\\n\" % beta)\n\n\n# print each time period calculated above\nfor year in prob:\n\tif type(year) == int:\n\t\tprint(\"Probability that a quake will occur within %d year(s): %f%%\" % (int(year),prob[year]))\n\telse:\n\t\tprint(\"Probability that a quake will occur within a %s: %f%%\" % (year,prob[year]))\n\n\n# field user input for custom time search\t\t\nkeepSearching = input(\"Would you like to check the probability of other years? Response may be a decimal. [y/n] \")\nwhile keepSearching == 'y' or keepSearching == 'Y':\n\tyear = field_input(\"number of years\")\n\tp = get_prob(avg,float(year))\n\tprint(\"There is a %f%% probability of a quake within the next %s year(s)\" % (p,year))\n\tkeepSearching = input(\"Would you like to check the probability of other years?
[y/n] \")\n\t" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 9, "blob_id": "b2afa0e133f917be386d3b1b90460529845016dc", "content_id": "e38b2563be66397f03a1af3d91b6fdd7212c4cd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11, "license_type": "no_license", "max_line_length": 9, "num_lines": 1, "path": "/README.md", "repo_name": "djcrush44/Quake", "src_encoding": "UTF-8", "text": "\"# Quake\" \n" } ]
2
sudo-justinwilson/python
https://github.com/sudo-justinwilson/python
0c06c683b7e477d55fcd2654c3a1a5f7890cf151
4080773feb210f133fa50768b9b65996052f2c18
776f1353cf029ace4fd2504f1c0e3f3acd81c8fc
refs/heads/master
2021-04-15T13:27:04.206625
2017-03-26T03:13:22
2017-03-26T03:13:22
62,092,600
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7161771059036255, "alphanum_fraction": 0.72062748670578, "avg_line_length": 48.6574592590332, "blob_id": "734de0b830c2617303313c4b4cbf1f70d0e8f841", "content_id": "e8fd07f22170afd88d2ae397f732cb7fc027438a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8988, "license_type": "no_license", "max_line_length": 342, "num_lines": 181, "path": "/python_data_structures_and_algorithms/chapter7/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# Chapter 7 - Linked Lists\nLinked lists are a class of data structures in which each node keeps a reference that points to the next element in the list. This makes linked lists flexible, as their nodes can be distributed anywhere a reference can point to.\nThey are a flexible alternative to array based sequences.\n\n**Linked Lists vs Dynamic Arrays:**\nLinked lists can be more efficient with space, and faster than dynamic arrays for some operations, such as adding and removing inner elements.\nLinked list methods also have worst-case (rather than amortized) time bounds, which may be required in certain environments.\n\n## Singly Linked Lists\nA singly linked list is a structure in which each node only knows which element is next (not which came before), so list traversal can only be one way:\n\n    element1    element2    element3    element4    element5\n       ^            ^          ^            ^          ^\n       |            |          |            |          |\n      head        node1       node2        node3      tail\n     ____        ____         ____        _____     _____ \n     next ->     next ->      next ->     next ->   next ->  None\n     \n     * Linked list nodes are comprised of the attributes:\n        * element\n        * next\n     \n\nEach singly linked list node consists of the following attributes:\n    1. **object reference:**\n        A reference to the actual object that the node represents. This could be anything such as a dict, list, string...\n    2. **next reference:**\n        This is a pointer to the next node in the linked list.\nWe need to remember the head's position, so we know where the start of the list is. The tail reference is not strictly required, but without it, we would have to traverse the linked list whenever we wanted the position of the last element. It is also efficient to store the length of the list, adjusting it each time we add/remove a node.\n\nThe first node of the linked list is referred to as the _head_, and the last element is known as the _tail_.\nIt is common for iterators to stop when they receive None. We also use this convention, as the tail's next attribute points to None.\n\nWe can traverse the list by following each node's _next_ reference, starting from the head, until we eventually reach the tail, which points to None. This is also known as \"_link hopping_\".\n\nIf you refer to the linked list representation above, a node's \"element\" attribute is a variable which can reference anything that can be assigned to a variable (anything!). The elements that the linked list points to do not need to know they are members of a linked list.\n\n### Inserting/removing elements with singly linked lists:\n\n__Adding elements to the front:__\n\nEach time we add/remove an element, we increment/decrement self._size, so that we always know the length of the list.\n\n\nThe flow for adding an element to the front of a singly linked list is as follows:\n\n1. We define a new node, whose _next_ attribute points to the incumbent head node.\n2.
The __head__ reference gets reassigned to point to the new node.\n\n__Appending elements to the end of a list:__\n\nThe procedure for adding a node after tail is:\n\n1. Define a new node, whose _next_ points to None.\n2. Reassign the linked list's _tail_ attribute, and the last node's _next_ element, so that it points to the new _tail_ node.\n\n__Removing the first element of a singly linked list:__\nRemoving the head element is like adding an element, but in reverse. \n    1. Reassign the linked list's _head_ reference, so that it points to the next element (head.next)\n    2. Remove references to the old head so that it is garbage collected.\n\nWith a singly linked list, there is no efficient way to remove the _tail_ node, as we need to reassign the _next_ pointer of the element before tail (second last). This is because singly linked lists only maintain a \"_next_\" attribute, but not \"previous\", making it \"one-way\", and hence the name. \n\n### Singly linked list implementation:\n\nA singly linked list member is called a \"Node\". We can represent a node with a non-public class, that is nested within the main linked list class definition:\n\n```python\n    class _Node:\n        __slots__ = '_element', '_next'         # slots allow efficient memory use\n        def __init__(self, element, next):\n            self._element = element\n            self._next = next\n```\n\n#### Linked List Attributes\n\nSo the only attributes we define are:\n\n1. element\n    * Points to the actual object\n2. next\n    * Points to the node in the next position\n3. head\n    * points to the first element (not required for Stacks)\n4. tail\n    * points to the last element\n5. size\n    * the number of elements in the linked list\n\n#### Linked List Methods\n\nThe methods that we define depend on whether we want to represent a LIFO, FIFO, etc..\n\nAs Stack operations (push, pop) are only performed at the top (_beginning_), there is no need for a _tail_ reference.\n\nFor a Queue, we need the ability to perform operations at the front (dequeue) and back (enqueue), so we define attributes to store their respective position.\n\nCommon methods include:\n\n1. `__init__:`\n    * This creates an empty linked list.\n2. `is_empty:`\n    * True if there are no elements, else False.\n\nThe methods should run with a worst case of _O(1)_, as their cost does not depend on the length of the linked list, for both Stacks and Queues. This is due to the fact that we:\n\n    * Increment/decrement the list length each time we add/remove an element.\n    * Operations are performed on positions relative to known Node positions.\n        * EG: \n        ```\n        def push(self, e): self._head = self._Node(e, self._head); self._size += 1\n        ```\n\n#### Implementing Circular Singly Linked Lists\n\nImplementing circular linked lists is easier than doing so with _Arrays_, as we can point the _tail.next_ to the _head_, forming a natural circle.\n\nThe beginning and end of a linked list are totally abstract, which makes it easier to define where we want the list to start and end.\n\nA circular linked list does not require a reference to _head_, as the _next_ element for _tail_ points to head (or None, if the list is empty), as opposed to a non-circular linked list, where tail.next points to None.\n\n    ```head = tail._next```\n\nThe only required attributes for a circular linked list are:\n\n- tail\n- size\n\nWe still abstract a node with the nested Node class, but use a unique method that rotates the list, advancing _tail_ one position so that the old front element implicitly moves to the back of the queue.\n\n__Using a Linked List as a \"Round-Robin\" scheduler__\n\nIf there is a shared resource that is required by multiple programs, a scheduler can be used to ensure that the resource is shared equally.
The Linux kernel uses this mechanism to allocate CPU time (though not in Python..).\n\nWe can implement a round-robin scheduler with a linked list as follows:\n\n1. On a populated linked-list queue, we \"dequeue\" the first element, and store it in variable \"e\". \"e\" can point to a pid, web host, etc..\n2. e is serviced.\n3. e is then \"enqueued\" to the back of the queue, where it will wait in line to be serviced again.\n\nWe can also implement different algorithms that could be more \"nice\" to certain processes (think QOS).\n\n\n## Doubly Linked Lists\n\nA doubly linked list node has pointers to _next_ and _previous_.\nThis allows us to perform operations on elements before and after a Node.\n\nWe also define \"dummy\" nodes for the head and tail called \"_sentinels_\". These simplify the implementation as the head and tail are constant, and we always have a reference to the head and tail, even when the list is empty.\n\nThe sentinels do not point to elements, and are not included in the length of the list.\n\nA doubly linked list is initialized with the _head_ sentinel pointing to the _tail_ sentinel, which represents an empty list.\n\n\n### Adding/removing elements with doubly linked lists:\n\n1. A new Node \"N\" is defined and shimmed into its position by setting N's _next_ and _prev_ elements to the nodes before/after.\n2. The preceding node's next pointer is set to point to N, and the prev pointer in the element after N is set to point at N.\n\nThe above operation can be performed anywhere in the list, as long as we know the preceding and following Nodes.\n\nTo insert at the front of the list, the preceding element would be the head sentinel, and vice versa for the back.\n\nTo remove an element, we do the reverse of adding:\n\n1. the nodes that are before and after are set to point at each other, bypassing the redundant node.\n2. The redundant Node is set to point at None.\n\nSo in summary, a doubly linked list is differentiated from a singly linked list by:\n\n    1. the Node's extra attribute: __prev__\n    2. the head and tail sentinels\n    3. the ability to insert elements between existing Nodes\n    4. the ability to delete arbitrary elements.\n\nWe can define a doubly-linked base ADT that has the basic methods for a doubly-linked list to function.\nSub-classes can inherit from this ADT and provide the user with an interface that suits their needs, whether they require a stack, queue, deque, etc..
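\nTo make that base ADT concrete, here is a minimal sketch of such a class. This is an illustrative assumption: the repository's own doublylinkedbase module is not shown in this listing, so the sketch simply mirrors the conventional implementation and the _header/_trailer/_insert_between/_delete_node names that LinkedDequeue (below) uses.\n\n```python\nclass _DoublyLinkedBase:\n    \"\"\"\n    A base class providing a doubly linked list representation.\n    \"\"\"\n    class _Node:\n        __slots__ = '_element', '_prev', '_next'    # streamline memory usage\n\n        def __init__(self, element, prev, next):\n            self._element = element\n            self._prev = prev\n            self._next = next\n\n    def __init__(self):\n        \"\"\"\n        Create an empty list framed by the two sentinel nodes.\n        \"\"\"\n        self._header = self._Node(None, None, None)\n        self._trailer = self._Node(None, None, None)\n        self._header._next = self._trailer          # trailer is after header\n        self._trailer._prev = self._header          # header is before trailer\n        self._size = 0                              # sentinels are not counted\n\n    def __len__(self):\n        return self._size\n\n    def is_empty(self):\n        return self._size == 0\n\n    def _insert_between(self, e, predecessor, successor):\n        \"\"\"\n        Add element e between two existing nodes and return the new node.\n        \"\"\"\n        newest = self._Node(e, predecessor, successor)\n        predecessor._next = newest\n        successor._prev = newest\n        self._size += 1\n        return newest\n\n    def _delete_node(self, node):\n        \"\"\"\n        Delete a non-sentinel node from the list and return its element.\n        \"\"\"\n        predecessor = node._prev\n        successor = node._next\n        predecessor._next = successor\n        successor._prev = predecessor\n        self._size -= 1\n        element = node._element\n        node._prev = node._next = node._element = None      # help garbage collection\n        return element\n```\n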
\np. 275\n" }, { "alpha_fraction": 0.5810679793357849, "alphanum_fraction": 0.5810679793357849, "avg_line_length": 29.74626922607422, "blob_id": "c00bc92d5dd2993a296aa4b4fa3840058d6bfe71", "content_id": "fbc09ac821edb1e755739f38f95230da6000f1c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2060, "license_type": "no_license", "max_line_length": 150, "num_lines": 67, "path": "/python_data_structures_and_algorithms/chapter7/linkeddeque.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from doublylinkedbase import _DoublyLinkedBase\nfrom linkedlists import Empty          # assumed shared Empty exception, as used by this chapter's other modules\n\nclass LinkedDequeue(_DoublyLinkedBase):\n    \"\"\"\n    Double-ended queue implementation based on a doubly linked list.\n\n    Sub-class of _DoublyLinkedBase, and uses its constructor.\n    \"\"\"\n\n    def first(self):\n        \"\"\"\n        Return, but do not remove, the first element.\n        \"\"\"\n        if self.is_empty():\n            raise Empty(\"Dequeue is empty\")\n        return self._header._next._element          # the first \"real\" item after the header\n\n    def last(self):\n        \"\"\"\n        Return, but do not remove, the element at the back of the deque.\n        \"\"\"\n        if self.is_empty():\n            raise Empty(\"Dequeue is empty\")\n        return self._trailer._prev._element         # the last \"real\" item (real meaning it is not the head or tail), which is before the trailer sentinel\n\n    def insert_first(self, e):\n        \"\"\"\n        Add an element to the front of the deque.\n        \"\"\"\n        self._insert_between(e, self._header, self._header._next)\n\n    def insert_last(self, e):\n        \"\"\"\n        Add element e to the back of the deque.\n        \"\"\"\n        self._insert_between(e, self._trailer._prev, self._trailer)\n\n    def delete_first(self):\n        \"\"\"\n        Remove and return the element from the front of the deque.\n\n        Else raise Empty.\n        \"\"\"\n        if self.is_empty():\n            raise Empty(\"Dequeue is empty\")\n        return self._delete_node(self._header._next)        # uses an inherited method\n\n    def delete_last(self):\n        \"\"\"\n        Remove and return the element from the back of the deque.\n\n        Else raise Empty().\n        \"\"\"\n        if self.is_empty():\n            raise Empty(\"Dequeue is empty\")\n        return self._delete_node(self._trailer._prev)       # inherited\n\nif __name__ == '__main__':\n    ld = LinkedDequeue()\n    print(len(ld))\n    ld.insert_first('this was the first element')\n    print(len(ld))\n    ld.insert_last('this was the last element')\n    print(len(ld))\n    while len(ld):\n        print(ld.delete_first())\n        print(len(ld))\n" }, { "alpha_fraction": 0.47429680824279785, "alphanum_fraction": 0.48399612307548523, "avg_line_length": 22.953489303588867, "blob_id": "9613a2f080211c5f0840bb585a8c99fdbe66648c", "content_id": "56c0b38a0d1912a157629b308f133fff6330d3f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 63, "num_lines": 43, "path": "/python_data_structures_and_algorithms/chapter6/is_matched_html.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from arraystacks import ArrayStack\n\ndef is_matched_html(raw):\n    \"\"\"\n    Return True if all HTML tags match.\n\n    raw is the raw HTML string.\n    \"\"\"\n    S = ArrayStack()\n    j = raw.find('<')\n    while j != -1:\n        k = raw.find('>', j + 1)\n        if k == -1:\n            return False\n        tag = raw[j+1:k]            # this slice is the actual HTML tag\n        if not tag.startswith('/'):\n            S.push(tag)\n        else:\n            if S.is_empty():\n                return False\n            if tag[1:] != S.pop():\n                return False\n        j = raw.find('<', k+1)\n    return S.is_empty()\n\nif __name__ == '__main__':\n\n    s = \"\"\"<body>\n    <center>\n    <h1>
the Little Boat </h1> </center>\n    </body>\"\"\"\n    print(\"on a correct html string: \")\n    print(s)\n    print(\"is matched? \", is_matched_html(s))\n    print(\"Now we will try with this html string: \")\n    ss = \"\"\"<body>\n    <center>\n    <h1> the Little Boat </h1>\n    <center>\n    </body>\"\"\"\n    print(ss)\n    print(\"is matched? \", is_matched_html(ss))\n\n" }, { "alpha_fraction": 0.7359009385108948, "alphanum_fraction": 0.7414030432701111, "avg_line_length": 50.10344696044922, "blob_id": "fdd2b2800d961207d8447aabde8f37f1b24e4386", "content_id": "d6826d2eb7b4eb476e04e409cd6522cba32ffd9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1454, "license_type": "no_license", "max_line_length": 213, "num_lines": 29, "path": "/python_data_structures_and_algorithms/chapter1/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# Python Basics\n\n## 1.8: Iterators & Generators\n__Iterators vs Iterables:__\n\n_Iterators:_\nAn iterator is an object that cycles through each element of a collection by calling __next(iterator)__, until a _StopIteration_ is raised, which means there are no more elements.\n\n_Iterables:_\nAn iterable is an object that can produce an iterator when the object is passed to __iter(object)__.\nFor example, a list is *not* an iterator and we cannot call next(list). But we can pass a list to iter(list), which would return an iterator that we can call next() on:\n\n    l = [1,2,3,4]\n    i = iter(l)\n    next(i)\n    1\n    next(i)\n    2\n    ...\n\nAn iterator keeps its own reference to what index it's up to on the object. As an iterator just keeps an index to the collection, any updates to the collection will be reflected in the results when we call next().\n\nThis is what is actually happening in a for loop. An iterator is produced from the sequence, and next(iterator) is called on each pass, until StopIteration is raised, which the for loop handles quietly.\n\n__Generators:__\nGenerators are similar to iterators, but each value is produced on demand by a function that hands back results with the __*yield*__ statement.\n\nThis lets us be efficient with memory as we only have to perform one operation at a time, instead of having to calculate all the values at once. \nSeveral yield() statements can be used with logic so that different results are yielded depending on the decision tree.
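\n\nAs a small, concrete example of that last point, here is a generator with more than one yield statement, in the spirit of the classic factor-listing example (an illustrative sketch, not code from elsewhere in this repo):\n\n    def factors(n):\n        # Yield each factor of n lazily, without building a list in memory.\n        k = 1\n        while k * k < n:\n            if n % k == 0:\n                yield k             # k divides n...\n                yield n // k        # ...and so does its complement\n            k += 1\n        if k * k == n:              # special case: n is a perfect square\n            yield k\n\n    list(factors(100))              # [1, 100, 2, 50, 4, 25, 5, 20, 10]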
\n" }, { "alpha_fraction": 0.4911993443965912, "alphanum_fraction": 0.49447399377822876, "avg_line_length": 24.715789794921875, "blob_id": "0e557de93d99698b3500dc3c16f62d1b33be57aa", "content_id": "37e050b2571f428a36b892fc5ecd949e22c33424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2443, "license_type": "no_license", "max_line_length": 91, "num_lines": 95, "path": "/python_data_structures_and_algorithms/chapter7/circularqueue.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from linkedlists import Empty\n\nclass CircularQueue:\n \"\"\"\n Queue using cirularly linked list for storage.\n \"\"\"\n #------------ nested _Node class -----------------\n class _Node:\n \"\"\"\n This is a convenient, lightweight nonpublic class for storing a singly linked node.\n The '__slots__' statically defines the instance attributes.\n \"\"\"\n __slots__ = '_element', '_next'\n\n def __init__(self, element, next):\n self._element = element\n self._next = next\n \n def __init__(self):\n \"\"\"\n Create an empty queue.\n \"\"\"\n self._tail = None\n self._size = 0\n\n def __len__(self):\n \"\"\"\n Return the number of elements in queue.\n \"\"\"\n return self._size\n\n def is_empty(self):\n \"\"\"\n Return True if empty.\n \"\"\"\n return self._size == 0\n\n def first(self):\n \"\"\"\n Return, but do not remove, the first element.\n \"\"\"\n if self.is_empty():\n raise Empty('Queue is empty')\n oldhead = self._tail._next\n if self._size == 1:\n self._tail = None\n else:\n self._tail._next = oldhead._next\n self._size = 1\n return oldhead._element\n\n def dequeue(self):\n \"\"\"\n Remove and return the first element.\n \n Else raise Empty exception.\n \"\"\"\n if self.is_empty():\n raise Empty('The queue is empty.')\n oldhead = self._tail._next\n if self._size == 1:\n self._tail = None\n else:\n self._tail._next = oldhead._next\n self._size -= 1\n return oldhead._element\n\n def enqueue(self, e):\n \"\"\"\n Add element to the back of the queue.\n \"\"\"\n newest = self._Node(e, None)\n if self.is_empty():\n newest._next = newest\n else:\n newest._next = self._tail._next\n self._tail._next = newest\n self._tail = newest\n self._size += 1\n\n def rotate(self):\n \"\"\"\n Rotates the queue, moving the element at the front, to the back.\n \"\"\"\n if self._size > 0:\n self._tail = self._tail._next\n\nif __name__ == '__main__':\n cqueue = CircularQueue()\n cqueue.enqueue('first')\n cqueue.enqueue('third')\n cqueue.enqueue('second')\n print(len(cqueue))\n while len(cqueue):\n print(cqueue.dequeue())\n" }, { "alpha_fraction": 0.5401987433433533, "alphanum_fraction": 0.542908787727356, "avg_line_length": 24.44827651977539, "blob_id": "bee6a7cb0dea0b77794ee4817b6b5e1dc222b13d", "content_id": "f9b1cd4a72db8959ef17f2956d9c6b364c697205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2214, "license_type": "no_license", "max_line_length": 162, "num_lines": 87, "path": "/python_data_structures_and_algorithms/chapter6/stacks.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "class Empty(Exception):\n \"\"\"\n This is a custom error for stacks, queues, etc from chapter 6.\n \"\"\"\n pass\n\nclass ArrayStack:\n \"\"\"\n A Python implementation of a LIFO stack made from a Python list.\n \"\"\"\n def __init__(self):\n \"\"\"\n Create an empty stack.\n \"\"\"\n self._data = []\n\n def __len__(self):\n \"\"\"\n returns the number of elements.\n \"\"\"\n 
return len(self._data)\n\n    def is_empty(self):\n        \"\"\"\n        Return True if the stack is empty.\n        \"\"\"\n        return len(self._data) == 0\n\n    def push(self, e):\n        \"\"\"\n        Adds another element to the top of the stack.\n        \"\"\"\n        self._data.append(e)\n\n    def top(self):\n        \"\"\"\n        Return the element at the top of a stack, but don't remove it.\n        Raise exception if stack is empty.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Stack is empty')\n        return self._data[-1]\n\n    def pop(self):\n        \"\"\"\n        This returns the top element, and also removes it.\n        Else, raise an error if the stack is empty.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Stack is empty')\n        return self._data.pop()\n\ndef reverse_file(filename):\n    \"\"\"\n    This function reverses the contents of a file (in place & line by line) by placing each line onto a stack, then removing it and writing the results to a file.\n    \"\"\"\n    S = ArrayStack()\n    original = open(filename)\n    for line in original:\n        S.push(line.rstrip('\\n')) # Notice how we can specify the string to be stripped.\n    original.close()\n\n    # Now we overwrite the original file with the reversed contents..\n    output = open(filename, 'w')\n    while not S.is_empty():\n        output.write(S.pop() + '\\n')\n    output.close()\n\ndef is_matched(expr):\n    \"\"\"\n    Return True if the delimiters in expr are properly paired.\n    \"\"\"\n    lefty = '({['\n    righty = ')}]'\n    S = ArrayStack()\n    for c in expr:\n        if c in lefty:\n            S.push(c)\n        elif c in righty:\n            if S.is_empty():\n                return False\n            if righty.index(c) != lefty.index(S.pop()):\n                return False\n    return S.is_empty()\n\nif __name__ == '__main__':\n    a = ArrayStack()\n    print('LENGTH: ', len(a))\n    if a.is_empty():\n        print('PASS: is_empty')\n    a.push(1)\n    a.push(2)\n    a.push(3)\n    print('LENGTH: ', len(a))\n    for i in range(len(a)):\n        print(\"Removed \", i, a.pop())\n    print('LENGTH: ', len(a))\n    print(\"Finished!\")\n\n    f = 'file.test'\n    reverse_file(f)\n" }, { "alpha_fraction": 0.4847775101661682, "alphanum_fraction": 0.4871194362640381, "avg_line_length": 23.08108139038086, "blob_id": "bb2cf7df55fc8613332119806a8b86b703c2375c", "content_id": "120b2132b7ad00cfcb198a98f4052555f14e7212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1708, "license_type": "no_license", "max_line_length": 52, "num_lines": 74, "path": "/python_data_structures_and_algorithms/chapter7/SinglyLinkedLists.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from linkedlists import Empty\n\nclass LinkedQueue:\n    \"\"\"\n    LIFO stack using a singly linked list (despite the name, this class is a stack).\n    \"\"\"\n    # -------- nested class:\n    class _Node:\n        __slots__ = '_element', '_next'\n\n        def __init__(self, element, next):\n            self._element = element\n            self._next = next\n\n    # --- start stack methods:\n    def __init__(self):\n        \"\"\"\n        Create an empty stack.\n        \"\"\"\n        self._head = None\n        self._size = 0\n\n    def __len__(self):\n        \"\"\"\n        Return number of elements in stack.\n        \"\"\"\n        return self._size\n\n    def is_empty(self):\n        \"\"\"\n        Return True if empty.\n        \"\"\"\n        return self._size == 0\n\n    def push(self, e):\n        \"\"\"\n        Add e to the top of the stack.\n        \"\"\"\n        self._head = self._Node(e, self._head)\n        self._size += 1\n\n    def top(self):\n        \"\"\"\n        Return, but do not remove, the top element.\n\n        Raise Empty exception if the stack is empty.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Stack is empty')\n        return self._head._element\n\n    def pop(self):\n        \"\"\"\n        Remove and return the top element.\n\n        Raise Empty exception if the stack is empty.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Stack is empty')\n        answer = self._head._element\n        self._head = self._head._next\n        self._size -= 1\n        return answer\n\nif __name__ == '__main__':\n    l = LinkedQueue()\n    l.push('first')\n    print(len(l))\n    print(l.pop())\n    print(len(l))\n    print('Linked list is empty: ', l.is_empty())\n    
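# push a second element; pop() should return it first (LIFO)\n    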
l.push('second')\n    print(len(l))\n    print('Linked list is empty: ', l.is_empty())\n    print(l.pop())\n    print(len(l))\n" }, { "alpha_fraction": 0.7250996232032776, "alphanum_fraction": 0.73041170835495, "avg_line_length": 61.75, "blob_id": "9a4897bfde9a78a16d49eefa5c57102a32c23a40", "content_id": "03d79898b3765684dfc5f5cb3cd92b82d1f6f94a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 753, "license_type": "no_license", "max_line_length": 153, "num_lines": 12, "path": "/python_data_structures_and_algorithms/chapter5/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# Chapter 5 - Array-Based Sequences\n\n## How dynamic arrays are implemented:\n - First, we have an array of X pointers, where each element points to some object.\n - When the original array is exhausted and has no empty elements left to allocate, we create another array, usually double the size of the original.\n    The first X pointers of the new array also point to the same objects as the original array.\n - Once the new array references the same objects, we can safely remove the old array, or leave it for garbage collection.\n\n## Using the built-in \"__str__\" method to print:\n - The __str__ method is what is actually called when we print an object. EG: print(obj) implicitly calls obj.__str__()\n\n*Up to p.212\n" }, { "alpha_fraction": 0.6549865007400513, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 36.836734771728516, "blob_id": "7d271c37e38c72c3f89ffada1590c4cb62b5436a", "content_id": "1db8c3bc1fe053d31dd664e3d3e84c169361bfde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 260, "num_lines": 49, "path": "/python_data_structures_and_algorithms/chapter6/exercises.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# exercises from chapter 6\n\n R-6.1 What values are returned during the following series of stack operations, if executed upon an initially empty stack? push(5), push(3), pop(), push(2), push(8), pop(), pop(), push(9), push(1), pop(), push(7), push(6), pop(), pop(), push(4), pop(), pop().\n\n    A) The pop() calls return 3, 8, 2, 1, 6, 7, 4, 9 (in that order).\n\nR-6.2 Suppose an initially empty stack S has executed a total of 25 push operations, 12 top operations, and 10 pop operations, 3 of which raised Empty errors that were caught and ignored. What is the current size of S?\n\n\tA) 18. Three of the 10 pops raised Empty and removed nothing, so 25 pushes - 7 successful pops = 18 (top never changes the size).\n\nR-6.3 Implement a function with signature transfer(S, T) that transfers all elements from stack S onto stack T, so that the element that starts at the top of S is the first to be inserted onto T, and the element at the bottom of S ends up at the top of T.\n\n\tA)\n    def transfer(S, T):\n        while len(S) > 0:\n            T.push(S.pop())\n\nR-6.4 Give a recursive method for removing all the elements from a stack.\n\n    A) \n\tdef remove(obj):\n        if len(obj) > 0:\n            obj.pop()\n            return remove(obj)\n\nR-6.5 Implement a function that reverses a list of elements by pushing them onto\na stack in one order, and writing them back to the list in reversed order.\n\n\tA)\n\tdef reverse(obj):\n        l = []\n        for item in range(len(obj)):\n            l.append(obj.pop())\n        return l\n\nR-6.6 Give a precise and complete definition of the concept of matching for\ngrouping symbols in an arithmetic expression. 
Your definition may be\nrecursive.\n\n\tA)\n\tI do not understand the question??\n\nR-6.7 What values are returned during the following sequence of queue opera-\ntions, if executed on an initially empty queue? enqueue(5), enqueue(3),\ndequeue(), enqueue(2), enqueue(8), dequeue(), dequeue(), enqueue(9),\nenqueue(1), dequeue(), enqueue(7), enqueue(6), dequeue(), dequeue(),\nenqueue(4), dequeue(), dequeue().\n    \n\tA) The dequeue() calls return 5, 3, 2, 8, 9, 1, 7, 6 (in that order).\n" }, { "alpha_fraction": 0.43803057074546814, "alphanum_fraction": 0.45500847697257996, "avg_line_length": 22.360000610351562, "blob_id": "025dff2bcb331a2a139bafeeba519aeff75352c5", "content_id": "315d262402c9ab9c1a0ef1a76204a977be25a1ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 109, "num_lines": 25, "path": "/negative_base.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "def to_negative_base(i, b):\n    \"\"\"\n    This function will convert a base-10 integer (signed or unsigned) to any base (negative or positive).\n    ARGS:\n        i = base 10 integer\n        b = base of new number\n    The result is returned as an array (list).\n\n    \"\"\"\n    if not i:\n        return [0]\n    else:\n        l = []\n        while i != 0:\n            i,r = divmod(i,b)\n            if r < 0:\n                i += 1\n                r += abs(b)\n            l.append(r)\n        return l\n\nif __name__ == '__main__':\n    d = 9\n    base = -2\n    print(to_negative_base(d, base))\n    \n" }, { "alpha_fraction": 0.44306930899620056, "alphanum_fraction": 0.4628712832927704, "avg_line_length": 24.25, "blob_id": "e714b1770e407cdd594498389a175c6f0d4ce270", "content_id": "082d5cb052ce9e84d8e71ca5d4332c231fa9ecc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 51, "num_lines": 16, "path": "/negabinary.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "def negabinary(i):\n    digits = []\n    if not i:\n        digits = ['0']\n    else:\n        while i != 0:\n            i, remainder = divmod(i, -2)\n            if remainder < 0:\n                i, remainder = i + 1, remainder + 2\n            digits.append(str(remainder))\n    return ''.join(digits[::-1])\n\nif __name__ == '__main__':\n    x = 9\n    xx = negabinary(x)\n    print('here is the result:\\t', xx)\n" }, { "alpha_fraction": 0.5052101016044617, "alphanum_fraction": 0.5092437267303467, "avg_line_length": 27.87378692626953, "blob_id": "cdb281c5d964461be0c05c9246e8ef8735286e87", "content_id": "478da82c4cdc45784aed1543e5178f960a5f6df5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2975, "license_type": "no_license", "max_line_length": 85, "num_lines": 103, "path": "/python_data_structures_and_algorithms/chapter6/arrayqueue.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from stacks import Empty\n\nclass ArrayQueue:\n    \"\"\"\n    FIFO queue using a Python list for storage.\n    \"\"\"\n    DEFAULT_CAPACITY = 10 # size of queue before it expands\n\n    def __init__(self):\n        \"\"\"\n        Create an empty queue.\n        \"\"\"\n        self._data = [None] * ArrayQueue.DEFAULT_CAPACITY\n        self._size = 0\n        self._front = 0\n\n    def __len__(self):\n        \"\"\"\n        Return the size of the queue.\n        \"\"\"\n        return self._size\n\n    def is_empty(self):\n        \"\"\"\n        Return True if the queue is empty.\n        \"\"\"\n        return self._size == 0\n\n    def first(self):\n        \"\"\"\n        Return, but do not remove, the first element.\n\n        Else, raise Exception.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Queue is empty')\n        return self._data[self._front]\n\n    def 
dequeue(self):\n        \"\"\"\n        Remove and return the first element (FIFO).\n\n        Else, raise Exception.\n        \"\"\"\n        if self.is_empty():\n            raise Empty('Queue is empty')\n        answer = self._data[self._front]\n        self._data[self._front] = None # help with garbage collection\n        self._front = (self._front + 1) % len(self._data)\n        self._size -= 1\n        return answer\n\n    def enqueue(self, e):\n        \"\"\"\n        Add an element to the back of the queue.\n        \"\"\"\n        if self._size == len(self._data):\n            self._resize(2 * len(self._data)) # double the size of the array\n        avail = (self._front + self._size) % len(self._data)\n        self._data[avail] = e\n        self._size += 1\n\n    def _resize(self, cap): # assuming cap >= len(self)\n        \"\"\"Resize to a new list of capacity >= len(self).\n        \"\"\"\n        old = self._data\n        self._data = [None] * cap\n        walk = self._front\n        for k in range(self._size):\n            self._data[k] = old[walk]\n            walk = (1 + walk) % len(old) # use old size as modulus\n        self._front = 0\n\n# methods: len, is_empty, first, dequeue, enqueue, _resize\nif __name__ == '__main__':\n    aq = ArrayQueue()\n    print('is aq empty? ', aq.is_empty())\n    print('the len of aq is: ', len(aq))\n    print('the length of the underlying list (self._data) is: ', len(aq._data))\n    l = [\n            'first',\n            'second',\n            'third',\n            'fourth',\n            'fifth',\n            'sixth',\n            'seventh',\n            'eighth',\n            'ninth',\n            'tenth',\n            'eleventh',\n            'twelfth',\n            'thirteenth',\n            ]\n    for element in l:\n        print('now putting ', element, ' in queue')\n        aq.enqueue(element)\n        print('the len of aq is: ', len(aq))\n        print('the length of the underlying list (self._data) is: ', len(aq._data))\n    print('the first element of aq is: ', aq.first())\n    print('is aq empty? ', aq.is_empty())\n    while len(aq) != 0:\n        print('dequeuing: ', aq.dequeue())\n        print('the len of aq is: ', len(aq))\n        print('the length of the underlying list (self._data) is: ', len(aq._data))\n\n" }, { "alpha_fraction": 0.5779625773429871, "alphanum_fraction": 0.5779625773429871, "avg_line_length": 27.294116973876953, "blob_id": "62ad32e4d7c6765d25dffbc67d2916fb356aa53b", "content_id": "b0f1eae00d7a312e3942d82d5302b9596472ce50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 962, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/python_data_structures_and_algorithms/chapter6/match_delimiter.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "from arraystacks import ArrayStack\n\n\"\"\"\nThis is a program that ensures that delimiters such as \"\",[],(),{} are paired.\n\"\"\"\n\ndef is_matched(expr):\n    \"\"\"\n    Returns True if delimiters are properly paired.\n    \"\"\"\n    lefty = '({['\n    righty = ')}]'\n    S = ArrayStack()\n    for c in expr:\n        if c in lefty:\n            S.push(c)\n        elif c in righty:\n            if S.is_empty():\n                return False\n            if righty.index(c) != lefty.index(S.pop()):\n                return False\n    return S.is_empty()\n\nif __name__ == '__main__':\n    matched = 'for i in range(len(object)):print(i)'\n    unmatched = 'for i in range(((len(object):print(i)'\n    print('test function on a matched string: ')\n    print(matched)\n    print('Are the delimiters paired?')\n    print(is_matched(matched))\n    print('Now test the function on an unmatched delimiter: ')\n    print(unmatched)\n    print('Are the delimiters paired?')\n    print(is_matched(unmatched))\n" }, { "alpha_fraction": 0.5995534062385559, "alphanum_fraction": 0.6289542317390442, "avg_line_length": 35.31081008911133, "blob_id": "45401eba94f5cbaa64d8c2134734b0fa4f4762d1", "content_id": "5a1d56bd3e6a20382a4e93a0d6e713c615e3f9e2", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2687, "license_type": "no_license", "max_line_length": 307, "num_lines": 74, "path": "/base-negative-two.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "##This was a coding challenge I was asked to perform during an online technical interview..\n##Unfortunately, I ran out of time before I got a chance to submit the solution, but I put it here for future reference...\n##\n##Challenge:\n##Write a function that accepts an array (A) as an argument. A (the array) consists 1s and 0s. \n##It is like binary, \n##\n##Binary (base 2) consists of ones and zeros, with the least signigicant numbers starting from the right most column. For each column that we move left, the value of units increase exponentially by 2:\n##\n##EG:\n## 32 16 8 4 2 1\n##\n##The challenge is to write a function that accepts an array (A) as an argument, and consists of 1s and 0s (eg: [1,0,0,1,1], that is Base -2, and also with the LEAST signigicant numbers starting from the RIGHT most column (as opposed to binary, where the least significant number is in the leftmost column).\n##The function should return an array that has the same format as the input, but returns the negative equivalent..\n##\n##INPUT:\n## An array consisting of binary integers (1s and 0s) representing a BASE -2 number, where the most significant numbers are in the right-most column.\n##\n##OUTPUT:\n## An array with the same form as the input, but representing the negative equivalent of the input value.\n##\n##EXAMPLE:\n## A = [1, 0, 0, 1, 1]\n## which is equal to decimal 9\n##\n## X = solution(A)\n## # from memory the output should be [1,0,0,1,1,1] which should be equal to decimal -9 (base -2)??\n## X\n## [1,1,0,0,1,1]\n##\n###Here's what I have so far:\n# To convert an array representing a base -2 sequence:\nsum([A[i] * ((-2) ** i) for i in range(len(A))])\n\n#To convert array A to it's decimal negative equivalent:\nsum([(j - j*2) for j in [A[i] * ((-2) ** i) for i in range(len(A))]])\n\n\n # TODO: I have to work out a way to convert the negative value into base -2?? \n To build up a base -2 table:\n [1*((-2)**i) for i in range(1,17)]\n # the above table is WRONG!! 
The following yields the correct table:\n    # To build up a base -2 table:\n    # [1*((-2)**i) for i in range(17)]\n\n# from wikipedia:\ndef negabinary(i):\n    digits = []\n    if not i:\n        digits = ['0']\n    else:\n        while i != 0:\n            i, remainder = divmod(i, -2)\n            if remainder < 0:\n                i, remainder = i + 1, remainder + 2\n            digits.append(str(remainder))\n    return ''.join(digits[::-1])\n\nif __name__ == '__main__':\n    x = 9\n    xx = negabinary(x)\n    print('here is the result:\\t', xx)\n\n\n\n# BREAK THROUGH!!:\n\nA = [1, 0, 0, 1, 1]\nT = [1*((-2)**i) for i in range(17)]\nans = 0\nfor i in range(len(A)):\n    if A[i] == 1:\n        ans += T[i]\nassert ans == 9\n" }, { "alpha_fraction": 0.46987950801849365, "alphanum_fraction": 0.47289156913757324, "avg_line_length": 21.133333206176758, "blob_id": "5fe7a63a3c4fa59d1b659852b0c925b94c0408b9", "content_id": "0504d21f21d4739a164d4ff5d4f9ecf343876181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/python_data_structures_and_algorithms/chapter7/CircularQueue.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "class CircularQueue:\n    \"\"\"\n    Queue implementation using circularly linked storage.\n    \"\"\"\n    # -------- nested class:\n    class _Node:\n        __slots__ = '_element', '_next'\n\n        def __init__(self, element, next):\n            self._element = element\n            self._next = next\n\n    def __init__(self):\n        \"\"\"\n        Create an empty queue.\n        \"\"\"\n        self._tail = None\n        self._size = 0\n\n    def __len__(self):\n        \"\"\"\n        Return the number of elements in the queue.\n        \"\"\"\n        return self._size\n\n    def is_empty(self):\n        \"\"\"\n        Return True if the queue is empty.\n        \"\"\"\n        return self._size == 0\n" }, { "alpha_fraction": 0.6820269823074341, "alphanum_fraction": 0.6853287220001221, "avg_line_length": 63.5, "blob_id": "636fb58697b2c0225030af9340276945461fc6456", "content_id": "6b3f5ad7ae412ae93ad3ba397d6701f6561f8456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6966, "license_type": "no_license", "max_line_length": 543, "num_lines": 108, "path": "/python_data_structures_and_algorithms/chapter6/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# Stacks, Queues and Deques\nStacks are a simple data type that is ubiquitous throughout computing. 
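(Function call frames, undo history and the delimiter matching described below all rely on one.) 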
They are also known as LIFO (last in, first out), just like a stack of dishes.\n\n# Stacks (LIFO)\n\n## The Stack abstract data type:\n - The stack ADT contains a few core methods:\n    -- push        push an element onto the top of the stack.\n    -- pop         remove, and return, the element at the top of the stack.\n - These are additional methods commonly found with stacks:\n    -- top         return, but do not remove, the element at the top of the stack.\n    -- is_empty()  True if the stack is empty.\n    -- len         returns the number of elements in a stack.\n\n## The Adapter pattern:\nThe adapter pattern is when we take an existing class and adapt it to suit our needs, presenting a slightly different interface and methods.\nAn example of an adapter pattern is taking the Python \"List\" class, hiding it behind our code, and presenting new methods so that it behaves as a Stack.\n\nBasically, we use a list internally for storage and present the user with a stack interface.\n\n## Implementing an ArrayStack class, using a Python list as the backend storage:\nIn this code example, we define a class with a \"Stack\" interface, but behind the scenes, we are really using a Python list. As mentioned above, this is an example of an \"Adapter Pattern\", and it is a classic example of re-using code with OOP.\nWe just have to provide the methods for a Stack interface:\n - __init__()\n    def __init__(self):\n    \"\"\"\n    This creates an empty list for us to store the Stack elements.\n    Note that _data is a non-public attribute, as it should not be invoked directly by the user.\n    \"\"\"\n    self._data = []\n - push()\n    def push(self, e):\n    \"\"\"\n    This uses the append List method, which has the same effect as pushing an element onto a stack.\n    \"\"\"\n    self._data.append(e)\n - pop()\n    def pop(self):\n    \"\"\"\n    This uses the pop() List method, else raise exception.\n    \"\"\"\n    if self.is_empty():\n        raise Empty('Stack is empty')\n    return self._data.pop()\n - top()\n    def top(self):\n    \"\"\"\n    Return, but do not remove, the top element, else raise Exception.\n    \"\"\"\n    if self.is_empty():\n        raise Empty('Stack is empty')\n    return self._data[-1]\n - is_empty()\n - __len__()\n    def __len__(self):\n    \"\"\"\n    This one is easy... just return the length of the internal list.\n    \"\"\"\n    return len(self._data)\n\n## Reversing the contents of a file, using Stacks:\nPicture a stack as a literal stack of plates: place dishes onto it one by one, then remove them one by one, and they naturally come off in reversed order. We can use the same concept to reverse the order of any stack's elements.\n\n## Using stacks to match delimiters:\nThe code example in section 6.1.4 shows us how we can ensure that delimiters are properly paired (IE: if there is an opening bracket \"(\" in some code, there obviously has to be a closing bracket \")\").\nIn this example, we do this by iterating over each char in a string, and if that char is an opening delimiter, we then push it onto a Stack. 
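(For example, while scanning \"([ ])\" we push '(' and then '['.) 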
As we continue looping over each character, whenever we encounter a closing delimiter we pop the stack: if the stack is empty, or the popped opening delimiter does not pair with the closing one, the delimiters are unmatched and we return False. Once every character has been processed, we return True only if the stack is empty; anything left on it is an unmatched opening delimiter. (In the \"([ ])\" example, ']' pops '[' and ')' pops '(', leaving the stack empty, so the string is matched.)\nI would imagine that this would be similar behaviour to how VIM and other IDEs would do it...\n\n## Matching HTML tags using Stacks:\nOn p.238, the example shows us how to ensure that the HTML tags are properly opened, and closed:\n1. We first create a stack, and look for the first instance of a \"<\", and assign the index of the char, in the string (which is an immutable array of chars) to the letter 'j'.\n2. We then look for a \">\", starting from position 'j+1', if there is no \">\", the string.find() method will return -1, which means that the tags are not matched.\n3. If we do find the matching \">\", we assign it to 'k' and extract the tag, with a sub-slice (tag = raw[j+1:k]), and if the HTML tag does not start with a \"/\" (which is for closing HTML tags), we push the tag onto the Stack. \n4. If the tag does start with a \"/\", we pop the stack and compare: the popped element should be the matching opening tag that does not start with \"/\"; otherwise it is an unmatched tag, and we return False.\n5. We then continue iterating over the chars starting from the char after the last tag we found, until we either find another \"<\", in which case we go back to step (2), or we don't find any. If the stack is empty (because each time we find a tag, we push it onto the stack, and pop it once we found the closing tag that starts with \"/\"..) we return True (tags are matched). Otherwise if there is an element on the stack, it is unmatched and we return False.\n\n# Queues (FIFO)\n\nQueues are just what they sound like. 
First In, First Out (FIFO).\n\n## Queue ADT:\nThe queue abstract data type could be implemented using a List (using pop(0) to remove the front element, and append(e) to add one), but this would be inefficient: pop(0) shifts every remaining element one position to the left, so it runs in Ω(n) time.\nThe most efficient way is to use a \"circular\" list, where we maintain an index of the element which is logically at the front, but can move.\nWe basically keep an index of the front and rear elements, so when we want to \"enqueue\" an element to the back of the queue, we can just put it after L.index(last), and vice-versa with index(first).\n\nThe queue can also increase its length dynamically if required.\n\n### ArrayQueue:\n - EG: ArrayQueue\n    L = [a=self._first, b, c]\n    len(L) == 3\n    If we dequeue the first element, the next element becomes self._first:\n    L = [a=None, b=self._first, c]\n    len(L) == 3 (the underlying list keeps its length; the logical size drops by 1)\n    if L runs out of capacity:\n        (len(L) * 2) * [None]\n    If we add 2 more elements to the queue:\n    L.enqueue(e, f)\n    L = [None, b=self._first, c, e, f, None]\n    It could eventually wrap around like:\n    L = [c, e, f, None, None, b=self._first]\n    It uses the following formula to calculate how to advance the index of self._front, even if it has to go around:\n        self._front = (self._front + 1) % len(self._data)\n        *self._data is the internal list\n        EG: with len(self._data) == 6 and self._front == 5, the next front is (5 + 1) % 6 == 0, wrapping back to the start.\n\n### Deque (Double Ended Queues)\nAnother type of queue is double-ended, meaning instead of just keeping the index of the front, we keep the index of the front and back.\nIt provides more functionality, as it allows elements to be added or removed from the front or back (not the middle!).\n" }, { "alpha_fraction": 0.7446808218955994, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 22.5, "blob_id": "db3da93bb5697463e9cdd9eeaa8bbc35bc378cf5", "content_id": "32ce4a130b23f9f5d3ef66155f8388392f2b6569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# python\nrandom python scripts, modules, etc..\n" }, { "alpha_fraction": 0.7591241002082825, "alphanum_fraction": 0.7810218930244446, "avg_line_length": 45.66666793823242, "blob_id": "7b402ab68d961c046fce1b43ee8461f35c85e120", "content_id": "7b2c1135ee0f1f137ea190279816570a8d2f1fe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 137, "license_type": "no_license", "max_line_length": 75, "num_lines": 3, "path": "/python_data_structures_and_algorithms/README.md", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "# Exercises from python data structures and algorithms book\n\nUp to at least p. 282, but have to merge in the older files from tmp branch\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.517241358757019, "avg_line_length": 14.5, "blob_id": "a0c04b2f0761a54043e2f9658addb01282a40a22", "content_id": "4f07604c74d696f89401b6c726a7f65380a2d189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/python_data_structures_and_algorithms/chapter7/linkedlists.py", "repo_name": "sudo-justinwilson/python", "src_encoding": "UTF-8", "text": "class Empty(Exception):\n    \"\"\"\n    Custom error.\n    \"\"\"\n" } ]
19
verysetiawan/Network-Automation-Mikrotik-PFFP
https://github.com/verysetiawan/Network-Automation-Mikrotik-PFFP
d8c34e1e0f961d7f1540b0d52c7cf30cdba565e5
1354c238facabea15a0b9addbf070112a3e74c85
6f05cc2e847fab8c10a4372679ce8fc233d46d00
refs/heads/master
2022-11-12T17:52:20.725640
2020-06-01T19:36:05
2020-06-01T19:36:05
268,612,283
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.7155457735061646, "alphanum_fraction": 0.7199559211730957, "avg_line_length": 33.92307662963867, "blob_id": "d385c77b6e37658579936e07ed08f7d0501d4262", "content_id": "c33849adbb79dadfff7cccc5a03fbe56a56d306f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 907, "license_type": "no_license", "max_line_length": 102, "num_lines": 26, "path": "/README.md", "repo_name": "verysetiawan/Network-Automation-Mikrotik-PFFP", "src_encoding": "UTF-8", "text": "# Network-Automation-Mikrotik-PFFP (Python-Fetch-Flask-Paramiko)\nNetwork Automation in Mikrotik with Python-Fetch-Flask-Paramiko\n<h3>Topology</h3>\nRepository menggunakan topology berikut:\n<img src=\"toolfetchflask.png\">\n\n<h3>Materi</h3>\nmendapatkan ip address dari mikrotik dan dikirim menggunakan tool fetch ke flask python:\n<ol>\n<li>membuat dhcp leases mikrotik yang dituliskan pada /ip dhcp-server lease-script dengan materi:</li>\n <ul>\n <li>foreach script mikrotik</li>\n <li>tool fetch mikrotik</li>\n </ul>\n<li>membuat app.py dengan materi:</li>\n <ul>\n <li>flask</li>\n <li>request</li>\n <li>json</li>\n <li>render_template</li>\n <li>file append</li>\n <li>file read</li>\n </ul>\n<li>membuat file ip_address.txt untuk menyimpan ip address yang diambil dari fetch mikrotik</li>\n<li>membuat file index.html untuk menampilkan isi file ip_address.txt ke web browser</li>\n</ol>" }, { "alpha_fraction": 0.6214896440505981, "alphanum_fraction": 0.6385836601257324, "avg_line_length": 23.84848403930664, "blob_id": "73b46d90567e3f1a8f8bba811c8bbe97789457bf", "content_id": "d8b48d564770f8f5be5c0679685031dd641d2cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 58, "num_lines": 33, "path": "/app.py", "repo_name": "verysetiawan/Network-Automation-Mikrotik-PFFP", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify, render_template\n\n\napp = Flask(__name__,template_folder='template')\n\[email protected](\"/conf\", methods=[\"POST\"])\ndef config():\n #menangkap ip mikrotik client\n data = request.get_json()\n ip_mik = data[\"ip_router\"]\n\n # Cetak ip Mikrotik\n print (f\"IP Address Mikrotik adalah : {ip_mik}\")\n\n #Menyimpan informasi ip ke file ip_address.txt\n file_write = open (\"template/ip_address.txt\",\"a\")\n file_write.write (ip_mik)\n file_write.write (\"\\n\")\n file_write.close()\n\n return jsonify(data)\n\[email protected](\"/\")\ndef indexku():\n file_open = open (\"template/ip_address.txt\",\"r\")\n baca_file = file_open.readlines()\n\n return render_template (\"index.html\", var=baca_file) \n\n\n\nif __name__ == \"__main__\":\n app.run (host='192.168.122.1', debug=True, port=5005)" }, { "alpha_fraction": 0.43002545833587646, "alphanum_fraction": 0.44020354747772217, "avg_line_length": 31.83333396911621, "blob_id": "6effba8e88880c338380a352150b245882a8dde3", "content_id": "e7564d2a9fb705faa6943f97d862200efe5601d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 393, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/template/index.html", "repo_name": "verysetiawan/Network-Automation-Mikrotik-PFFP", "src_encoding": "UTF-8", "text": "<html>\n <head></head>\n <body>\n <h1>DAFTAR IP ADDRESS</h1>\n <p>IP ADDRESS YANG TERDAFTAR YAITU : </p>\n <div>\n {% for ip in var%} <!--sintaks jinja2 memulai for-->\n <li>{{ip}}</li> <!--memanggil 
nilai variabel ip-->\n {% endfor %} <!--sintaks jinja2 mengakhiri for-->\n </div>\n </body>\n</html>" } ]
3
cjbruin23/backend-copy-special-assessment
https://github.com/cjbruin23/backend-copy-special-assessment
4c59df52c2b17ff7fc66b5b2ba3e8e6885512362
4a8cbc7b41181417e9a37120cc2421ab9ddaedfa
f72adbb6303f261e78ad3721edea9216a68a474f
refs/heads/master
2020-03-25T18:40:46.733080
2018-08-12T15:03:15
2018-08-12T15:03:15
144,043,737
0
0
null
2018-08-08T17:01:22
2018-08-08T14:30:03
2018-08-08T14:30:02
null
[ { "alpha_fraction": 0.6465968489646912, "alphanum_fraction": 0.650959849357605, "avg_line_length": 26.285715103149414, "blob_id": "10b51d6da8604dd02d4cd58e3589b85cc4e769b0", "content_id": "04760cab2509d6d80ac867a06b640179bb0d9e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2292, "license_type": "no_license", "max_line_length": 81, "num_lines": 84, "path": "/copyspecial.py", "repo_name": "cjbruin23/backend-copy-special-assessment", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/google-python-class/\n\nimport sys\nimport re\nimport os\nimport shutil\nimport commands\nimport argparse\n\n\"\"\"Copy Special exercise\n\"\"\"\n\n# +++your code here+++\n# Write functions and modify main() to call them\n\ndef get_special_paths(dir):\n special_array = []\n\n for cfile in os.listdir(dir):\n found = re.search(r'__\\w+__', cfile)\n if found:\n special_array.append(os.path.abspath(cfile))\n print \"\\n\".join(special_array)\n return special_array\n\n# New dir should have a '.' for creating new folders\n# from current directory\ndef copy_to(paths, new_dir):\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n for cfile in paths:\n shutil.copy(cfile, new_dir)\n return \n\ndef zip_to(paths, new_dir):\n print \"Command I'm going to do:\"\n cmd_to_run = \"zip -j \" + new_dir\n for a_path in paths:\n cmd_to_run += \" \" + a_path \n print cmd_to_run\n os.system(cmd_to_run)\n\n\ndef main():\n # This snippet will help you get started with the argparse module.\n parser = argparse.ArgumentParser()\n parser.add_argument('--todir', help='dest dir for special files')\n parser.add_argument('--tozip', help='dest zipfile for special files')\n parser.add_argument('dir', help='prints current directories special files')\n # TODO need an argument to pick up 'from_dir'\n args = parser.parse_args()\n\n # TODO you must write your own code to get the cmdline args.\n # Read the docs and examples for the argparse module about how to do this.\n\n # Parsing command line arguments is a must-have skill.\n # This is input data validation. If something is wrong (or missing) with any\n # required args, the general rule is to print a usage message and exit(1).\n\n # +++your code here+++\n # Call your functions\n if not args:\n parser.print_usage()\n sys.exit(1)\n \n special_array = get_special_paths(args.dir)\n\n if args.todir:\n copy_to(special_array, args.todir)\n if args.tozip:\n zip_to(special_array, args.tozip)\n \n\n \n return\n \nif __name__ == \"__main__\":\n main()\n" } ]
1
hkayesh/depend_clean
https://github.com/hkayesh/depend_clean
a8f05da1addc83d9b01cf4b4245d3c4ed6d7d21e
1d4bfdaf9a4d323582ab36e3ec0f9b4f2faae851
7137418c31538a01cc43b0557e84b6a9e729d23e
refs/heads/main
2022-12-27T15:04:20.100560
2020-10-14T06:31:18
2020-10-14T06:31:18
303,921,551
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6192660331726074, "alphanum_fraction": 0.642201840877533, "avg_line_length": 35.33333206176758, "blob_id": "6b0cc051ee88a6429209723fa6850a64437b2889", "content_id": "56ac11f44fcf9e8d986bb2db5c0a425d1ef38aa8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 218, "license_type": "permissive", "max_line_length": 99, "num_lines": 6, "path": "/system_b/convert.count.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#Required for NB: function to convert the word frequencies to yes (presence) and no (absent) labels\nconvert_count <- function(x) {\n y <- ifelse(x > 0, 1,0)\n y <- factor(y, levels=c(0,1), labels=c(\"No\", \"Yes\"))\n y\n}\n" }, { "alpha_fraction": 0.7071005702018738, "alphanum_fraction": 0.7100591659545898, "avg_line_length": 35.66666793823242, "blob_id": "a31c1f9b259f22bbe4a7beb5ebde1c96c75a8328", "content_id": "c8f58a9b0c13576a39be0f4519336480ac968538", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "permissive", "max_line_length": 77, "num_lines": 9, "path": "/system_a/evaluation_main.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "from scripts.evaluation import Evaluator\n\n\nif __name__ == '__main__':\n evaluator = Evaluator('files/srft_dataset.csv')\n\n # print segment level sentiment detection scores\n # evaluator.evaluate_sentiment_detection(scoring='f1_micro', merged=True)\n print(evaluator.get_category_counts(cat_type='sentiment', merged=True))\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 33, "blob_id": "ef3afa6632bb220426fb8638ac1b7eb220a06e07", "content_id": "3e943e3e552cc3768f2000e98aac82d71fd3a835", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 136, "license_type": "permissive", "max_line_length": 60, "num_lines": 4, "path": "/system_b/clean.data.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "# Make sure this clean function is equal to Humayun's system\nclean.data <- function(x){\n gsub(\"x0085_\", \"\", x$comment, fixed = TRUE)\n} " }, { "alpha_fraction": 0.5592938661575317, "alphanum_fraction": 0.5655622482299805, "avg_line_length": 41.48369598388672, "blob_id": "b603ec8f9485ad1d4294e6183efcf07f0cf93f22", "content_id": "958bf6cbbc755945e85d1bcaf1569c7c044e25d4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7817, "license_type": "permissive", "max_line_length": 134, "num_lines": 184, "path": "/system_a/scripts/processing.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport warnings\nimport pickle\nfrom nltk.stem import WordNetLemmatizer\nfrom openpyxl import load_workbook\nfrom segmenter import Segmenter\nfrom utilities import Utilities\nfrom wrapper_classifiers import AspectClassifier, SentimentClassifier\n\nclass Processor(object):\n\n def __init__(self, settings=None):\n self.settings = settings\n self.utilities = Utilities()\n self.segmenter = self.load_segmenter()\n self.wordnet_lemmatizer = WordNetLemmatizer()\n\n self.ml_asp_classifier = AspectClassifier(casecade=False)\n if settings is not None:\n model_path = settings['training_file']+'.aspect_model.pickle'\n if os.path.exists(model_path):\n with open(model_path, 'rb') as 
handle:\n self.ml_asp_classifier = pickle.load(handle)\n else:\n self.ml_asp_classifier.train(settings['training_file'])\n with open(model_path, 'wb') as f:\n pickle.dump(self.ml_asp_classifier, f)\n print(\"Aspect Extraction model written out to {}\".format(model_path))\n\n self.ml_snt_classifier = SentimentClassifier()\n if settings is not None:\n model_path = settings['training_file'] + '.sentiment_model.pickle'\n if os.path.exists(model_path):\n with open(model_path, 'rb') as handle:\n self.ml_snt_classifier = pickle.load(handle)\n else:\n self.ml_snt_classifier.train(settings['training_file'])\n with open(model_path, 'wb') as f:\n pickle.dump(self.ml_snt_classifier, f)\n print(\"Sentiment Detection model written out to {}\".format(model_path))\n\n def run(self):\n settings = self.settings\n\n data_file = settings['data_file']\n output_file = settings['output_file']\n\n df = self.utilities.read_from_csv(data_file)\n\n original_reviews = [row[0] for row in df]\n\n if 'max_reviews' in settings.keys() and settings['max_reviews'] < len(original_reviews):\n original_reviews = original_reviews[:settings['max_reviews']]\n\n original_reviews = self.utilities.convert_list_to_utf8(original_reviews)\n\n cleaned_reviews = []\n empty_review_indexes = []\n for index, review in enumerate(original_reviews):\n cleaned_review = self.utilities.clean_up_text(review.lower())\n if len(cleaned_review) > 2:\n cleaned_reviews.append(cleaned_review)\n else:\n cleaned_reviews.append(review.lower())\n empty_review_indexes.append(index)\n reviews = cleaned_reviews\n\n reviews_segments = []\n for index, review in enumerate(reviews):\n # print index\n if index in empty_review_indexes:\n reviews_segments.append([review])\n continue\n sentences = self.utilities.split_text_into_insentence(review)\n\n # start: force split exceptionally long (more than 800 chars) sentences\n tmp_sentences = []\n for sentence in sentences:\n if len(sentence) > 800:\n if '|' in sentence:\n tmp_sentences = tmp_sentences + sentence.split('|')\n else:\n first_part, second_part = sentence[:len(sentence) / 2], sentence[len(sentence) / 2:]\n tmp_sentences = tmp_sentences + [first_part, second_part]\n else:\n tmp_sentences.append(sentence)\n\n sentences = tmp_sentences\n # end: force split exceptionally long (more than 800 chars) sentences\n\n segments = []\n try:\n for sentence in sentences:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n segment_info = self.segmenter.get_segments(sentence)\n segments = segments + [sg for sg in segment_info['segments'] if len(sg) > 2]\n except AssertionError:\n # print review\n segments = [review]\n reviews_segments.append(segments)\n\n reviews_result = []\n\n for index, segments in enumerate(reviews_segments):\n\n if index not in empty_review_indexes:\n aspects = self.get_aspect_for_segments(segments)\n sentiments = self.get_sentiment_for_aspects(segments)\n else:\n # Assign 'other' for noisy reviews to keep indexes same\n aspects = ['other 1']\n sentiments = ['negative']\n\n #aspects = self.apply_dictionaries(segments, aspects)\n\n if len(segments) == 1:\n other_words = ['excellent', 'good', 'very good', 'bad', 'ok', 'no response']\n if segments[0] in other_words or len(self.utilities.tokenize(segments[0])) == 1:\n aspects = ['other 1']\n\n # Post-processing: remove duplicate aspects from a comment\n asp_snt_pair = []\n for i, aspect in enumerate(aspects):\n # if i > 0 and aspect == aspects[i - 1] and sentiments[i] == sentiments[i - 1]:\n\n if i > 0 and any(aspect.rsplit(' ', 1)[0] in 
item for item in asp_snt_pair):\n new_score = aspect.rsplit(' ', 1)[1]\n existing_aspects = [item.rsplit(' ', 1)[0].rsplit(' ', 1)[0] for item in asp_snt_pair]\n index_dup_aspect = existing_aspects.index(aspect.rsplit(' ', 1)[0])\n\n if float(new_score) > float(asp_snt_pair[index_dup_aspect].rsplit(' ', 1)[0].rsplit(' ', 1)[1]):\n asp_snt_pair[index_dup_aspect] = aspect + ' ' + sentiments[i]\n else:\n continue\n else:\n # Added sentiment to the result again on 19/12/2017\n asp_snt_pair.append(aspect + ' ' + sentiments[i])\n # asp_snt_pair.append(aspect)\n result = [unicode(reviews[index]).encode(\"utf-8\")] + list(set(asp_snt_pair))\n reviews_result.append(result)\n\n self.utilities.save_list_as_csv(reviews_result, output_file)\n print (\"System A output saved to the file: %s\" % output_file)\n\n def get_aspect_for_segments(self, segments):\n aspects = self.ml_asp_classifier.predict(segments)\n\n return aspects\n\n def get_sentiment_for_aspects(self, segments):\n sentiments = self.ml_snt_classifier.predict(segments)\n return sentiments\n\n def load_segmenter(self):\n training_file_name = os.path.splitext(self.settings['training_file'])[0]\n outpath = training_file_name + '.segmenter.pickle'\n segmenter = None\n if os.path.exists(outpath):\n with open(outpath, 'rb') as handle:\n segmenter = pickle.load(handle)\n else:\n if outpath is not None:\n segmenter = Segmenter(self.settings['training_file'])\n with open(outpath, 'wb') as f:\n pickle.dump(segmenter, f)\n print(\"Segmenter model written out to {}\".format(outpath))\n\n return segmenter\n\n def wordnet_lemmatizing(self, word):\n if not word:\n return \"\"\n return self.wordnet_lemmatizer.lemmatize(word)\n\n def apply_post_processing_rules(self, segment, aspect):\n care_quality_clues = {'nothing to add', 'nothing to say', 'nothing to improve', 'nothing to change', 'nothing to fix','thank'}\n new_aspect = aspect\n if aspect != 'care quality':\n for clue in care_quality_clues:\n if clue in aspect:\n new_aspect = 'care quality'\n return new_aspect\n" }, { "alpha_fraction": 0.6691176295280457, "alphanum_fraction": 0.6783088445663452, "avg_line_length": 33, "blob_id": "9330c2477b48f7208946e8b04347990594085a09", "content_id": "8f32c42aaaaf8297b489eaad3f7adbb630bb72f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 544, "license_type": "permissive", "max_line_length": 110, "num_lines": 16, "path": "/system_b/new.corpus.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#A function to convert human labelled multi-class data to one-against-all labelled data\n#For example, when label=\"waiting time\" it will only keep the latter label while assigning 0 to the remaining.\n#Assumption column one: text/comment, column two: category/class\n\nnew.corpus <- function(corpus, label){\n gold <- vector(mode=\"numeric\", length=length(corpus[[1]]))\n for(i in 1:length(corpus[[1]])){\n for(j in 2:length(corpus)){\n if(grepl(label, as.character(corpus[i,j]))){\n gold[i]<-label\n }\n }\n }\n \n return(gold)\n}\n" }, { "alpha_fraction": 0.605420708656311, "alphanum_fraction": 0.6145603656768799, "avg_line_length": 31.37755012512207, "blob_id": "890b4dc8b29f2ca868df56d766bb286efdd2e9af", "content_id": "86f2c3f0b01f53ee45accc88d8a119120688b37e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3173, "license_type": "permissive", "max_line_length": 140, "num_lines": 98, "path": "/system_b/run.prediction.R", 
"repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "run.prediction <-function(training.data, prediction.data, output.path) {\n \n all_data = rbind.fill(training.data, prediction.data)\n \n dataset_size = length(all_data$comment)\n train_data_count = length(training.data$comment)\n \n dtm = nlp.preprocess(all_data$comment)\n \n dtm.train <- dtm[1:train_data_count, ]\n dtm.test <- dtm[(train_data_count+1):dataset_size, ]\n \n labels <- c('environment', 'waiting time', 'staff attitude and professionalism', 'care quality')\n \n for (label in labels) {\n labels.train <- as.factor(new.enet.corpus(training.data, label))\n \n if (label == 'waiting time' || label == 'care quality') {\n enet.train(dtm.train, labels.train, label)\n } \n else if (label == 'environment') {\n svm.train(dtm.train, labels.train, label, kernel.name = 'linear')\n }\n else { # staff attitude and professionalism\n svm.train(dtm.train, labels.train, label, kernel.name = 'rbf')\n }\n }\n \n label <- 'environment'\n env_pred <- svm.predict(dtm.test, label)\n env_pred_raw <- svm.predict(dtm.test, label, response.type='proba')\n \n label <- 'waiting time'\n wt_pred <- enet.predict(dtm.test, label)\n wt_pred_raw <- enet.predict(dtm.test, label, response.type='proba')\n \n label <- 'staff attitude and professionalism'\n saap_pred <- svm.predict(dtm.test, label)\n saap_pred_raw <- svm.predict(dtm.test, label, response.type='proba')\n \n label <- 'care quality'\n cq_pred <- enet.predict(dtm.test, label)\n cq_pred_raw <- enet.predict(dtm.test, label, response.type='proba')\n \n #combine predictions and save in correct format\n result_data <- data.frame(stringsAsFactors=FALSE)\n \n comments = prediction.data$comment\n max.num.categories = 11\n \n #food.and.parking = apply.dictionaries(comments) \n for (i in 1:length(saap_pred)) {\n row = data.frame(matrix(NA, 1, max.num.categories))\n \n # declare dummy column headers; required for rbind\n names(row) <-LETTERS[1:length(row)] \n \n aspects <- list()\n if (env_pred[i] != \"0\") {\n aspects[length(aspects) + 1] <- paste(env_pred[i], env_pred_raw[i, \"environment\"])\n }\n \n if (wt_pred[i] != \"0\") {\n aspects[length(aspects)+1] <- paste(wt_pred[i], wt_pred_raw[i, \"1\"])\n }\n \n if (saap_pred[i] != \"0\") {\n aspects[length(aspects)+1] <- paste(saap_pred[i], saap_pred_raw[i, \"staff attitude and professionalism\"])\n }\n \n if (cq_pred[i] != \"0\") {\n aspects[length(aspects)+1] <- paste(cq_pred[i], cq_pred_raw[i, \"1\"])\n }\n \n #if (length(food.and.parking[[i]]) > 0) {\n # aspects = c(aspects, food.and.parking[[i]])\n #}\n \n # insert comemnt at the fist column \n row[1, 1] = comments[i] \n \n # if no aspect found, default is 'other'\n if (length(aspects) > 0) {\n for (j in 1:length(aspects)) {\n row[1, j+1] = aspects[j] \n } \n }\n else {\n row[1, 2] = 'other 0.01'\n }\n \n result_data <- rbind(result_data, row)\n }\n \n # save data as csv file\n write.table(result_data, file=paste(output.path), row.names=FALSE, col.names = FALSE, sep = \",\", na = \"\", qmethod = c(\"escape\", \"double\"))\n cat (paste(\"System B output saved to the file: \", output.path))\n} " }, { "alpha_fraction": 0.7274582982063293, "alphanum_fraction": 0.7461721897125244, "avg_line_length": 33.97618865966797, "blob_id": "89a15b8f7cf5bb607bc0c64f818d6be63eb4a1dd", "content_id": "4b7f922445704ca2505b2637308048eb755bc43a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2947, "license_type": "permissive", 
"max_line_length": 164, "num_lines": 84, "path": "/README.md", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "## DEPEND SYSTEM\n\n\n### Introduction\nThis document includes the instructions to run the DEPEND aspect extraction system and\nclassify new patient comments. The system consists three sub-systems: System A, System B\nand a script to combine two systems. The document also discuss the processes to setup\nnecessary environment run the system.\n\n### Environment setup\nThis system was developed and tested on a Linux system (Ubuntu 16.04, 64-bit), so it is\nrecommended to use a linux system for the best performance.\n#### Install Python\nInstall python 2.7 to run the system. After installing python, nevigate to the project root directory and run the following commands to install the required python \nmodules. \n\n``` \npip install -r requirements.txt\n```\n\n#### NLTK data\nSystem A requires NLTK stopwords corpus to run. If you do not have have nltk python module installed, run the following \ncommand to install: \n\n``` \n$ pip install nltk\n```\n\n\nTo download nltk corpus, run the commands below after starting the python command prompt:\n\n``` \n> import NLTK\n> nltk.download('stopwords')\n```\n\n* In the Python command line type “import NLTK” and press Enter\n* After that type “nltk.download('stopwords')”\n\n#### Install R\nSystem B was developed in R. It requires R version 3.2.3. Apart from the built in packages, the\nsystem requires the following R packages:\n\n* plyr 1.8.4\n* dplyr 0.7.4\n* NLP 0.1-1d\n* tm 0.7-1\n* RWeka 0.4-34\n* e1071 1.6-8\n* caret 6.0-77\n* RTextTools 1.4.2\n* Glmnet 2.0-13\n* kernlab 0.9-25\n* Mlr 2.11\n\nTo install a R package, start the R command prompt and run:\n\n```\n> install.packages(\"plyr\")\n```\n\n### Run the System\n\nBefore making prediction, the system should be trained. The Sub-system A and Sub-system B require separate training \ndatafiles. Training datafile for Sub-system A should be copied to `system_a/files/` directory and the file name should be \n`site_<site_key>_dataset.csv`, where `<site_key>` should be replaced by either `a` or `b` based on the type of the data source. \n\nSimilarly, dataset file for Sub-system B should be copied to `system_b/files/` directory and the file name should be \n`r_site_<site_key>_dataset.csv`. \n\nAfter placing the training files in the appropriate directories, run the following command to train the models and \nmake prediction:\n\n```\n./run_prediction.sh <path/to/data/file.csv> <dataset type>\n```\n\n`<path/to/data/file.csv>` should be replaced by the csv file path that contains patient comments. An example datafile is \ngiven at 'sample_data_file.csv'. Moreover, `<dataset type>` should be replaced by either `SITE-A` or `SITE-B` based on the type of the data source.\nBy default, the outputs will be saved in the root directory of the system. There are three types of outputs:\n\n1. `output.csv`: this file contains prediction per comment. \n2. `top_comments_system_a.csv`: Top five comments predicted by the System A \n3. 
`top_comments_system_b.csv`: top five comments predicted by the System B\n\n" }, { "alpha_fraction": 0.5969230532646179, "alphanum_fraction": 0.6092307567596436, "avg_line_length": 26.16666603088379, "blob_id": "549cac0b9bfa0249345bb62d5edf1ab6ccb6e1bf", "content_id": "b7b38cfb2a7e6d4b052d64ecf3c63fb1424d2411", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 325, "license_type": "permissive", "max_line_length": 60, "num_lines": 12, "path": "/system_b/new.enet.corpus.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#generate gold data for elastic net and SVMs\nnew.enet.corpus <- function(corpus, label){\n gold <- vector(mode=\"numeric\", length=length(corpus[[1]]))\n for(i in 1:length(corpus[[1]])){\n for(j in 2:length(corpus)){\n if(grepl(label, as.character(corpus[i,j]))){\n gold[i]<-label\n }\n }\n }\n return(gold)\n}" }, { "alpha_fraction": 0.5755780935287476, "alphanum_fraction": 0.5862880349159241, "avg_line_length": 35.89521026611328, "blob_id": "bdc211b86da2e8c7d8f8b750a6fd5b6b46b86dfd", "content_id": "70676c9f867157183f133bfd792e3352534e02bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 12325, "license_type": "permissive", "max_line_length": 169, "num_lines": 334, "path": "/system_b/load.ml.pipeline.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#NB Pipeline\n\nrun.ml <- function(seed = NULL, data, test.data, LABEL){\n #dataset_size = length(data[[1]])\n #train_data_count = length(data[[1]])*train_split\n \n set.seed(seed)\n \n n1 <- data.frame(comment = data$comment)\n n1$class <- new.corpus(data, LABEL)\n \n n1$class <- as.factor(n1$class)\n n1 <- n1[sample(nrow(n1)), ]\n \n n2 <- data.frame(comment = test.data$comment)\n n2$class <- new.corpus(test.data, LABEL)\n \n n2$class <- as.factor(n2$class)\n n2 <- n2[sample(nrow(n2)), ]\n \n \n \n #NLP pre-processing\n training.corpus <- Corpus(VectorSource(n1$comment))\n training.corpus.processed <- training.corpus %>% \n tm_map(content_transformer(tolower)) %>% \n tm_map(removePunctuation) %>%\n tm_map(stemDocument) %>%\n tm_map(removeNumbers) %>%\n tm_map(removeWords, stopwords(kind=\"en\")) %>%\n tm_map(stripWhitespace)\n \n test.corpus <- Corpus(VectorSource(n2$comment))\n test.corpus.processed <- test.corpus %>% \n tm_map(content_transformer(tolower)) %>% \n tm_map(removePunctuation) %>%\n tm_map(stemDocument) %>%\n tm_map(removeNumbers) %>%\n tm_map(removeWords, stopwords(kind=\"en\")) %>%\n tm_map(stripWhitespace)\n \n #generate document term matrix\n dtm.train <- DocumentTermMatrix(training.corpus.processed)\n dtm.test <- DocumentTermMatrix(test.corpus.processed)\n \n #subset dataset (60/40%) to training and test\n df.train <- n1\n df.test <- n2\n \n corpus.clean.train <- training.corpus.processed\n corpus.clean.test <- test.corpus.processed\n \n bitrigramtokeniser <- function(x, n) {\n RWeka:::NGramTokenizer(x, RWeka:::Weka_control(min =1, max = 3))\n }\n \n #generate document term matrix for NB\n dtm.train.nb <- DocumentTermMatrix(corpus.clean.train, control=list(wordLengths=c(2, Inf), \n tokenize = bitrigramtokeniser, \n weighting = function(x) weightTfIdf(x, normalize = FALSE),\n bounds=list(global=c(floor(length(corpus.clean.train)*0.01), floor(length(corpus.clean.train)*.8)))\n ))\n \n dtm.test.nb <- DocumentTermMatrix(corpus.clean.test, control=list(wordLengths=c(2, Inf), \n tokenize = bitrigramtokeniser, \n weighting = function(x) 
weightTfIdf(x, normalize = FALSE),\n bounds=list(global=c(floor(length(corpus.clean.test)*0.01), floor(length(corpus.clean.test)*.8)))\n ))\n \n # Apply the convert_count function to get final training and testing DTMs\n trainNB <- apply(dtm.train.nb, 2, convert_count)\n testNB <- apply(dtm.test.nb, 2, convert_count)\n \n # train model\n classifier <- naiveBayes(trainNB, df.train$class, laplace = 1)\n \n # save the model\n #save(classifier, file = paste0(LABEL, \".model\"))\n \n # predict \n pred <- predict(classifier, newdata=testNB) \n table(\"Predictions\"= pred, \"Actual\" = df.test$class )\n \n #evaluation and analysis\n conf.mat <- confusionMatrix(pred, df.test$class, positive=LABEL)\n \n header<-paste(\"\\nP\", \"R\", \"F1\\n\", sep = '\\t')\n content<-paste(round(conf.mat$byClass[5] ,5), round(conf.mat$byClass[6] ,5), round(conf.mat$byClass[7] ,5), sep = '\\t')\n \n cat(header)\n cat(content)\n}\n\n#Elastic net, SVM Guassian and linear\n run.lasso <-function(training.data, test.data, LABEL) {\n\n labels.train <- as.factor(new.enet.corpus(training.data, LABEL)) \n labels.test <- as.factor(new.enet.corpus(test.data, LABEL)) \n \n dtm.train <- nlp.preprocess(training.data$comment)\n dtm.test <- nlp.preprocess(test.data$comment, Terms(dtm.train))\n \n dictionary <- as.data.frame(Terms(dtm.train))\n saveRDS(dictionary, file=\"dictionary.Rda\")\n \n models.path <<- 'combine.models/'\n enet.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL)\n \n cat(\"\\nSVM - Radial Basis kernel\\n\")\n svm.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL, kernel='rbf')\n \n cat(\"\\nSVM - linear kernel\\n\")\n svm.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL)\n}\n\nrun.prediction <- function(data){\n \n output.path = 'combine.outputs/'\n \n #NLP pre-processing\n corpus <- VCorpus(VectorSource(data$comment))\n corpus.processed <- corpus %>% \n tm_map(content_transformer(tolower)) %>% \n tm_map(removePunctuation) %>%\n tm_map(stemDocument) %>%\n tm_map(removeNumbers) %>%\n tm_map(removeWords, stopwords(kind=\"en\")) %>%\n tm_map(stripWhitespace)\n \n \n #generate document term matrix\n dtm <- DocumentTermMatrix(corpus.processed)\n \n bitrigramtokeniser <- function(x, n) {\n RWeka:::NGramTokenizer(x, RWeka:::Weka_control(min =1, max = 3))\n }\n \n training.dict <- readRDS(\"dictionary.Rda\")\n training.terms <<- as.vector(training.dict[, 1])\n \n dtm.test <- DocumentTermMatrix(corpus.processed, control = list(dictionary=training.terms, \n wordLengths=c(2, Inf), \n #tokenize = bitrigramtokeniser,\n weighting = function(x) weightTfIdf(x, normalize = FALSE)\n #bounds=list(global=c(floor(length(corpus.processed)*0.01), floor(length(corpus.processed)*.8)))\n ))\n\n # Apply the convert_count function to get final training and testing DTMs\n #testNB <- apply(dtm.test.nb, 2, convert_count)\n \n #load saved models\n load.models()\n \n # predict using saved models\n env_pred <- as.character(predict(env.model, dtm.test)) \n env_pred_raw <- predict(env.model, dtm.test, type=(\"probabilities\")) \n #print (env_pred)\n \n test_mat <- testmat(training.terms, as.matrix(dtm.test))\n wt_pred <- as.character(predict(wt.model, newx=test_mat, s = \"lambda.min\", type=\"class\"))\n wt_pred_raw <- predict(wt.model, newx=test_mat, s = \"lambda.min\", type=\"response\")\n \n saap_pred <- as.character(predict(saap.model, dtm.test))\n saap_pred_raw <- predict(saap.model, dtm.test, type='probabilities')\n #print (saap_pred)\n \n cq_pred <- as.character(predict(cq.model, newx=test_mat, s = 
\"lambda.min\", type=\"class\"))\n cq_pred_raw <- predict(cq.model, newx=test_mat, s = \"lambda.min\", type=\"response\")\n \n #combine predictions and save in correct format\n result_data <- data.frame(stringsAsFactors=FALSE)\n \n comments = data$comment\n max.num.categories = 11\n \n #food.and.parking = apply.dictionaries(comments) \n for (i in 1:length(saap_pred)) {\n row = data.frame(matrix(NA, 1, max.num.categories))\n \n # declare dummy column headers; required for rbind\n names(row) <-LETTERS[1:length(row)] \n \n aspects = list()\n if (env_pred[i] != \"0\") {\n #aspects[length(aspects) + 1] <- env_pred[i]\n aspects[length(aspects) + 1] <- paste(c(env_pred[i], env_pred_raw[i, \"environment\"]), collapse = \" \")\n }\n \n if (wt_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- wt_pred[i]\n aspects[length(aspects)+1] <- paste(c(wt_pred[i], wt_pred_raw[i, \"1\"]), collapse = \" \")\n }\n \n if (saap_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- saap_pred[i]\n aspects[length(aspects)+1] <- paste(c(saap_pred[i], saap_pred_raw[i, \"staff attitude and professionalism\"]), collapse = \" \")\n }\n \n if (cq_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- cq_pred[i]\n aspects[length(aspects)+1] <- paste(c(cq_pred[i], cq_pred_raw[i, \"1\"]), collapse = \" \")\n }\n \n #if (length(food.and.parking[[i]]) > 0) {\n # aspects = c(aspects, food.and.parking[[i]])\n #}\n \n # insert comemnt at the fist column \n row[1, 1] = comments[i] \n \n # if no aspect found, default is 'other'\n if (length(aspects) > 0) {\n for (j in 1:length(aspects)) {\n row[1, j+1] = aspects[j] \n } \n }\n else {\n row[1, 2] = 'other 0.10'\n }\n \n result_data <- rbind(result_data, row)\n }\n \n # save data as csv file\n write.table(result_data, file=paste( output.path, 'predictions_155.csv'), row.names=FALSE, col.names = FALSE, sep = \",\", na = \"\", qmethod = c(\"escape\", \"double\"))\n}\n\nrun.prediction.2 <- function(training.data, prediction.data){ \n \n dataset_size = length(data[[1]])\n train_data_count = length(training.data[[1]])\n \n set.seed(seed)\n \n #NLP pre-processing\n corpus <- Corpus(VectorSource(data$comment))\n corpus.processed <- corpus %>% \n tm_map(content_transformer(tolower)) %>% \n tm_map(removePunctuation) %>%\n tm_map(stemDocument) %>%\n tm_map(removeNumbers) %>%\n tm_map(removeWords, stopwords(kind=\"en\")) %>%\n tm_map(stripWhitespace)\n \n \n #generate document term matrix\n dtm <- DocumentTermMatrix(corpus.processed)\n \n bitrigramtokeniser <- function(x, n) {\n RWeka:::NGramTokenizer(x, RWeka:::Weka_control(min =1, max = 3))\n }\n \n #generate document term matrix for NB\n dtm.test.nb <- DocumentTermMatrix(corpus.processed, control=list(wordLengths=c(2, Inf), \n tokenize = bitrigramtokeniser, \n weighting = function(x) weightTfIdf(x, normalize = FALSE),\n bounds=list(global=c(floor(length(corpus.processed)*0.01), floor(length(corpus.processed)*.8)))))\n \n # Apply the convert_count function to get final training and testing DTMs\n testNB <- apply(dtm.test.nb, 2, convert_count)\n \n #load saved models\n load.models()\n \n # predict using saved models\n env_pred <- as.character(predict(env.model, newdata=testNB)) \n wt_pred <- as.character(predict(wt.model, newdata=testNB))\n saap_pred <- as.character(predict(saap.model, newdata=testNB))\n cq_pred <- as.character(predict(cq.model, newdata=testNB))\n \n env_pred_raw <- predict(env.model, newdata=testNB, type=(\"raw\")) \n wt_pred_raw <- predict(wt.model, newdata=testNB, type=(\"raw\"))\n saap_pred_raw <- predict(saap.model, 
newdata=testNB, type=(\"raw\"))\n cq_pred_raw <- predict(cq.model, newdata=testNB, type=(\"raw\"))\n \n print (env_pred_raw)\n \n #combine predictions and save in correct format\n result_data <- data.frame(stringsAsFactors=FALSE)\n \n comments = data$comment\n max.num.categories = 11\n \n #food.and.parking = apply.dictionaries(comments) \n for (i in 1:length(saap_pred)) {\n row = data.frame(matrix(NA, 1, max.num.categories))\n \n # declare dummy column headers; required for rbind\n names(row) <-LETTERS[1:length(row)] \n \n aspects = list()\n if (env_pred[i] != \"0\") {\n #aspects[length(aspects) + 1] <- env_pred[i]\n aspects[length(aspects) + 1] <- paste(c(env_pred[i], env_pred_raw[i, \"environment\"]), collapse = \" \")\n }\n \n if (wt_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- wt_pred[i]\n aspects[length(aspects)+1] <- paste(c(wt_pred[i], wt_pred_raw[i, \"waiting time\"]), collapse = \" \")\n }\n \n if (saap_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- saap_pred[i]\n aspects[length(aspects)+1] <- paste(c(saap_pred[i], saap_pred_raw[i, \"staff attitude and professionalism\"]), collapse = \" \")\n }\n \n if (cq_pred[i] != \"0\") {\n #aspects[length(aspects)+1] <- cq_pred[i]\n aspects[length(aspects)+1] <- paste(c(cq_pred[i], cq_pred_raw[i, \"care quality\"]), collapse = \" \")\n }\n \n #if (length(food.and.parking[[i]]) > 0) {\n # aspects = c(aspects, food.and.parking[[i]])\n #}\n \n # insert comemnt at the fist column \n row[1, 1] = comments[i] \n \n # if no aspect found, default is 'other'\n if (length(aspects) > 0) {\n for (j in 1:length(aspects)) {\n row[1, j+1] = aspects[j] \n } \n }\n else {\n row[1, 2] = 'other 0.4'\n }\n \n result_data <- rbind(result_data, row)\n }\n \n # save data as csv file\n write.table(result_data, file=paste(output.path, 'predictions_111.csv'), row.names=FALSE, col.names = FALSE, sep = \",\", na = \"\", qmethod = c(\"escape\", \"double\"))\n}\n\n\n" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.6911764740943909, "avg_line_length": 17.200000762939453, "blob_id": "f087b9ad8163ff1e3e8076c0e6afe1cce54fe4ef", "content_id": "b5a2122ed9af70f5bd4981fd840fdd0dcf1bd8c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 272, "license_type": "permissive", "max_line_length": 29, "num_lines": 15, "path": "/system_b/load.libs.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "## Lib function\nload.libs <- function(){\n library(plyr)\n library(dplyr)\n library(NLP) \n library(tm)\n library(RWeka)\n library(e1071)\n library(caret)\n library(RTextTools)\n library(glmnet)\t# for lasso\n library(kernlab)# for svm\n library(mlr)\n library(optparse)\n}" }, { "alpha_fraction": 0.635706901550293, "alphanum_fraction": 0.6388028860092163, "avg_line_length": 29.28125, "blob_id": "9069f0efb9934872ddb95c6421a86e583ccd8de2", "content_id": "2dbc40c4413a7770947cd3dce41b1e348753d0e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 969, "license_type": "permissive", "max_line_length": 142, "num_lines": 32, "path": "/run_prediction.sh", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\nDATA_FILE=$1\nDATASET=$2 # SITE-A / SITE-B\n\nif [ -z \"$1\" ]; then\n echo \"Please pass a csv data file\"\n exit\nfi\n\nif [ \"$DATASET\" = \"SITE-A\" ]; then\n TRAINING_SYSTEM_A=\"site_a_dataset.csv\"\n TRAINING_SYSTEM_B=\"r_site_a_dataset.csv\"\n 
DATABASE_TYPE=\"site-a\"\nelse \n if [ \"$DATASET\" = \"SITE-B\" ]; then\n TRAINING_SYSTEM_A=\"site_b_dataset.csv\"\n TRAINING_SYSTEM_B=\"r_site_b_dataset.csv\"\n DATABASE_TYPE=\"site-b\"\n else\n echo \"Error: Invalid dataset type.\"\n exit\n fi\nfi\n\ncd system_a/ \npython analyzer_main.py --train files/$TRAINING_SYSTEM_A --data ../$DATA_FILE --output ../system_c/files/output_system_a.csv \ncd ../system_b/ \nRscript main.R --train files/$TRAINING_SYSTEM_B --data ../$DATA_FILE --dataset $DATABASE_TYPE --output ../system_c/files/output_system_b.csv \ncd ../system_c/ \npython main.py --a files/output_system_a.csv --b files/output_system_b.csv --dataset $DATABASE_TYPE --output ../output.csv\n" }, { "alpha_fraction": 0.5626547932624817, "alphanum_fraction": 0.5824665427207947, "avg_line_length": 35.727272033691406, "blob_id": "e780063215ef33968036bfd8dff35d8514042e7c", "content_id": "dd1aa5a833e8439e6cc41f8471827a2f9205ad18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2019, "license_type": "permissive", "max_line_length": 116, "num_lines": 55, "path": "/system_c/main.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "import argparse\nfrom scripts.combine_two_systems import CombineSystems\n\ncombine_systems = CombineSystems()\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--a\", type=str, default='files/system_a_output.csv', help=\"--a file_path\")\nparser.add_argument(\"--b\", type=str, default='files/system-b_output.csv', help=\"--a file_path\")\nparser.add_argument(\"--dataset\", type=str, default='site-a', help=\"--dataset dataset_type\")\nparser.add_argument(\"--output\", type=str, default='files/output.csv', help=\"--output file_path\")\n\nargs = parser.parse_args()\nsystem_a_output = args.a\nsystem_b_output = args.b\ndataset = args.dataset\noutput_file = args.output\n\nfile_a_path = system_a_output\nfile_b_path = system_b_output\noutput_file_path = output_file\n\nthresholds_a = {'environment': 0.1,\n 'waiting time': 0.7,\n 'staff attitude and professionalism': 0.4,\n 'care quality': 0.2,\n 'other': 0.6,\n }\n\nthresholds_b = {'environment': 0.3,\n 'waiting time': 0.1,\n 'staff attitude and professionalism': 0.1,\n 'care quality': 0.6,\n 'other': 0.4\n }\n\nif dataset == 'site-b':\n thresholds_a = {'environment': 0.6,\n 'waiting time': 0.5,\n 'staff attitude and professionalism': 0.5,\n 'care quality': 0.4,\n 'other': 0.7,\n }\n\n thresholds_b = {'environment': 0.1,\n 'waiting time': 0.8,\n 'staff attitude and professionalism': 0.1,\n 'care quality': 0.1,\n 'other': 0.1\n }\n\ncombine_systems.combine_by_dynamic_threshold(file_a_path, file_b_path, output_file_path, thresholds_a, thresholds_b)\ncombine_systems.extract_top_comments(file_a_path, '../top_comments_system_a.csv')\ncombine_systems.extract_top_comments(file_b_path, '../top_comments_system_b.csv')\nprint '\\n\\nThe final outputs saved successfully!\\n'" }, { "alpha_fraction": 0.5789036750793457, "alphanum_fraction": 0.5948920249938965, "avg_line_length": 37.52799987792969, "blob_id": "2aefad049d94a17a4a8b7dd46f35690a8f450e90", "content_id": "cc26415346bff726b36ae8f7030f39677bf13287", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4816, "license_type": "permissive", "max_line_length": 113, "num_lines": 125, "path": "/system_c/scripts/result_processing.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "import numpy as 
np\nimport matplotlib.pyplot as plt\nfrom utilities import Utilities\n\n\nclass ResultProcessor:\n\n    def __init__(self, plot=False):\n        self.plot = plot\n        self.utilities = Utilities()\n\n    def get_total_positives_negatives(self, file_path, plot=False):\n        rows = self.utilities.read_from_csv(file_path)\n\n        all_sentiments = []\n        for row in rows:\n            del row[0]\n            for item in row:\n                all_sentiments.append(item.rsplit(' ', 1)[1])\n\n        # for review in results.keys():\n        #     all_sentiments = all_sentiments + results[review][2]\n\n        positives = all_sentiments.count('positive')\n        negatives = all_sentiments.count('negative')\n\n        print(\"Total positive aspects: %d\" % positives)\n        print(\"Total negative aspects: %d\" % negatives)\n\n        # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n        labels = 'Positive', 'Negative'\n        sizes = [positives, negatives]\n        explode = (0, 0.1)  # only \"explode\" the 2nd slice (i.e. 'Negative')\n\n        fig1, ax1 = plt.subplots()\n        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n                shadow=True, startangle=90)\n        ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.\n        plt.title('Total positive and negative feedbacks (positives: %d, negatives: %d)' % (positives, negatives))\n\n        if plot is True:\n            plt.show()\n\n    def per_class_sentiments(self, file_path, plot=False):\n        rows = self.utilities.read_from_csv(file_path)\n\n        per_class_data = {}\n        for row in rows:\n            del row[0]\n\n            for item in row:\n                sentiment = item.rsplit(' ', 1)[1]\n                aspect = item.rsplit(' ', 1)[0].rsplit(' ', 1)[0]\n                if aspect in per_class_data.keys():\n                    per_class_data[aspect].append(sentiment)\n                else:\n                    per_class_data[aspect] = [sentiment]\n\n        aspects = per_class_data.keys()\n        positives = []\n        negatives = []\n        for aspect in aspects:\n            positives.append(per_class_data[aspect].count('positive'))\n            negatives.append(per_class_data[aspect].count('negative'))\n\n\n        for aspect, positive, negative in zip(aspects, positives, negatives):\n            print(\"%s : %d positive, %d negative\" % (aspect, positive, negative))\n\n\n        # plot as a barchart\n        N = len(aspects)\n        highest = max([x + y for x, y in zip(positives, negatives)])\n        step = 5000\n        ind = np.arange(N)    # the x locations for the groups\n        width = 0.35       # the width of the bars: can also be len(x) sequence\n\n        fig1, ax1 = plt.subplots()\n        fig1.autofmt_xdate()\n        p1 = plt.bar(ind, negatives, width, color='#d62728')\n        p2 = plt.bar(ind, positives, width, bottom=negatives)\n\n        plt.ylabel('Number of labels')\n        plt.xlabel('Aspects')\n        plt.title('Sentiments by aspect categories')\n        plt.xticks(ind, aspects)\n        plt.yticks(np.arange(0, highest + step, step))\n        plt.legend((p2[0], p1[0]), ('Positive', 'Negative'))\n\n        # show pie chart for positive aspects per category\n        positives_percentage = [0] * len(positives)\n        if sum(positives) > 0:\n            positives_percentage = [(float(n_positive)/sum(positives))*100 for n_positive in positives]\n\n        # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n        labels = aspects\n        sizes = positives_percentage\n        explode = [0] * len(aspects)\n        explode[positives_percentage.index(max(positives_percentage))] = 0.1\n\n        fig1, ax1 = plt.subplots()\n        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n                shadow=True, startangle=90)\n        ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.\n        plt.title('Positive aspects per category')\n\n        # show pie chart for negative aspects per category\n        negatives_percentage = [0] * len(negatives)\n        if sum(negatives) > 0:\n            
negatives_percentage = [(float(n_negative) / sum(negatives)) * 100 for n_negative in negatives]\n\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = aspects\n sizes = negatives_percentage\n explode = [0]*len(aspects)\n explode[negatives_percentage.index(max(negatives_percentage))] = 0.1\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Negative aspects per category')\n\n if plot is True:\n plt.show()\n" }, { "alpha_fraction": 0.4965035021305084, "alphanum_fraction": 0.6958041787147522, "avg_line_length": 15.823529243469238, "blob_id": "635f440232446457b40bcbdfd25f67d49aac09c1", "content_id": "5690d6c93721758f9680f3c80338f3af8fe7ca44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 286, "license_type": "permissive", "max_line_length": 34, "num_lines": 17, "path": "/requirements.txt", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "backports.functools-lru-cache==1.4\ncycler==0.10.0\net-xmlfile==1.0.1\njdcal==1.3\nmatplotlib==2.1.1\nnltk==3.2.5\nnumpy==1.13.3\nopenpyxl==2.4.9\npandas==0.21.1\npyparsing==2.2.0\npython-dateutil==2.6.1\npytz==2017.3\nscikit-learn==0.19.1\nscipy==1.0.0\nsix==1.11.0\nsklearn==0.0\nsubprocess32==3.2.7\n" }, { "alpha_fraction": 0.6966507434844971, "alphanum_fraction": 0.7004784941673279, "avg_line_length": 25.100000381469727, "blob_id": "4d9e6237e4b40eff9b1acd4ff5ac0896119364ca", "content_id": "ef3a855c68e8904df897d3177f3ab7f288f50c7d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1045, "license_type": "permissive", "max_line_length": 108, "num_lines": 40, "path": "/system_b/evaluation.main.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#The main script to run experiments.\n\n# hide warnings\n#options(warn = -1)\n\nsource(\"load.libs.R\")\nsource(\"convert.count.R\")\nsource(\"load.data.R\")\nsource(\"new.corpus.R\")\nsource(\"clean.data.R\")\nsource(\"enet.functions.R\")\nsource(\"new.enet.corpus.R\")\n#source(\"apply.dictionaries.R\")\nsource(\"load.models.R\")\nsource(\"nlp.preprocessing.R\")\nsource(\"svm.classifier.R\")\nsource(\"enet.classifier.R\")\nsource(\"nb.classifier.R\")\nsource(\"run.evaluation.R\")\nsource(\"run.training.R\")\nsource(\"run.prediction.R\")\n\nload.libs()\n\nmodels.path <<- 'combine.models/'\noutput.path <<- 'combine.outputs/mmhsct_output_confidence_155.csv'\n\n\ntraining.data <-load.training.data()\ntest.data <- load.test.data()\n\ntraining.data$comment <- clean.data(training.data)\ntest.data$comment <- clean.data(test.data)\n\ncategories = c('environment', 'waiting time', 'staff attitude and professionalism', 'care quality', 'other')\n#for (target in categories) {\n target = 'waiting time'\n #cat (paste(\"\\n\\n********************* \", target))\n run.evaluation(training.data, test.data, target)\n#}\n\n" }, { "alpha_fraction": 0.6390306353569031, "alphanum_fraction": 0.6390306353569031, "avg_line_length": 33.130435943603516, "blob_id": "2d4d529d41bcd39fa7b28af3d43cd90dcca6266e", "content_id": "f0263fb720234c5f9b9d2e2164e09d43e3c60780", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 784, "license_type": "permissive", "max_line_length": 98, "num_lines": 23, "path": 
"/system_b/run.training.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "run.training <-function(training.data) {\n labels <- c('environment', 'waiting time', 'staff attitude and professionalism', 'care quality')\n \n for (label in labels) {\n dtm.train <- nlp.preprocess(training.data$comment)\n labels.train <- as.factor(new.enet.corpus(training.data, label))\n \n print (dtm.train)\n \n dictionary <- as.data.frame(Terms(dtm.train))\n saveRDS(dictionary, file=terms.path)\n \n if (label == 'waiting time' || label == 'care quality') {\n enet.train(dtm.train, labels.train, label)\n } \n else if (label == 'environment') {\n svm.train(dtm.train, labels.train, label, kernel.name = 'linear')\n }\n else { # staff attitude and professionalism\n svm.train(dtm.train, labels.train, label, kernel.name = 'rbf')\n }\n }\n}" }, { "alpha_fraction": 0.5108635425567627, "alphanum_fraction": 0.5229954719543457, "avg_line_length": 38.068965911865234, "blob_id": "d2129408ade599a7c18c6c5a480807341f436a53", "content_id": "74bd2bcc47cfe5b8be22decac84de616312a386c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9067, "license_type": "permissive", "max_line_length": 149, "num_lines": 232, "path": "/system_c/scripts/combine_two_systems.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "from utilities import Utilities\n# from comment_level_evaluation import CommentLevelEvaluation\nimport operator\n\n\nclass CombineSystems:\n def __init__(self):\n self.utilities = Utilities()\n\n self.storage_path = 'comment-level-datasets-2/'\n # self.storage_path = 'r-combine-outputs/'\n # self.random_states = [111, 122, 133, 144, 155]\n self.categories = ['environment', 'waiting time', 'staff attitude professionalism', 'care quality', 'other']\n\n def is_valid_asp_from_from_system_a(self, aspect, confidence_value, thresholds):\n is_valid = False\n # thresholds = {'environment': 0.6,\n # 'waiting time': 0.5,\n # 'staff attitude and professionalism': 0.5,\n # 'care quality': 0.4,\n # 'other': 0.7,\n # }\n\n aspects = thresholds.keys()\n if aspect in aspects and float(confidence_value) >= thresholds[aspect]:\n is_valid = True\n\n return is_valid\n\n def is_valid_asp_from_from_system_b(self, aspect, confidence_value, thresholds):\n is_valid = False\n # thresholds = {'environment': 0.1,\n # 'waiting time': 0.8,\n # 'staff attitude and professionalism': 0.1,\n # 'care quality': 0.1,\n # 'other': 0.1\n # }\n\n aspects = thresholds.keys()\n if aspect in aspects and float(confidence_value) >= thresholds[aspect]:\n is_valid = True\n\n return is_valid\n\n def apply_dictionaries(self, comment):\n food_lexicon = ['food', 'canteen', 'canten', 'coffee', 'cofee', 'coffe', 'coffee', 'tea', 'drink', 'drinks']\n parking_lexicon = ['car park', 'car-park', 'carpark', 'parking', 'bicycle']\n\n aspects = []\n all_words = self.utilities.get_lemma(comment)\n lemmatized_words = all_words.values()\n\n for word in food_lexicon:\n if word in lemmatized_words:\n aspects.append('food')\n break\n\n for word in parking_lexicon:\n if word in lemmatized_words:\n aspects.append('parking')\n break\n\n return aspects\n\n def combine_by_dynamic_threshold(self, file_a_path, file_b_path, output_file_path, thresholds_a, thresholds_b, evaluation=False):\n\n file_a = self.utilities.read_from_csv(file_a_path)\n file_b = self.utilities.read_from_csv(file_b_path)\n\n output = []\n for row_a, row_b in zip(file_a, file_b):\n\n comment = row_a[0]\n aspects = 
[]\n\n # remove comment from the first column\n del row_a[0]\n del row_b[0]\n\n for a, b in zip(row_a, row_b):\n if not a and not b and a in self.categories:\n break\n\n # union with threshold\n if a is not None:\n asp_threshold = a.rsplit(' ', 1)[0]\n sentiment = a.rsplit(' ', 1)[1]\n aspect_a = asp_threshold.rsplit(' ', 1)[0]\n asp_snt = aspect_a + \" \" + sentiment\n if not any(aspect_a in asp for asp in aspects):\n confidence_value_a = asp_threshold.rsplit(' ', 1)[1]\n is_valid = self.is_valid_asp_from_from_system_a(aspect_a, confidence_value_a, thresholds_a)\n if is_valid:\n aspects.append(asp_snt)\n\n if b is not None:\n aspect_b = b.rsplit(' ', 1)[0]\n if aspect_b in self.categories and not any(aspect_b in asp for asp in aspects):\n confidence_value_b = b.rsplit(' ', 1)[1]\n is_valid = self.is_valid_asp_from_from_system_b(aspect_b, confidence_value_b, thresholds_b)\n if is_valid:\n aspects.append(aspect_b)\n\n # Apply food and parking dictionaries\n # TURN OFF THIS SNIPPET BEFORE EVALUATION\n if evaluation is False:\n asps_from_dictionaries = self.apply_dictionaries(comment)\n if len(asps_from_dictionaries) > 0:\n # if only environment, then replace with food/parking\n if len(aspects) == 1 and aspects[0] == 'environment':\n aspects = asps_from_dictionaries\n else:\n aspects = aspects + asps_from_dictionaries\n\n if len(aspects) < 1:\n # aspects = ['other']\n aspects = ['other negative']\n\n output.append([comment] + aspects)\n self.utilities.save_list_as_csv(output, output_file_path)\n\n def combine_by_static_threshold(self, file_a_path, file_b_path, threshold_a, threshold_b, output_file_path):\n\n file_a = self.utilities.read_from_csv(file_a_path)\n file_b = self.utilities.read_from_csv(file_b_path)\n\n output = []\n for row_a, row_b in zip(file_a, file_b):\n\n comment = row_a[0]\n aspects = []\n\n # remove comment from the first column\n del row_a[0]\n del row_b[0]\n\n for a, b in zip(row_a, row_b):\n if not a and not b and a in self.categories:\n break\n\n # union with threshold\n if a and a.rsplit(' ', 1)[0] not in aspects and float(a.rsplit(' ', 1)[1]) >= threshold_a:\n aspects.append(a.rsplit(' ', 1)[0])\n\n if b and b.rsplit(' ', 1)[0] in self.categories and b.rsplit(' ', 1)[0] not in aspects and float(b.rsplit(' ', 1)[1]) >= threshold_b:\n aspects.append(b.rsplit(' ', 1)[0])\n\n # Apply food and parking dictionaries\n # asps_from_dictionaries = self.apply_dictionaries(comment)\n # if len(asps_from_dictionaries) > 0:\n # aspects = aspects + asps_from_dictionaries\n\n if len(aspects) < 1:\n aspects = ['other']\n\n output.append([comment] + aspects)\n\n self.utilities.save_list_as_csv(output, output_file_path)\n\n def extract_top_comments(self, data_file, output_file_path):\n rows = self.utilities.read_from_csv(data_file)\n\n envs = {}\n wts = {}\n saaps = {}\n cqs = {}\n ots = {}\n\n for row in rows:\n comment = row[0]\n del rows[0]\n\n for item in row:\n # if there is sentiment remove it\n if any(snt_cat in item for snt_cat in self.utilities.sentiment_classes):\n item = item.rsplit(' ', 1)[0]\n\n if item and item.rsplit(' ', 1)[0] == 'environment':\n envs[comment] = float(item.rsplit(' ', 1)[1])\n\n if item and item.rsplit(' ', 1)[0] == 'waiting time':\n wts[comment] = float(item.rsplit(' ', 1)[1])\n\n if item and item.rsplit(' ', 1)[0] == 'staff attitude and professionalism':\n saaps[comment] = float(item.rsplit(' ', 1)[1])\n\n if item and item.rsplit(' ', 1)[0] == 'care quality':\n cqs[comment] = float(item.rsplit(' ', 1)[1])\n\n if item and item.rsplit(' ', 1)[0] == 
'other':\n ots[comment] = float(item.rsplit(' ', 1)[1])\n\n # sort comments by the descending order of confidence values\n sorted_envs = [comment_data[0] for comment_data in sorted(envs.items(), key=operator.itemgetter(1), reverse=True)]\n sorted_wts = [comment_data[0] for comment_data in sorted(wts.items(), key=operator.itemgetter(1), reverse=True)]\n sorted_saaps = [comment_data[0] for comment_data in sorted(saaps.items(), key=operator.itemgetter(1), reverse=True)]\n sorted_cqs = [comment_data[0] for comment_data in sorted(cqs.items(), key=operator.itemgetter(1), reverse=True)]\n sorted_ots = [comment_data[0] for comment_data in sorted(ots.items(), key=operator.itemgetter(1), reverse=True)]\n\n # prepare output to save\n output = [['Environment', 'Waiting time', 'Staff attitude and professionalism', 'Care quality', 'Other']]\n top = 5\n for i in range(0, top):\n comments = []\n\n try:\n comments.append(sorted_envs[i])\n except IndexError:\n comments.append(None)\n\n try:\n comments.append(sorted_wts[i])\n except IndexError:\n comments.append(None)\n\n try:\n comments.append(sorted_saaps[i])\n except IndexError:\n comments.append(None)\n\n try:\n comments.append(sorted_cqs[i])\n except IndexError:\n comments.append(None)\n\n try:\n comments.append(sorted_ots[i])\n except IndexError:\n comments.append(None)\n\n output.append(comments)\n self.utilities.save_list_as_csv(output, output_file_path)\n\n\n\n" }, { "alpha_fraction": 0.5369032025337219, "alphanum_fraction": 0.5430853962898254, "avg_line_length": 35.15901184082031, "blob_id": "27cef41c21e08f57153fda17fbbb7a20a5691f33", "content_id": "b33808d33e0d1bfa2351afb85893645e1d240102", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10514, "license_type": "permissive", "max_line_length": 121, "num_lines": 283, "path": "/system_a/scripts/utilities.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "import nltk\r\nfrom nltk import ngrams\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import sent_tokenize, WordPunctTokenizer\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport csv\r\nimport pickle\r\nimport re\r\n\r\n\r\nclass Utilities(object):\r\n\r\n def __init__(self):\r\n self.noun_phrase_tags = ['NN', 'NNS', 'NNP', 'NNPS']\r\n self.verb_phrase_tags = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\r\n self.adjective_phrase_tags = ['JJ', 'JJS', 'JJR']\r\n self.adverb_phrase_tags = ['RB', 'RBR', 'RBS']\r\n self.sentiment_classes = ['negative', 'neutral', 'positive']\r\n self.conjunctions_file_path = 'conjunctions.txt'\r\n self.wordnet_lemmatizer = WordNetLemmatizer()\r\n self.conjunctions = self.get_lines_from_text_file(self.conjunctions_file_path)\r\n\r\n def write_content_to_file(self, file_path, content):\r\n output_file = open(file_path, 'w')\r\n output_file.write(content)\r\n output_file.close()\r\n\r\n def get_lines_from_text_file(self, file_path):\r\n with open(file_path) as f:\r\n lines = f.readlines()\r\n\r\n content = [line.strip() for line in lines]\r\n\r\n return content\r\n\r\n def split_text_into_insentence(self, text):\r\n sentences = sent_tokenize(text)\r\n\r\n return sentences\r\n\r\n def read_from_csv(self, file_path):\r\n data = []\r\n\r\n with open(file_path, 'rb') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=',')\r\n for row in spamreader:\r\n data.append(row)\r\n\r\n return data\r\n\r\n def store_list_to_file(self,file_path, data_list):\r\n with open(file_path, 'wb') as f:\r\n pickle.dump(data_list, 
f)\r\n\r\n def get_list_from_file(self, file_path):\r\n with open(file_path, 'rb') as f:\r\n data_list = pickle.load(f)\r\n\r\n return data_list\r\n\r\n def split_sentence_by_conjunction(self, sentence):\r\n sentence = sentence.lower()\r\n clauses = [sentence]\r\n matched_conjunctions = []\r\n all_indices = []\r\n for conjunction in self.conjunctions:\r\n if len(sentence.split(conjunction)) > 1:\r\n matched_conjunctions.append(conjunction)\r\n # TODO: fix the regex for to back to back to conjunctions\r\n iter = re.finditer(r\"(?:^|\\W)\"+conjunction.lower()+\"(?:$|\\W)\", sentence)\r\n indices = [m.start(0) for m in iter]\r\n # print indices\r\n all_indices = all_indices + indices\r\n\r\n all_indices = sorted(list(set(all_indices)))\r\n # temp_sentence = sentence\r\n for matched_conjunction in matched_conjunctions:\r\n # match with conjunction (whole words only)\r\n substrs = re.compile(r\"(?:^|\\W)\"+matched_conjunction.lower()+\"(?:^|\\W)\").split(sentence)\r\n sentence = '**$**'.join(substrs)\r\n\r\n clauses = filter(None, sentence.split('**$**'))\r\n index = 0\r\n for clause in clauses:\r\n clauses[index] = clause.strip()\r\n index += 1\r\n clause_info = {\r\n 'clause': clauses,\r\n 'indices': all_indices\r\n }\r\n return clause_info\r\n\r\n def get_only_comments_from_dataset(self, dataset_file_path):\r\n rows = self.read_from_csv(dataset_file_path)\r\n del rows[0]\r\n comments = []\r\n for row in rows:\r\n if len(row) > 2 and row[0] not in comments: # making comments list unique\r\n comments.append(row[0])\r\n\r\n return comments\r\n\r\n def get_unique_list_of_lists(self, data, labels):\r\n new_data = []\r\n new_labels = []\r\n index = 0\r\n for elem in data:\r\n if elem not in new_data:\r\n new_data.append(elem)\r\n new_labels.append(labels[index])\r\n index += 1\r\n\r\n unique_data = {\r\n 'data': new_data,\r\n 'labels': new_labels\r\n }\r\n\r\n return unique_data\r\n\r\n def get_dependency_by_relation(self, dependencies, relation):\r\n dependency_list = []\r\n for dependency in dependencies:\r\n if dependency[1] == relation:\r\n dependency_list.append(dependency)\r\n return dependency_list\r\n\r\n def get_dependency_by_word(self, dependencies, word):\r\n dependency_list = []\r\n for dependency in dependencies:\r\n if dependency[0][0] == word or dependency[2][0] == word:\r\n dependency_list.append(dependency)\r\n\r\n return dependency_list\r\n\r\n def strip_nonalnum_re(self, word):\r\n return re.sub(r\"^\\W+|\\W+$\", \"\", word)\r\n\r\n def get_segments_aspects_sentiments(self, dataset_file_path):\r\n segments = []\r\n aspects = []\r\n sentiments = []\r\n\r\n reviews = self.read_from_csv(dataset_file_path)\r\n segment_aspect_pairs = []\r\n\r\n for review in reviews:\r\n comment = review[0]\r\n comment_parts = comment.split('**$**')\r\n index = 1\r\n segments_per_review = []\r\n for comment_part in comment_parts:\r\n if 0 <= index < len(review) and review[index]:\r\n segments_per_review.append([comment_part, review[index]])\r\n index += 1\r\n\r\n segment_aspect_pairs = segment_aspect_pairs + segments_per_review\r\n for sg in segment_aspect_pairs:\r\n sentences = self.split_text_into_insentence(sg[0])\r\n if len(sentences) == 1:\r\n temp = sg[1].split(' ')\r\n if sg[1] == 'noise':\r\n sentiment = 'neutral'\r\n else:\r\n sentiment = temp[-1]\r\n del temp[-1]\r\n aspect = \" \".join(temp)\r\n segment = self.clean_up_text(sentences[0])\r\n if segment not in segments:\r\n segments.append(segment)\r\n aspects.append(aspect)\r\n sentiments.append(sentiment)\r\n data = {\r\n 'segments': 
segments,\r\n 'aspects': aspects,\r\n 'sentiments': sentiments\r\n }\r\n\r\n return data\r\n\r\n def clean_up_text(self, sentence):\r\n cleaned = re.sub(r'^( )+|^[^A-Za-z]+|\\.\\.+|\\,\\,+|(_x0085_)+|(-rrb)+|(%)|[^a-zA-Z0-9]+$', r'', sentence)\r\n return cleaned\r\n\r\n def normalise_aspect_classes(self, aspects):\r\n staff_attitude_and_professionalism_group = ['staff attitude and professionalism', 'communication']\r\n care_quality_group = ['care quality', 'process', 'waiting time']\r\n environment_group = ['food', 'environment', 'resource', 'parking']\r\n # other_group = ['noise', 'other']\r\n new_aspects = []\r\n for aspect in aspects:\r\n if aspect in staff_attitude_and_professionalism_group:\r\n new_aspects.append('staff attitude and professionalism')\r\n elif aspect in care_quality_group:\r\n new_aspects.append('care quality')\r\n elif aspect in environment_group:\r\n new_aspects.append('environment')\r\n else:\r\n new_aspects.append('other')\r\n\r\n return new_aspects\r\n\r\n def ngrams(self, sentence, n):\r\n tokens = sentence.split(' ')\r\n output = {}\r\n for i in range(len(tokens) - n + 1):\r\n g = ' '.join(tokens[i:i + n])\r\n output.setdefault(g, 0)\r\n output[g] += 1\r\n return output\r\n\r\n def get_grouped_aspects(self, training_dataset_file_path):\r\n data = self.get_segments_aspects_sentiments(training_dataset_file_path)\r\n segments = data['segments']\r\n aspects = data['aspects']\r\n\r\n grouped_by_aspects = {}\r\n for index in range(0, len(segments)):\r\n if aspects[index] in grouped_by_aspects.keys() and segments[index] not in grouped_by_aspects[aspects[index]]:\r\n grouped_by_aspects[aspects[index]].append(segments[index])\r\n elif aspects[index] not in grouped_by_aspects.keys():\r\n grouped_by_aspects[aspects[index]] = [segments[index]]\r\n\r\n return grouped_by_aspects\r\n\r\n def get_ngrams(self, sent):\r\n sent = re.sub(r'[^A-Za-z ]+', r'', sent.lower().decode('utf-8'))\r\n words = [tok for tok in nltk.word_tokenize(sent) if tok not in set(stopwords.words('english')) and len(tok) > 2]\r\n my_bigrams = ['_'.join(gram) for gram in list(ngrams(words, 2))]\r\n my_trigrams = ['_'.join(gram) for gram in list(ngrams(words, 3))]\r\n\r\n return words + my_bigrams + my_trigrams\r\n\r\n def convert_list_to_utf8(self, data):\r\n converted_data = data\r\n if len(data)>0 and isinstance(data[0], str):\r\n converted_data = [segment.decode('utf-8', 'ignore') for segment in data]\r\n return converted_data\r\n\r\n def save_list_as_csv(self, data_list, file_path):\r\n with open(file_path, 'wb') as resultFile:\r\n wr = csv.writer(resultFile, dialect='excel')\r\n wr.writerows(data_list)\r\n\r\n def get_pos_tags(self, sentences):\r\n tokens = nltk.word_tokenize(sentences)\r\n pos_tags = nltk.pos_tag(tokens)\r\n\r\n return pos_tags\r\n\r\n def tokenize(self, sentence):\r\n from nltk.tokenize import TweetTokenizer\r\n tknzr = TweetTokenizer()\r\n tokens = tknzr.tokenize(sentence)\r\n\r\n return tokens\r\n\r\n def get_lemma(self, sentences):\r\n lemmatized = {}\r\n tokens = self.tokenize(sentences)\r\n for token in tokens:\r\n results = self.wordnet_lemmatizer.lemmatize(token)\r\n lemmatized[token] = results\r\n\r\n return lemmatized\r\n\r\n def merge_classes(self, aspects):\r\n group_1 = ['staff attitude and professionalism', 'communication']\r\n group_2 = ['care quality', 'resource', 'process']\r\n group_3 = ['environment', 'food', 'parking']\r\n # group_3 = ['environment']\r\n group_4 = ['waiting time']\r\n group_5 = ['other', 'noise']\r\n # group_6 = ['food']\r\n # group_7 = 
['parking']\r\n # groups = [group_1, group_2, group_3, group_4, group_5, group_6, group_7]\r\n groups = [group_1, group_2, group_3, group_4, group_5]\r\n new_aspects = []\r\n for aspect in aspects:\r\n for group in groups:\r\n if aspect in group:\r\n new_aspects.append(group[0]) # all members will be replaced by the first member of the group\r\n break\r\n return new_aspects" }, { "alpha_fraction": 0.5806691646575928, "alphanum_fraction": 0.5895910859107971, "avg_line_length": 36.33333206176758, "blob_id": "f48f7fc60035182ff313485f84690ea1eb7d95ad", "content_id": "8a104eba3d3f71e81b20deea09d0d145bafb270c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1345, "license_type": "permissive", "max_line_length": 122, "num_lines": 36, "path": "/system_b/nlp.preprocessing.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "\n\nnlp.preprocess <- function(comments, training_terms=NULL) {\n \n #NLP pre-processing\n corpus <- Corpus(VectorSource(comments))\n corpus.processed <- corpus %>% \n tm_map(content_transformer(tolower)) %>% \n tm_map(removePunctuation) %>%\n tm_map(stemDocument) %>%\n tm_map(removeNumbers) %>%\n tm_map(removeWords, stopwords(kind=\"en\")) %>%\n tm_map(stripWhitespace)\n \n \n bitrigramtokeniser <- function(x, n) {\n RWeka:::NGramTokenizer(x, RWeka:::Weka_control(min = 1, max = 3))\n }\n \n if (is.null(training_terms)) {\n control.options = list(wordLengths=c(2, Inf),\n tokenize = bitrigramtokeniser,\n weighting = function(x) weightTfIdf(x, normalize = FALSE),\n bounds=list(global=c(floor(length(corpus.processed)*0.01), floor(length(corpus.processed)*.8)))\n )\n }\n else {\n control.options = list( dictionary=training_terms,\n wordLengths=c(2, Inf),\n tokenize = bitrigramtokeniser,\n weighting = function(x) weightTfIdf(x, normalize = FALSE),\n bounds=list(global=c(floor(length(corpus.processed)*0.01), floor(length(corpus.processed)*.8)))\n )\n }\n dtm <- DocumentTermMatrix(corpus.processed, control = control.options)\n \n return (dtm)\n}" }, { "alpha_fraction": 0.606951892375946, "alphanum_fraction": 0.606951892375946, "avg_line_length": 27.794872283935547, "blob_id": "2dd41718df76f416931a0d98a030750387ccb567", "content_id": "c98b7d7e627f01e28a1f92d7ea3e04592de4382e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1122, "license_type": "permissive", "max_line_length": 68, "num_lines": 39, "path": "/system_c/scripts/utilities.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "import csv\nfrom nltk.stem import WordNetLemmatizer\n\n\nclass Utilities(object):\n def __init__(self):\n self.sentiment_classes = ['negative', 'neutral', 'positive']\n self.wordnet_lemmatizer = WordNetLemmatizer()\n\n def read_from_csv(self, file_path):\n data = []\n\n with open(file_path, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',')\n for row in spamreader:\n data.append(row)\n\n return data\n\n def save_list_as_csv(self, data_list, file_path):\n with open(file_path, 'wb') as resultFile:\n wr = csv.writer(resultFile, dialect='excel')\n wr.writerows(data_list)\n\n def tokenize(self, sentence):\n from nltk.tokenize import TweetTokenizer\n tknzr = TweetTokenizer()\n tokens = tknzr.tokenize(sentence)\n\n return tokens\n\n def get_lemma(self, sentences):\n lemmatized = {}\n tokens = self.tokenize(sentences)\n for token in tokens:\n results = self.wordnet_lemmatizer.lemmatize(token)\n lemmatized[token] = 
results\n\n        return lemmatized" }, { "alpha_fraction": 0.5872340202331543, "alphanum_fraction": 0.5952537059783936, "avg_line_length": 34.312137603759766, "blob_id": "06a0d66dbcc5871f525de79ca09345eaf7aaa264", "content_id": "789da077b4f8548bbef0c7f6c464ac668154a12b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6110, "license_type": "permissive", "max_line_length": 124, "num_lines": 173, "path": "/system_a/scripts/evaluation.py", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "from segmenter import Segmenter\nfrom sklearn import tree\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.metrics import classification_report as clsr\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.model_selection import ShuffleSplit\nfrom collections import Counter\nfrom sklearn import svm\nfrom sklearn.neural_network import MLPClassifier\nimport numpy as np\nimport itertools\nfrom sklearn import metrics\nfrom utilities import Utilities\nfrom processing import Processor\n\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix\n\n\nclass Evaluator:\n    def __init__(self, data_file):\n        self.utilities = Utilities()\n        self.data_file = data_file\n        self.processor = Processor({'training_file': data_file})\n        self.segmenter = self.processor.load_segmenter()\n        self.segments = []\n        self.aspects = []\n        self.sentiments = []\n\n    def calculate_evaluatio_matrices(self, labels, result):\n        positives = 0\n        negatives = 0\n\n        for label in labels:\n            if label == 1:\n                positives += 1\n            elif label == 0:\n                negatives += 1\n\n        evaluation_info = {\n            'positives': positives,\n            'negatives': negatives,\n            # 'precision': \"%.3f\" % precision_score(labels, result),\n            # 'recall': \"%.3f\" % recall_score(labels, result),\n            'accuracy': \"%.3f\" % accuracy_score(labels, result),\n            'f1_score': \"%.3f\" % f1_score(labels, result)\n        }\n\n        return evaluation_info\n\n    def evaluate_segmentation(self):\n        dataset = self.segmenter.features_and_labels\n        all_data_transformed = self.segmenter.transform_categorical_numerical(dataset['data'], 'train')\n        all_data_unique = self.utilities.get_unique_list_of_lists(all_data_transformed, dataset['labels'])\n\n        # model = SGDClassifier()\n        model = svm.SVC(kernel='linear')\n        # model = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes = (5, 2), random_state = 1) # Neural Network\n        # model = MultinomialNB()\n        # model = RandomForestClassifier(random_state=5)\n        # model = tree.DecisionTreeClassifier(random_state=0)\n\n        X = all_data_unique['data']\n        y = all_data_unique['labels']\n\n        f1_scores = cross_val_score(model, X, y, scoring='f1_micro', cv=5)\n        print [round(score, 3) for score in f1_scores.tolist()]\n        print(\"F1-score: %0.4f\" % (f1_scores.mean()))\n\n    def get_segments_gold_data(self):\n        rows = self.utilities.read_from_csv(self.data_file)\n\n        segments = []\n        aspects = []\n        sentiments = []\n        for row in rows:\n            comment = row[0]\n\n            comment_parts = comment.split('**$**')\n            for index, comment_part in enumerate(comment_parts):\n                segment = self.utilities.clean_up_text(comment_part)\n                segments.append(segment)\n                aspect = row[index + 1]\n\n                if len(aspect) < 1:\n                    aspect = 'other neutral'\n                elif aspect == 'noise':\n                    aspect = 'noise neutral'\n\n                aspect_cls = aspect.rsplit(' ', 1)[0]\n                sentiment_cls = aspect.rsplit(' ', 1)[1]\n\n\n                aspects.append(aspect_cls)\n                sentiments.append(sentiment_cls)\n\n        
data = {\n 'segments': segments,\n 'aspects': aspects,\n 'sentiments': sentiments\n }\n\n return data\n\n def evaluate_classifier(self, classifier, X, y, scoring='f1_micro'):\n # five fold cross-validation, test size 20%\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=11)\n scores = cross_val_score(classifier, X, y, cv = cv, scoring = scoring)\n\n print(sum(scores)/float(len(scores)))\n\n # # Begin evaluation\n # X_train, X_test, y_train, y_test = tts(X, y, test_size=0.3, random_state=11)\n # model = classifier.fit(X_train, y_train)\n #\n # y_pred = model.predict(X_test)\n #\n # # *** save info for error analysis\n # errors = []\n # for index in range(0, len(X_test)):\n # if y_test[index] != y_pred[index]:\n # errors.append(\"\\\"\"+X_test[index] +\"\\\",\\\"\"+ y_test[index] +\"\\\",\\\"\"+ y_pred[index]+\"\\\"\")\n #\n # str_out = \"\\n\".join(errors)\n # self.utilities.write_content_to_file('aspect_errors.csv', str_out)\n #\n #\n # print(clsr(y_test, y_pred))\n\n\n def evaluate_aspect_extraction(self, X, y, merged=True):\n if merged is True:\n y = self.processor.ml_asp_classifier.merge_classes(y)\n\n self.evaluate_classifier(self.processor.ml_asp_classifier, X, y)\n\n def transform_sentiment_classes(self, sentiment_names):\n sentiment_values = []\n for sentiment_name in sentiment_names:\n sentiment_values.append(self.utilities.sentiment_classes.index(sentiment_name))\n\n return sentiment_values\n\n def evaluate_sentiment_detection(self, scoring='f1_micro', merged=True):\n\n data = self.get_segments_gold_data()\n X = data['segments']\n print(len(X))\n y = data['sentiments']\n\n if merged:\n y = self.processor.ml_snt_classifier.merge_classes(y)\n\n self.evaluate_classifier(self.processor.ml_snt_classifier, X, y, scoring=scoring)\n\n\n def get_category_counts(self, cat_type='aspect', merged=True):\n data = self.get_segments_gold_data()\n\n if cat_type == 'aspect':\n categories = data['aspects']\n elif cat_type == 'sentiment':\n categories = data['sentiments']\n else:\n return \"Incorrect category type.\"\n\n if merged is True and cat_type == 'aspect':\n categories = self.utilities.merge_classes(categories)\n elif merged is True and cat_type == 'sentiment':\n categories = self.processor.ml_snt_classifier.merge_classes(categories)\n\n counter = Counter(categories)\n\n return counter\n\n" }, { "alpha_fraction": 0.6396468877792358, "alphanum_fraction": 0.6508828401565552, "avg_line_length": 29.414634704589844, "blob_id": "8905269c7baf81f6885f7fc89bc2df3eb7b66ce2", "content_id": "4348cf11b04fc6008536350ad10e9beb735bf9a6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1246, "license_type": "permissive", "max_line_length": 83, "num_lines": 41, "path": "/system_b/nb.classifier.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "nb.train <- function(dtm.train, labels.train, LABEL) {\n # Apply the convert_count function to get final training\n trainNB <- apply(dtm.train, 2, convert_count)\n \n # train model\n classifier <- naiveBayes(trainNB, labels.train, laplace = 1)\n \n # save model\n save(classifier, file = paste0(models.path, LABEL, \".model\"))\n \n return (classifier)\n}\n\nnb.predict <- function(dtm.pred, LABEL, response.type = 'class') {\n testNB <- apply(dtm.pred, 2, convert_count)\n \n classifier <- load.model(LABEL)\n if(response.type == 'proba') {\n #spred <- predict(classifier, newx=test_mat, s = \"lambda.min\", type=\"response\")\n \n } else {\n spred <- predict(classifier, 
newdata=testNB) \n }\n \n return(spred)\n}\n\nnb.evaluate <- function(dtm.train, labels.train, dtm.test, labels.test, LABEL) {\n nb.train(dtm.train, labels.train, LABEL)\n spred <- nb.predict(dtm.test, LABEL)\n \n conf.mat <- confusionMatrix(spred, labels.test, positive = LABEL)\n header<-paste(\"P\", \"R\", \"F1\\n\", sep = '\\t')\n precision <- round(conf.mat$byClass[5] ,5)\n recall <- round(conf.mat$byClass[6] ,5)\n f1.score <- round(conf.mat$byClass[7] ,5)\n content <- paste(precision, recall, f1.score , sep = '\\t')\n cat (header)\n cat (content)\n #cat(paste(f1.score, \",\"))\n}" }, { "alpha_fraction": 0.653342068195343, "alphanum_fraction": 0.6625163555145264, "avg_line_length": 32.911109924316406, "blob_id": "fd7ff1c82cd8b25d8be45c75876ba2e5fe448a6a", "content_id": "399ff232ffa2070cf3bca513b43efa164a6e2b61", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1526, "license_type": "permissive", "max_line_length": 105, "num_lines": 45, "path": "/system_b/enet.classifier.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "enet.train <- function(dtm.train, labels.train, LABEL) {\n train_mat <- chisqTwo(dtm.train, labels.train)\n \n dictionary <- as.data.frame(colnames(train_mat))\n enet.terms.path <<- 'dictionary.enet.Rda'\n saveRDS(dictionary, file=enet.terms.path)\n \n classifier \t<- cv.glmnet(train_mat, labels.train, family = \"binomial\", nfolds=10, type.measure=\"class\")\n \n # save model\n save(classifier, file = paste0(models.path, LABEL, \".model\"))\n \n return (classifier)\n}\n\nenet.predict <- function(dtm.pred, LABEL, response.type = 'class') {\n training.dict <- readRDS(enet.terms.path)\n training.terms <<- as.vector(training.dict[, 1])\n \n test_mat <- testmat(training.terms, as.matrix(dtm.pred))\n \n classifier <- load.model(LABEL)\n if(response.type == 'proba')\n spred <- predict(classifier, newx=data.matrix(test_mat), s = \"lambda.min\", type=\"response\")\n else {\n spred <- predict(classifier, newx=data.matrix(test_mat), s = \"lambda.min\", type=\"class\") \n }\n \n return(spred)\n}\n\nenet.evaluate <- function(dtm.train, labels.train, dtm.test, labels.test, LABEL) {\n enet.train(dtm.train, labels.train, LABEL)\n spred <- enet.predict(dtm.test, LABEL)\n \n conf.mat <- confusionMatrix(spred, labels.test, positive = LABEL)\n header<-paste(\"P\", \"R\", \"F1\\n\", sep = '\\t')\n precision <- round(conf.mat$byClass[5] ,5)\n recall <- round(conf.mat$byClass[6] ,5)\n f1.score <- round(conf.mat$byClass[7] ,5)\n content <- paste(precision, recall, f1.score , sep = '\\t')\n cat(header)\n cat(content)\n #cat(paste(f1.score, \",\"))\n}\n" }, { "alpha_fraction": 0.6030871272087097, "alphanum_fraction": 0.6097022891044617, "avg_line_length": 30.20689582824707, "blob_id": "e3666dc66c62abe53fbe645bbf4258c2931335e0", "content_id": "0f8c0b7697a2b450b3987cdacc70c8ab2a929086", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 907, "license_type": "permissive", "max_line_length": 108, "num_lines": 29, "path": "/system_b/apply.dictionaries.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "apply.dictionaries <- function(comments, pred) {\n food.dictionary <- list('food', 'canteen', 'canten', 'coffee', 'cofee', 'coffe', 'tea', 'drink', 'drinks')\n parking.dictionary = list('car park', 'car-park', 'carpark', 'parking', 'bicycle')\n \n all.aspects = list()\n index = 1\n \n for (comment in comments) {\n 
comment.aspects <- list()\n tokens <- NGramTokenizer(comment, Weka_control(min = 1, max = 2))\n food.diff <- setdiff(food.dictionary, tokens)\n parking.diff <- setdiff(parking.dictionary, tokens)\n \n \n if (length(food.dictionary) != length(food.diff)) {\n comment.aspects[[length(comment.aspects) + 1]] <- 'food'\n }\n \n if (length(parking.dictionary) != length(parking.diff)) {\n comment.aspects[[length(comment.aspects) + 1]] <- 'parking'\n }\n \n all.aspects[[index]] <- comment.aspects\n index <- index + 1\n \n }\n \n return (all.aspects)\n}\n " }, { "alpha_fraction": 0.6724821329116821, "alphanum_fraction": 0.6772403120994568, "avg_line_length": 37.212120056152344, "blob_id": "11b98215833cceb06f673c320a9fc888c788e3ee", "content_id": "43b45f61f01d96a9de5df8aedad8f8da0baf154c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1261, "license_type": "permissive", "max_line_length": 168, "num_lines": 33, "path": "/system_b/load.data.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#OBS: replace data PATH\n\n#Load data: uncomment the relevant dataset\n#Assumption column one: text/comment, column two: category/class\n\n\n# MMHSCT/SRFT\n\nload.data <- function(file.path){\n data <- read.csv(file.path, sep = \",\", header = FALSE)\n #names(data) <- c(\"comment\", \"care quality\", \"staff attitude and professionalism\", \"waiting time\", \"environment\", \"other\")\n \n #data <- read.csv(\"example_data.csv\", sep = \",\", header = FALSE)\n names(data) <- c(\"comment\")\n return(data)\n}\n\nload.training.data <- function(file.path) {\n data <- read.csv(file.path, sep = \",\", header = FALSE)\n #data <- read.csv(\"mmhsct_segments.csv\", sep = \",\", header = FALSE)\n names(data) <- c(\"comment\", \"topic\")\n \n return(data)\n}\n\nload.test.data <- function(){\n # Comment-level dataset\n #test_data <- read.csv(\"/home/hmayun/PycharmProjects/create-dataset-r/evaluation_datasets/r_comment_level_datasets/r_mmhsct_test_33.csv\", sep = \",\", header = FALSE)\n test_data <- read.csv(\"/home/hmayun/PycharmProjects/create-dataset-r/evaluation_datasets/r-comment-level-datasets-2/r_mmhsct_test_111.csv\", sep = \",\", header = FALSE)\n names(test_data) <- c(\"comment\", \"care quality\", \"staff attitude and professionalism\", \"waiting time\", \"environment\", \"other\")\n \n return(test_data)\n}\n" }, { "alpha_fraction": 0.6800785064697266, "alphanum_fraction": 0.6869480013847351, "avg_line_length": 31.90322494506836, "blob_id": "a1e92906d370f3902cd8f35e6aaa48886334749f", "content_id": "30fef672ff8ab33417bdce771c726d78b88e4780", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1019, "license_type": "permissive", "max_line_length": 83, "num_lines": 31, "path": "/system_b/run.evaluation.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "run.evaluation <-function(training.data, test.data, LABEL) {\n \n set.seed(111)\n \n labels.train <- as.factor(new.enet.corpus(training.data, LABEL)) \n labels.test <- as.factor(new.enet.corpus(test.data, LABEL)) \n \n all_data = rbind.fill(training.data, test.data)\n \n dataset_size = length(all_data$comment)\n train_data_count = length(training.data$comment)\n \n dtm = nlp.preprocess(all_data$comment)\n \n dtm.train <- dtm[1:train_data_count, ]\n dtm.test <- dtm[(train_data_count+1):dataset_size, ]\n \n \n #send comments only to naive bayes (special case)\n cat(\"\\nNaive Bayes\\n\")\n 
nb.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL)\n \n cat(\"\\nElastic net: logistic regression L1/L2\\n\")\n enet.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL)\n \n cat(\"\\nSVM - Radial Basis kernel\\n\")\n svm.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL, kernel='rbf')\n \n cat(\"\\nSVM - linear kernel\\n\")\n svm.evaluate(dtm.train, labels.train, dtm.test, labels.test, LABEL)\n}" }, { "alpha_fraction": 0.6490179300308228, "alphanum_fraction": 0.6584116220474243, "avg_line_length": 29.05128288269043, "blob_id": "a2ced644a09d1ac44bc440fc859c9868f87805d5", "content_id": "3a8d2e419f2cca697b52495d7562929f35189853", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1171, "license_type": "permissive", "max_line_length": 121, "num_lines": 39, "path": "/system_b/run.naivebayes.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "nb.train <- function(dtm.train, labels.train, LABEL) {\n \n # Apply the convert_count function to get final training data\n trainNB <- apply(dtm.train, 2, convert_count)\n \n # train model\n classifier <- naiveBayes(trainNB, labels.train, laplace = 1)\n \n # save model\n save(classifier, file = paste0(models.path, LABEL, \".model\"))\n \n return (classifier)\n}\n\nnb.predict <- function(dtm.pred, LABEL, response.type = 'class') {\n \n testNB <- apply(dtm.test, 2, convert_count)\n \n classifier <- load.model(LABEL)\n if(response.type == 'proba')\n #spred <- predict(classifier, newx=test_mat, s = \"lambda.min\", type=\"response\")\n else {\n spred <- predict(classifier, newdata=testNB) \n }\n \n return(spred)\n}\n\nnb.evaluate <- function(dtm.train, labels.train, dtm.test, labels.test, LABEL) {\n nb.train(dtm.train, labels.train, LABEL)\n spred <- nb.predict(dtm.test, LABEL)\n \n conf.mat <- confusionMatrix(spred, labels.test, positive = LABEL)\n cat(\"\\nNaive Bayes\\n\")\n header<-paste(\"P\", \"R\", \"F1\\n\", sep = '\\t')\n content<-paste(round(conf.mat$byClass[5] ,5), round(conf.mat$byClass[6] ,5), round(conf.mat$byClass[7] ,5), sep = '\\t')\n cat(header)\n cat(content)\n}" }, { "alpha_fraction": 0.592351496219635, "alphanum_fraction": 0.6175752878189087, "avg_line_length": 40, "blob_id": "205b4fecd2a87575829416f7124dfd393873ba52", "content_id": "6477d6a14f023b81832484aaa6b90ff3cefc2127", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1229, "license_type": "permissive", "max_line_length": 89, "num_lines": 30, "path": "/system_b/enet.functions.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "# function required for enet\nchisqTwo <- function(dtm, labels, n_out=2000){\n mat \t\t<- as.matrix(dtm)\n cat1\t\t<- \tcolSums(mat[labels==T,])\t \t# total number of times phrase used in cat1 \n cat2\t\t<- \tcolSums(mat[labels==F,])\t \t# total number of times phrase used in cat2 \n n_cat1\t\t<- \tsum(mat[labels==T,]) - cat1 \t# total number of phrases in soft minus cat1\n n_cat2\t\t<- \tsum(mat[labels==F,]) - cat2 \t# total number of phrases in hard minus cat2\n \n num \t\t<- (cat1*n_cat2 - cat2*n_cat1)^2\n den \t\t<- (cat1 + cat2)*(cat1 + n_cat1)*(cat2 + n_cat2)*(n_cat1 + n_cat2)\n chisq \t\t<- num/den\n \n chi_order\t<- chisq[order(chisq)][1:n_out] \n mat \t\t<- mat[, colnames(mat) %in% names(chi_order)]\n \n}\n\ntestmat <- function(train_mat_cols, test_mat){\t\n #train_mat_cols <- colnames(train_mat); test_mat <- as.matrix(test_dtm)\n test_mat 
\t<- test_mat[, colnames(test_mat) %in% train_mat_cols]\n \n miss_names \t<- train_mat_cols[!(train_mat_cols %in% colnames(test_mat))]\n if(length(miss_names)!=0){\n colClasses <- rep(\"numeric\", length(miss_names))\n df \t\t\t<- read.table(text = '', colClasses = colClasses, col.names = miss_names)\n df[1:nrow(test_mat),] <- 0\n test_mat \t<- cbind(test_mat, df)\n }\n as.matrix(test_mat)\n}" }, { "alpha_fraction": 0.6270928382873535, "alphanum_fraction": 0.642313539981842, "avg_line_length": 31.850000381469727, "blob_id": "4953da6f2b917b244a35b67f1d7162b01f507771", "content_id": "29999a4b7d9daf38da89f2b34b3c5c85afd23987", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1971, "license_type": "permissive", "max_line_length": 139, "num_lines": 60, "path": "/system_b/svm.classifier.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "svm.train <- function(dtm.train, labels.train, LABEL, kernel.name='linear', c.value=68.7) {\n #c.value = 68.7 # for MMHSCT dataset\n #c.value = 79.8 # for SRFT dataset\n \n if (kernel.name == 'rbf') {\n kernel.package.name <- 'rbfdot'\n }\n else {\n kernel.package.name <- 'vanilladot'\n }\n \n classifier \t<- ksvm(as.matrix(dtm.train), labels.train, type=\"C-svc\", kernel=kernel.package.name, C=c.value, scaled=c(), prob.model=TRUE)\n \n # save model\n save(classifier, file = paste0(models.path, LABEL, \".model\"))\n \n return (classifier)\n}\n\nsvm.predict <- function(dtm.pred, LABEL, response.type='class') {\n classifier <- load.model(LABEL)\n if (response.type == 'proba') {\n spred \t<- predict(classifier, dtm.pred, type=(\"probabilities\"))\n }\n else { # class\n spred \t<- predict(classifier, dtm.pred)\n }\n return(spred)\n}\n\nsvm.evaluate <- function(dtm.train, labels.train, dtm.test, labels.test, LABEL, kernel='linear') {\n svm.train(dtm.train, labels.train, LABEL, kernel)\n spred <- svm.predict(dtm.test, LABEL)\n \n conf.mat <- confusionMatrix(spred, labels.test, positive = LABEL)\n header<-paste(\"P\", \"R\", \"F1\\n\", sep = '\\t')\n precision <- round(conf.mat$byClass[5] ,5)\n recall <- round(conf.mat$byClass[6] ,5)\n f1.score <- round(conf.mat$byClass[7] ,5)\n content <- paste(precision, recall, f1.score , sep = '\\t')\n cat (header)\n cat (content)\n #cat(paste(f1.score, \",\"))\n}\n\ngrid.search <- function(df) {\n getParamSet(\"classif.ksvm\")\n ksvm <- makeLearner(\"classif.ksvm\", predict.type = \"response\")\n discrete_ps = makeParamSet(\n makeNumericParam(\"C\", lower = 1, upper = 101, trafo = function(x) x+1 ),\n makeNumericParam(\"sigma\", lower = 1, upper = 101)\n )\n ctrl = makeTuneControlGrid()\n rdesc = makeResampleDesc(\"CV\", iters = 5)\n \n classif.task = makeClassifTask(data = df, target = 'class')\n \n res = tuneParams(\"classif.ksvm\", task = classif.task, resampling = rdesc,\n par.set = discrete_ps, control = ctrl)\n}\n" }, { "alpha_fraction": 0.7040410041809082, "alphanum_fraction": 0.7114399671554565, "avg_line_length": 29.789474487304688, "blob_id": "2750b7e47e7e79e28faf6940c3dde932de44ba83", "content_id": "495e691a5a1d7ac6b969b9f22dc101b63c00ecc9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1757, "license_type": "permissive", "max_line_length": 109, "num_lines": 57, "path": "/system_b/main.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#The main script to run experiments.\n\n# hide warnings\noptions(warn = 
-1)\n\nsource(\"load.libs.R\")\nsource(\"convert.count.R\")\nsource(\"load.data.R\")\nsource(\"new.corpus.R\")\nsource(\"clean.data.R\")\nsource(\"enet.functions.R\")\nsource(\"new.enet.corpus.R\")\nsource(\"apply.dictionaries.R\")\nsource(\"load.models.R\")\nsource(\"nlp.preprocessing.R\")\nsource(\"svm.classifier.R\")\nsource(\"enet.classifier.R\")\nsource(\"nb.classifier.R\")\nsource(\"run.evaluation.R\")\n#source(\"run.training.R\")\nsource(\"run.prediction.R\")\nsource(\"run.srft.prediction.R\")\nload.libs()\n\n\noption_list = list(\n make_option(c(\"--train\"), type=\"character\", help=\"--train file_path\", metavar=\"character\"),\n make_option(c(\"--data\"), type=\"character\", help=\"--data file_path\", metavar=\"character\"),\n make_option(c(\"--output\"), type=\"character\", help=\"--output file_path\", metavar=\"character\"),\n make_option(c(\"--dataset\"), type=\"character\", default=\"site-a\", help=\"--dataset type\", metavar=\"character\")\n); \n\nopt_parser = OptionParser(option_list=option_list);\nopt = parse_args(opt_parser);\n\ntraining.file.path <- opt$train #\"files/r_mmhsct_train_111.csv\"\ndata.file.path <- opt$data #\"files/r_mmhsct_test_111.csv\"\noutput.path <- opt$output #\"files/mmhsct_output_confidence_111.csv\"\ndataset.type <- opt$dataset #'mmhsct'\n\ntraining.data <-load.training.data(training.file.path)\nprediction.data <-load.data(data.file.path)\n\ntraining.data$comment <- clean.data(training.data)\nprediction.data$comment <- clean.data(prediction.data)\nset.seed(111)\n\nmodels.path <- 'combine.models/'\nif (!dir.exists(models.path)) {\n dir.create(models.path)\n}\n\nif (dataset.type == 'site-b') {\n run.srft.prediction(training.data, prediction.data, output.path)\n} else {\n run.prediction(training.data, prediction.data, output.path)\n}\n\n\n" }, { "alpha_fraction": 0.6703979969024658, "alphanum_fraction": 0.676616907119751, "avg_line_length": 25.799999237060547, "blob_id": "ec711e4b4c96f2370d4e69260e38e7c7cd58263a", "content_id": "4b1e28231780171c5480d5d772b826c26096334e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 804, "license_type": "permissive", "max_line_length": 76, "num_lines": 30, "path": "/system_b/load.models.R", "repo_name": "hkayesh/depend_clean", "src_encoding": "UTF-8", "text": "#function to load models\n\n#model_dir = \"mmhsct.models/\"\n#model_dir = \"srft.models/\"\nmodel_dir = \"combine.models/\"\n\n\nload.models <- function(){\n #load MMHSCT ml-models\n load(file = paste0(model_dir, \"environment.model\"))\n assign(\"env.model\", classifier, envir = .GlobalEnv) \n \n \n load(file = paste0(model_dir, \"waiting time.model\"))\n assign(\"wt.model\", classifier, envir = .GlobalEnv)\n \n \n load(file = paste0(model_dir, \"staff attitude and professionalism.model\"))\n assign(\"saap.model\", classifier, envir = .GlobalEnv)\n \n \n load(file = paste0(model_dir, \"care quality.model\"))\n assign(\"cq.model\", classifier, envir = .GlobalEnv)\n}\n\nload.model <- function(LABEL) {\n load(file = paste0(model_dir, LABEL, \".model\"))\n assign(\"cls.model\", classifier, envir = .GlobalEnv)\n return(cls.model)\n}\n" } ]
31
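The main.R entry point in the record above builds a small optparse CLI with --train, --data, --output, and --dataset flags, and routes --dataset site-b to run.srft.prediction() while any other value falls through to run.prediction(). As a minimal sketch of driving that script from Python: the file paths below are hypothetical, only the flag names and the site-b value come from main.R, and Rscript is assumed to be on the PATH.

```python
import subprocess

# Invoke the R pipeline defined in system_b/main.R via its optparse flags.
# All CSV paths are placeholders; substitute your own files.
subprocess.run(
    [
        "Rscript", "system_b/main.R",
        "--train", "files/train.csv",    # hypothetical training CSV
        "--data", "files/predict.csv",   # hypothetical data to classify
        "--output", "files/output.csv",  # hypothetical output path
        "--dataset", "site-b",           # routes to run.srft.prediction()
    ],
    check=True,  # raise if the R process exits non-zero
)
```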
JeonghyunKo/regression_pjt_jeju
https://github.com/JeonghyunKo/regression_pjt_jeju
e7734c60151ad9fabb52aeffdc431ca4e19c35a2
8bcfa0f963744c1cd75e968da6b254d3385365a4
c0974becfec4c16e5029acf34b5c110cbd80413f
refs/heads/main
2023-05-06T07:12:11.200806
2021-06-08T07:11:55
2021-06-08T07:11:55
369,470,396
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45723623037338257, "alphanum_fraction": 0.4594152867794037, "avg_line_length": 35.70000076293945, "blob_id": "67785bdfa6096bd80157a7f3313e3007aeee7612", "content_id": "2799c5be787f8bbf18728760cf202971dad7131c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9557, "license_type": "permissive", "max_line_length": 128, "num_lines": 150, "path": "/JejuRegion.py", "repo_name": "JeonghyunKo/regression_pjt_jeju", "src_encoding": "UTF-8", "text": "# 이 파일은 카카오 api로 가져온 버스 정류소의 주소를 기준으로, 지역을 그룹핑하는 함수를 담고 있습니다 \n# 읍/면/동/리 를 지역 특성에 따라 권역으로 묶습니다 \n\n# 전체 데이터 프레임 중 우도, 추자도는 제외하는 함수 입니다. \ndef delisland(df, colname) :\n \"\"\"\n 사용 중인 데이터프레임과, \n 데이터프레임에서 읍/면/동/리 정보를 담고 있는 column의 이름을\n parameter로 받습니다. \n 우도, 추자도를 제외한 데이터프레임을 반환합니다. \n \"\"\" \n cowdo = df[df[colname] == '우도면']\n dancedo = df[df[colname] == '추자면']\n island = pd.concat([cowdo, dancedo], axis=0)\n df = df[df.index.isin(island.index) == False]\n\n\n# 가장 하위 권역으로 묶습니다 \ndef region1(dong): \n \"\"\"\n 읍/면/동/리의 정보를 받아서 \n 입력한 읍/면/동/리가 속하는 하위 권역의 정보를 반환합니다.\n pandas.DataFrame에서 apply 메서드를 사용하기 위한 함수입니다. \n \"\"\" \n 구제주_탑동 = ['건입동', '일도일동', '삼도이동']\n 구제주 = ['일도이동', '삼도일동', '이도일동', '이도이동', '도남동']\n 용담해안도로 = ['용담일동', '용담이동', '용담삼동', '도두일동', '도두이동', '이호일동', '이호이동']\n 제주시_버스터미널 = ['오라일동']\n 구제주_오라동 = ['오라이동' , '오라삼동']\n 아라동 = ['아라일동', '아라이동']\n 첨단과학기술단지 = ['영평동', '월평동']\n 삼화지구 = ['화북일동', '화북이동', '삼양일동', '삼양이동', '삼양삼동', '도련일동', '도련이동']\n 신제주 = ['연동', '노형동']\n 외도지구 = ['외도일동', '외도이동', '도평동', '내도동']\n 봉개 = ['봉개동']\n 제주시_중산간 = ['오등동', '회천동', '용강동']\n 함덕 = ['신촌리', '함덕리', '북촌리', '조천리']\n 조천_중산간 = ['와흘리', '대흘리', '와산리', '선흘리']\n 김녕 = ['동복리', '김녕리']\n 월정 = ['월정리', '행원리', '한동리', '평대리']\n 세화 = ['세화리', '상도리', '하도리', '종달리']\n 구좌_중산간 = ['덕천리', '송당리']\n 남조로 = ['교래리', '가시리', '수망리']\n 성산일출봉_섭지코지 = ['시흥리', '오조리', '성산리', '고성리', '온평리']\n 성산_중산간 = ['수산리', '난산리']\n 성산 = ['신천리', '신풍리', '삼달리', '신산리']\n 성읍 = ['성읍리']\n 표선해수욕장 = ['하천리', '표선리']\n 표선 = ['토산리']\n 남원 = ['신흥리', '태흥리', '남원리', '의귀리']\n 남원_중산간 = ['한남리']\n 위미 = ['위미리', '하례리', '신례리']\n 서귀포_구시가지 = ['동홍동', '서홍동', '중앙동', '서귀동', '천지동']\n 서귀포_신시가지 = ['대륜동', '강정동', '서호동']\n 서귀포시_동부 = ['토평동', '신효동', '호근동', '보목동', '하효동']\n 서귀포시_중산간 = ['영남동', '하원동', '상효동', '회수동', '도순동']\n 서귀포시_법환포구 = ['법환동']\n 중문 = ['중문동', '색달동', '대포동', '상예동', '하예동']\n 사계 = ['창천리', '감산리', '화순리', '사계리', '덕수리']\n 안덕_중산간 = ['광평리', '상창리', '상천리']\n 안덕_동서광 = ['동광리', '서광리']\n 모슬포 = ['상모리', '인성리', '하모리', '동일리']\n 대정 = ['일과리', '영락리', '신평리', '안성리', '신도리', '무릉리']\n 영어교육도시 = ['구억리', '보성리']\n 한경_중산간 = ['청수리', '조수리', '낙천리', '저지리']\n 차귀 = ['고산리', '용수리']\n 신창 = ['두모리', '신창리']\n 판포 = ['금등리', '판포리']\n 협재 = ['협재리', '금능리', '월령리']\n 한림 = ['옹포리', '동명리', '한림리', '대림리', '수원리', '귀덕리', '한수리']\n 한림_중산간 = ['월림리', '상명리', '금악리', '명월리', '상대리']\n 곽지 = ['금성리', '곽지리']\n 애월_중산간 = ['남읍리', '어음리', '상가리', '유수암리', '소길리', '광령리', '고성리', '상귀리', '장전리', '납읍리', '봉성리', '해안동']\n 애월 = ['애월리', '하가리', '고내리', '신엄리', '구엄리', '수산리']\n 하귀 = ['하귀1리', '하귀2리']\n\n region_list = [구제주_탑동, 구제주, 용담해안도로, 제주시_버스터미널, 구제주_오라동, 아라동, 첨단과학기술단지, 삼화지구, 신제주, 외도지구, 봉개, 제주시_중산간, 함덕, 조천_중산간,\n 김녕, 월정, 세화, 구좌_중산간, 남조로, 성산일출봉_섭지코지, 성산_중산간, 성산, 성읍, 표선해수욕장, 표선, 남원, 남원_중산간,\n 위미, 서귀포_구시가지, 서귀포_신시가지, 서귀포시_동부, 서귀포시_중산간, 서귀포시_법환포구, 중문, 사계, 안덕_중산간, 안덕_동서광, 모슬포, 대정, \n 영어교육도시, 한경_중산간, 차귀, 신창, 판포, 협재, 한림, 한림_중산간, 곽지, 애월_중산간, 애월, 하귀]\n region_name = ['구제주_탑동', '구제주', '용담해안도로', '제주시_버스터미널', '구제주_오라동', '아라동', '첨단과학기술단지', '삼화지구', '신제주', '외도지구', '봉개', '제주시_중산간',\n '함덕', '조천_중산간', '김녕', '월정', '세화', '구좌_중산간', '남조로', '성산일출봉_섭지코지', '성산_중산간', '성산', '성읍',\n '표선해수욕장', 
'표선', '남원', '남원_중산간', '위미', '서귀포_구시가지', '서귀포_신시가지', '서귀포시_동부', '서귀포시_중산간',\n '서귀포시_법환포구', '중문', '사계', '안덕_중산간', '안덕_동서광', '모슬포', '대정', '영어교육도시', '한경_중산간', '차귀', '신창', '판포', '협재',\n '한림', '한림_중산간', '곽지', '애월_중산간', '애월', '하귀']\n\n for num, i in enumerate(region_list):\n if dong in i:\n return region_name[num]\n \n \n\ndef region2(region_1): \n \"\"\"\n region_1 함수로 만들어진 하위 권역의 정보를 받아서 \n 해당 하위 권역이 속하는 상위 권역의 정보를 반환합니다.\n pandas.DataFrame에서 apply 메서드를 사용하기 위한 함수입니다. \n \"\"\" \n 구제주 = ['구제주_탑동', '구제주', '용담해안도로', '제주시_버스터미널', '아라동', '구제주_오라동']\n 첨단과학기술단지 = ['첨단과학기술단지']\n 삼화지구 = ['삼화지구']\n 신제주 = ['신제주']\n 제주시외 = ['외도지구', '봉개']\n 조천 = ['함덕', '조천_중산간']\n 구좌 = ['김녕', '월정', '세화', '구좌_중산간']\n 남조로_중산간 = ['남조로']\n 성산 = ['성산일출봉_섭지코지', '성산_중산간', '성산']\n 표선 = ['성읍', '표선해수욕장', '표선']\n 남원 = ['남원', '남원_중산간', '위미']\n 서귀포시 = ['서귀포_구시가지', '서귀포_신시가지', '서귀포시_동부', '서귀포시_중산간']\n 중문 = ['중문']\n 안덕 = ['사계', '안덕_중산간', '안덕_동서광']\n 대정 = ['모슬포', '대정', '영어교육도시']\n 한경면 = ['한경_중산간', '차귀', '신창', '판포']\n 한림 = ['협재', '한림', '한림_중산간']\n 애월 = ['곽지', '애월_중산간', '애월', '하귀']\n\n region_list = [구제주, 첨단과학기술단지, 삼화지구, 신제주, 제주시외, 조천, 구좌, 남조로_중산간, 성산, 표선, 남원, 서귀포시, 중문, 안덕, 대정, \n 한경면, 한림, 애월]\n region_name = ['구제주', '첨단과학기술단지', '삼화지구', '신제주', '제주시외', '조천', '구좌', '남조로_중산간', '성산', '표선', '남원',\n '서귀포시', '중문', '안덕', '대정', '한경면', '한림', '애월']\n\n \n for num, i in enumerate(region_list):\n if region_1 in i:\n return region_name[num]\n\ndef region3(region_1):\n \"\"\"\n region_1 함수로 만들어진 하위 권역의 정보를 받아서 \n 해당 하위 권역이 속하는 관광지 권역의 정보를 반환합니다.\n 이외의 지역은 \"비관광지\"로 간주합니다. \n pandas.DataFrame에서 apply 메서드를 사용하기 위한 함수입니다. \n \"\"\" \n \n 북 = ['용담해안도로', '구제주_탑동']\n 동북 = ['함덕', '김녕', '월정', '세화']\n 동남 = ['성산일출봉_섭지코지', '표선해수욕장']\n 남 = ['남원', '위미', '서귀포시_법환포구', '중문']\n 서남 = ['사계', '모슬포', '차귀', '신창']\n 서북 = ['판포', '협재', '한림', '곽지', '애월']\n 서부_중산간 = ['안덕_중산간', '안덕_동서광']\n 동부_중산간 = ['남조로', '성산_중산간']\n\n cluster_tour = [북, 동북, 동남, 남, 서남, 서북, 서부_중산간, 동부_중산간]\n cluster_tour_name = ['북', '동북', '동남', '남', '서남', '서북', '서부_중산간', '동부_중산간']\n\n for num, i in enumerate(cluster_tour):\n if region_1 in i:\n return cluster_tour_name[num]\n\n\n" }, { "alpha_fraction": 0.6685689091682434, "alphanum_fraction": 0.7045654058456421, "avg_line_length": 36.93333435058594, "blob_id": "2e03ac2fdf74889ad15cf19b174465b6f78be282", "content_id": "4bc67fc2cffc0943e8c10f21fd0c04357fb317a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4214, "license_type": "permissive", "max_line_length": 217, "num_lines": 60, "path": "/README.md", "repo_name": "JeonghyunKo/regression_pjt_jeju", "src_encoding": "UTF-8", "text": "# 제주도 대중교통 이용 관광객 예측 프로젝트\n\n\n![jaemin-don-7ZlXsihxD2c-unsplash](https://user-images.githubusercontent.com/80465347/119299360-bb1a1900-bc99-11eb-908b-789de1220f89.jpg)\n\n\n제주도를 찾는 관광객들이 선택하는 주요 교통수단은 단연 렌트카이지만, (특히 코로나 19의 여파로) 국내 여행지의 대표인 제주도가 점점 더 각광받으면서 대중교통의 이용 또한 증가하고 있습니다. \n이에 대중교통을 이용하는 관광객의 수를 예측하여 관광지 주변 상권이나 Public Mobile 업체들이 활용할 수 있는 데이터를 만들고자 하였습니다. \n\n## 1. Project Summary \n### 1.1 목적 \n제주 국제 공항 정류장에서 탑승한 탑승객의 수로 대중교통을 이용하는 관광객 수 예측하기 \n\n### 1.2 결과\n먼저 데이터상 버스 이용객이 도민인지, 관광객인지를 확실히 알 수 없어 관광지에서 하차하는 인원은 대체로 관광객일 것이라는 가정 하에 권역별로 묶는 작업을 진행하였습니다. 그러나 권역 간 이용량 분포에서 차이점이 뚜렷하게 드러나지 않았으며, 회귀분석 결과 설명력 또한 0.2 정도로 높지 않았습니다. \n이에 접근 방향을 바꾸어 Kmeans clustering을 통해 탑승객이 도민인지 관광객인지를 구분하고자 시도하였습니다. 그 결과 클러스터 간 이용기간 및 횟수, 그리고 주 이용 정류장의 차이가 뚜렷하게 드러나 도민과 관광객의 구분에 성공하였다고 판단하였으며, 관광객으로 추정되는 클러스터의 이용량을 구하여 회귀분석을 진행하자 설명력이 0.6 이상으로 올라가는 결과를 얻었습니다. 
\n\n### 1.3 데이터\n\n- 버스 승객별 이용 현황 (2018.07-2019.12) / 제주데이터허브\n- 해당 기간의 일별 날씨 데이터 / 기상청 기상자료개방포털\n\n### 1.4 진행순서 \n\n- EDA\n- 권역별 그룹핑에 기반한 회귀분석 \n- Kmeans Clustering : 탑승객이 관광객인지 도민인지 구분하기 \n- 승객 클러스터링에 기반한 회귀분석 \n\n### 1.5 주요 분석 기법 및 사용 툴 \n\n- OLS, Kmeans clustering\n- [Pydeck](https://deckgl.readthedocs.io/en/latest/#)\n\n## 2. File List \n\n- JejuRegion.py : 권역 그룹핑을 위한 모듈 \n- jeju_datahub_api : 제주데이터허브 API 이용하기 \n- jeju_visualization_map : pydeck을 이용한 제주도 지역별 버스 이용량 시각화 \n- df_regression.csv : regression 용 데이터프레임 파일\n- jeju_clustering.ipynb : 버스 이용객을 대상으로 한 kmeans clustering\n- jeju_regression_part2.ipynb : 회귀분석 \n\n\n## 3. Contributors\n\n* [정현](https://data-ducky.tistory.com) - 파이덱 시각화 / 회귀분석 / 다스크\n* [재훈](https://github.com/jayz0417) - 데이터 전처리 / EDA\n\n## 4. What We Learned, and more... \n\n- DASK를 사용하면서 필요한 데이터만 가져와서 처리하거나, 최대한 간결하게 작동할 수 있는 코드를 작성하는 등 작업 효율에 대한 고민을 깊게 할 수 있었습니다. 다만 DASK에 적응하는 시간이 소요되면서 프로젝트 착수가 약간 늦어지고, 그만큼 모델 성능 향상을 위한 개선 작업을 단축시켜야 했던 점은 아쉽습니다. 지속적으로 후속 작업들을 이어갈 예정입니다. \n- 추후 제주 지역에서 일레클, ZET와 같은 공유 모빌리티 / 라스트마일 서비스 업체들의 적절한 정류장 위치 선정을 위한 이용자 수 예측이나, 가동률 예측 등 대중교통 이용 관광객 수를 이용한 심화된 프로젝트를 진행해보고 싶습니다. \n\n## 5. Acknowledgments \n\n* [Flycode77](https://github.com/FLY-CODE77) / 부족한 질문에도 항상 성실하게 알려주셔서 감사드립니다. \n* [Radajin](https://github.com/radajin) / DASK-KING\n* 제주데이터허브, 제주관광협회 / 갑작스러웠던 요청임에도 흔쾌히 데이터를 제공해주셔서 감사드립니다. \n* and [PinkWink](https://github.com/PinkWink) / Kmeans Clustering 제안으로 꽉 막혀있던 프로젝트의 돌파구를 제시하셨습니다. \n" } ]
2
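The docstrings in JejuRegion.py above state that region1, region2, and region3 are written for pandas.DataFrame.apply, with region2 and region3 consuming the zone that region1 returns. A minimal usage sketch follows; the CSV file name and the 'dong' column name are assumptions for illustration, not taken from the repo.

```python
import pandas as pd
from JejuRegion import delisland, region1, region2, region3

# Hypothetical bus-stop table with an eup/myeon/dong/ri column named 'dong'.
df = pd.read_csv("bus_stops.csv")

# Drop Udo-myeon and Chuja-myeon rows first.
df = delisland(df, "dong")

# Chain the zone levels off region1's output, as the docstrings describe.
df["region_1"] = df["dong"].apply(region1)        # low-level zone
df["region_2"] = df["region_1"].apply(region2)    # higher-level zone
df["tour_zone"] = df["region_1"].apply(region3)   # tourist zone, else '비관광지'
```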