column              dtype           stats
repo_name           stringlengths   5-114
repo_url            stringlengths   24-133
snapshot_id         stringlengths   40-40
revision_id         stringlengths   40-40
directory_id        stringlengths   40-40
branch_name         stringclasses   209 values
visit_date          timestamp[ns]
revision_date       timestamp[ns]
committer_date      timestamp[ns]
github_id           int64           9.83k-683M
star_events_count   int64           0-22.6k
fork_events_count   int64           0-4.15k
gha_license_id      stringclasses   17 values
gha_created_at      timestamp[ns]
gha_updated_at      timestamp[ns]
gha_pushed_at       timestamp[ns]
gha_language        stringclasses   115 values
files               listlengths     1-13.2k
num_files           int64           1-13.2k
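Each row below is one repository snapshot with the columns above; the `files` column holds one entry per file in the snapshot. A minimal sketch of consuming such rows, assuming they are already available as Python dicts keyed by these column names (this preview does not specify the dataset's name or loader, so loading is left abstract); the per-file keys used here (`path`, `language`, `length_bytes`) are the ones visible in the records below:

```python
# Minimal sketch, assuming `rows` is an iterable of dicts whose keys
# match the schema above; the loading mechanism itself is not shown.
from typing import Any, Dict, Iterable

def summarize_rows(rows: Iterable[Dict[str, Any]]) -> None:
    for row in rows:
        # Scalar, repo-level metadata columns.
        print(row["repo_name"], row["branch_name"], row["star_events_count"])
        # `files` is a list; in the rows shown here, `num_files`
        # matches its length.
        assert row["num_files"] == len(row["files"])
        for f in row["files"]:
            # Per-file metadata observed in the records below.
            print("  ", f["path"], f["language"], f["length_bytes"])
```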
repo_name: bala529/find-replace
repo_url: https://github.com/bala529/find-replace
snapshot_id: f581750c7ebd993c4e50d8001afc2d48589e4840
revision_id: 18185c9b9cc3e264b96763f6ceb799e09fed650a
directory_id: 3fb9f635a367c41abd1e1fdc8a100adaa6bbda3e
branch_name: refs/heads/master
visit_date: 2020-12-25T05:06:55.790776
revision_date: 2016-06-21T18:19:47
committer_date: 2016-06-21T18:19:47
github_id: 61,397,451
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.7437499761581421, "alphanum_fraction": 0.7718750238418579, "avg_line_length": 17.823530197143555, "blob_id": "7cd19de7cb206e6bcf08d224e625bca2b124860d", "content_id": "c4b9261eb5706637155f67ec7867f3fa8eafab56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 320, "license_type": "no_license", "max_line_length": 111, "num_lines": 17, "path": "/README.md", "repo_name": "bala529/find-replace", "src_encoding": "UTF-8", "text": "# find-replace\n\nA python script to find and replace any string inside any file under any directory or sub-directory mentioned. \n\n\n# How to run?\n\nFor Tutorial video please visit\n\nhttps://asciinema.org/a/2w36sal8zlxi7d3zd01edt8jv\n\nMake sure that streplace.py is chmod +x\n\n./streplace.py from any where. Thats it!\n\n\nThanks\n" }, { "alpha_fraction": 0.5584020614624023, "alphanum_fraction": 0.5631784796714783, "avg_line_length": 27.44444465637207, "blob_id": "a1828a3bfaeded11a7f1e8bfa5492fcae9b5a50e", "content_id": "1e07a51ea3a6bbcc930e25cefa2015aae9812a8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "no_license", "max_line_length": 154, "num_lines": 81, "path": "/streplace.py", "repo_name": "bala529/find-replace", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport glob\nimport inspect,os\n\n# @auther: Balasubramaniyan Sundaresan, [email protected]\n#Python script to find and replace string inside any given directory. Also will list the changed made on each file. It will scan through sub-directories. \n#This script assumes that the directory to start the search is under /home\n\n\n#getting current directory and displaying it.\ndef_dir= os.getcwd()\nprint (\"\\n############################ Find and replace ##################################\\n\")\nprint ( \" Input the directory to search for a string and then to replace it.\")\nprint ( \" Author: Balasubramaniyan Sundaresan(balu), Email: [email protected]\")\nprint ( \"\\nCurrent Directory:\",def_dir)\nprint(\"\\n\")\n\ndname= input(\"DIRECTORY NAME: \");\nprint (\"\\n\")\n\nx=0\n\n#sets current path to /home, but it can be hardcoded to anylocation. \n\nprint(\"Seaching..... 
from '/home'\\n\")\nfor root, dirs, files in os.walk(\"/home\"):\n if x==0:\n for name in dirs:\n if name==dname:\n path = os.path.join(root, name)\n print(\"FOUND AT LOCATION: %s \\n\" % path) \n x=1 \n if x==1:\n break\n\nif x==0:\n print(\"DIRECTORY YOU ARE LOOKING FOR IS NOT FOUND!\")\n exit(0)\n\nfind = input(\"FIND STRING: \");\nprint (\"\\n\")\n\n\nreplace= input(\"REPLACE STRING: \");\nprint (\"\\n\")\n\n#changing path to the directory mentioned above\nos.chdir(path)\nlist1 = []\n\n#collecting the path of all the files residing under the mentioned directory\nfor root, dirs, files in os.walk(path):\n for name in files:\n filepath= os.path.join(root,name)\n list1.append(filepath)\n\n#print (list1)\n# Find and replace\n\nfor filepath in list1:\n f = open( filepath, 'r+' )\n contents = f.read()\n if find in contents:\n print (\"\\n********************************************************************************\")\n print (\" \\n String FOUND @ '%s'\" % filepath)\n print (\"\\n\\t\\t####BEFORE replace#### \\n\",contents)\n newdata = contents.replace(find,replace) \n f.close()\n f= open( filepath, 'w+')\n f.write(newdata)\n f.close()\n f = open (filepath,'r+')\n replaced = f.read()\n print (\"\\n\\t\\t####AFTER replace##### \\n\",replaced)\n f.close()\n else:\n print(\"STRING NOT FOUND in '%s'\" % filepath)\n print(\"\\n\")\n f.close()\n\n#end" } ]
num_files: 2
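In the record above, each `files` element pairs the raw file body (`text`) with per-file metadata (`path`, `language`, `src_encoding`, and so on). A hedged sketch of rebuilding one record's working tree from those fields; the function name and output root are illustrative, not part of the data:

```python
import os

def materialize(record: dict, out_root: str) -> None:
    # Write each file body back to disk under out_root/<repo_name>/.
    # `path` values in the records shown start with '/', hence lstrip.
    for f in record["files"]:
        dest = os.path.join(out_root, record["repo_name"], f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        # src_encoding is e.g. "UTF-8"; lower-cased it is a valid codec name.
        with open(dest, "w", encoding=f["src_encoding"].lower()) as fh:
            fh.write(f["text"])
```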
repo_name: chipyaya/Tennis-Match-Audio-Signal-Analysis
repo_url: https://github.com/chipyaya/Tennis-Match-Audio-Signal-Analysis
snapshot_id: 993af2388272f7e344ca7370af8375eb004f17e0
revision_id: 67f5157bcc671a24b6784d680e86cad9c236d62e
directory_id: 45add86f19d181f45ba7622bab4025ce4506d0b3
branch_name: refs/heads/main
visit_date: 2023-01-24T03:41:20.726620
revision_date: 2020-12-11T23:59:27
committer_date: 2020-12-11T23:59:37
github_id: 305,836,053
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5398058295249939, "alphanum_fraction": 0.5475727915763855, "avg_line_length": 33.400001525878906, "blob_id": "c6bf98f9a5c6320ab0180184cc505a9558a65f95", "content_id": "df55c8e7bd3d2aa7e963e55c2e45b16a2fcc57fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 515, "license_type": "no_license", "max_line_length": 96, "num_lines": 15, "path": "/classification/exp-ml.sh", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "declare -a flags=(\"player_flag\" \"hand_flag\" \"dis_flag\" \"serve_flag\")\ndeclare -a models=(\"knn\" \"nb\" \"rf\" \"svm-linear\" \"ridge\")\ndeclare -a modes=(\"mfcc\" \"mfcc-4sec\" \"mfcc-avg\" \"mfcc-delta\" \"mel\" \"lfcc-4sec\" \"mfcc-lfcc-4sec\")\n\nfor flag in \"${flags[@]}\"\ndo\n for model in \"${models[@]}\"\n do\n for mode in \"${modes[@]}\"\n do\n echo \"flag:\" \"$flag\" \", model:\" \"$model\" \", mode:\" \"$mode\" \n python3 ml.py --mode \"$mode\" --classifier \"$model\" --target \"$flag\"\n done\n done\ndone" }, { "alpha_fraction": 0.6485963463783264, "alphanum_fraction": 0.6563407778739929, "avg_line_length": 35.89285659790039, "blob_id": "6b0020c875701db495755fccec817940301c28b0", "content_id": "20daf10312ac63a87b970693b46bdb5360aed9ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 135, "num_lines": 28, "path": "/archive/video2image/chopper.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport pickle\nimport time\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser(description='Process arguments.')\nparser.add_argument('-i', '--input', help='input video')\nparser.add_argument('-r', '--frame', help='frame count')\nparser.add_argument('-o', '--output', help='output folder')\nparser.add_argument('-c', '--clipsResult', help='clips result')\nargs = parser.parse_args()\n\nclips = pickle.load(open(args.clipsResult, 'rb'))\nos.system(\"mkdir {}\".format(args.output))\n\nfor clip in clips:\n print(clip[0], clip[1])\n startTime = time.strftime('%M:%S', time.gmtime(clip[0]))\n endTime = time.strftime('%M:%S', time.gmtime(clip[1]))\n duration = time.strftime('%M:%S', time.gmtime(clip[1]-clip[0]))\n \n folder = \"{}-{}-fps{}\".format(startTime, endTime, args.frame)\n os.system(\"mkdir {}/{}\".format(args.output, folder))\n\n command = \"ffmpeg -ss {} -i {} -r {} -t {} {}/{}/%03d.png\".format(startTime, args.input, args.frame, duration, args.output, folder)\n #print(command)\n os.system(command)\n" }, { "alpha_fraction": 0.8212617039680481, "alphanum_fraction": 0.8212617039680481, "avg_line_length": 427.5, "blob_id": "ad53eb7cd4c7c89c42bc8c24719cd26b8cdd9582", "content_id": "f5a2bc4754be52853e44356eba40601ac71f54a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 856, "license_type": "no_license", "max_line_length": 842, "num_lines": 2, "path": "/README.md", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "# WN-FPJ-UTCS\nTennis match analysis is a challenging task in real life, with the goal to analyze the tactics or patterns of a player. Due to the lack of data and the small size of tennis ball, it is difficult to keep track of the ball movements using image analysis and computer vision. 
Thus, in this paper, we propose to analyze the tennis match with audio signals to overcome the limitation. We start from collecting and labeling data from real tennis match recordings, and extract various different features from the audio signals obtained. Moreover, we experiment the features on both traditional machine learning and deep learning models. In our experiments, we quantitatively verify the effectiveness and robustness of the features and models proposed, which produces favorable results and establishes the first successful attempt to handle the task." }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.6415094137191772, "avg_line_length": 9.800000190734863, "blob_id": "eab33283bae6fa802823ada6eb0a4f45682a46b7", "content_id": "900dda085e78a12e326692d5a1442fd6ecc3cd6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/archive/mapping/README.md", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "# WN-FPJ-UTCS\n## Mapping\n```\npython3 transform.py\n```" }, { "alpha_fraction": 0.7404580116271973, "alphanum_fraction": 0.8320610523223877, "avg_line_length": 131, "blob_id": "44df7ee102ebf8e9bcbb71581131a4af94fe10b0", "content_id": "9c0cf15ea5ca11133a70af6472799083e6e1cb95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 131, "license_type": "no_license", "max_line_length": 131, "num_lines": 1, "path": "/data/complete_audio/README.md", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "Complete audio wave files can be find [here](https://drive.google.com/drive/folders/1W8ck2GuWMyl1J1dIk0N76U44Ry2Z5aXp?usp=sharing)." 
}, { "alpha_fraction": 0.6329814195632935, "alphanum_fraction": 0.6422825455665588, "avg_line_length": 31.61475372314453, "blob_id": "56742ebd41d4f01fc1ba9b3327eb3c700f14352f", "content_id": "1378e4b143794afe91bcc3b0f7100ad12af757f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3978, "license_type": "no_license", "max_line_length": 77, "num_lines": 122, "path": "/archive/clipping/train_applause.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport librosa\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation\nfrom keras.optimizers import Adam\n\n\ndef extract_features(f):\n try:\n y, sr = librosa.load(f)\n mfcc = librosa.feature.mfcc(y, n_mfcc=13)\n delta = librosa.feature.delta(mfcc)\n return np.vstack([mfcc, delta])\n except:\n print(f\"{f} failed\")\n\ndef get_filepaths(dirs):\n filepaths = []\n for d in dirs:\n filepaths += [os.path.join(d, f) for f in os.listdir(d)]\n return filepaths\n\ndef extract_features_from_files(dirs):\n files = get_filepaths(dirs)\n return [extract_features(f) for f in files]\n\ndef get_feats_with_window(S, window_size):\n features = []\n for i in range(window_size, S.shape[1]-window_size):\n feature = S[:, i-window_size:i+window_size]\n features.append(feature.reshape((-1)))\n return features\n\ndef read_labels(filename):\n df = pd.read_csv(filename, header=None)\n df.columns = ['File', 'Start', 'End']\n return df\n\ndef get_data_of_a_label(feats, label):\n X = [np.array(get_feats_with_window(f, window_size)) for f in feats]\n X = np.vstack(X)\n y = np.ones(len(X)) if label else np.zeros(len(X))\n return X, y\n\ndef get_data(applause_feats, non_applause_feats):\n applause_feats, applause_labels = get_data_of_a_label(\n applause_feats, 1)\n non_applause_feats, non_applause_labels = get_data_of_a_label(\n non_applause_feats, 0)\n X = np.vstack([applause_feats, non_applause_feats])\n y = np.concatenate([applause_labels, non_applause_labels])\n X, y = shuffle(X, y)\n return X, y\n\ndef calc_mean_std(X):\n means = np.zeros(X.shape[1])\n std_devs = np.zeros(X.shape[1])\n\n window_start = 0\n while window_start < X.shape[1]:\n mean = np.mean(X[:, window_start:window_start + 2*window_size])\n std = np.std(X[:, window_start:window_start + 2*window_size])\n means[window_start:window_start + 2*window_size] = mean\n std_devs[window_start:window_start + 2*window_size] = std\n window_start += 2*window_size\n\n return means, std_devs\n\ndef normalize_X(X, means, std_devs):\n for i in range(X.shape[1]):\n X[:, i] -= means[i]\n X[:, i] /= std_devs[i]\n return X\n\ndef initialize_ff_model():\n model = Sequential()\n model.add(Dense(1, input_dim=260))\n model.add(Activation('sigmoid'))\n model.compile(\n optimizer=Adam(),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n return model\n\n\nif __name__ == '__main__':\n applause_dirs = [\n '../data/applause-training-data/applause_pt1/',\n '../data/applause-training-data/applause_pt2/']\n non_applause_dirs = [\n '../data/applause-training-data/non_applause_pt1/',\n '../data/applause-training-data/non_applause_pt2/']\n\n applause_labels = read_labels(\n '../data/applause-training-data/PennSound_applause_labels.csv')\n non_applause_labels = read_labels(\n '../data/applause-training-data/PennSound_non_applause_labels.csv')\n\n applause_feats = extract_features_from_files(applause_dirs)\n 
applause_feats = [feat for feat in applause_feats if feat is not None]\n non_applause_feats = extract_features_from_files(non_applause_dirs)\n\n test_set_size = int(len(applause_feats) * 0.2)\n\n window_size = 5\n\n X_train, y_train = get_data(\n applause_feats[test_set_size:], non_applause_feats[test_set_size:])\n X_test, y_test = get_data(\n applause_feats[0:test_set_size], non_applause_feats[0:test_set_size])\n\n means, std_devs = calc_mean_std(X_train)\n # X_train = normalize_X(X_train, means, std_devs)\n # X_test = normalize_X(X_test, means, std_devs)\n\n model = initialize_ff_model()\n model.fit(X_train, y_train, epochs=1, batch_size=256, shuffle=True)\n model.evaluate(X_test, y_test, batch_size=256)\n model.save('models/applause-model.h5')" }, { "alpha_fraction": 0.6359742879867554, "alphanum_fraction": 0.705567479133606, "avg_line_length": 36.400001525878906, "blob_id": "81360e25c2a0e1db0cf47964a5462f7f584ab8eb", "content_id": "1921b8030c9bd1f0cdf02027e842a844ced5575b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 934, "license_type": "no_license", "max_line_length": 138, "num_lines": 25, "path": "/archive/clipping/README.md", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "# WN-FPJ-UTCS\n# Automatic clips generation using audio features\nOur goal is to generate clips that do not contain audience applause and commentator's speech.\n\nTo achieve this, we detect the start of each clip via signal volumes and assign the end of a clip to the start time of audience applauses.\n\nThe intervals of applauses are detected by a pretrained model retrieved from https://github.com/jrgillick/Applause.\n\n\n## Generate clips\n```\npython3 gen_clips.py --audio_file ../data/audio/us-open-2019-highlights.wav\n# clips dumped to results/clips-us-open-2019-highlights.p\n```\n\n## Load clips using pickle\n```\nimport pickle\nclips = pickle.load(open('results/clips-us-open-2019-highlights.p', 'rb'))\n```\n\n## Structure of clips\n- clips = [(start time 0, end time 0), (start time 1, end time 1), ..]\n- unit: second\n- e.g. 
clips = [(0, 7), (14, 22), (25, 48), (50, 61), (63, 98), (107, 117), (122, 125), (132, 161), (164, 168), (174, 180)]" }, { "alpha_fraction": 0.5989110469818115, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 32.060001373291016, "blob_id": "621470a90e15952bd259939da321aef9ab144dd6", "content_id": "d66fb81e8a2fd97d09daae16cdde108b044261e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 78, "num_lines": 50, "path": "/archive/mapping/tools/hsv_trackBar.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\ncv2.namedWindow(\"Hsv Capture\")\n\n# create trackbars for color change\n# IMPORTANT: You have to define the correct HSV opencv range hence 179,255,255\ncv2.createTrackbar('H', 'Hsv Capture', 0, 179, nothing)\ncv2.createTrackbar('S', 'Hsv Capture', 0, 255, nothing)\ncv2.createTrackbar('V', 'Hsv Capture', 0, 255, nothing)\n\ncv2.createTrackbar('H1', 'Hsv Capture', 0, 179, nothing)\ncv2.createTrackbar('S1', 'Hsv Capture', 0, 255, nothing)\ncv2.createTrackbar('V1', 'Hsv Capture', 0, 255, nothing)\n\nimg_path = '../video2image/us19-images/00:00-00:07-fps6/001.png'\nframe = cv2.imread(img_path)\nhsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\nwhile(True):\n # Trackbars realtime position\n h1 = cv2.getTrackbarPos('H', 'Hsv Capture')\n s1 = cv2.getTrackbarPos('S', 'Hsv Capture')\n v1 = cv2.getTrackbarPos('V', 'Hsv Capture')\n\n h2 = cv2.getTrackbarPos('H1', 'Hsv Capture')\n s2 = cv2.getTrackbarPos('S1', 'Hsv Capture')\n v2 = cv2.getTrackbarPos('V1', 'Hsv Capture')\n\n #How to store the min and max values from the trackbars\n blue_MIN = np.array([h1, s1, v1], np.uint8)\n blue_MAX = np.array([h2, s2, v2], np.uint8)\n\n #After finding your values, you can replace them like this\n #blue_MIN = np.array([102, 73, 145], np.uint8)\n #blue_MAX = np.array([123, 182, 175], np.uint8)\n \n #Using inRange to find the desired range\n hsvCapture = cv2.inRange(hsv_frame, blue_MIN, blue_MAX)\n\n cv2.imshow('Hsv Capture', hsvCapture)\n # cv2.imshow('Hsv Capture', frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7473683953285217, "avg_line_length": 33.6363639831543, "blob_id": "c5f2cbd4e327a1d6cd667e6d58738133c6d1194e", "content_id": "06f11d54b46c97e86cff5d6bd017f15dcff06f57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 179, "num_lines": 11, "path": "/archive/detect/README.md", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "# WN-FPJ-UTCS\n## Human detection\nDownload the pretrained yolov3 weight to detect/\n```\nwget https://pjreddie.com/media/files/yolov3.weights\n```\nUsage:\n```\npython3 run.py --directory=../video2image/\n```\nThis will generate a txt file for every image. Every line in the txt file is a detected human instance with four variables, which are the x, y, width, height for the bounding box." 
}, { "alpha_fraction": 0.7099999785423279, "alphanum_fraction": 0.7850000262260437, "avg_line_length": 49.25, "blob_id": "34f9e45ae78b9b5756bb437ddb482996121f3fc1", "content_id": "8ae2cb3d5754fff3e9d39c4d55ee57bc467d13a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 70, "num_lines": 4, "path": "/archive/mapping/tools/padding.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread('court-plan/tennis-court-plan.jpg')\nreplicate = cv2.copyMakeBorder(img,10,10,100,100,cv2.BORDER_REPLICATE)\ncv2.imwrite('court-plan/tennis-court-plan-padded.png', replicate)" }, { "alpha_fraction": 0.550222635269165, "alphanum_fraction": 0.5827807784080505, "avg_line_length": 36.15441131591797, "blob_id": "e81796b28ee077c6a4f9a6e4c61406662881c3ea", "content_id": "34589c5a67d9e1c442a781cbf5a000d1703286cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10105, "license_type": "no_license", "max_line_length": 83, "num_lines": 272, "path": "/archive/mapping/transform.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport cv2\nimport argparse\nimport numpy as np\nimport opencv_wrapper as cvw\nfrom sklearn.cluster import dbscan\n\ndef make_out_dir(out_dir, dir_name):\n path = os.path.join(out_dir, dir_name)\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\ndef detect_edges(out_path, prefix, frame):\n # Convert BGR to HSV\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # define range of blue color in HSV\n lower_blue = np.array([99, 105, 82], np.uint8)\n upper_blue = np.array([121, 184, 225], np.uint8)\n\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(frame_hsv, lower_blue, upper_blue)\n\n # cv2.imwrite(os.path.join(out_path, '{}-frame.png'.format(prefix)), frame)\n cv2.imwrite(os.path.join(out_path, '{}-mask.png'.format(prefix)), mask)\n\n # erosion and dilation\n kernel_size = 2\n kernel = np.ones((kernel_size, kernel_size), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n kernel_size = 2\n kernel = np.ones((kernel_size, kernel_size), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\n # filter out top\n fil = np.zeros(frame.shape[:2], np.uint8)\n for i in range(70, frame.shape[0]):\n for j in range(frame.shape[1]):\n fil[i, j] = 255\n mask = cv2.bitwise_and(mask, mask, mask=fil)\n\n # edge detection\n edges = cv2.Canny(mask, 1000, 1500, apertureSize=3)\n\n # Bitwise-AND mask and original image\n # res = cv2.bitwise_and(frame, frame, mask=mask)\n # cv2.imwrite(os.path.join(out_path, '{}-res.png'.format(prefix)), res)\n\n cv2.imwrite(os.path.join(out_path, '{}-mask-morph.png'.format(prefix)), mask)\n # cv2.imwrite(os.path.join(out_path, '{}-edges.png'.format(prefix)), edges)\n\n # calculate Hough lines\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 50)\n\n if lines is None:\n return None\n\n line_pts = []\n n_lines = 60\n for i in range(min(n_lines, len(lines))):\n for rho,theta in lines[i]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n cv2.line(frame,(x1, y1), (x2, y2),(0, 0, 255), 2)\n line_pts.append([(x1, y1), (x2, y2)])\n\n # cv2.imwrite(os.path.join(out_path,\n # 
'{}-houghlines.png'.format(prefix)), frame)\n return line_pts\n \ndef find_intersection(o1, p1, o2, p2, frame_size):\n def minus(p1, p2):\n return (p1[0] - p2[0], p1[1] - p2[1])\n\n x = minus(o2, o1)\n d1 = minus(p1, o1)\n d2 = minus(p2, o2)\n\n cross = d1[0] * d2[1] - d1[1] * d2[0]\n if abs(cross) < 1e-8:\n return [False, (0, 0)]\n t1 = (x[0] * d2[1] - x[1] * d2[0]) / cross\n rx = int(o1[0] + d1[0] * t1)\n ry = int(o1[1] + d1[1] * t1)\n tf = False\n if frame_size[1] > rx and rx > 0 and frame_size[0] > ry and ry > 0:\n tf = True\n return [tf, (rx, ry)]\n\ndef find_intersections(out_path, prefix, frame, line_pts):\n # find intersection points\n points = []\n for i in range(len(line_pts)):\n for j in range(len(line_pts)):\n if i >= j:\n continue\n tf, p = find_intersection(line_pts[i][0], line_pts[i][1],\n line_pts[j][0], line_pts[j][1], frame.shape[:2])\n if tf == True:\n points.append(p)\n cv2.circle(frame, p, 5, (0, 255, 0), -1)\n if len(points) == 0:\n return None\n points = np.array(points)\n\n # cluster points and find centers\n core, lab = dbscan(points, eps=5, min_samples=3)\n centers = []\n for i in range(np.amax(lab) + 1):\n count = 0\n total = [0, 0]\n for p in range(len(points)):\n if lab[p] == i:\n count += 1\n total[0] += points[p][0]\n total[1] += points[p][1]\n total[0] = int(total[0] / count)\n total[1] = int(total[1] / count)\n centers.append(total)\n cv2.circle(frame, (total[0], total[1]), 10, (255, 0, 0), -1)\n cv2.imwrite(os.path.join(out_path,\n '{}-centers.png'.format(prefix)), frame)\n return centers\n\ndef find_centers(out_path, prefix, frame):\n line_pts = detect_edges(out_path, prefix, frame)\n if line_pts == None:\n print('{}: no edges detected'.format(file_path))\n return None\n centers = find_intersections(\n out_path, prefix, frame, line_pts)\n return centers\n\n\ndef find_centers2(out_path, prefix, frame):\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n lower_blue = np.array([99, 105, 82], np.uint8)\n upper_blue = np.array([121, 184, 225], np.uint8)\n mask = cv2.inRange(frame_hsv, lower_blue, upper_blue)\n\n fil = np.zeros(frame.shape[:2], np.uint8)\n for i in range(70, 290):\n if i < 180:\n for j in range(150, 500):\n fil[i, j] = 255\n else:\n for j in range(50, frame.shape[1]):\n fil[i, j] = 255\n mask = cv2.bitwise_and(mask, mask, mask=fil)\n cv2.imwrite(os.path.join(out_path, '{}-mask.png'.format(prefix)), mask)\n output = cv2.bitwise_and(frame, frame, mask=mask)\n\n gray = cvw.bgr2gray(output)\n corners = cv2.cornerHarris(gray, 9, 3, 0.01)\n corners = cvw.normalize(corners).astype(np.uint8)\n thresh = cvw.threshold_otsu(corners)\n dilated = cvw.dilate(thresh, 3)\n\n contours = cvw.find_external_contours(dilated)\n centers = []\n for i, contour in enumerate(contours):\n centers.append([contour.center.x, contour.center.y])\n cvw.circle(frame, contour.center, 3, cvw.Color.RED, -1)\n cv2.imwrite(os.path.join(out_path, '{}-centers.png'.format(prefix)), frame)\n return centers\n\ndef find_upper(centers):\n # print('centers:', centers)\n min_y = np.amin(np.array(centers), axis=0)[1]\n upper_left = min(centers, key=lambda p: p[1])\n delta_y = 7\n centers_filtered = list(filter(lambda p: p[1] <= min_y + delta_y, centers))\n upper_left = min(centers_filtered, key=lambda p: p[0])\n upper_right = max(centers_filtered, key=lambda p: p[0])\n return upper_left, upper_right\n\ndef find_border_points(centers):\n lower_right = max(centers, key=lambda p: p[0])\n lower_left = min(centers, key=lambda p: p[0])\n upper_left, upper_right = find_upper(centers)\n return 
[upper_left, upper_right, lower_right, lower_left]\n\ndef calc_transform_matrix(out_path, prefix, court, frame, border_points):\n # print('border_points:', border_points)\n # the detected border points are the source\n src_points = np.array(border_points, np.float32)\n # define destination border points on the court court \n border_x_min, border_x_max = 125, 528\n border_y_min, border_y_max = 37, 258\n dst_points = np.array([(border_x_min, border_y_max),\n (border_x_min, border_y_min),\n (border_x_max, border_y_min),\n (border_x_max, border_y_max)], np.float32)\n\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(src_points, dst_points)\n warped = cv2.warpPerspective(frame, M, court.shape[::-1][1:])\n cv2.imwrite(os.path.join(out_path, '{}-warped.png'.format(prefix)), warped)\n return M\n\ndef get_pos(dir_path, file_name):\n with open(os.path.join(dir_path, file_name)) as f:\n lines = [line.rstrip() for line in f]\n pos = np.array([line.split(' ')[:2] for line in lines], dtype=np.float32)\n return pos\n\ndef isInCourtPlan(court, x, y):\n x_max, y_max = court.shape[::-1][1:]\n if x >= 0 and x <= x_max and y >= 0 and y <= y_max:\n return True\n return False\n\ndef transform_and_project(court, M, p, color):\n p_transformed = cv2.perspectiveTransform(p.reshape(1, 1, 2), M).reshape(-1)\n x, y = p_transformed\n if isInCourtPlan(court, x, y):\n print('In plan: {} -> {}'.format(p, p_transformed))\n cv2.circle(court, (x, y), 10, color, -1)\n else:\n print('Out of plan: {} -> {}'.format(p, p_transformed))\n\ndef project(out_path, prefix, court, M, human_pos, ball_pos):\n for pos in human_pos:\n transform_and_project(court, M, pos, (0, 0, 0))\n # transform_and_project(court, M, ball_pos, (255, 0, 0))\n cv2.imwrite(os.path.join(out_path, '{}-projected.png'.format(prefix)), court)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--frame_dir', type=str, default='../video2image/us19-images')\n parser.add_argument(\n '--out_dir', type=str, default='results')\n parser.add_argument(\n '--court_img', type=str, default='court-plan/tennis-court-plan-padded.png')\n args = parser.parse_args()\n\n for i, dir_name in enumerate(os.listdir(args.frame_dir)):\n dir_path = os.path.join(args.frame_dir, dir_name)\n out_path = make_out_dir(args.out_dir, dir_name)\n if not os.path.isdir(dir_path):\n continue\n for file_name in os.listdir(dir_path):\n file_path = os.path.join(dir_path, file_name)\n prefix = file_name.split('.')[0]\n if not os.path.isfile(file_path) or not file_name.endswith('.png'):\n continue\n frame = cv2.imread(file_path)\n # shape: (n_row, n_col, n_channel)\n\n centers = find_centers(out_path, prefix, frame)\n # centers = find_centers2(out_path, prefix, frame)\n if centers == None or len(centers) == 0:\n print('{}: no intersection found'.format(file_path))\n continue\n border_points = find_border_points(centers)\n court = cv2.imread(args.court_img)\n M = calc_transform_matrix(\n out_path, prefix, court, frame, border_points)\n human_pos = get_pos(dir_path, '{}_human.txt'.format(prefix))\n ball_pos = None\n # ball_pos = get_pos(dir_path, '{}_ball.txt'.format(prefix))\n project(out_path, prefix, court, M, human_pos, ball_pos)\n print('{}: projected'.format(file_path))" }, { "alpha_fraction": 0.6153079271316528, "alphanum_fraction": 0.6234801411628723, "avg_line_length": 35.89706039428711, "blob_id": "546258405a2950176c6a1e21f6edbffc0cb85aea", "content_id": "624f40a3bb6aace555d57723da946d7e66ebb7a1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5017, "license_type": "no_license", "max_line_length": 112, "num_lines": 136, "path": "/classification/ml.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport pickle\nimport argparse\nimport textwrap\nimport numpy as np\nfrom enum import Enum\nfrom argparse import RawTextHelpFormatter\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.linear_model import RidgeClassifier\nsys.path.append(\"..\")\nfrom clipping.audio2mfcc import AudioDataset\n\n\npadding_modes = ['mfcc', 'mfcc-delta', 'mel', 'lfcc']\n\nclass Label(Enum):\n player_flag = 0\n hand_flag = 1\n dis_flag = 2\n serve_flag = 3\n\ndef parse_arg():\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)\n parser.add_argument('--dir', type=str, default='cached',\n help=\"dir name which contains cached data\")\n parser.add_argument('--classifier', type=str, default='knn',\n help=\"available classifiers: knn, nb, rf, svm, svm-linear, svm-poly\")\n parser.add_argument('--target', type=str, default='dis_flag',\n help=\"available targets: player_flag, hand_flag, dis_flag, serve_flag\")\n # parser.add_argument('--normalize', action='store_true')\n parser.add_argument('--mode', type=str, default='mfcc-4sec',\n help=textwrap.dedent('''\\\n mfcc: use original mfcc;\n mfcc-avg: taking average of mfcc features;\n mfcc-4sec: use 4sec mfcc;\n mfcc-delta: use pure mfcc plus delta features;\n lfcc-4sec: use 4sec lfcc;\n mfcc-lfcc-4sec: use 4sec mfcc plus lfcc;\n mel: use melspectrogram;''')\n )\n parser.add_argument('--maxLen', type=int, default=130)\n args = parser.parse_args()\n if(args.mode == \"mfcc\"):\n args.maxLen = 130\n elif(args.mode == \"mfcc-delta\"):\n args.maxLen = 173\n elif(args.mode == \"mel\"):\n args.maxLen = 173\n elif(args.mode == \"lfcc\"):\n args.maxLen = 411\n return args\n\ndef normalization(X):\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n return (X - mu) / sigma\n\ndef load_datasets(mode):\n audio_dir = '../data/complete_audio/'\n audio_files = ['berrettini_nadal', 'cilic_nadal', 'federer_dimitrov']\n label_dir = '../data/label/'\n\n datasets = []\n for audio_file in audio_files:\n dataset = AudioDataset(\n audio_dir, label_dir, audio_file, args.mode)\n print(audio_file)\n datasets.append(dataset)\n print('audio feat: {}'.format(datasets[0][0]['audio'].shape))\n return datasets\n\ndef extract_target_label(args, y):\n return y[:, Label[args.target].value]\n\ndef get_data(args):\n filename = '../{}/data-{}.p'.format(args.dir, args.mode)\n if os.path.exists(filename):\n print('loading data from cache: {}'.format(filename))\n [X_train, X_test, y_train, y_test] = pickle.load(\n open(filename, 'rb'))\n else:\n datasets = load_datasets(args.mode)\n X, y = [], []\n for dataset in datasets:\n for i in range(len(dataset)):\n if args.mode in padding_modes:\n zeros = np.zeros((dataset[i]['audio'].shape[0], args.maxLen - dataset[i]['audio'].shape[1]))\n feat = np.concatenate((dataset[i]['audio'], zeros), axis=1).ravel()\n else:\n feat = dataset[i]['audio'].ravel()\n X.append(feat)\n y.append([dataset[i][label.name] for label in Label])\n X = np.array(X)\n y = np.array(y)\n 
X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.20, random_state=0)\n print('dumping data to cache: {}'.format(filename))\n pickle.dump([X_train, X_test, y_train, y_test],\n open(filename, 'wb'))\n y_train = extract_target_label(args, y_train)\n y_test = extract_target_label(args, y_test)\n print('X_train:{}, X_test:{}'.format(X_train.shape, X_test.shape))\n print('y_train:{}, y_test:{}'.format(y_train.shape, y_test.shape))\n return X_train, X_test, y_train, y_test\n\n\nif __name__ == '__main__':\n args = parse_arg()\n X_train, X_test, y_train, y_test = get_data(args)\n if args.classifier == 'knn':\n classifier = KNeighborsClassifier(n_neighbors=3)\n elif args.classifier == 'nb':\n classifier = GaussianNB()\n elif args.classifier == 'rf':\n classifier = RandomForestClassifier(max_depth=5, random_state=0)\n elif args.classifier == 'svm':\n classifier = svm.SVC()\n elif args.classifier == 'svm-linear':\n classifier = svm.SVC(kernel='linear')\n elif args.classifier == 'svm-poly':\n classifier = svm.SVC(kernel='poly')\n elif args.classifier == 'ridge':\n classifier = RidgeClassifier()\n else:\n raise NotImplementedError\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n print('confusion_matrix:')\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))" }, { "alpha_fraction": 0.602403998374939, "alphanum_fraction": 0.6145800948143005, "avg_line_length": 33.44623565673828, "blob_id": "d725087764031f317c1509f68eb2530633c80960", "content_id": "cddb4f5d50c87fa65fb13dd7286f8830834dc143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6406, "license_type": "no_license", "max_line_length": 78, "num_lines": 186, "path": "/archive/clipping/gen_clips.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport librosa\nimport numpy as np\nimport pandas as pd\nfrom keras.models import load_model\nfrom scipy.signal import argrelextrema\nimport matplotlib.pyplot as plt\nimport argparse\nimport pickle\n\n\ndef extract_features(f):\n try:\n y, sr = librosa.load(f)\n mfcc = librosa.feature.mfcc(y,n_mfcc=13)\n delta = librosa.feature.delta(mfcc)\n return y, sr, np.vstack([mfcc,delta])\n except:\n print(f\"{f} failed\")\n\ndef get_feats_with_wondow(S,window_size):\n features = []\n for i in range(window_size,S.shape[1]-window_size):\n feature = S[:,i-window_size:i+window_size]\n features.append(feature.reshape((-1)))\n return features \n\ndef get_applause_instances(probs, frame_rate, threshold=0.5, min_length=10):\n instances = []\n current_list = []\n for i in range(len(probs)):\n if np.min(probs[i:i+1]) > threshold:\n current_list.append(i)\n else:\n if len(current_list) > 0:\n instances.append(current_list)\n current_list = []\n\n instances = [frame_span_to_time_span(\n collapse_to_start_and_end_frame(i), frame_rate) for i in instances \\\n if len(i) > min_length]\n return instances\n\ndef combine_consecutive_intvls(applause_intvls):\n if len(applause_intvls) == 0:\n return applause_intvls\n new_intvls = [applause_intvls[0]]\n for i in range(1, len(applause_intvls)):\n if applause_intvls[i][0] - new_intvls[-1][1] <= 2:\n new_intvls[-1] = (new_intvls[-1][0], applause_intvls[i][1])\n else:\n new_intvls.append(applause_intvls[i])\n return new_intvls\n\ndef frame_to_time(frame_index, frame_rate):\n return(frame/frame_rate)\n\ndef seconds_to_frames(s, frame_rate):\n return(int(s*frame_rate))\n\ndef 
collapse_to_start_and_end_frame(instance_list):\n return (instance_list[0], instance_list[-1])\n\ndef frame_span_to_time_span(frame_span, frame_rate):\n # return (frame_span[0] / frame_rate, frame_span[1] / frame_rate)\n return (round(frame_span[0] / frame_rate),\n round(frame_span[1] / frame_rate))\n\ndef seconds_to_samples(s, sr):\n return s * sr\n\ndef draw_rms(times, rms, filename):\n plt.yscale('log')\n plt.ylim(1e-3, 1e-1)\n plt.plot(times, rms)\n plt.savefig(filename)\n plt.clf()\n\ndef find_local_min_times(rms, times, threshold):\n rms[rms > threshold] = threshold\n minimum_idx = argrelextrema(rms, np.less)[0]\n return times[minimum_idx]\n\ndef select_start_times(times):\n start_times = []\n for i in range(len(times)):\n if i == 0:\n start_times.append(round(times[i]))\n else:\n if times[i] - times[i - 1] > 3:\n start_times.append(round(times[i]))\n return start_times\n\ndef detect_start_times(y, f):\n rms = librosa.feature.rms(y=y)[0]\n times = librosa.times_like(rms)\n # print('video length: {} sec = {}'.format(\n # max(times), format_time(max(times))))\n\n # fig_name = 'rms-{}.png'.format(f.split('/')[-1].split('.')[0])\n # draw_rms(times, rms, fig_name)\n\n # find local min under a threshold\n start_time_candidates = find_local_min_times(rms, times, 0.005)\n start_times = select_start_times(start_time_candidates)\n # fig_name = 'rms-clip-{}.png'.format(f.split('/')[-1].split('.')[0])\n # draw_rms(times, rms, fig_name)\n return start_times\n\ndef format_time(sec):\n return '{:02d}:{:02d}'.format(int(sec // 60), int(sec % 60))\n\ndef format_time_intvls(intvls):\n intvls_minutes = []\n for intvl in intvls:\n intvls_minutes.append((format_time(intvl[0]), format_time(intvl[1])))\n return intvls_minutes\n\ndef format_time_list(times):\n times_minutes = []\n for time in times:\n times_minutes.append(format_time(time))\n return times_minutes\n\ndef gen_final_clips(applause_intvls, start_times):\n clips = []\n if len(start_times) == 0:\n return clips\n j = 0\n start, end = start_times[0], 0\n temp_list = []\n for i in range(1, len(start_times)):\n while j < len(applause_intvls) and \\\n applause_intvls[j][1] <= start_times[i]:\n temp_list.append(applause_intvls[j])\n j += 1\n if len(temp_list) > 0:\n # end = temp_list[-1][0]\n end = temp_list[0][0]\n clips.append((start, end))\n start = start_times[i]\n temp_list = []\n clips.append((start, applause_intvls[-1][0]))\n return clips\n\ndef dump_intvls(intvls, f):\n filename = 'clips-{}.p'.format(f.split('/')[-1].split('.')[0])\n pickle.dump(intvls, open('results/{}'.format(filename), 'wb'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--audio_file')\n # parser.add_argument('--audio_folder')\n args = parser.parse_args()\n files = [args.audio_file]\n # audio_root = args.audio_folder\n # files = [audio_root + filename for filename in os.listdir(audio_root) \\\n # if os.path.isfile(audio_root + filename)]\n\n model = load_model('models/applause-model.h5')\n\n for f in files:\n print(f'file: {f}')\n y, sr, feats = extract_features(f)\n all_features = np.array(get_feats_with_wondow(feats, 5))\n preds = model.predict_proba(all_features, batch_size=256)\n smooth_preds = pd.Series(np.transpose(preds)[0]).rolling(5).mean()[4:]\n frame_rate = len(preds) / (float(len(y)) / sr) # preds_per_second\n applause_intvls = get_applause_instances(smooth_preds, frame_rate)\n # print('\\napplause intvls:', applause_intvls)\n applause_intvls_formatted = format_time_intvls(applause_intvls)\n print('\\napplause 
intvls:', applause_intvls_formatted)\n applause_intvls = combine_consecutive_intvls(applause_intvls)\n # print('\\ncombined applause intvls:', applause_intvls)\n applause_intvls_formatted = format_time_intvls(applause_intvls)\n print('\\ncombined applause intvls:', applause_intvls_formatted)\n start_times = detect_start_times(y, f)\n # print('\\nstart times:', start_times)\n start_times_formatted = format_time_list(start_times)\n print('\\nstart times:', start_times_formatted)\n clips = gen_final_clips(applause_intvls, start_times)\n print('\\nfinal clips:', clips)\n clips_formatted = format_time_intvls(clips)\n print('\\nfinal clips:', clips_formatted)\n dump_intvls(clips, f)" }, { "alpha_fraction": 0.6042196750640869, "alphanum_fraction": 0.6269639134407043, "avg_line_length": 39.75, "blob_id": "c99a0982eea51d54dba1ebf463293d011462d398", "content_id": "0d278addb7a757c6c04fdf7dbdd9dbf85cc86fe5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6683, "license_type": "no_license", "max_line_length": 126, "num_lines": 164, "path": "/classification/dl.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import sys\nimport argparse\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Masking, Conv2D, MaxPooling2D, Flatten\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nsys.path.append(\"..\")\nfrom clipping.audio2mfcc import AudioDataset\n\nMFCC_SIZE = 13\nMAX_LEN = 130\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='mfcc')\n parser.add_argument('--model_name', type=str, default='CNN')\n parser.add_argument('--label', type=str, default='dis_flag')\n args = parser.parse_args()\n return args\n\ndef read_data(load_exist, mode):\n audio_dir = '../data/complete_audio/'\n audio_files = ['berrettini_nadal', 'cilic_nadal', 'federer_dimitrov']#, 'zverev_thiem-2020']\n label_dir = '../data/label/'\n print(\"Mode: {} Model: {} Label:{} Data: {}\".format(args.mode, args.model_name, args.label, audio_files))\n\n if(load_exist == False):\n all_audio = []\n all_dis_flag = []\n l_map = {}\n for audio_file in audio_files:\n dataset = AudioDataset(audio_dir, label_dir, audio_file, mode)\n for i in range(len(dataset)):\n if(mode == \"mfcc\" or mode == \"mfcc-delta\" or mode==\"mel\"):\n zeros = np.zeros((dataset[i]['audio'].shape[0], MAX_LEN-dataset[i]['audio'].shape[1]))\n all_audio.append(np.concatenate((dataset[i]['audio'], zeros), axis=1))\n all_dis_flag.append(dataset[i][args.label])\n else:\n all_audio.append(dataset[i]['audio'])\n all_dis_flag.append(dataset[i][args.label])\n # print(dataset[i]['audio'].shape)\n # if dataset[i]['audio'].shape[1] not in l_map:\n # l_map[dataset[i]['audio'].shape[1]] = 1\n # else:\n # l_map[dataset[i]['audio'].shape[1]] += 1\n # print(l_map)\n all_audio = np.asarray(all_audio)\n all_dis_flag = np.asarray(all_dis_flag)\n print(\"Complete reading data\")\n return all_audio, all_dis_flag\n\ndef create_nn_model():\n model = Sequential()\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(1e-4), metrics=['accuracy'])\n print(\"Complete creating nn model\")\n return model\n\ndef create_cnn_model():\n 
model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(MFCC_SIZE, MAX_LEN, 1)))\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(Flatten()) \n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(1e-4), metrics=['accuracy'])\n return model\n\ndef create_rnn_model():\n model = Sequential()\n model.add(\n Embedding(input_dim = MFCC_SIZE,\n input_length = 1690,\n output_dim = 100,\n trainable=True))\n model.add(LSTM(64, return_sequences=False, dropout=0.1, recurrent_dropout=0.1))\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return model\n\ndef train_cnn(model, all_audio, all_dis_flag, model_name):\n train_x, test_x, train_y, test_y = train_test_split(all_audio, all_dis_flag, test_size=0.2, shuffle= True, random_state=0)\n train_x = np.expand_dims(train_x, axis=3)\n test_x = np.expand_dims(test_x, axis=3)\n train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size=0.1, shuffle= True, random_state=0)\n epochs = 500\n callbacks = [\n keras.callbacks.ModelCheckpoint(\"checkpoint/cnn_{epoch}.h5\", monitor='val_accuracy', save_best_only=True),\n keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10, restore_best_weights=True)\n ]\n\n model.fit(train_x, train_y, batch_size=4, epochs=epochs, callbacks=callbacks, \n validation_data=(val_x, val_y), shuffle=True)\n print(\"Guess all 0 on test set accuracy: {:.4f}\".format(1-(sum(test_y)/test_y.shape[0])))\n print(\"cnn on val set best accuracy:\")\n model.evaluate(test_x, test_y)\n pred_y = model.predict(test_x)\n pred_y = pred_y.flatten()\n pred_y = pred_y > 0.5\n print(classification_report(pred_y, test_y))\n \ndef train_nn(model, all_audio, all_dis_flag, model_name):\n train_x, test_x, train_y, test_y = train_test_split(all_audio, all_dis_flag, test_size=0.2, shuffle= True, random_state=0)\n train_x = train_x.reshape(148, -1)\n test_x = test_x.reshape(37, -1)\n train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size=0.1, shuffle= True, random_state=0)\n epochs = 500\n callbacks = [\n keras.callbacks.ModelCheckpoint(\"checkpoint/nn_{epoch}.h5\", monitor='val_accuracy', save_best_only=True),\n keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10, restore_best_weights=True)\n ]\n\n model.fit(train_x, train_y, batch_size=4, epochs=epochs, callbacks=callbacks, \n validation_data=(val_x, val_y), shuffle=True)\n print(\"Guess all 0 on test set accuracy: {:.4f}\".format(1-(sum(test_y)/test_y.shape[0])))\n print(\"nn on val set best accuracy:\")\n model.evaluate(test_x, test_y)\n pred_y = model.predict(test_x)\n pred_y = pred_y.flatten()\n pred_y = pred_y > 0.5\n print(classification_report(pred_y, test_y))\n\nargs = parse()\nif(args.mode == \"mfcc\"):\n MFCC_SIZE = 13\n MAX_LEN = 130\nelif(args.mode == \"mfcc-4sec\"):\n MFCC_SIZE = 13\n MAX_LEN = 173\nelif(args.mode == \"mfcc-delta\"):\n MFCC_SIZE = 26\n MAX_LEN = 173\nelif(args.mode == \"mfcc-avg\"):\n MFCC_SIZE = 13\n MAX_LEN = 1\nelif(args.mode == \"lfcc-4sec\"):\n MFCC_SIZE = 13\n MAX_LEN = 173\nelif(args.mode == \"mfcc-lfcc-4sec\"):\n MFCC_SIZE = 26\n MAX_LEN = 173\nelse:\n MFCC_SIZE = 128\n MAX_LEN = 130\n\nall_audio, all_dis_flag = read_data(False, args.mode)\nif(args.model_name == 
\"CNN\"):\n model = create_cnn_model()\n train_cnn(model, all_audio, all_dis_flag, args.model_name)\nelif(args.model_name == \"NN\"):\n model = create_nn_model()\n train_nn(model, all_audio, all_dis_flag, args.model_name)\nelif(args.model_name == \"RNN\"):\n model = create_rnn_model()\n train_nn(model, all_audio, all_dis_flag, args.model_name)\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 7.125, "blob_id": "6aff343efcb095dea3352a1973e9de2faa043fea", "content_id": "4874f832fd905733d52a7faf8386dc7044fcc6a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 64, "license_type": "no_license", "max_line_length": 12, "num_lines": 8, "path": "/requirements.txt", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "tensorflow\nkeras\nsklearn\nlibrosa\nnumpy\npandas\nspafe\nscikit-learn" }, { "alpha_fraction": 0.5853551626205444, "alphanum_fraction": 0.6015300750732422, "avg_line_length": 34.75, "blob_id": "fe3c6456918540d87cf43803a56a0158d4673e8b", "content_id": "4fabb5752d8b852446ec3af6b377cbe59203ef2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4575, "license_type": "no_license", "max_line_length": 87, "num_lines": 128, "path": "/clipping/audio2mfcc.py", "repo_name": "chipyaya/Tennis-Match-Audio-Signal-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport librosa\nimport argparse\nimport textwrap\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport librosa.display\nfrom torch.utils.data import Dataset\nfrom argparse import RawTextHelpFormatter\nfrom spafe.features.lfcc import lfcc\n\n\nclass AudioDataset(Dataset):\n def __init__(self, audio_dir, label_dir, audio_file, mode):\n self.audio_dir = audio_dir\n self.label_dir = label_dir\n self.audio_file = audio_file\n self.mode = mode\n self.load_csv()\n\n def load_csv(self):\n audio_labels_list = pd.read_csv(self.label_dir+self.audio_file+'.csv',\n header=None, sep='\\t', skiprows=1)\n self.audio_labels_list = audio_labels_list\n\n def __len__(self):\n return len(self.audio_labels_list)\n\n def __getitem__(self, idx):\n audio = extract_features(self.audio_dir+self.audio_file+'.wav',\n self.audio_labels_list[1][idx], self.audio_labels_list[2][idx],\n self.mode)\n\n player_flag = self.audio_labels_list[0][idx]\n hand_flag = self.audio_labels_list[3][idx]\n dis_flag = self.audio_labels_list[4][idx]\n serve_flag = self.audio_labels_list[5][idx]\n\n return {\"audio\": audio, \"player_flag\": player_flag,\n \"hand_flag\": hand_flag, \"dis_flag\": dis_flag, \"serve_flag\": serve_flag}\n\ndef extract_features(f, start, end, mode):\n start = sum(x * int(t) for x, t in zip([60, 1], start.split(\":\")))\n end = sum(x * int(t) for x, t in zip([60, 1], end.split(\":\")))\n d = 2\n if mode == 'mfcc-avg' or mode == 'mfcc' or mode == 'mfcc-delta':\n y, sr = librosa.load(f, offset=start, duration=end-start+1)\n mfccs = librosa.feature.mfcc(y, n_mfcc=13)\n if mode == 'mfcc-avg':\n return np.mean(mfccs, axis=1)\n elif mode == 'mfcc-delta':\n delta = librosa.feature.delta(mfccs)\n return np.vstack([mfccs, delta])\n else:\n return mfccs\n elif mode == 'mel':\n y, sr = librosa.load(f, offset=start, duration=end-start+1)\n s = librosa.feature.melspectrogram(y, sr=sr)\n s = librosa.amplitude_to_db(s)\n # s = librosa.power_to_db(s)\n s = s.astype(np.float32)\n # plot_mel_spectrogram(s)\n return s\n elif mode == 'mfcc-4sec':\n y, sr = 
librosa.load(f, offset=max(0, end-d), duration=2*d)\n mfccs = librosa.feature.mfcc(y, n_mfcc=13)\n # plot_mfcc(mfccs)\n return mfccs\n elif mode == 'lfcc-4sec':\n y, sr = librosa.load(f, offset=max(0, end-d), duration=2*d)\n lfccs = np.swapaxes(lfcc(y, fs=50400, num_ceps=13), 0, 1)\n return lfccs\n elif mode == 'mfcc-lfcc-4sec':\n y, sr = librosa.load(f, offset=max(0, end-d), duration=2*d)\n mfccs = librosa.feature.mfcc(y, n_mfcc=13)\n lfccs = np.swapaxes(lfcc(y, fs=50400, num_ceps=13), 0, 1)\n return np.vstack([mfccs, lfccs])\n else:\n raise NotImplementedError\n\ndef plot_mfcc(mfcc):\n fig, ax = plt.subplots()\n img = librosa.display.specshow(mfcc, x_axis='time', ax=ax)\n fig.colorbar(img, ax=ax)\n ax.set(title='MFCC (n_mfcc=13)')\n plt.savefig('../img/mfcc.png')\n print('saved')\n input()\n\ndef plot_mel_spectrogram(s):\n plt.figure(figsize=(10, 4))\n librosa.display.specshow(s,\n y_axis='mel', fmax=8000, x_axis='time')\n plt.colorbar(format='%+2.0f dB')\n plt.title('Mel spectrogram')\n plt.savefig('../img/mel-spectrogram.png')\n print('saved')\n input()\n\ndef parse_arg():\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)\n parser.add_argument('--mode', type=str, default='mfcc-avg',\n help=textwrap.dedent('''\\\n mfcc: use original mfcc;\n mfcc-avg: taking average of mfcc features;\n mfcc-4sec: use 4sec mfcc;\n mfcc-delta: use pure mfcc plus delta features;\n lfcc-4sec: use 4sec lfcc;\n mfcc-lfcc-4sec: use 4sec mfcc plus lfcc;\n mel: use melspectrogram;''')\n )\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_arg()\n audio_dir = '../data/complete_audio/'\n audio_files = ['berrettini_nadal', 'cilic_nadal', 'federer_dimitrov']\n label_dir = '../data/label/'\n\n datasets = []\n for audio_file in audio_files:\n dataset = AudioDataset(audio_dir, label_dir, audio_file, args.mode)\n print(audio_file)\n print(dataset[0]['audio'].shape, dataset[0]['player_flag'],\n dataset[0]['hand_flag'], dataset[0]['dis_flag'], dataset[0]['serve_flag'])\n datasets.append(dataset)" } ]
num_files: 16
repo_name: GitSujal/Sentence-Generation-with-RNN
repo_url: https://github.com/GitSujal/Sentence-Generation-with-RNN
snapshot_id: 9d285d83893c89e81e551bd3d0b276595ae566ab
revision_id: e3c8b87b8f55659ce7f63e9e363f1777fccfac13
directory_id: 36c57834cf7546bfdeee88c035f63f0a7a9a634f
branch_name: refs/heads/master
visit_date: 2020-04-24T13:12:30.514503
revision_date: 2019-02-22T02:19:45
committer_date: 2019-02-22T02:19:45
github_id: 171,979,493
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.6905560493469238, "alphanum_fraction": 0.702647864818573, "avg_line_length": 36, "blob_id": "598105808d121d339072a4f7214de032ba831a6d", "content_id": "c93e327b37b23b970f3d43cbccb9b9135dc60c0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11330, "license_type": "no_license", "max_line_length": 303, "num_lines": 306, "path": "/word_lm.py", "repo_name": "GitSujal/Sentence-Generation-with-RNN", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nimport numpy as np \nimport reader\nimport time\nimport random\n\n\n\nclass PTBInput(object):\n\t\"\"\"Input Data\"\"\"\n\tdef __init__(self, config, raw_data = None, name = None):\n\t\tself.batch_size = batch_size = config.batch_size\n\t\tself.num_steps = num_steps = config.num_steps\n\t\tif raw_data is not None:\n\t\t\tself.epoch_size = ((len(raw_data) // batch_size) -1)\t// num_steps\n\t\t\tself.input_data, self.targets = reader.inputProducer(raw_data, batch_size, num_steps, name = name)\n\nclass PTBModel(object):\n\t\"\"\"PTB Model\"\"\"\n\tdef __init__(self, is_training, config, input_):\n\t\tself._is_training = is_training\n\t\tself._input = input_\n\t\tself.batch_size = input_.batch_size\n\t\tself.num_steps = input_.num_steps\n\t\tsize = config.hidden_size\n\t\tvocab_size = config.vocab_size\n\n\t\t#Initialize one-hot encoding matrix\n\t\tembedding = tf.get_variable(\"embedding\", [vocab_size, size], dtype = tf.float32)\n\t\t#input_data is batch_size X num_steps per iteration till epoch_size\n\t\t#inputs is of size batch_size X num_steps X hidden_size \n\t\tinputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n\t\tif is_training and config.keep_prob < 1:\n\t\t\tinputs = tf.nn.dropout(inputs, config.keep_prob)\n\n\t\t#Ouput is of shape [batch_size X size]\n\t\toutput, state = self._build_rnn_graph_lstm(inputs, config, is_training)\n\n\t\tsoftmax_w = tf.get_variable(\"softmax_w\", [size, vocab_size], dtype=tf.float32)\n\t\tsoftmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=tf.float32)\n\t\tlogits = tf.matmul(output, softmax_w) + softmax_b\n \n\t\t# Reshape logits to be a 3-D tensor for sequence loss\n\t\tlogits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])\n\n\t\tself._logits = logits\n\t\tself._output_probs = tf.nn.softmax(logits) \n\n\t\tloss = tf.contrib.seq2seq.sequence_loss(logits,input_.targets,tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),average_across_timesteps=False,average_across_batch=True)\n\n\t\t# Update the cost\n\t\tself._cost = cost = tf.reduce_sum(loss)\n\t\tself._final_state = state\n\n\t\tif not is_training:\n\t\t\treturn\n\n\t\ttvars = tf.trainable_variables()\n\t\tgrads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm)\n\n\t\toptimizer = tf.train.GradientDescentOptimizer(config.learning_rate)\n\t\tself._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step = tf.train.get_or_create_global_step())\n\n\n\tdef _get_lstm_cell(self, config, is_training):\n\t\treturn tf.contrib.rnn.BasicLSTMCell(config.hidden_size, forget_bias=0.0, state_is_tuple=True,reuse=not is_training)\n\n\tdef _build_rnn_graph_lstm(self, inputs, config, is_training):\n\t\tdef make_cell():\n\t\t\tcell = self._get_lstm_cell(config, is_training)\n\t\t\t#Using dropout\n\t\t\tif is_training and config.keep_prob < 1:\n\t\t\t\tcell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = config.keep_prob)\n\t\t\treturn cell\n\n\t\t#Multilayer RNN\n\t\tcell = 
tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(config.num_layers)], state_is_tuple = True)\n\n\t\t#With state_is_tuple set to True, hidden layer consisting of cell and hidden-to-output states is represented by tuple (c,h)\n\t\t#So initial state has size num_layers X [batch_size X (h,c))*size]\n\t\t#With state_is_tuple set to false, initial state is represented by \n\t\t#a concatenated matrix of shape [batch_size, num_layers * (h,c) * size]\n\n\t\tself._initial_state = cell.zero_state(config.batch_size, tf.float32)\n\t\tself._prime_initial_state = cell.zero_state(config.batch_size, tf.float32)\t\n\n\t\tstate = self._initial_state\n\t\tstartCharTensor = tf.constant(value = config.startCharID, dtype = tf.int32, shape = [config.batch_size])\n\n\t\t#Outputs is a tensor of shape [batch_size X num_steps X size]\n\t\t#state is LSTM Tuple of shape [batch_size X size] for a sequence of hidden layers\n\t\t#Output is of shape [batch_size X num_steps] rows and [size] columns\n\t\t#Weight shared across all time steps (softmax) is operated on batch_size *num_steps character vectors\n\t\t#logits is of shape [batch_size * num_steps vocab_size]\n\t\tinitMatrix = tf.constant(value = 0.05, shape = [config.batch_size, config.hidden_size], dtype = tf.float32)\n\t\tinitCell = tf.contrib.rnn.LSTMStateTuple(c = initMatrix, h = initMatrix)\n\t\tinitMultiCell = tuple(initCell for i in range(config.num_layers))\n\t\tself._prime_initial_state = initMultiCell\n\n\t\toutputs = []\n\t\twith tf.variable_scope(\"RNN\"):\n\t\t\tfor time_step in range(self.num_steps):\n\t\t\t\tif time_step > 0: tf.get_variable_scope().reuse_variables()\n\n\t\t\t\tstartCharMatchTensor = tf.reshape(tf.cast(tf.equal(startCharTensor, self._input.input_data[:, time_step]), tf.float32), shape = [config.batch_size, 1])\n\t\t\t\tstartCharMismatchTensor = tf.reshape(tf.cast(tf.not_equal(startCharTensor, self._input.input_data[:, time_step]), tf.float32), shape = [config.batch_size, 1])\n\t\t\t\tstate = tuple((tf.add(tf.multiply(self._prime_initial_state[i].c, startCharMatchTensor), tf.multiply(state[i].c, startCharMismatchTensor)), tf.add(tf.multiply(self._prime_initial_state[i].h, startCharMatchTensor), tf.multiply(state[i].h, startCharMismatchTensor))) for i in range(config.num_layers))\n\n\t\t\t\t(cell_output, state) = cell(inputs[:, time_step, :], state)\n\t\t\t\toutputs.append(cell_output)\n\t\toutput = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n\t\treturn output, state\n\n\n\t\t\"\"\"\n\t\t# Simplified Version\n\t\tinputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n\t\toutputs, state = tf.contrib.rnn.static_rnn(cell, inputs,initial_state=self._initial_state)\n\t\toutput = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n\t\treturn output, state\n\t\t\"\"\"\n\n\t@property\n\tdef input(self):\n\t\treturn self._input\n\n\t@property\n\tdef logits(self):\n\t\treturn self._logits\n\n\t@property\n\tdef train_op(self):\n\t\treturn self._train_op\n\n\t@property\n\tdef cost(self):\n\t\treturn self._cost\n\n\t@property\n\tdef output_probs(self):\n\t\treturn self._output_probs\n\n\t@property\n\tdef final_state(self):\n\t\treturn self._final_state\n\n\t@property\n\tdef initial_state(self):\n\t\treturn self._initial_state\n\n\ndef run_epoch(session, model, generate_model, corpus, eval_op=None, verbose = False):\n\t\"\"\"\n\tRuns the model on the given data\n\t\"\"\"\n\tstart_time = time.time()\n\tcosts = 0.0\n\titers = 0\n\n\tstate = session.run(model.initial_state)\n\tfor step in 
range(model.input.epoch_size):\n\t\tcost, state, _ = session.run([model.cost, model.final_state, model.train_op], {model.initial_state: state})\n\n\t\tcosts += cost\n\t\titers += model.input.num_steps\n\n\t\tif verbose and step % (model.input.epoch_size // 10) == 10:\n\t\t\tprint(\"%.3f perplexity: %.3f speed: %.0f wps\" % ( step * 1.0 / model.input.epoch_size, np.exp(costs / iters), iters * model.input.batch_size / (time.time() - start_time)))\n\t\t\tprint(GenerateSentence(session, generate_model, corpus))\n\n\treturn np.exp(costs/iters)\n\ndef sample(a, temperature=1.0):\n\ta = np.log(a) / temperature\n\ta = np.exp(a) / np.sum(np.exp(a))\n\tr = random.random() # range: [0,1)\n\ttotal = 0.0\n\tfor i in range(len(a)):\n\t\ttotal += a[i]\n\t\tif total>r:\n\t\t\treturn i\n\treturn len(a)-1 \n\ndef GenerateSentence(session, model, corpus, verbose = False):\n\tid_to_char = corpus.id_to_char\n\tstartCharID = corpus.char_to_id[corpus.startChar]\n\tstopCharID = corpus.char_to_id[corpus.stopChar]\n\n\tstate = session.run(model.initial_state)\n\t_input = np.matrix([[startCharID]])\n\tbatchItr = 0\n\tbatchSize = 500\n\ttext = \"\"\n\t\n\twhile batchItr < batchSize:\n\t\toutput_probs, state = session.run([model.output_probs, model.final_state], {model.input.input_data : _input, model.initial_state:state})\n\t\t#primaryIndex = np.argpartition(output_probs[0][0], -10)[-10:]\n\t\t#x = random.choice(primaryIndex)\n\t\tx = sample(output_probs[0][0], 0.8)\t\t\n\t\t_input = np.matrix([[x]])\n\t\tif x == stopCharID:\n\t\t\ttext += '\\n'\n\t\telse:\n\t\t\ttext += id_to_char[x] + ' '\n\t\tbatchItr += 1\n\treturn text\n\n\nclass TrainConfig(object):\n\tinit_scale = 0.01\n\tlearning_rate = 0.50\n\tvocab_size = 214\n\tmax_grad_norm = 5\n\thidden_size = 250\n\tkeep_prob = 0.5\n\tbatch_size = 20\n\tnum_steps = 40\n\tnum_layers = 2\n\tmax_max_epoch = 2\n\tstartCharID = 0\n\tstopCharID = 0\n\nclass GenerateConfig(object):\n\tinit_scale = 0.01\n\tlearning_rate = 0.50\n\tmax_grad_norm = 5\n\tvocab_size = 214\n\tkeep_prob = 1.0\n\thidden_size = 250\n\tbatch_size = 1\n\tnum_steps = 1\n\tnum_layers = 2\n\tstartCharID = 0\n\tstopCharID = 0\n\n\ndef main(_):\n\tprint(\"Start\")\n\tprint(\"Preparing Corpus\")\n\tcorpus = reader.Corpus()\n\tstartCharID = corpus.char_to_id[corpus.startChar]\n\tstopCharID = corpus.char_to_id[corpus.stopChar]\n\n\tprint(\"Getting Configurations\")\n\ttrain_config = TrainConfig()\n\ttrain_config.vocab_size = corpus.vocab_size\n\ttrain_config.startCharID = startCharID\n\ttrain_config.stopCharID = stopCharID\n\tgenerate_config = GenerateConfig()\n\tgenerate_config.vocab_size = corpus.vocab_size\n\tgenerate_config.startCharID = startCharID\n\tgenerate_config.stopCharID = stopCharID\n\n\tprint(train_config.vocab_size)\n\tprint(train_config.startCharID)\n\tprint(train_config.stopCharID)\n\n\n\tprint(\"Setting up Graph\")\n\twith tf.Graph().as_default():\n\t \t#initializer = tf.random_uniform_initializer(-train_config.init_scale, train_config.init_scale)\n\t \tinitializer = tf.contrib.layers.xavier_initializer()\n\t \tprint(\"Train\")\n\t \twith tf.name_scope(\"Train\"):\n\t \t\ttrain_input = PTBInput(config = train_config, raw_data = corpus.train_set, name = \"TrainInput\")\n\t \t\twith tf.variable_scope(\"Model\", reuse = None, initializer= initializer):\n\t \t\t\ttrain_model = PTBModel(is_training = True, config = train_config, input_=train_input)\n\t \t\ttf.summary.scalar(\"Training Loss\", train_model.cost)\n\n\t \twith tf.name_scope(\"Valid\"):\n\t \t\tvalid_input = PTBInput(config = 
train_config, raw_data = corpus.valid_set, name = \"ValidInput\")\n\t \t\twith tf.variable_scope(\"Model\", reuse = True, initializer = initializer):\n\t \t\t\tvalid_model = PTBModel(is_training = False, config = train_config, input_=valid_input)\n\t \t\ttf.summary.scalar(\"Validation Loss\", valid_model.cost)\n\n\t \twith tf.name_scope(\"Test\"):\n\t \t\ttest_input = PTBInput(config = generate_config, raw_data = corpus.test_set, name = \"TestInput\")\n\t \t\twith tf.variable_scope(\"Model\", reuse = True, initializer = initializer):\n\t \t\t\ttest_model = PTBModel(is_training = False, config = generate_config, input_ = test_input)\n\n\t \twith tf.name_scope(\"Generate\"):\n\t \t\tgenerate_input = PTBInput(config = generate_config, raw_data = corpus.test_set, name = \"GenerateInput\")\n\t \t\twith tf.variable_scope(\"Model\", reuse = True, initializer = initializer):\n\t \t\t\tgenerate_model = PTBModel(is_training = False, config = generate_config, input_ = generate_input) \n\n\t \tmodels = {\"Train\":train_model, \"Valid\":valid_model, \"Test\":test_model, \"Generate\":generate_model}\n\t \tprint(\"Executing Graph\")\n\t \twith tf.Session() as sess:\n\t \t\tsaver = tf.train.Saver()\n\t \t\tcoord = tf.train.Coordinator()\n\t \t\tthreads = tf.train.start_queue_runners(sess = sess, coord = coord)\n\t \t\tsess.run(tf.global_variables_initializer())\n\t \t\tsaver.restore(sess, 'model/savedModelValidL2H250N40-1000')\n\t \t\tfor i in range(train_config.max_max_epoch):\n\t \t\t\ttrain_perplexity = run_epoch(session = sess, model = train_model, generate_model = generate_model, corpus = corpus, eval_op = train_model.train_op, verbose = True)\n\t \t\t\tprint(\"Epoch %d Train perplexity %.3f\" % (i+1, train_perplexity))\n\t \t\t\tgenDoc = GenerateSentence(session=sess, model=generate_model, corpus=corpus, verbose = False)\n\t\t \t\tprint(genDoc)\n\t\t \t\tsaver.save(sess, \"model/savedModelValidL2H250N40\", global_step = 1000) \n\t\t \t\t\n\t \t\tcoord.request_stop()\n\t \t\tcoord.join(threads)\n\nif __name__ == \"__main__\":\n\ttf.app.run()\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7240704298019409, "alphanum_fraction": 0.7240704298019409, "avg_line_length": 31, "blob_id": "5cb105639841c4afe295762f17c0d79ac0fc043d", "content_id": "f3f376a01b1338d2a0072feb6728fdbdd1e3212c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 516, "license_type": "no_license", "max_line_length": 78, "num_lines": 16, "path": "/kantipur.py", "repo_name": "GitSujal/Sentence-Generation-with-RNN", "src_encoding": "UTF-8", "text": "import os\nimport collections\n#import numpy as np \n\n\nwith open(\"kantipur_samachar_valid.txt\", 'r') as f:\n\tdocument = f.read()\nstopWord = '†'\nstartWord = 'Å'\n\nexpungeString = startWord + stopWord + '\\n'\ncleanDocument = ''.join( c for c in document if c not in expungeString)\n#cleanDocument = startWord + cleanDocument.replace('\\n', stopWord + startWord)\ncleanDocument = startWord + cleanDocument.replace('।', stopWord + startWord)\nwith open(\"kantipur_samachar_valid_clean.txt\", 'w') as f:\n\tf.write(cleanDocument)" }, { "alpha_fraction": 0.7066345810890198, "alphanum_fraction": 0.7132540941238403, "avg_line_length": 33.05641174316406, "blob_id": "2ca6db73915e47bf0954b8e7f8953204ebec0a9b", "content_id": "d3ebcec4c9fe0e7926505351d8b8c86ecb21bfb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6650, "license_type": "no_license", "max_line_length": 117, "num_lines": 195, 
"path": "/reader.py", "repo_name": "GitSujal/Sentence-Generation-with-RNN", "src_encoding": "UTF-8", "text": "\"\"\"\n\tDependencies: Python3\n\tUnresolved Issues:\n\t\t1. Check when Corpus fails to complete processing\n\t\t\tERROR:filename not found breaks the code further down the line\n\t\t\tSafely exit\n\n\"\"\"\n\n\nimport os\nimport tensorflow as tf \nimport numpy as np \nimport collections\n\n\n\nclass Corpus():\n\tdef __init__(self, preprocessed=True):\n\t\ttrain_filename = \"data/kantipur_samachar_train_clean.txt\"\n\t\tvaild_filename = \"data/kantipur_samachar_valid_clean.txt\"\n\t\ttest_filename = \"data/kantipur_samachar_test_clean.txt\"\n\n\t\tclean_train_data = self._readCorpus(filename = train_filename, preprocessed = preprocessed)\n\t\tclean_vaild_data = self._readCorpus(filename = vaild_filename, preprocessed = preprocessed)\n\t\tclean_test_data = self._readCorpus(filename = test_filename, preprocessed = preprocessed)\n\n\t\tchar_to_id = self._build_vocab(clean_train_data)\n\n\t\tself._train_set = self._file_to_ids(clean_train_data, char_to_id)\n\t\tself._validataion_set= self._file_to_ids(clean_vaild_data, char_to_id)\n\t\tself._test_set = self._file_to_ids(clean_test_data, char_to_id)\n\n\t\tself._vocab_size = len(char_to_id)\n\t\tself._char_to_id = char_to_id\n\n\n\n\n\tdef _readCorpus(self, filename, preprocessed):\n\t\t\"\"\"\n\t\t1. Read corpus file and do the following preprocessing tasks\n\t\t1.1 If the document is preprocessed i.e. start and stop Char has already been added,\n\t\t\tno changes need to be made on the document itself. Else, follow the folowing steps\n\t\t1.2 Chosen Start and Stop Character, which marks range of document with single context,\n\t\t\tmust be chosen such that it is infrequent \n\t\t1.3 Remove all previous instances of startChar and stopChar from the document\n\t\t1.4 It is assumed that each story or context sections within the document should be separated by newline character.\n\t\t\tIf it's not replace the 'separatorChar' variable, but it should be a character\n\t\t1.5 Replace 'sepratorChar' with startChar and stopChar to mark range of each story within the document\n\t\t1.6 If custom preprocessing needs to be done, add it in the _customPreprocess(_) function\n\t\t\"\"\"\n\t\t\n\t\tself._startChar = startChar ='Å'\n\t\tself._stopChar = stopChar = '†'\n\t\tseparatorChar = '\\n'\n\n\t\tcleanDocument = \"\"\n\t\tif preprocessed == False:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\twith open(filename, 'r') as f:\n\t\t\t\t\tdocument = f.read()\n\t\t\t\tcleanDocument = self._customPreprocess(cleanDocument)\n\t\t\t\texpungeString = startChar + stopChar\n\t\t\t\tcleanDocument = ''.join( c for c in document if c not in expungeString)\n\t\t\t\tcleanDocument = startChar + cleanDocument.replace(separatorChar, stopChar + startChar)\n\t\t\telse:\n\t\t\t\tprint(\"ERROR: \", filename, \" File doesn't exist\")\n\t\t\t\treturn \n\t\telse:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\twith open(filename, 'r') as f:\n\t\t\t\t\tcleanDocument = f.read()\n\t\t\t\t#cleanDocument = self._customPreprocess(cleanDocument)\n\t\t\telse:\n\t\t\t\tprint(\"ERROR: \", filename, \" File doesn't exist\")\n\t\t\t\treturn \n\t\treturn cleanDocument\n\n\n\tdef _build_vocab(self, cleanDocument):\n\t\t\"\"\"\n\t\t2. 
Read cleanDocument and do the following processing tasks\n\t\t2.1 Prepare list of distinct characters \n\t\t2.2 Map each character to its ID\n\t\t\"\"\"\n\n\t\t#Get Frequency Distribution of characters in the document and sort them in descending order of Frequency\n\t\tcounter = collections.Counter(cleanDocument)\n\t\tcount_pairs = sorted(counter.items(), key = lambda x: (-x[1], x[0]))\n\n\t\t#Get list of characters\n\t\tcharacters, _ = list(zip(*count_pairs))\n\t\tself._id_to_char = characters\n\t\t#Get Mapping from character to ID\n\t\tchar_to_id = dict(zip(characters, range(len(characters))))\n\t\treturn char_to_id\n\n\n\tdef _file_to_ids(self, cleanDocument, char_to_id):\n\t\t#Get Mapping from character to ID and assign integer ID to each character\n\t\treturn [char_to_id[c] for c in cleanDocument if c in char_to_id]\n\n\n\n\tdef _customPreprocess(self, cleanDoc):\n\t\t\"\"\" \n\t\tAdd additional code to change cleanDoc Here \n\t\tFollowing changes are made prior to adding start and stop delimiters\n\t\t\"\"\"\n\t\treturn cleanDoc\n\n\n\t@property\n\tdef startChar(self):\n\t\treturn self._startChar\n\n\t@property\n\tdef stopChar(self):\n\t\treturn self._stopChar\n\n\t@property\n\tdef vocab_size(self):\n\t\treturn self._vocab_size\n\n\t@property\n\tdef char_to_id(self):\n\t\treturn self._char_to_id\n\n\t@property\n\tdef id_to_char(self):\n\t\treturn self._id_to_char\n\n\t@property\n\tdef train_set(self):\n\t\treturn self._train_set\n\n\t@property\n\tdef valid_set(self):\n\t\treturn self._validataion_set\n\n\t@property\n\tdef test_set(self):\n\t\treturn self._test_set\n\n\n\ndef inputProducer(raw_data, batch_size, num_steps, name = None):\n\t\"\"\"\n\t\tTake corpus, divide it into batches and dequeue it\n\t\traw_data: Clean Corpus with character replaced by ID\n\t\tbatch_size: Size of batch of input processed at once\n\t\tnum_steps: Number of timesteps over which backprop is done for RNN\n\t\tOn epoch_size iterations, a single epoch is finished i.e all inputs are passed through,\n\t\t\twhere each iteration feeds a matrix of shape [batch_size X num_steps]\n\t\tx,y : input and target pairs, target is input displaced to the right by a single timesteps\n\t\t\t\ti.e. one character in the future\n\t\"\"\"\n\n\twith tf.name_scope(name, \"inputProducer\", [raw_data, batch_size, num_steps]):\n\t\t#total count of characters represented as integers in range [0, vocab_size)\n\t\tdata_len = len(raw_data)\n\t\t#Divide total data in batch_size counts, where each division has batch_len number of characters\n\t\t#i.e. 
batch_size rows and batch_len columns, using integer division to remove leftovers\n\t\tbatch_len = data_len // batch_size\n\t\tdata = np.reshape(raw_data[0: batch_size * batch_len], [batch_size, batch_len])\n\t\t#Convert to tensor\n\t\tlabel_data = tf.convert_to_tensor(data, name= \"raw_data\", dtype = tf.int32)\n\n\t\tepoch_size = (batch_len -1) // num_steps\n\t\tassertion = tf.assert_positive(epoch_size, message = \"epoch_size == 0\")\n\t\twith tf.control_dependencies([assertion]):\n\t\t\tepoch_size = tf.identity(epoch_size, name = \"epoch_size\")\n\n\t\ti = tf.train.range_input_producer(epoch_size, shuffle = False).dequeue()\n\n\t\tx = tf.strided_slice(label_data, [0, i*num_steps], [batch_size, (i+1)*num_steps])\n\t\tx.set_shape([batch_size, num_steps])\n\t\ty = tf.strided_slice(label_data, [0, i*num_steps +1], [batch_size, (i+1)*num_steps +1])\n\t\ty.set_shape([batch_size, num_steps])\n\n\treturn x, y\n\n# corpus = Corpus()\n# _batch_size = 20\n# input_data, targets = inputProducer(raw_data = corpus.train_set, batch_size = _batch_size, num_steps = 15)\n# comp_num = tf.constant(value = corpus.char_to_id[corpus.startChar] ,dtype=tf.int32, shape = [_batch_size])\n# vec_p = tf.reshape(tf.cast(tf.equal(comp_num, input_data[:,0]), tf.float32),shape = [_batch_size, 1])\n\n# print(vec_p)\n# with tf.Session() as sess:\n# \tprint(\"Input Data\")\n# \tprint(sess.run(vec_p))\n# \tprint(\"StartCharID\")\n# \tprint(sess.run(comp_num))\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8068006038665771, "alphanum_fraction": 0.8083462119102478, "avg_line_length": 52.91666793823242, "blob_id": "3cf6bb7aaf1c89c0ede507ac2fad87fe02138547", "content_id": "e1701e51b03275556d908b150b5cb67a91661d3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 647, "license_type": "no_license", "max_line_length": 197, "num_lines": 12, "path": "/README.md", "repo_name": "GitSujal/Sentence-Generation-with-RNN", "src_encoding": "UTF-8", "text": "### Sentence Generation Using RNN\nThis is an unfinished work developed as a hobby project while getting acquainted with tensorflow and the RNN model.\nIt consists of modules for cleaning and processing the corpus. \nThe corpus is divided into three sets, namely a train set, a valid set and a test set. \nThe RNN is trained keeping a uniform scale of gradients (clipping by global norm).\nThe model is trained and, in each epoch, a sentence is generated.\nThis project was my introduction to RNNs and tensorflow and might contain sequences of programs from the tensorflow tutorial repository and other sources, modified to fit my personal implementation.\n\n### Dependencies \n* Python3\n* Numpy\n* Tensorflow\n" } ]
4
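The GenerateSentence routine in word_lm.py above draws each next character with the temperature-scaled sample function: log-probabilities are divided by a temperature before renormalization, then a character ID is picked by walking the cumulative distribution. A minimal standalone sketch of the same idea, using numpy's generator choice instead of the manual cumulative loop (the toy distribution below is invented for illustration):

import numpy as np

def sample_with_temperature(probs, temperature=1.0, rng=np.random.default_rng(0)):
    # Divide log-probabilities by the temperature, then renormalize (softmax)
    scaled = np.log(np.asarray(probs, dtype=np.float64)) / temperature
    rescaled = np.exp(scaled) / np.sum(np.exp(scaled))
    # Draw a single character ID from the rescaled distribution
    return int(rng.choice(len(rescaled), p=rescaled))

# Lower temperature sharpens the distribution toward the most likely character
print(sample_with_temperature([0.1, 0.2, 0.3, 0.4], temperature=0.8))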
XiaoZzai/AirSim
https://github.com/XiaoZzai/AirSim
ad13d4043b7fc05a1d5ade7c0278ce95759206c1
5e85a9c409112f654245ea29084dea5f2815d9b2
7deb5053ea15daf1e845a68d9140e821e4acb07c
refs/heads/master
2020-04-04T13:17:37.361668
2018-11-03T07:12:03
2018-11-03T07:12:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.605465829372406, "alphanum_fraction": 0.6382303833961487, "avg_line_length": 24.875, "blob_id": "92930e4ed8fb95d1d7c84a9f14d11f7435c869cd", "content_id": "3a619280fa48bc793f2dfee5ff58307f5a73d4e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6623, "license_type": "no_license", "max_line_length": 130, "num_lines": 256, "path": "/3D_path_finding/DDPG/drone_env.py", "repo_name": "XiaoZzai/AirSim", "src_encoding": "UTF-8", "text": "import AirSimClient\nimport time\nimport copy\nimport numpy as np\nfrom PIL import Image\nimport cv2\n\ngoal_threshold = 3\nnp.set_printoptions(precision=3, suppress=True)\nIMAGE_VIEW = True\n\nclass drone_env:\n\tdef __init__(self,start = [0,0,-5],aim = [32,38,-4]):\n\t\tself.start = np.array(start)\n\t\tself.aim = np.array(aim)\n\t\tself.client = AirSimClient.MultirotorClient()\n\t\tself.client.confirmConnection()\n\t\tself.client.enableApiControl(True)\n\t\tself.client.armDisarm(True)\n\t\tself.threshold = goal_threshold\n\t\t\n\tdef reset(self):\n\t\tself.client.reset()\n\t\tself.client.enableApiControl(True)\n\t\tself.client.armDisarm(True)\n\t\tself.client.moveToPosition(self.start.tolist()[0],self.start.tolist()[1],self.start.tolist()[2],5,max_wait_seconds = 10)\n\t\ttime.sleep(2)\n\t\t\n\t\t\n\tdef isDone(self):\n\t\tpos = self.client.getPosition()\n\t\tif distance(self.aim,pos) < self.threshold:\n\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef moveByDist(self,diff, forward = False):\n\t\ttemp = AirSimClient.YawMode()\n\t\ttemp.is_rate = not forward\n\t\tself.client.moveByVelocity(diff[0], diff[1], diff[2], 1 ,drivetrain = AirSimClient.DrivetrainType.ForwardOnly, yaw_mode = temp)\n\t\ttime.sleep(0.5)\n\t\t\n\t\treturn 0\n\t\t\n\tdef render(self,extra1 = \"\",extra2 = \"\"):\n\t\tpos = v2t(self.client.getPosition())\n\t\tgoal = distance(self.aim,pos)\n\t\tprint (extra1,\"distance:\",int(goal),\"position:\",pos.astype(\"int\"),extra2)\n\t\t\n\tdef help(self):\n\t\tprint (\"drone simulation environment\")\n\t\t\n\t\t\n#-------------------------------------------------------\n# grid world\n\t\t\nclass drone_env_gridworld(drone_env):\n\tdef __init__(self,start = [0,0,-5],aim = [32,38,-4],scaling_factor = 5):\n\t\tdrone_env.__init__(self,start,aim)\n\t\tself.scaling_factor = scaling_factor\n\t\t\n\tdef interpret_action(self,action):\n\t\tscaling_factor = self.scaling_factor\n\t\tif action == 0:\n\t\t\tquad_offset = (0, 0, 0)\n\t\telif action == 1:\n\t\t\tquad_offset = (scaling_factor, 0, 0)\n\t\telif action == 2:\n\t\t\tquad_offset = (0, scaling_factor, 0)\n\t\telif action == 3:\n\t\t\tquad_offset = (0, 0, scaling_factor)\n\t\telif action == 4:\n\t\t\tquad_offset = (-scaling_factor, 0, 0)\t\n\t\telif action == 5:\n\t\t\tquad_offset = (0, -scaling_factor, 0)\n\t\telif action == 6:\n\t\t\tquad_offset = (0, 0, -scaling_factor)\n\t\t\n\t\treturn np.array(quad_offset).astype(\"float64\")\n\t\n\tdef step(self,action):\n\t\tdiff = self.interpret_action(action)\n\t\tdrone_env.moveByDist(self,diff)\n\t\t\n\t\tpos_ = v2t(self.client.getPosition())\n\t\tvel_ = v2t(self.client.getVelocity())\n\t\tstate_ = np.append(pos_, vel_)\n\t\tpos = self.state[0:3]\n\t\t\n\t\tinfo = None\n\t\tdone = False\n\t\treward = self.rewardf(self.state,state_)\n\t\treawrd = reward / 50\n\t\tif action == 0:\n\t\t\treward -= 10\n\t\tif self.isDone():\n\t\t\tdone = True\n\t\t\treward = 100\n\t\t\tinfo = \"success\"\n\t\tif self.client.getCollisionInfo().has_collided:\n\t\t\treward = -100\n\t\t\tdone = 
True\n\t\t\tinfo = \"collision\"\n\t\tif (distance(pos_,self.aim)>150):\n\t\t\treward = -100\n\t\t\tdone = True\n\t\t\tinfo = \"out of range\"\n\t\t\t\n\t\tself.state = state_\n\t\t\n\t\treturn state_,reward,done,info\n\t\n\tdef reset(self):\n\t\tdrone_env.reset(self)\n\t\tpos = v2t(self.client.getPosition())\n\t\tvel = v2t(self.client.getVelocity())\n\t\tstate = np.append(pos, vel)\n\t\tself.state = state\n\t\treturn state\n\t\t\n\tdef rewardf(self,state,state_):\n\t\t\n\t\tdis = distance(state[0:3],self.aim)\n\t\tdis_ = distance(state_[0:3],self.aim)\n\t\treward = dis - dis_\n\t\treward = reward * 1\n\t\treward -= 1\n\t\treturn reward\n\t\t\n#-------------------------------------------------------\n# height control\n# continuous control\n\t\t\nclass drone_env_heightcontrol(drone_env):\n\tdef __init__(self,start = [-23,0,-10],aim = [-23,125,-10],scaling_factor = 2,img_size = [64,64]):\n\t\tdrone_env.__init__(self,start,aim)\n\t\tself.scaling_factor = scaling_factor\n\t\tself.aim = np.array(aim)\n\t\tself.height_limit = -30\n\t\tself.rand = False\n\t\tif aim == None:\n\t\t\tself.rand = True\n\t\t\tself.start = np.array([0,0,-10])\n\t\telse:\n\t\t\tself.aim_height = self.aim[2]\n\t\n\tdef reset_aim(self):\n\t\tself.aim = (np.random.rand(3)*300).astype(\"int\")-150\n\t\tself.aim[2] = -np.random.randint(10) - 5\n\t\tprint (\"Our aim is: {}\".format(self.aim).ljust(80,\" \"),end = '\\r')\n\t\tself.aim_height = self.aim[2]\n\t\t\n\tdef reset(self):\n\t\tif self.rand:\n\t\t\tself.reset_aim()\n\t\tdrone_env.reset(self)\n\t\tself.state = self.getState()\n\t\treturn self.state\n\t\t\n\tdef getState(self):\n\t\tpos = v2t(self.client.getPosition())\n\t\tvel = v2t(self.client.getVelocity())\n\t\timg = self.getImg()\n\t\tstate = [img, np.array([pos[2] - self.aim_height])]\n\t\t\n\t\treturn state\n\t\t\n\tdef step(self,action):\n\t\tpos = v2t(self.client.getPosition())\n\t\tdpos = self.aim - pos\n\t\t\n\t\tif abs(action) > 1:\n\t\t\tprint (\"action value error\")\n\t\t\taction = action / abs(action)\n\t\t\n\t\ttemp = np.sqrt(dpos[0]**2 + dpos[1]**2)\n\t\tdx = dpos[0] / temp * self.scaling_factor\n\t\tdy = dpos[1] / temp * self.scaling_factor\n\t\tdz = - action * self.scaling_factor\n\t\t#print (dx,dy,dz)\n\t\tdrone_env.moveByDist(self,[dx,dy,dz],forward = True)\n\t\t\n\t\tstate_ = self.getState()\n\t\tpos = state_[1][0]\n\t\t\n\t\tinfo = None\n\t\tdone = False\n\t\treward = self.rewardf(self.state,state_)\n\t\t\n\t\tif self.isDone():\n\t\t\tif self.rand:\n\t\t\t\tdone = False\n\t\t\t\t#reward = 50\n\t\t\t\t#info = \"success\"\n\t\t\t\tself.reset_aim()\n\t\t\telse:\n\t\t\t\tdone = True\n\t\t\t\treward = 50\n\t\t\t\tinfo = \"success\"\n\t\t\t\n\t\tif self.client.getCollisionInfo().has_collided:\n\t\t\treward = -50\n\t\t\tdone = True\n\t\t\tinfo = \"collision\"\n\t\tif (pos + self.aim_height) < self.height_limit:\n\t\t\tdone = True\n\t\t\tinfo = \"too high\"\n\t\t\treward = -50\n\t\t\t\n\t\tself.state = state_\n\t\treward /= 50\n\t\tnorm_state = copy.deepcopy(state_)\n\t\tnorm_state[1] = norm_state[1]/100\n\t\t\n\t\treturn norm_state,reward,done,info\n\t\t\n\tdef isDone(self):\n\t\tpos = v2t(self.client.getPosition())\n\t\tpos[2] = self.aim[2]\n\t\tif distance(self.aim,pos) < self.threshold:\n\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef rewardf(self,state,state_):\n\t\tpos = state[1][0]\n\t\tpos_ = state_[1][0]\n\t\treward = - abs(pos_) + 5\n\t\t\n\t\treturn reward\n\t\t\n\tdef getImg(self):\n\t\t\n\t\tresponses = self.client.simGetImages([AirSimClient.ImageRequest(0, 
AirSimClient.AirSimImageType.DepthPerspective, True, False)])\n\t\timg1d = np.array(responses[0].image_data_float, dtype=np.float)\n\t\timg2d = np.reshape(img1d, (responses[0].height, responses[0].width))\n\t\timage = Image.fromarray(img2d)\n\t\tim_final = np.array(image.resize((64, 64)).convert('L'), dtype=np.float)/255\n\t\tim_final.resize((64,64,1))\n\t\tif IMAGE_VIEW:\n\t\t\tcv2.imshow(\"view\",im_final)\n\t\t\tkey = cv2.waitKey(1) & 0xFF;\n\t\treturn im_final\n\t\t\ndef v2t(vect):\n\tif isinstance(vect,AirSimClient.Vector3r):\n\t\tres = np.array([vect.x_val, vect.y_val, vect.z_val])\n\telse:\n\t\tres = np.array(vect)\n\treturn res\n\ndef distance(pos1,pos2):\n\tpos1 = v2t(pos1)\n\tpos2 = v2t(pos2)\n\t#dist = np.sqrt(abs(pos1[0]-pos2[0])**2 + abs(pos1[1]-pos2[1])**2 + abs(pos1[2]-pos2[2]) **2)\n\tdist = np.linalg.norm(pos1-pos2)\n\t\t\n\treturn dist" } ]
1
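The rewardf method of drone_env_gridworld above shapes the reward as the decrease in Euclidean distance to the goal minus a constant step penalty, with the success, collision, and out-of-range overrides applied afterwards. A minimal sketch of just that shaping term, decoupled from AirSim (the start position, next position, and goal below are made-up test values):

import numpy as np

def shaped_reward(pos, pos_next, aim, step_penalty=1.0):
    # Positive when the move reduced the distance to the goal, minus a per-step cost
    dist = np.linalg.norm(np.asarray(pos, dtype=float) - aim)
    dist_next = np.linalg.norm(np.asarray(pos_next, dtype=float) - aim)
    return (dist - dist_next) - step_penalty

aim = np.array([32.0, 38.0, -4.0])
print(shaped_reward([0, 0, -5], [5, 0, -5], aim))  # moving toward the goal gives a positive value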
NeugebauerAlex/MAFzucBioPortal
https://github.com/NeugebauerAlex/MAFzucBioPortal
365156721eed736b9bedb713e66ace7eb2d49a95
e0cad9899b571e4ed9379fbd3ab27fe6edf1d51f
fba53a7fa74a29d2645f72c0406e43b480257279
refs/heads/master
2023-01-28T00:56:21.242722
2020-12-02T10:00:06
2020-12-02T10:00:06
317,816,822
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6817018389701843, "alphanum_fraction": 0.6865003108978271, "avg_line_length": 32.25531768798828, "blob_id": "bcb67a252c6d8fd3f30b588cafe3486cafb6c89f", "content_id": "6f650eacc77df40bd1143f916abca5e358f8dc8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3132, "license_type": "no_license", "max_line_length": 79, "num_lines": 94, "path": "/createCaseFiles.py", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport csv\nimport errno\nimport os\nfrom os.path import abspath\nfrom backports import configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\n# Lese die csv Datei ein und Zähle die Zeilen für case_list_description\n\ninput_file = open(config['createCasefiles']['input_file'])\nreader_file = csv.reader(input_file)\nvalue_with_header = len(list(reader_file))\nvalue = value_with_header - 1\n\n\n\n# Das Argument und den Pfad angeben für die beiden zu erstellenden case-Dateien\nfilename1 = abspath(config['createCasefiles']['filename1'])\nfilename2 = abspath(config['createCasefiles']['filename2'])\n\n#SAMPLE_ID herauskriegen\nos.chdir(config['createMetaFiles']['ord_name'])\n\nResult_for_data_clinical_sample = \"\"\nResult_for_data_clinical_patient = \"\"\ncase_list_id = \"\"\n\n# Aus der csv Datei die zweite Spalte für die \"UKER\" Nummer holen\nfile_name = (config['createMetaFiles']['file_name_csv'])\ncsv_file = open(file_name)\ncsv_reader = csv.reader(csv_file, delimiter=';')\nsecond_column = []\n\nfor line in csv_reader:\n second_column = (line[2])\n case_list_id += second_column + '\\t'\n case_list_id_without_header = case_list_id[12:]\n\n# Output Ordner nicht da, dann erstelle einen\nif not os.path.exists(os.path.dirname(filename1)):\n try:\n os.makedirs(os.path.dirname(filename1))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die cases_all Datei\nwith open(filename1, \"w\") as file:\n # Name der Studie in cBioPortal\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"stable_id:MTB_all\")\n file.write(\"\\n\")\n # Standard Angabe für cases_all\n file.write(\"case_list_name: All Tumors\")\n file.write(\"\\n\")\n # Das Argument schreiben, wieviele Zeilen in der csv existieren\n file.write(\"case_list_description: All tumor samples:\" + str(value))\n file.write(\"\\n\")\n # Die Sample_Id \"UKER\", die aus CSV geholt wurde, schreiben\n file.write(\"case_list_ids:\" + case_list_id_without_header)\n file.write(\"\\n\")\n file.write(\"case_list_category: all_cases_in_study\")\n file.close() # close file\n\n# Erstelle Ordner wenn er noch nicht existiert\nif not os.path.exists(os.path.dirname(filename2)):\n try:\n os.makedirs(os.path.dirname(filename2))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe cases_sqeuenced Datei\nwith open(filename2, \"w\") as file:\n # Name der Studie in cBioPortal\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"stable_id:MTB_sequenced\")\n file.write(\"\\n\")\n # Standard Angabe für cases_all\n file.write(\"case_list_name: All Tumors\")\n file.write(\"\\n\")\n # Das Argument schreiben, wieviele Zeilen in der csv existieren\n file.write(\"case_list_description: All tumor samples:\" + str(value))\n file.write(\"\\n\")\n # Die Sample_Id \"UKER\", die aus CSV geholt wurde, schreiben\n 
file.write(\"case_list_ids:\" + case_list_id_without_header)\n file.close() # close file\n" }, { "alpha_fraction": 0.6638942956924438, "alphanum_fraction": 0.6738013625144958, "avg_line_length": 37.38497543334961, "blob_id": "1cf4be2f2439424ad07c4e40deb161bbedb67e11", "content_id": "52dc6192dd70e1fe567dffb513e7a3326cd34b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8185, "license_type": "no_license", "max_line_length": 205, "num_lines": 213, "path": "/createMetaFiles.py", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport os\nimport errno\nimport random\nfrom os.path import abspath\nimport csv\nfrom backports import configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\n# Gebe das Argument und den Pfad, sowie den namen der Datei an, die am Ende erstellt werden soll\nfilename = abspath(config['createMetaFiles']['filename'])\nfilename1 = abspath(config['createMetaFiles']['filename1'])\nfilename2 = abspath(config['createMetaFiles']['filename2'])\nfilename3 = abspath(config['createMetaFiles']['filename3'])\nfilename4 = abspath(config['createMetaFiles']['filename4'])\nfilename5 = abspath(config['createMetaFiles']['filename5'])\n\n# Setze dies auf False, wenn du richtigen Patienten-Daten verwendet werden sollen\ndo_it_random = config['createMetaFiles']['Patienten_ID_zufällig?']\n\n\n#SAMPLE_ID herauskriegen\nos.chdir(config['createMetaFiles']['ord_name'])\n\nResult_for_data_clinical_sample = \"\"\nResult_for_data_clinical_patient = \"\"\n\n# Aus der angegebenen CSV Datei die gewünschten Spalten des Pathologie selektieren und kopieren\nfile_name = (config['createMetaFiles']['file_name_csv'])\ncsv_file = open(file_name)\ncsv_reader = csv.reader(csv_file, delimiter=';')\nfirst_column = []\nsecond_column = [] #empty list to store second column values\nthird_column = []\nforth_column = []\nfifth_column = []\nsix_column = []\nseventh_column = []\neigth_column = []\n\n# Wenn do_it_random = True, benutze den Zufallszahlen generator für die erste Spalte, der CSV Datei\n# Wenn do_it_random = False, kopiere alle gewünschten Spalten aus der CSV Datei\n\nif do_it_random == False:\n for line in csv_reader:\n first_column = (line[1]) #index 1 für zweite column\n second_column = (line[2])\n third_column = (line[3])\n forth_column = (line[4])\n fifth_column = (line[5])\n six_column = (line[6])\n seventh_column = (line[7])\n eigth_column = (line[8])\n # Kopiere die Selektierten Spalten, in die gewünschten Argumente, die jeweils in data_clinical_patient bzw. 
data_clinical_sample verwendet werden\n # Immer Tabular getrennt, sonst passt die Formatierung nicht, da ganze Spalten kopiert werden\n # Am Ende immer einen Absatz schreiben, da immer ganze Spalten kopiert werden\n Result_for_data_clinical_sample += first_column + '\\t' + second_column + '\\t' + eigth_column +'\\n'\n Result_for_data_clinical_patient += first_column + '\\t' + third_column + '\\t' + forth_column + '\\t' + fifth_column + '\\t' + six_column + '\\t' + seventh_column + '\\n'\n # Lösche die Überschriften, sodass nur die puren Daten genommen werden\n Result_for_data_clinical_sample_without_header = Result_for_data_clinical_sample[23:]\n Result_for_data_clinical_patient_without_header = Result_for_data_clinical_patient[54:]\nelse:\n for line in csv_reader:\n for index in range(0,1):\n first_column = random.randint(1000000000,9999999999)\n second_column = (line[2])\n third_column = (line[3])\n forth_column = (line[4])\n fifth_column = (line[5])\n six_column = (line[6])\n seventh_column = (line[7])\n eigth_column = (line[8])\n Result_for_data_clinical_sample += str(first_column) + '\\t' + str(second_column) + '\\t' + str(eigth_column) +'\\n'\n Result_for_data_clinical_patient += str(first_column) + '\\t' + str(third_column) + '\\t' + str(forth_column) + '\\t' + str(fifth_column) + '\\t' + str(six_column) + '\\t' + str(seventh_column) + '\\n'\n Result_for_data_clinical_sample_without_header = Result_for_data_clinical_sample[28:]\n Result_for_data_clinical_patient_without_header = Result_for_data_clinical_patient[58:]\n\n\n# Hole den Dateinamen für cancer_study_identifier\n#base = os.path.basename('/home/unberapp/import_test/Codierung_Testdaten_Erlangen.csv')\n\n# Wenn Ordner noch nicht existiert, erstelle einen\nif not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Meta Datei\nwith open(filename, \"w\") as file:\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"genetic_alteration_type:CLINICAL\")\n file.write(\"\\n\")\n file.write(\"datatype: SAMPLE_ATTRIBUTES\")\n file.write(\"\\n\")\n file.write(\"data_filename: data_clinical_sample.txt\")\n file.close() # close file\n\n\nif not os.path.exists(os.path.dirname(filename1)):\n try:\n os.makedirs(os.path.dirname(filename1))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Meta Datei\nwith open(filename1, \"w\") as file:\n file.write(\"type_of_cancer: other\")\n file.write(\"\\n\")\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"name: MTB\")\n file.write(\"\\n\")\n file.write(\"description: Galaxy Test\")\n file.write(\"\\n\")\n file.write(\"short_name: Galaxy Test\")\n file.close() # close file\n\n\nif not os.path.exists(os.path.dirname(filename2)):\n try:\n os.makedirs(os.path.dirname(filename2))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Meta Datei\nwith open(filename2, \"w\") as file:\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"genetic_alteration_type: CLINICAL\")\n file.write(\"\\n\")\n file.write(\"datatype: PATIENT_ATTRIBUTES\")\n file.write(\"\\n\")\n file.write(\"data_filename: data_clinical_patient.txt\")\n file.close() # close file\n\n\nif not os.path.exists(os.path.dirname(filename3)):\n try:\n 
os.makedirs(os.path.dirname(filename3))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Meta Datei\nwith open(filename3, \"w\") as file:\n file.write(\"cancer_study_identifier:MTB\")\n file.write(\"\\n\")\n file.write(\"stable_id: mutations\")\n file.write(\"\\n\")\n file.write(\"profile_name: Mutations\")\n file.write(\"\\n\")\n file.write(\"profile_description: Extended MAF\")\n file.write(\"\\n\")\n file.write(\"genetic_alteration_type: MUTATION_EXTENDED\")\n file.write(\"\\n\")\n file.write(\"datatype: MAF\")\n file.write(\"\\n\")\n file.write(\"show_profile_in_analysis_tab: true\")\n file.write(\"\\n\")\n file.write(\"data_filename: data_mutations_extended.txt\")\n file.close() # close file\n\n\nif not os.path.exists(os.path.dirname(filename4)):\n try:\n os.makedirs(os.path.dirname(filename4))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Klinische Datei, immer Tabular getrennt, sonst erkennt es cBioPortal nicht\nwith open(filename4, \"w\") as file:\n file.write(\"#Patient Identifier\\tSample Identifier\\tName\")\n file.write(\"\\n\")\n file.write(\"#Patient Identifier\\tSample Identifier\\tName\")\n file.write(\"\\n\")\n file.write('#NUMBER\\tSTRING\\tSTRING')\n file.write(\"\\n\")\n file.write('#1\\t1\\t1')\n file.write(\"\\n\")\n file.write(\"PATIENT_ID\\tSAMPLE_ID\\tNAME\")\n file.write(\"\\n\")\n file.write(Result_for_data_clinical_sample_without_header)\n\nif not os.path.exists(os.path.dirname(filename5)):\n try:\n os.makedirs(os.path.dirname(filename5))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Schreibe die Klinische Datei, immer Tabular getrennt, sonst erkennt es cBioPortal nicht\nwith open(filename5, \"w\") as file:\n file.write('#Patient Identifier\\tOncoTree_Code\\tCancerType\\tGeschlecht\\tAlter\\tEinweiser')\n file.write(\"\\n\")\n file.write('#Patient Identifier\\tOncoTree_Code\\tCancerType\\tGeschlecht\\tAlter\\tEinweiser')\n file.write(\"\\n\")\n file.write('#NUMBER\\tSTRING\\tSTRING\\tSTRING\\tNUMBER\\tSTRING')\n file.write(\"\\n\")\n file.write('#1\\t1\\t1\\t1\\t1\\t1')\n file.write(\"\\n\")\n file.write('PATIENT_ID\\tONCOTREE_CODE\\tCANCER_TYPE_DETAILED\\tGESCHLECHT\\tALTER\\tEINWEISER')\n file.write(\"\\n\")\n file.write(Result_for_data_clinical_patient_without_header)\n" }, { "alpha_fraction": 0.8015373945236206, "alphanum_fraction": 0.8078266978263855, "avg_line_length": 37.64864730834961, "blob_id": "85d187bb50e542b8bfe46a48a34e0de175cb7264", "content_id": "11394392deef4a3aa6640be61518ca7bf2a340bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 87, "num_lines": 37, "path": "/config.ini", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "[combineFiles]\n# Pfad zu den Maf Dateien, die eingelesen werden müssen\nPfad_zu_MAF = /Users/alexneugebauer/Desktop/MAF_Dateien/*.tabular\n\n# Den Namen nicht ändern, da cBioPortal diesen so möchte\nfilename= /Users/alexneugebauer/Desktop/data_mutations_extended.txt\n\n[createMetaFiles]\n# Sollen die Patienten_ID's zufällig generiert werden?\nPatienten_ID_zufällig? 
= True\n\n# Pfad in den richtigen Ordner, wo csv Datei zu finden ist\nord_name = /Users/alexneugebauer/Desktop/MAF_Dateien/\n\n# Name der CSV Datei, wo Spalten eingelesen werden sollen\nfile_name_csv = Codierung_Testdaten_Erlangen.csv\n\n#Namen der Meta_Dateien\nfilename = /Users/alexneugebauer/Desktop/meta_clinical_sample.txt\nfilename1 = /Users/alexneugebauer/Desktop/meta_study.txt\nfilename2 = /Users/alexneugebauer/Desktop/meta_clinical_patient.txt\nfilename3 = /Users/alexneugebauer/Desktop/meta_mutations_extended.txt\nfilename4 = /Users/alexneugebauer/Desktop/data_clinical_sample.txt\nfilename5 = /Users/alexneugebauer/Desktop/data_clinical_patient.txt\n\n\n[createCasefiles]\ninput_file = /Users/alexneugebauer/Desktop/MAF_Dateien/Codierung_Testdaten_Erlangen.csv\nfilename1 = /Users/alexneugebauer/Desktop/case_lists/cases_all.txt\nfilename2 = /Users/alexneugebauer/Desktop/case_lists/cases_sequenced.txt\n\n\n[deleteLine]\nord_name = /Users/alexneugebauer/Desktop/\noriginal_file= data_mutations_extended.txt\ndummy_file = cleaned_file.txt\nfile_name_2 = Artefact_List-Tabelle 1.csv\n\n" }, { "alpha_fraction": 0.6778964400291443, "alphanum_fraction": 0.6811832189559937, "avg_line_length": 32.342464447021484, "blob_id": "1bd13cdbd1dc30b835fa09f1341793ac68300737", "content_id": "b8e0c9c27c16dd390f2ec53d4eead399c17373e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2435, "license_type": "no_license", "max_line_length": 172, "num_lines": 73, "path": "/deleteLine.py", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf8 -*-\n\n\n# Wenn es Artefakte aus der Pipeline gibt, die nicht mit in cBioPortal sollen, bitte in einer CSV Datei sammeln und mit diesem Skript aus der kombinierten MAF Datei löschen\nimport csv\nimport os\nfrom backports import configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nos.chdir(config['deleteLine']['ord_name'])\nResult_for_artifact_selection = \"\"\noriginal_file = config['deleteLine']['original_file']\ndummy_file = config['deleteLine']['dummy_file']\n\n# Ziehe Spalte mit Artefakten aus CSV Datei\nfile_name = config['deleteLine']['file_name_2']\ncsv_file = open(file_name)\ncsv_reader = csv.reader(csv_file, delimiter=';')\n\nsecond_column = []\n\nfor line in csv_reader:\n second_column = (line[2])\n Result_for_artifact_selection += second_column + '\\n'\n Result_for_artifact_selection_without_header = Result_for_artifact_selection[12:]\n\n# Nehme die aneinandergereihten Strings und Forme sie in eine Liste\nstringIntoList = Result_for_artifact_selection_without_header.split('\\n')\n\n# Durch die Liste iterieren, um Duplikate zu eleminieren\nstringIntoList = list(dict.fromkeys(stringIntoList))\n\n\ndef main():\n length = len(stringIntoList)\n length_real = length - 1\n i = 0\n while i < length_real:\n delete_line_with_word(original_file, stringIntoList[i])\n i += 1\n\n\ndef delete_line_by_condition(original_file, condition):\n \"\"\" In a file, delete the lines at line number in given list\"\"\"\n dummy_file = original_file + '.bak'\n is_skipped = False\n # Open original file in read only mode and dummy file in write mode\n with open(original_file, 'r') as read_obj, open(dummy_file, 'w') as write_obj:\n # Line by line copy data from original file to dummy file\n for line in read_obj:\n # if current line matches the given condition then skip that line\n if condition(line) == False:\n write_obj.write(line)\n else:\n 
is_skipped = True\n # If any line is skipped then rename dummy file as original file\n if is_skipped:\n os.remove(original_file)\n os.rename(dummy_file, original_file)\n else:\n os.remove(dummy_file)\n\n\ndef delete_line_with_word(file_name, word):\n \"\"\"Delete lines from a file that contains a given word / sub-string \"\"\"\n delete_line_by_condition(file_name, lambda x: word in x)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6674346923828125, "alphanum_fraction": 0.6735790967941284, "avg_line_length": 32.38461685180664, "blob_id": "e2851094a3859dc1d97440653c4c60dc1242771c", "content_id": "badfc8ecc2370c719886a7488818ae57100d67a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 129, "num_lines": 39, "path": "/combineMAF.py", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport glob\nimport os\nimport errno\nfrom os.path import abspath\nfrom backports import configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\n\n# Definiere read_files - Wo findet das Skript die MAF Dateien die kombiniert werden sollen\ninput_files = config['combineFiles']['Pfad_zu_MAF']\nread_files = glob.glob(input_files)\n\n# Wo soll die kombinierte MAF Datei gespeichert werden\npath = config['combineFiles']['filename']\nfilename = abspath(path)\n\n# Wenn der angegebene Ordner nicht existiert, kreiere einen\nif not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n# Kombiniere alle Daten und lösche erste Zeile\nwith open(filename, \"w\") as outfile:\n for f, value in enumerate(read_files):\n with open(value, \"r\") as infile:\n if f == 0:\n outfile.write(infile.read())\n elif f != 0:\n # Delete die erste Zeile der folgenden MAF-Dateien, gebe hier die Zeichenanzahl an der Überschrift bei Python < 3\n # Wenn Python3 installiert ist, hier nur next()\n outfile.write(infile.read()[601:])\n" }, { "alpha_fraction": 0.817460298538208, "alphanum_fraction": 0.817460298538208, "avg_line_length": 13, "blob_id": "4a8b2cb3aa28d1ae025e2eabc9774c651fa67d55", "content_id": "1dc95ea0265d6dfd81d1989a6aa1d4fdf0fdf5a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 126, "license_type": "no_license", "max_line_length": 25, "num_lines": 9, "path": "/run.bash", "repo_name": "NeugebauerAlex/MAFzucBioPortal", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython combineFiles.py\nwait\npython createMetaFiles.py\nwait\npython createCaseFiles.py\n#wait\n#python deleteLine.py\n" } ]
6
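combineMAF.py above concatenates the per-sample MAF files and, for every file after the first, drops the duplicated header by slicing a hard-coded 601 characters, noting in its comments that under Python 3 a plain next() would do. A minimal Python 3 sketch of that next()-based variant, assuming each MAF file carries a single header line (the glob pattern and output name below are placeholders):

import glob

def combine_maf(pattern, out_path):
    # Keep the header only from the first file; skip the header line of the rest
    with open(out_path, "w") as outfile:
        for i, maf_path in enumerate(sorted(glob.glob(pattern))):
            with open(maf_path) as infile:
                if i > 0:
                    next(infile, None)  # drop the duplicated header line
                outfile.write(infile.read())

combine_maf("MAF_Dateien/*.tabular", "data_mutations_extended.txt")  # placeholder paths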
momolas/coreml_conversion_hub
https://github.com/momolas/coreml_conversion_hub
01189f6973a4a364afbbc1dc8fbb9acfbdb27d46
4acf3bc5c5110ffbf0407e986a3b435e3020cf98
022eb85b62bb30e7e147b8e83cae30b5e739b80b
refs/heads/master
2023-03-16T04:47:08.237997
2020-06-25T21:31:08
2020-06-25T21:31:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.710865318775177, "alphanum_fraction": 0.7279473543167114, "avg_line_length": 56.60483932495117, "blob_id": "b71ec25ee03cabd5a5046e431a34d70b77d78d89", "content_id": "596c402eeb2461896350c79969147a9af7465706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7142, "license_type": "no_license", "max_line_length": 134, "num_lines": 124, "path": "/tensorflow/detection/utils.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport coremltools\n\ndef add_squeeze_layer(spec, input_name, output_name, output_dims, input_dims):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].squeeze.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].squeeze.squeezeAll = True\n spec.neuralNetwork.layers[-1].output.append(output_name)\n \n\ndef add_slicestatic_layer(spec, input_name, output_name, output_dims, input_dims, begin_id, end_id,\n begin_mask, end_mask):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].sliceStatic.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].sliceStatic.strides.extend([1,1,1])\n spec.neuralNetwork.layers[-1].sliceStatic.beginIds.extend(begin_id)\n spec.neuralNetwork.layers[-1].sliceStatic.endIds.extend(end_id)\n spec.neuralNetwork.layers[-1].sliceStatic.beginMasks.extend(begin_mask)\n spec.neuralNetwork.layers[-1].sliceStatic.endMasks.extend(end_mask)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n \n\ndef add_constant_layer(spec, output_name, output_dims, constant_data):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].loadConstantND.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].loadConstantND.shape.extend(output_dims)\n spec.neuralNetwork.layers[-1].loadConstantND.data.floatValue.extend(map(float, constant_data.flatten()))\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = 2\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n \ndef add_elementwise_layer(spec, output_name, input_names, inputs_dims, alpha=None, mode=\"multiply\"):\n if len(input_names) == 1 and (not alpha):\n raise ValueError(\"Should provide alpha value when only one input is provided\")\n if len(input_names) == 2 and alpha:\n raise ValueError(\"Alpha should be provided only with one input\")\n 
spec.neuralNetwork.layers.add()\n if mode == \"multiply\":\n spec.neuralNetwork.layers[-1].multiply.MergeFromString(b'')\n elif mode == \"add\":\n spec.neuralNetwork.layers[-1].add.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].input.extend(input_names)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n for k, i in enumerate(inputs_dims):\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[k].rank = len(i)\n spec.neuralNetwork.layers[-1].inputTensor[k].dimValue.extend(i)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(inputs_dims[0])\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(inputs_dims[0])\n if len(inputs_dims) == 1: spec.neuralNetwork.layers[-1].multiply.alpha = alpha\n \ndef add_unary(spec, output_name, input_name, input_dims, mode=\"exp\"):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].unary.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].unary.shift = 0\n spec.neuralNetwork.layers[-1].unary.scale = 1\n spec.neuralNetwork.layers[-1].unary.epsilon = 1e-6\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n if mode == \"exp\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP')\n elif mode == \"log\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG')\n elif mode == \"abs\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS')\n else:\n raise ValueError(\"Mode not understood\")\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(input_dims)\n \ndef add_concat_layer(spec, output_name, input_names, input_dims, output_dims, axis):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].concatND.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].input.extend(input_names)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n for k, i in enumerate(input_dims):\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[k].rank = len(i)\n spec.neuralNetwork.layers[-1].inputTensor[k].dimValue.extend(i)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].concatND.axis = axis\n \ndef add_reverse_layer(spec, output_name, input_name, input_dims, axis):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].reverse.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].reverse.reverseDim.extend(axis)\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].inputTensor.add()\n 
spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(input_dims)" }, { "alpha_fraction": 0.7695634961128235, "alphanum_fraction": 0.7919780015945435, "avg_line_length": 45.254547119140625, "blob_id": "b899eea918ce8f5bc708c6184f78b7a2c808db4d", "content_id": "f0c20e48629607a6e51c33604178813335ca8096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2543, "license_type": "no_license", "max_line_length": 115, "num_lines": 55, "path": "/tensorflow/classification/inception/inception_v3.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport os\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nimport sys\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\ninception_v3 = tf.keras.applications.InceptionV3(\n include_top=True, weights='imagenet', input_tensor = Input(shape=(299, 299, 3), batch_size=1))\ninception_v3.save(\"inception_v3.h5\")\n\n# CoreMl tools will automatically consider the model as NeuralNetwork classifier if you provide the \n# class_labels argument with the class names of classification model\ninceptionv3_mlmodel = tfcoreml.convert(\n\"./inception_v3.h5\",\noutput_feature_names = [get_output_name(inception_v3.outputs[0].name)],\ninput_name_shape_dict = {get_input_name(inception_v3.inputs[0].name): list(inception_v3.inputs[0].shape)},\nimage_input_names = [get_input_name(inception_v3.inputs[0].name)],\nimage_scale = 1/127.5,\nred_bias = -1,\ngreen_bias = -1,\nblue_bias=-1,\npredicted_probabilities_output = get_output_name(inception_v3.outputs[0].name),\npredicted_feature_name = \"classLabels\",\nclass_labels = imagenet_labels,\nminimum_ios_deployment_target='13'\n)\n\n#Chaning the input name and one of the output name for convenience. 
Also adding model descriptions\nspec = inceptionv3_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to changle all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"229x229x3 image input of model\"\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\ninceptionv3_mlmodel.short_description = \"Inception V3 imagnet model\"\ninceptionv3_mlmodel.license = \"Open source academic license\"\ninceptionv3_mlmodel.save(\"inceptionV3.mlmodel\")" }, { "alpha_fraction": 0.7008652687072754, "alphanum_fraction": 0.7212607860565186, "avg_line_length": 52.1184196472168, "blob_id": "7149486da21a7c45418cfb5a88ffdc10a3b6f3f6", "content_id": "c34b122fb31ea1db12b340a6525bc9a76b791a9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8090, "license_type": "no_license", "max_line_length": 158, "num_lines": 152, "path": "/pytorch/detection/efficientDet/convert_effdet.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import torch\nimport yaml\nfrom torch import nn\nfrom eff_backbone import EfficientDetBackbone\nimport numpy as np\nimport onnx_coreml\nimport os\nfrom utils import *\nimport onnx\nimport argparse\nimport coremltools\nfrom coremltools.models import datatypes\nfrom coremltools.models.pipeline import *\n\n#at the time of writing this code coreml and onnx_coreml had some bugs in source code that needs to changed\n#for the conversion process to proceed\n#add the tem \"rank=4\" in line 46 and 337 at onnx_coreml/_operators_nd.py\n#change line 1454, 1455 to spec_layer_params.scalingFactor.append(int(scaling_factor_h)),\n#spec_layer_params.scalingFactor.append(int(scaling_factor_w)) in coremltools/models/neural_network/builder.py\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--type\", default=0, type=int, help=\"Coefficient of efficientdet. 
can be 0,1,2,3\")\nargs = parser.parse_args()\n\nmodel_name = \"efficientdet_{}.{}\"\n\nclass Params:\n    def __init__(self, project_file):\n        self.params = yaml.safe_load(open(project_file).read())\n\n    def __getattr__(self, item):\n        return self.params.get(item, None)\n\ndevice = torch.device('cpu')\nparams = Params(os.path.join(\"Yet-Another-EfficientDet-Pytorch\", \"projects\", \"coco.yml\"))\nmodel = EfficientDetBackbone(num_classes=len(params.obj_list), compound_coef=args.type, onnx_export=True,\n                            ratios=eval(params.anchors_ratios), scales=eval(params.anchors_scales)).to(device) \nmodel.backbone_net.model.set_swish(memory_efficient=False)\n#Downloading and loading model weights\nos.system(\"wget {}\".format(eff_urls[args.type]))\nmodel.load_state_dict(torch.load(eff_urls[args.type].split('/')[-1]))\nmodel.eval()\ndummy_input = torch.randn((1,3, model.input_sizes[args.type],model.input_sizes[args.type]), dtype=torch.float32).to(device)\nmodel(dummy_input) #running one forward pass so that all dynamism in the forward pass is made static\n\n# opset_version 11 is causing an error\ntorch.onnx.export(model, dummy_input,\n                  model_name.format(args.type, \"onnx\"),\n                  verbose=True,\n                  input_names=['input_image'],\n                  opset_version=10)\n\nonnx_model = onnx.load(model_name.format(args.type, \"onnx\"))\nonnx.checker.check_model(onnx_model)\n\n#image scaling parameters (not exact, but a close approximation)\nimage_scale = 1.0/(255.0*0.226)\nred_bias = -0.485/0.226\ngreen_bias = -0.456/0.226\nblue_bias = -0.406/0.226\n\nml_model = onnx_coreml.convert(model_name.format(args.type, \"onnx\"), image_input_names=['input_image'], \n\t\t\t\t\t\t\tpreprocessing_args= {\"image_scale\": image_scale, \"red_bias\": red_bias, \n\t\t\t\t\t\t\t\"blue_bias\": blue_bias, \"green_bias\": green_bias},\n\t\t\t\t\t\t\tminimum_ios_deployment_target='13')\n#at the time of this code creation torch to onnx has an error because of which padding dimensions aren't properly\n#transferred to onnx. 
So changing it manually in the mlmodel graph\nfor i,layer in enumerate(ml_model._spec.neuralNetwork.layers):\n\tif \"pad\" in layer.name.lower():\n\t\tprint(\"Changing pad of layer {}\".format(layer.name))\n\t\tchange_error_dimension(layer)\n\n\nprint(\"Total anchors\", model.total_anchors)\nchange_effdet_output_names(ml_model._spec) #changing output node names in mlmodel graph\nadd_squeeze_layer(ml_model._spec, \"box_scores_pre\", \"box_scores\", [model.total_anchors, 90], [1, model.total_anchors, 90])\nadd_squeeze_layer(ml_model._spec, \"box_coordinates_pre\", \"box_coordinates\", [model.total_anchors, 4], [1, model.total_anchors, 4])\nadd_slicestatic_layer(ml_model._spec, \"box_coordinates\", \"box_loc_yx\", [model.total_anchors, 2], [model.total_anchors, 4],\n                    [0,0], [2147483647, 2], [True, True], [True, False])\nadd_slicestatic_layer(ml_model._spec, \"box_coordinates\", \"box_loc_hw\", [model.total_anchors, 2], [model.total_anchors, 4],\n                    [0,2], [2147483647, 4], [True, False], [True, True])\nadd_constant_layer(ml_model._spec, \"anchor_yx\", [model.total_anchors, 2], model.anchor_data[:, [0,1]])\nadd_constant_layer(ml_model._spec, \"anchor_hw\", [model.total_anchors, 2], model.anchor_data[:, [2,3]])\n# add_elementwise_layer(ml_model._spec, \"scale_yx\", [\"box_loc_yx\"], [[2034, 2]], alpha=0.1, mode=\"multiply\")\n# add_elementwise_layer(ml_model._spec, \"scale_hw\", [\"box_loc_hw\"], [[2034, 2]], alpha=0.2, mode=\"multiply\")\nadd_unary(ml_model._spec, \"hw_exp\", \"box_loc_hw\", [model.total_anchors, 2], mode=\"exp\")\nadd_elementwise_layer(ml_model._spec, \"final_hw_rev\", [\"hw_exp\", \"anchor_hw\"], [[model.total_anchors, 2], [model.total_anchors, 2]], mode=\"multiply\")\nadd_reverse_layer(ml_model._spec, \"final_wh\", \"final_hw_rev\", [model.total_anchors, 2], [False, True])\nadd_elementwise_layer(ml_model._spec, \"final_yx_pre\", [\"box_loc_yx\", \"anchor_hw\"], [[model.total_anchors, 2], [model.total_anchors, 2]], mode=\"multiply\")\nadd_elementwise_layer(ml_model._spec, \"final_yx_rev\", [\"final_yx_pre\", \"anchor_yx\"], [[model.total_anchors, 2], [model.total_anchors, 2]], mode=\"add\")\nadd_reverse_layer(ml_model._spec, \"final_xy\", \"final_yx_rev\", [model.total_anchors, 2], [False, True])\nadd_concat_layer(ml_model._spec, \"box_locations\", [\"final_xy\", \"final_wh\"], [[model.total_anchors, 2], [model.total_anchors, 2]], [model.total_anchors, 4], 1)\nml_model._spec.description.output[0].name = \"box_locations\"\nml_model._spec.description.output[1].name = \"box_scores\"\n\n# #adding nms and creating a pipeline model\nnms_spec = coremltools.proto.Model_pb2.Model()\nnms_spec.specificationVersion = 3\nfor i in range(2):\n    detection_output = ml_model._spec.description.output[i].SerializeToString()\n    nms_spec.description.input.add() \n    nms_spec.description.input[i].ParseFromString(detection_output)\n    nms_spec.description.output.add() \n    nms_spec.description.output[i].ParseFromString(detection_output)\n    \nnms_spec.description.output[0].name = \"confidence\"\nnms_spec.description.output[1].name = \"coordinates\"\n\noutput_sizes = [90, 4] \nfor i in range(2):\n    ma_type = nms_spec.description.output[i].type.multiArrayType \n    ma_type.shapeRange.sizeRanges.add() \n    ma_type.shapeRange.sizeRanges[0].lowerBound = 0 \n    ma_type.shapeRange.sizeRanges[0].upperBound = -1 \n    ma_type.shapeRange.sizeRanges.add() \n    ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] \n    ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] \n    del ma_type.shape[:]\n    \nnms = 
nms_spec.nonMaximumSuppression \nnms.confidenceInputFeatureName = \"box_scores\" \nnms.coordinatesInputFeatureName = \"box_locations\" \nnms.confidenceOutputFeatureName = \"confidence\" \nnms.coordinatesOutputFeatureName = \"coordinates\" \nnms.iouThresholdInputFeatureName = \"iouThreshold\" \nnms.confidenceThresholdInputFeatureName = \"confidenceThreshold\"\n\ndefault_iou_threshold = 0.6\ndefault_confidence_threshold = 0.5\nnms.pickTop.perClass = True\nnms.iouThreshold = default_iou_threshold \nnms.confidenceThreshold = default_confidence_threshold\nlabels = np.loadtxt(\"../coco_labels.txt\", dtype=str, delimiter=\"\\n\") \nnms.stringClassLabels.vector.extend(labels)\nnms_model = coremltools.models.MLModel(nms_spec) \n\n#creating the pipeline model comprising efficientdet and NMS\ninput_features = [(\"input_image\", datatypes.Array(3,model.input_sizes[args.type],model.input_sizes[args.type])), (\"iouThreshold\", datatypes.Double()),\n(\"confidenceThreshold\", datatypes.Double())] #cannot directly pass imageType as input type here. \noutput_features = [ \"confidence\", \"coordinates\"]\npipeline = Pipeline(input_features, output_features)\n\npipeline.add_model(ml_model._spec)\npipeline.add_model(nms_model._spec)\npipeline.spec.description.input[0].ParseFromString(ml_model._spec.description.input[0].SerializeToString())\npipeline.spec.description.input[1].type.isOptional = True\npipeline.spec.description.input[2].type.isOptional = True\npipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())\npipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())\n\nfinal_model = coremltools.models.MLModel(pipeline.spec) \nfinal_model.save(model_name.format(args.type, \"mlmodel\"))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6490908861160278, "alphanum_fraction": 0.6990908980369568, "avg_line_length": 34.51612854003906, "blob_id": "526113064c3f12814eef23e172d18c704389b770", "content_id": "a5ec36c6f673cd0268c78b90da9ef51efb7cc620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 106, "num_lines": 31, "path": "/tensorflow/classification/efficientnet_lite/efflite_utils.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tfcoreml\n\ndef get_output_tensor_index(operations):\n\tfor i, k in enumerate(operations):\n\t\tif \"softmax\" in k.name.lower():\n\t\t\tprint(\"Softmax output found at index {}\".format(i))\n\t\t\tprint(k)\n\t\t\treturn i\n\ndef get_input_tensor_index(operations):\n\tfor i,k in enumerate(operations):\n\t\tif \"truediv\" in k.name.lower():\n\t\t\tprint(\"Truediv input found at index {}\".format(i))\n\t\t\tprint(k)\n\t\t\treturn i\n\neff_lite_urls = {\n\t\"b0\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-lite0.tar.gz\",\n\t\"b1\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-lite1.tar.gz\",\n\t\"b2\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-lite2.tar.gz\",\n\t\"b3\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-lite3.tar.gz\",\n\t\"b4\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-lite4.tar.gz\"\n}\n\neff_input_size = {\n\t\"b0\": [1, 224, 224, 3],\n\t\"b1\": [1, 240, 240, 3],\n\t\"b2\": [1, 260, 260, 
3],\n\t\"b3\": [1, 280, 280, 3],\n\t\"b4\": [1, 300, 300, 3],\n}" }, { "alpha_fraction": 0.6659619212150574, "alphanum_fraction": 0.7167019248008728, "avg_line_length": 33.85714340209961, "blob_id": "db79e66828017b037fd87c2f5b8f25e321c42158", "content_id": "19d320a67d642ba28b7ba1fe871c8da113ebda8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 109, "num_lines": 14, "path": "/tensorflow/classification/efficientnet_edgetpu/effedge_utils.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tfcoreml\n\n\neff_edgetpu_urls = {\n\t\"small\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/efficientnet-edgetpu-S.tar.gz\",\n\t\"medium\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/efficientnet-edgetpu-M.tar.gz\",\n\t\"large\": \"https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/efficientnet-edgetpu-L.tar.gz\"\n}\n\neff_input_size = {\n\t\"small\": [1, 224, 224, 3],\n\t\"medium\": [1, 240, 240, 3],\n\t\"large\": [1, 300, 300, 3]\n}" }, { "alpha_fraction": 0.691087007522583, "alphanum_fraction": 0.7290399074554443, "avg_line_length": 49.393333435058594, "blob_id": "0d4a39e3eb3e40f3d54ae5b202d1212a1292e78b", "content_id": "802a70e4034e2dbcbbc5115d24c4bf456beb14f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7562, "license_type": "no_license", "max_line_length": 128, "num_lines": 150, "path": "/tensorflow/detection/ssd_mobilenet_v3/ssd_mobilenetv3_large.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport os\nimport coremltools\nfrom coremltools.models import datatypes\nfrom coremltools.models.pipeline import *\nimport numpy as np\nimport tfcoreml\nimport sys\nsys.path.append('../')\nfrom utils import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\n\ntar_url = \"http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_large_coco_2020_01_14.tar.gz\"\ntar_file = tar_url.split(\"/\")[-1]\nfolder = tar_file.split('.')[0]\ncleaned_pb = \"ssd_mobilenet_v3_large_cleaned.pb\"\nmlmodel_file = \"ssd_mobilenet_v3_large.mlmodel\"\n\nos.system(\"wget {}\".format(tar_url))\nos.system(\"tar -xvf {}\".format(tar_file))\n\n\nwith tf.compat.v1.gfile.GFile(os.path.join(folder, \"frozen_inference_graph.pb\"), \"rb\") as f: \n    graph_def = tf.compat.v1.GraphDef() \n    graph_def.ParseFromString(f.read()) \n#the pb graph has a tensorflowlite detection process node which has to be removed as it's not\n#supported in coreml\noutput_def = tf.compat.v1.graph_util.extract_sub_graph(graph_def, [\"Squeeze\", \"convert_scores\"])\nwith tf.compat.v1.gfile.GFile(cleaned_pb, \"wb\") as f:\n    f.write(output_def.SerializeToString())\n\n\nssd_mb_v3_large = tfcoreml.convert(\n    cleaned_pb,\n    output_feature_names = [\"Squeeze\", \"convert_scores\"],\n    input_name_shape_dict = {\"normalized_input_image_tensor\": [1, 320, 320, 3]},\n    image_input_names = [\"normalized_input_image_tensor\"],\n    image_scale = 1/127.5,\n    red_bias = -1,\n    green_bias = -1,\n    blue_bias= -1,\n    minimum_ios_deployment_target='13'\n)\n\n#loading the tflite model and getting the prior anchor boxes from it\ninterpreter = tf.lite.Interpreter(model_path=os.path.join(folder, \"model.tflite\"))\ninterpreter.allocate_tensors()\nanchors = interpreter.get_tensor(418) #the tensor index corresponding to anchor boxes prior\n#use interpreter._get_ops_details() to know these details\n\n#changing the input image node name\nssd_mb_v3_large._spec.neuralNetwork.preprocessing[0].featureName = \"input_image\"\nssd_mb_v3_large._spec.neuralNetwork.layers[0].input[0] = \"input_image\"\nssd_mb_v3_large._spec.description.input[0].name = \"input_image\"\n\n#changing the node name of box location outputs\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].name = \"box_coordinates\"\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].output[0] = \"box_coordinates\"\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].squeeze.axes.pop()\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].squeeze.squeezeAll = True\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].outputTensor[0].rank = 2\nssd_mb_v3_large._spec.neuralNetwork.layers[-3].outputTensor[0].dimValue.pop(0)\n\n\n#Doing anchor box decoding\nadd_squeeze_layer(ssd_mb_v3_large._spec, \"convert_scores\", \"box_scores\", [2034, 91], [1, 2034, 91])\nadd_slicestatic_layer(ssd_mb_v3_large._spec, \"box_coordinates\", \"box_loc_yx\", 
[2034, 2], [2034, 4],\n                    [0,0], [2147483647, 2], [True, True], [True, False])\nadd_slicestatic_layer(ssd_mb_v3_large._spec, \"box_coordinates\", \"box_loc_hw\", [2034, 2], [2034, 4],\n                    [0,2], [2147483647, 4], [True, False], [True, True])\nadd_constant_layer(ssd_mb_v3_large._spec, \"anchor_yx\", [2034, 2], anchors[:, [0,1]])\nadd_constant_layer(ssd_mb_v3_large._spec, \"anchor_hw\", [2034, 2], anchors[:, [2,3]])\nadd_elementwise_layer(ssd_mb_v3_large._spec, \"scale_yx\", [\"box_loc_yx\"], [[2034, 2]], alpha=0.1, mode=\"multiply\")\nadd_elementwise_layer(ssd_mb_v3_large._spec, \"scale_hw\", [\"box_loc_hw\"], [[2034, 2]], alpha=0.2, mode=\"multiply\")\nadd_unary(ssd_mb_v3_large._spec, \"hw_exp\", \"scale_hw\", [2034, 2], mode=\"exp\")\nadd_elementwise_layer(ssd_mb_v3_large._spec, \"final_hw_rev\", [\"hw_exp\", \"anchor_hw\"], [[2034, 2], [2034, 2]], mode=\"multiply\")\nadd_reverse_layer(ssd_mb_v3_large._spec, \"final_wh\", \"final_hw_rev\", [2034, 2], [False, True])\nadd_elementwise_layer(ssd_mb_v3_large._spec, \"final_yx_pre\", [\"scale_yx\", \"anchor_hw\"], [[2034, 2], [2034, 2]], mode=\"multiply\")\nadd_elementwise_layer(ssd_mb_v3_large._spec, \"final_yx_rev\", [\"final_yx_pre\", \"anchor_yx\"], [[2034, 2], [2034, 2]], mode=\"add\")\nadd_reverse_layer(ssd_mb_v3_large._spec, \"final_xy\", \"final_yx_rev\", [2034, 2], [False, True])\nadd_concat_layer(ssd_mb_v3_large._spec, \"box_locations\", [\"final_xy\", \"final_wh\"], [[2034, 2], [2034, 2]], [2034, 4], 1)\n\n#Changing output descriptions accordingly\nssd_mb_v3_large._spec.description.output[0].name = \"box_scores\"\nssd_mb_v3_large._spec.description.output[0].type.multiArrayType.shape.extend([2034, 91])\nssd_mb_v3_large._spec.description.output[0].type.multiArrayType.dataType = datatypes._FeatureTypes_pb2.ArrayFeatureType.DOUBLE\nssd_mb_v3_large._spec.description.output[1].name = \"box_locations\"\nssd_mb_v3_large._spec.description.output[1].type.multiArrayType.shape.extend([2034, 4])\nssd_mb_v3_large._spec.description.output[1].type.multiArrayType.dataType = datatypes._FeatureTypes_pb2.ArrayFeatureType.DOUBLE\n\n\n\n#creating nms layer\nnms_spec = coremltools.proto.Model_pb2.Model()\nnms_spec.specificationVersion = 3\nfor i in range(2):\n    detection_output = ssd_mb_v3_large._spec.description.output[i].SerializeToString()\n    nms_spec.description.input.add() \n    nms_spec.description.input[i].ParseFromString(detection_output)\n    nms_spec.description.output.add() \n    nms_spec.description.output[i].ParseFromString(detection_output)\n    \nnms_spec.description.output[0].name = \"confidence\"\nnms_spec.description.output[1].name = \"coordinates\"\n\noutput_sizes = [91, 4] \nfor i in range(2):\n    ma_type = nms_spec.description.output[i].type.multiArrayType \n    ma_type.shapeRange.sizeRanges.add() \n    ma_type.shapeRange.sizeRanges[0].lowerBound = 0 \n    ma_type.shapeRange.sizeRanges[0].upperBound = -1 \n    ma_type.shapeRange.sizeRanges.add() \n    ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] \n    ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] \n    del ma_type.shape[:]\n    \nnms = nms_spec.nonMaximumSuppression \nnms.confidenceInputFeatureName = \"box_scores\" \nnms.coordinatesInputFeatureName = \"box_locations\" \nnms.confidenceOutputFeatureName = \"confidence\" \nnms.coordinatesOutputFeatureName = \"coordinates\" \nnms.iouThresholdInputFeatureName = \"iouThreshold\" \nnms.confidenceThresholdInputFeatureName = \"confidenceThreshold\"\n\ndefault_iou_threshold = 0.6\ndefault_confidence_threshold = 0.5\nnms.pickTop.perClass = 
True\nnms.iouThreshold = default_iou_threshold \nnms.confidenceThreshold = default_confidence_threshold\nlabels = np.loadtxt(\"../coco_labels.txt\", dtype=str, delimiter=\"\\n\") \nnms.stringClassLabels.vector.extend(labels)\nnms_model = coremltools.models.MLModel(nms_spec) \n\n#creating the pipeline model comprising mobilenet ssd and NMS\ninput_features = [(\"input_image\", datatypes.Array(3,320, 320)), (\"iouThreshold\", datatypes.Double()),\n(\"confidenceThreshold\", datatypes.Double())] #cannot directly pass imageType as input type here. \noutput_features = [ \"confidence\", \"coordinates\"]\npipeline = Pipeline(input_features, output_features)\n\npipeline.add_model(ssd_mb_v3_large._spec)\npipeline.add_model(nms_model._spec)\npipeline.spec.description.input[0].ParseFromString(ssd_mb_v3_large._spec.description.input[0].SerializeToString())\npipeline.spec.description.input[1].type.isOptional = True\npipeline.spec.description.input[2].type.isOptional = True\npipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())\npipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())\n\nfinal_model = coremltools.models.MLModel(pipeline.spec) \nfinal_model.save(mlmodel_file)\n\n\n\n" }, { "alpha_fraction": 0.7695504426956177, "alphanum_fraction": 0.79070645570755, "avg_line_length": 47.25, "blob_id": "545b3af524594221fe6f5fa74f8047de052af1ba", "content_id": "ee5ded8cad8f922b5bc1a0683b1a9fc22a429523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2647, "license_type": "no_license", "max_line_length": 115, "num_lines": 56, "path": "/tensorflow/classification/mobilenet/mobilenet_v1.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nimport os\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nimport sys\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\n#we use the pretrained mobilenetv1 from keras library as it is easy to use\nmobilenet_v1 = tf.keras.applications.MobileNet(alpha=1.0, depth_multiplier=1, dropout=0,\n    include_top=True, weights='imagenet', input_tensor=Input(shape=(224, 224, 3), batch_size=1))\nmobilenet_v1.save(\"mobilenet_v1.h5\")\n\n# CoreMl tools will automatically consider the model as NeuralNetwork classifier if you provide the \n# class_labels argument with the class names of classification model\nmobilenet_mlmodel = tfcoreml.convert(\n\"./mobilenet_v1.h5\",\noutput_feature_names = [get_output_name(mobilenet_v1.outputs[0].name)],\ninput_name_shape_dict = {get_input_name(mobilenet_v1.inputs[0].name): list(mobilenet_v1.inputs[0].shape)},\nimage_input_names = [get_input_name(mobilenet_v1.inputs[0].name)],\nimage_scale = 1/127.5,\nred_bias = -1,\ngreen_bias = -1,\nblue_bias=-1,\npredicted_probabilities_output = get_output_name(mobilenet_v1.outputs[0].name),\npredicted_feature_name = \"classLabels\",\nclass_labels = imagenet_labels,\nminimum_ios_deployment_target='13'\n)\n\n#Changing the input name and one of the output names for convenience. 
Also adding model descriptions\nspec = mobilenet_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"224x224x3 image input of model\"\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\nmobilenet_mlmodel.short_description = \"Mobilenet V1 imagenet model\"\nmobilenet_mlmodel.license = \"Open source academic license\"\nmobilenet_mlmodel.save(\"mobilenetV1.mlmodel\")\n\n" }, { "alpha_fraction": 0.7192561626434326, "alphanum_fraction": 0.7456508874893188, "avg_line_length": 42.31168746948242, "blob_id": "de12eb1d1fa71969afd79ec10b03ec14c73f36fd", "content_id": "2bc64c6d8064fd98462e433013b77762a4fcd2c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3334, "license_type": "no_license", "max_line_length": 115, "num_lines": 77, "path": "/tensorflow/classification/mobilenet/mobilenet_v3_small_normal.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nimport os\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nimport sys\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\n#mobilenetv3_small conversion\nmobilenetv3_small_url = \"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-small_224_1.0_float.tgz\"\nos.system(\"wget {}\".format(mobilenetv3_small_url))\nos.system(\"tar -xvf v3-small_224_1.0_float.tgz\")\n\nmbv3_small_pb = 
\"./v3-small_224_1.0_float/v3-small_224_1.0_float.pb\"\nwith tf.compat.v1.gfile.GFile(mbv3_small_pb, \"rb\") as f: \n    graph_def = tf.compat.v1.GraphDef() \n    graph_def.ParseFromString(f.read()) \n\nwith tf.Graph().as_default() as graph: \n    tf.import_graph_def(graph_def, \n                      input_map=None, \n                      return_elements=None, \n                      name=\"\" \n)\n\noperations = graph.get_operations()\ninput_graph_node = operations[2].outputs[0].name[:-2]\noutput_graph_node = operations[-1].outputs[0].name[:-2]\nreduced_graph = strip_unused_lib.strip_unused(input_graph_def = graph.as_graph_def(),\n                              input_node_names=[input_graph_node],\n                              output_node_names=[output_graph_node],\n                              placeholder_type_enum=dtypes.float32.as_datatype_enum)\n\nwith tf.compat.v1.gfile.GFile(\"mbv3_small_cleaned.pb\", \"wb\") as f:\n    f.write(reduced_graph.SerializeToString())\n\n#if we are converting from pb then we shouldn't strip the '/' from tensor names\nmobilenetv3_small_mlmodel = tfcoreml.convert(\n\"mbv3_small_cleaned.pb\",\noutput_feature_names = [output_graph_node],\ninput_name_shape_dict = {input_graph_node: [1, 224, 224, 3]},\nimage_input_names = [input_graph_node],\nimage_scale = 1/127.5,\nred_bias = -1,\ngreen_bias = -1,\nblue_bias=-1,\npredicted_probabilities_output = output_graph_node,\npredicted_feature_name = \"classLabels\",\nclass_labels = [\"background\"]+imagenet_labels, #mobilenetv3 in tf repo has 1001 classes where the first one is background\nminimum_ios_deployment_target='13'\n)\n\nspec = mobilenetv3_small_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"224x224x3 image input of model\"\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\nmobilenetv3_small_mlmodel.short_description = \"Mobilenet V3-small imagenet model\"\nmobilenetv3_small_mlmodel.license = \"Open source academic license\"\nmobilenetv3_small_mlmodel.save(\"mobilenetV3_small.mlmodel\")" }, { "alpha_fraction": 0.5772442817687988, "alphanum_fraction": 0.592901885509491, "avg_line_length": 40.69565200805664, "blob_id": "942f099b3b4b7583f8ca57f4465986cce479fbf6", "content_id": "a801f60c27047ee2d2c8da29132a87f8726c44fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 142, "num_lines": 23, "path": "/tensorflow/classification/utils.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import urllib\nimport tfcoreml\n\ndef get_input_name(name):\n    return name[:-2].split('/')[-1]\n\ndef get_output_name(name):\n    return name[:-2].split('/')[-1]\n\nimagenet_labels = urllib.request.urlopen(\"https://bitbucket.org/goutham98/images/downloads/imagenetLabels.txt\") 
\nimagenet_labels = [i.decode(\"utf-8\").strip()[:-1].split(\"'\")[1] for i in imagenet_labels]\n\n# this function changes the input and output name of layers by finding the given names\ndef change_names(nn, old_input=None, new_input=None, old_output=None, new_output=None):\n    for i in range(len(nn.layers)):\n        if old_input:\n            if len(nn.layers[i].input) > 0:\n                if nn.layers[i].input[0] == old_input:\n                    nn.layers[i].input[0] = new_input\n        if old_output:\n            if len(nn.layers[i].output) > 0:\n                if nn.layers[i].output[0] == old_output:\n                    nn.layers[i].output[0] = new_output" }, { "alpha_fraction": 0.7406555414199829, "alphanum_fraction": 0.7553191781044006, "avg_line_length": 40.9156608581543, "blob_id": "bc983c57197a951e8858a98abc74e0ab549987ae", "content_id": "a6258d9e4b9cd95f68f1f4c40b04658c68a28cb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3478, "license_type": "no_license", "max_line_length": 133, "num_lines": 83, "path": "/tensorflow/classification/efficientnet_edgetpu/efficientnet_edgetpu.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport os\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nfrom effedge_utils import * \nimport sys\nfrom argparse import ArgumentParser\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\nparser = ArgumentParser()\nparser.add_argument(\"--type\", default=\"small\", type=str, help=\"type of effEdgetpu to convert options are \\\n\t\t\t\t\tsmall, medium , large\")\nargs = parser.parse_args()\n\ntar_url = eff_edgetpu_urls[args.type]\ntar_file = tar_url.split(\"/\")[-1]\nfolder = tar_file.split('.')[0]\n\nos.system(\"wget {}\".format(tar_url))\nos.system(\"tar -xvf {}\".format(tar_file))\n\ncleaned_pb = \"eff_edgetpu_{}.pb\".format(args.type)\nmlmodel_file = \"eff_edgetpu_{}.mlmodel\".format(args.type)\n\n#Efficientnet edge tpu provides model in saved model format. directly using it in coreml is not working. 
So first convert\n#savedmodel to frozen graph and then use it with coreml\n\ngraph = tf.Graph()\nwith tf.compat.v1.Session(graph=graph) as sess:\n    tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], os.path.join(folder, \"saved_model\"))\n    output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(\n        sess, # The session\n        graph.as_graph_def(),\n        [\"Softmax\"]\n    )\nwith tf.compat.v1.gfile.GFile(cleaned_pb, \"wb\") as f:\n    f.write(output_graph_def.SerializeToString())\n\n#if we are converting from pb then we shouldn't strip the '/' from tensor names\noutput_graph_node = \"Softmax\"\ninput_graph_node = \"images\" #constant for all three models\n\neff_edgetpu_mlmodel = tfcoreml.convert(\ncleaned_pb,\noutput_feature_names = [output_graph_node],\ninput_name_shape_dict = {input_graph_node: eff_input_size[args.type]},\nimage_input_names = [input_graph_node],\nimage_scale = 1/128,\nred_bias = -0.9921875,\ngreen_bias = -0.9921875,\nblue_bias= -0.9921875,\npredicted_probabilities_output = output_graph_node,\npredicted_feature_name = \"classLabels\",\nclass_labels = [\"background\"] + imagenet_labels, #it has 1001 classes where 0 is background\nminimum_ios_deployment_target='13'\n)\n\nspec = eff_edgetpu_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"{}x{}x{} image input of model\".format(*eff_input_size[args.type][1:])\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\neff_edgetpu_mlmodel.short_description = \"Efficient-edgetpu {} imagenet model\".format(args.type)\neff_edgetpu_mlmodel.license = \"Open source academic license\"\neff_edgetpu_mlmodel.save(mlmodel_file)" }, { "alpha_fraction": 0.7251203656196594, "alphanum_fraction": 0.7387640476226807, "avg_line_length": 43.909908294677734, "blob_id": "31197f49031a0cccceef86f040953994e95ac50a", "content_id": "5ac59d9cf026f56cfc394f4a6dfaa4ade03c4ca0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4984, "license_type": "no_license", "max_line_length": 120, "num_lines": 111, "path": "/tensorflow/classification/efficientnet_lite/efficientnet_lite.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport os\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nfrom efflite_utils import * \nimport sys\nfrom argparse import ArgumentParser\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\nparser = 
ArgumentParser()\nparser.add_argument(\"--type\", default=\"b0\", type=str, help=\"type of efflite to convert options are \\\n\t\t\t\t\tb0, b1, b2, b3, b4\")\nargs = parser.parse_args()\n\ntar_url = eff_lite_urls[args.type]\ntar_file = tar_url.split(\"/\")[-1]\nfolder = tar_file.split('.')[0]\n\nos.system(\"wget {}\".format(tar_url))\nos.system(\"tar -xvf {}\".format(tar_file))\n\ninitial_pb = \"efflite_{}.pb\".format(args.type)\ncleaned_pb = \"efflite_{}_cleaned.pb\".format(args.type)\nmlmodel_file = \"efflite_{}.mlmodel\".format(args.type)\n\n#Efficient lite doesn't provide model in pb, h5 or saved format. It is in either tflite or ckpt.meta format of tf v1\n#So let us convert the ckpt to pb format and then convert it to coreml\n#Lots of unwanted initializer and saver variables are present inside the meta. Have to remove them\n#using strip_unused directly will remove some essential nodes so first using a different method to create\n#a pb with data loader then cleaning that pb in next step\ntf.compat.v1.disable_eager_execution() #have to disable ee for importing meta graph\ngraph = tf.compat.v1.get_default_graph()\nsess = tf.compat.v1.Session()\nsaver = tf.compat.v1.train.import_meta_graph(os.path.join(folder, 'model.ckpt.meta'), clear_devices=True)\nsess.run(tf.compat.v1.global_variables_initializer())\nsaver.restore(sess, os.path.join(folder, 'model.ckpt'))\ninput_graph_def = graph.as_graph_def()\nout_index = get_output_tensor_index(sess.graph.get_operations())\noutput_node = sess.graph.get_operations()[out_index].outputs[0].name[:-2] #this varies for different versions of efflite \nprint(\"Output Node: \", output_node)\noutput_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(\n    sess, # The session\n    input_graph_def, # input_graph_def is useful for retrieving the nodes \n    output_node.split(\",\") \n)\nwith tf.compat.v1.gfile.GFile(initial_pb, \"wb\") as f:\n    f.write(output_graph_def.SerializeToString())\n\ntf.compat.v1.reset_default_graph()\nwith tf.Graph().as_default() as graph: \n    tf.import_graph_def(output_graph_def, \n                      input_map=None, \n                      return_elements=None, \n                      name=\"\" \n)\n\noperations = graph.get_operations()\ninput_index = get_input_tensor_index(operations)\ninput_graph_node = operations[input_index].outputs[0].name[:-2]\noutput_graph_node = operations[-1].outputs[0].name[:-2]\nprint(input_graph_node, output_graph_node)\nreduced_graph = strip_unused_lib.strip_unused(input_graph_def = graph.as_graph_def(),\n                              input_node_names=[input_graph_node],\n                              output_node_names=[output_graph_node],\n                              placeholder_type_enum=dtypes.float32.as_datatype_enum)\nwith tf.compat.v1.gfile.GFile(cleaned_pb, \"wb\") as f:\n    f.write(reduced_graph.SerializeToString())\n\ntf.compat.v1.enable_eager_execution()\n#if we are converting from pb then we shouldn't strip the '/' from tensor names\neff_lite_mlmodel = tfcoreml.convert(\ncleaned_pb,\noutput_feature_names = [output_graph_node],\ninput_name_shape_dict = {input_graph_node: eff_input_size[args.type]},\nimage_input_names = [input_graph_node],\nimage_scale = 1/128,\nred_bias = -0.9921875,\ngreen_bias = -0.9921875,\nblue_bias= -0.9921875,\npredicted_probabilities_output = output_graph_node,\npredicted_feature_name = \"classLabels\",\nclass_labels = imagenet_labels, \nminimum_ios_deployment_target='13'\n)\n\nspec = eff_lite_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = 
\"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"{}x{}x{} image input of model\".format(*eff_input_size[args.type][1:])\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\neff_lite_mlmodel.short_description = \"Efficient-lite {} imagenet model\".format(args.type)\neff_lite_mlmodel.license = \"Open source academic license\"\neff_lite_mlmodel.save(mlmodel_file)" }, { "alpha_fraction": 0.7003983855247498, "alphanum_fraction": 0.7207740545272827, "avg_line_length": 55.32051467895508, "blob_id": "6504ad99ddb211837ff9060603dba90ecd16e4aa", "content_id": "d5b5803bc71a2b621f5065233d4e636dabaf0a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8785, "license_type": "no_license", "max_line_length": 134, "num_lines": 156, "path": "/pytorch/detection/efficientDet/utils.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import os\nfrom coremltools.models import datatypes\nfrom coremltools.models.pipeline import *\nimport coremltools\n\neff_urls = {\n    0 : \"https://github.com/zylo117/Yet-Another-Efficient-Pytorch/releases/download/1.0/efficientdet-d0.pth\",\n    1 : \"https://github.com/zylo117/Yet-Another-Efficient-Pytorch/releases/download/1.0/efficientdet-d1.pth\",\n    2 : \"https://github.com/zylo117/Yet-Another-Efficient-Pytorch/releases/download/1.0/efficientdet-d2.pth\",\n    3 : \"https://github.com/zylo117/Yet-Another-Efficient-Pytorch/releases/download/1.0/efficientdet-d3.pth\"\n}\n\ndef change_error_dimension(layer_spec):\n    old_pad = layer_spec.constantPad.padAmounts \n    dim = len(old_pad) \n    new_pad = [0, 0, 0, 0, old_pad[3], old_pad[7], old_pad[2], old_pad[6]] \n    for i in range(dim):\n        layer_spec.constantPad.padAmounts.pop() \n    layer_spec.constantPad.padAmounts.extend(new_pad) \n\ndef change_effdet_output_names(model_spec):\n    output1 = model_spec.description.output[0].name\n    model_spec.description.output[0].type.multiArrayType.dataType = datatypes._FeatureTypes_pb2.ArrayFeatureType.DOUBLE\n    output2 = model_spec.description.output[1].name\n    model_spec.description.output[1].type.multiArrayType.dataType = datatypes._FeatureTypes_pb2.ArrayFeatureType.DOUBLE\n    new_name1 = \"box_coordinates_pre\"\n    new_name2 = \"box_scores_pre\"\n    model_spec.description.output[0].name = new_name1\n    model_spec.description.output[1].name = new_name2\n    for lay in model_spec.neuralNetwork.layers:\n        if lay.output[0] == output1:\n            lay.output[0] = new_name1\n        elif lay.output[0] == output2:\n            lay.output[0] = new_name2\n\n\ndef add_squeeze_layer(spec, input_name, output_name, output_dims, input_dims):\n    spec.neuralNetwork.layers.add()\n    spec.neuralNetwork.layers[-1].squeeze.MergeFromString(b'')\n    spec.neuralNetwork.layers[-1].name = output_name\n    spec.neuralNetwork.layers[-1].input.append(input_name)\n    spec.neuralNetwork.layers[-1].inputTensor.add()\n 
spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].squeeze.squeezeAll = True\n spec.neuralNetwork.layers[-1].output.append(output_name)\n \n\ndef add_slicestatic_layer(spec, input_name, output_name, output_dims, input_dims, begin_id, end_id,\n begin_mask, end_mask):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].sliceStatic.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].sliceStatic.strides.extend([1,1])\n spec.neuralNetwork.layers[-1].sliceStatic.beginIds.extend(begin_id)\n spec.neuralNetwork.layers[-1].sliceStatic.endIds.extend(end_id)\n spec.neuralNetwork.layers[-1].sliceStatic.beginMasks.extend(begin_mask)\n spec.neuralNetwork.layers[-1].sliceStatic.endMasks.extend(end_mask)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n \n\ndef add_constant_layer(spec, output_name, output_dims, constant_data):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].loadConstantND.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].loadConstantND.shape.extend(output_dims)\n spec.neuralNetwork.layers[-1].loadConstantND.data.floatValue.extend(map(float, constant_data.flatten()))\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = 2\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n \ndef add_elementwise_layer(spec, output_name, input_names, inputs_dims, alpha=None, mode=\"multiply\"):\n if len(input_names) == 1 and (not alpha):\n raise ValueError(\"Should provide alpha value when only one input is provided\")\n if len(input_names) == 2 and alpha:\n raise ValueError(\"Alpha should be provided only with one input\")\n spec.neuralNetwork.layers.add()\n if mode == \"multiply\":\n spec.neuralNetwork.layers[-1].multiply.MergeFromString(b'')\n elif mode == \"add\":\n spec.neuralNetwork.layers[-1].add.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].input.extend(input_names)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n for k, i in enumerate(inputs_dims):\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[k].rank = len(i)\n spec.neuralNetwork.layers[-1].inputTensor[k].dimValue.extend(i)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(inputs_dims[0])\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(inputs_dims[0])\n if len(inputs_dims) == 1: spec.neuralNetwork.layers[-1].multiply.alpha = alpha\n \ndef add_unary(spec, output_name, input_name, input_dims, mode=\"exp\"):\n 
spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].unary.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].unary.shift = 0\n spec.neuralNetwork.layers[-1].unary.scale = 1\n spec.neuralNetwork.layers[-1].unary.epsilon = 1e-6\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n if mode == \"exp\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP')\n elif mode == \"log\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG')\n elif mode == \"abs\":\n spec.neuralNetwork.layers[-1].unary.type = coremltools.proto.NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS')\n else:\n raise ValueError(\"Mode not understood\")\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(input_dims)\n \ndef add_concat_layer(spec, output_name, input_names, input_dims, output_dims, axis):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].concatND.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].input.extend(input_names)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n for k, i in enumerate(input_dims):\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[k].rank = len(i)\n spec.neuralNetwork.layers[-1].inputTensor[k].dimValue.extend(i)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(output_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(output_dims)\n spec.neuralNetwork.layers[-1].concatND.axis = axis\n \ndef add_reverse_layer(spec, output_name, input_name, input_dims, axis):\n spec.neuralNetwork.layers.add()\n spec.neuralNetwork.layers[-1].reverse.MergeFromString(b'')\n spec.neuralNetwork.layers[-1].reverse.reverseDim.extend(axis)\n spec.neuralNetwork.layers[-1].input.append(input_name)\n spec.neuralNetwork.layers[-1].output.append(output_name)\n spec.neuralNetwork.layers[-1].name = output_name\n spec.neuralNetwork.layers[-1].inputTensor.add()\n spec.neuralNetwork.layers[-1].inputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].inputTensor[0].dimValue.extend(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor.add()\n spec.neuralNetwork.layers[-1].outputTensor[0].rank = len(input_dims)\n spec.neuralNetwork.layers[-1].outputTensor[0].dimValue.extend(input_dims)" }, { "alpha_fraction": 0.776862621307373, "alphanum_fraction": 0.7985773086547852, "avg_line_length": 46.71428680419922, "blob_id": "7d4738e1100f61b57ed0a034e08c8cb521510ae6", "content_id": "28789ac70b3ee06163155e9f05463927271c73b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 118, "num_lines": 56, "path": "/tensorflow/classification/inception/inception_resnetv2.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport os\nimport 
tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nimport sys\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\n#Inception resnet v2\ninception_resnetv2 = tf.keras.applications.InceptionResNetV2(\n    include_top=True, weights='imagenet', input_tensor=Input(shape=(224, 224, 3), batch_size=1))\ninception_resnetv2.save(\"inception_resnetv2.h5\")\n\n# CoreMl tools will automatically consider the model as NeuralNetwork classifier if you provide the \n# class_labels argument with the class names of classification model\ninception_resnetv2_mlmodel = tfcoreml.convert(\n\"./inception_resnetv2.h5\",\noutput_feature_names = [get_output_name(inception_resnetv2.outputs[0].name)],\ninput_name_shape_dict = {get_input_name(inception_resnetv2.inputs[0].name): list(inception_resnetv2.inputs[0].shape)},\nimage_input_names = [get_input_name(inception_resnetv2.inputs[0].name)],\nimage_scale = 1/127.5,\nred_bias = -1,\ngreen_bias = -1,\nblue_bias=-1,\npredicted_probabilities_output = get_output_name(inception_resnetv2.outputs[0].name),\npredicted_feature_name = \"classLabels\",\nclass_labels = imagenet_labels,\nminimum_ios_deployment_target='13'\n)\n\n#Changing the input name and one of the output names for convenience. Also adding model descriptions\nspec = inception_resnetv2_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"224x224x3 image input of model\"\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\ninception_resnetv2_mlmodel.short_description = \"Inception resnet v2 imagenet model\"\ninception_resnetv2_mlmodel.license = \"Open source academic license\"\ninception_resnetv2_mlmodel.save(\"inception_resnetv2.mlmodel\")" }, { "alpha_fraction": 0.7630677223205566, "alphanum_fraction": 0.7887746095657349, "avg_line_length": 43.056602478027344, "blob_id": "ba72d74e7ed7aa4a9e9eead3890deb9fc4d5acd1", "content_id": "f0f6b332bc4f320a4f6f08577559dfcd9bcf5aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2334, "license_type": "no_license", "max_line_length": 115, "num_lines": 53, "path": "/tensorflow/classification/mobilenet/mobilenet_v2.py", "repo_name": "momolas/coreml_conversion_hub", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport coremltools\nimport tfcoreml\nimport numpy as np\nimport urllib\nimport tarfile\nimport os\nfrom tensorflow.keras.layers import *\nfrom tensorflow.python.tools import strip_unused_lib\nfrom tensorflow.python.framework import dtypes\nimport 
sys\nsys.path.append(\"../\")\nfrom utils import *\nprint(tf.__version__)\n\n#Let's do the same conversion for mobilenetV2 model\nmobilenet_v2 = tf.keras.applications.MobileNetV2(alpha=1.0, include_top=True, weights='imagenet',\n                input_tensor=Input(shape=(224, 224, 3), batch_size=1))\nmobilenet_v2.save(\"mobilenet_v2.h5\")\n\nmobilenetv2_mlmodel = tfcoreml.convert(\n\"./mobilenet_v2.h5\",\noutput_feature_names = [get_output_name(mobilenet_v2.outputs[0].name)],\ninput_name_shape_dict = {get_input_name(mobilenet_v2.inputs[0].name): list(mobilenet_v2.inputs[0].shape)},\nimage_input_names = [get_input_name(mobilenet_v2.inputs[0].name)],\nimage_scale = 1/127.5,\nred_bias = -1,\ngreen_bias = -1,\nblue_bias=-1,\npredicted_probabilities_output = get_output_name(mobilenet_v2.outputs[0].name),\npredicted_feature_name = \"classLabels\",\nclass_labels = imagenet_labels,\nminimum_ios_deployment_target='13'\n)\n\nspec = mobilenetv2_mlmodel._spec #changing this spec will automatically reflect in mlmodel properties\nold_input_name = spec.description.input[0].name\nold_output_name = spec.description.output[0].name\nspec.description.input[0].name = \"ImageInput\" #Old input name has reference at multiple places have to change all\nspec.neuralNetworkClassifier.preprocessing[0].featureName = \"ImageInput\" #have to change the preprocessor also\n\nspec.description.output[0].name = \"classProbs\" #Old output name has reference at multiple places have to change all\nspec.description.predictedProbabilitiesName = \"classProbs\"\nspec.neuralNetworkClassifier.labelProbabilityLayerName = \"classProbs\"\n\nchange_names(spec.neuralNetworkClassifier, old_input_name, \"ImageInput\", old_output_name, \"classProbs\")\n\nspec.description.input[0].shortDescription = \"224x224x3 image input of model\"\nspec.description.output[0].shortDescription = \"Class to probability mapping dictionary\"\nspec.description.output[1].shortDescription = \"Correct class label\"\nmobilenetv2_mlmodel.short_description = \"Mobilenet V2 imagenet model\"\nmobilenetv2_mlmodel.license = \"Open source academic license\"\nmobilenetv2_mlmodel.save(\"mobilenetV2.mlmodel\")\n\n" } ]
14
KyleBibler/DayCounter
https://github.com/KyleBibler/DayCounter
d22af0c256f415786b07421c9d6bb89c0e238772
3516d70497eea63c8c0fde82bc0042ef9ea4cf8b
049d97d7d9297aef81002e16b22549f4eafbe5ca
refs/heads/master
2016-09-06T17:08:58.102594
2014-09-03T17:32:57
2014-09-03T17:32:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5846313834190369, "alphanum_fraction": 0.6407061219215393, "avg_line_length": 39.125, "blob_id": "2776a955b07915af60df2fab20ff5a6db1a171bf", "content_id": "929580b7bed0bec00b61627c3a924f4c0de00581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "no_license", "max_line_length": 112, "num_lines": 24, "path": "/Day Counter/Day_Counter.py", "repo_name": "KyleBibler/DayCounter", "src_encoding": "UTF-8", "text": "from flask import *\nfrom datetime import datetime\nfrom time import gmtime, strftime\n\napp = Flask(__name__)\nend_date = datetime.strptime('Aug 5 2014 4:00PM', '%b %d %Y %I:%M%p')\n\n\[email protected]('/')\ndef index():\n time_delta = end_date-datetime.now()\n time_string = strftime(\"This counter uses %Z time.\", gmtime())\n hours, remainder = divmod(time_delta.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n progress_days = 100 - (float(time_delta.days) / 90.0 * 100.0)\n progress_hours = 100 - (float(hours) / 24.0 * 100.0)\n progress_minutes = 100 - (float(minutes) / 60.0 * 100.0)\n progress_seconds = 100 - (float(seconds) / 60.0 * 100.0)\n progress = [(progress_days, \"Days\"), (progress_hours, \"Hours\"),\n (progress_minutes, \"Minutes\"), (progress_seconds, \"Seconds\")]\n return render_template(\"index.html\", progress_times=progress, days=time_delta.days, time_string=time_string)\n\nif __name__ == '__main__':\n app.run()\n" } ]
1
ahtn/python-kle
https://github.com/ahtn/python-kle
c2708ec4e8d6365b995f169e65d8dcd79237c7db
e5116d7390dce2563dfe60c0039148b996c229d4
7fc8c567309bff12ad6fb83f24c775b5ddf9ca14
refs/heads/master
2021-04-28T08:56:49.448307
2018-03-03T02:56:09
2018-03-03T02:56:09
122,027,480
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 40, "blob_id": "7d3932adc8d45e0532350cf484307f9602441cb8", "content_id": "e289681800df94e154fc74cc01971529ff6580ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/README.md", "repo_name": "ahtn/python-kle", "src_encoding": "UTF-8", "text": "# kle: parser for keyboard layout editor\n\n" }, { "alpha_fraction": 0.5990676283836365, "alphanum_fraction": 0.6072261333465576, "avg_line_length": 26.677419662475586, "blob_id": "4fed2916ebcd01af59e485babbc6261d54fc2baa", "content_id": "dafe2775f4ee46bbc823aaedff2d2c9ae3a5b591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 74, "num_lines": 31, "path": "/setup.py", "repo_name": "ahtn/python-kle", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2018 [email protected]\n# Licensed under the MIT license (http://opensource.org/licenses/MIT)\n\nfrom setuptools import setup\nimport os\n\ntry: # python3\n fields = {}\n with open(os.path.join(\"kle\", \"version.py\")) as f:\n exec(f.read(), fields)\n __version__ = fields['__version__']\nexcept: # python2\n execfile(os.path.join(\"kle\", \"version.py\"))\n\nsetup(\n name = 'kle',\n version = __version__,\n description = \"Python library for parsing keyboard layout files from \"\n \"keyboard-layout-editor.com.\",\n url = \"http://github.com/ahtn/python-kle\",\n author = \"jem\",\n author_email = \"[email protected]\",\n license = 'MIT',\n packages = ['kle'],\n install_requires = [ 'six' ],\n keywords = ['keyboard', 'keyboard-layout-editor'],\n scripts = ['bin/kle-view'],\n zip_safe = False\n)\n" }, { "alpha_fraction": 0.4709544777870178, "alphanum_fraction": 0.48488348722457886, "avg_line_length": 26.57345199584961, "blob_id": "ecc4bf94f7e579307a9efb8648073aaeb07ad0ca", "content_id": "6bd9ce3bd5590c91739341e3f57bc2c2f4c20616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15579, "license_type": "no_license", "max_line_length": 93, "num_lines": 565, "path": "/kle/kle.py", "repo_name": "ahtn/python-kle", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2017 [email protected]\n# Licensed under the MIT license (http://opensource.org/licenses/MIT)\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport math, cmath\nfrom collections import namedtuple\n\nclass KLEParseError(Exception):\n pass\n\ndef rotate_complex(radians):\n return cmath.exp(radians*1j)\n\nRect = namedtuple('Rect', 'x y w h')\n\nclass Point(namedtuple('Point', ('x', 'y'))):\n __slots__ = ()\n def __abs__(self):\n return type(self)(abs(self.x), abs(self.y))\n\n def __int__(self):\n return type(self)(int(self.x), int(self.y))\n\n def __add__(self, other):\n return type(self)(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return type(self)(self.x - other.x, self.y - other.y)\n\n def __neg__(self):\n return type(self)(-self.x, -self.y)\n\n def __mul__(self, other):\n return type(self)(self.x * other, self.y * other)\n\n def __rmul__(self, other):\n return type(self)(self.x * other, self.y * other)\n\n def __truediv__(self, other):\n return type(self)(self.x / other, 
self.y / other)\n\n def __rdiv__(self, other):\n return type(self)(self.x / other, self.y / other)\n\n def dot_product(self, other):\n return self.x * other.x + self.y * other.y\n\n def cross_product(self, other):\n return self.x * other.y - self.y * other.x\n\n def magnitude(self):\n return math.sqrt(self.x**2 + self.y**2)\n\n def distance_to(self, other):\n return math.hypot((self.x - other.x), (self.y - other.y))\n\n def normalize(self):\n if self.x == 0 and self.y == 0:\n return Point(self.x, self.y)\n else:\n return Point(self.x, self.y) / self.magnitude()\n\n\nclass KLEKey(object):\n \"\"\" ( x, y )\n 0 top left (-1, -1)\n 1 bottom left (-1, 1)\n 2 top right ( 1, -1)\n 3 bottom right ( 1, 1)\n 4 front left (-1, 2)\n 5 front right ( 1, 2)\n 6 center left (-1, 0)\n 7 center right ( 1, 0)\n 8 top center ( 0, 1)\n 9 center ( 0, 0)\n 10 bottom center ( 0, -1)\n 11 front center ( 0, 2)\n \"\"\"\n TOP_LEFT = 0\n BOT_LEFT = 1\n TOP_RIGHT = 2\n BOT_RIGHT = 3\n FRONT_LEFT = 4\n FRONT_RIGHT = 5\n CENTER_LEFT = 6\n CENTER_RIGHT = 7\n TOP_CENTER = 8\n CENTER = 9\n BOT_CENTER = 10\n FRONT_CENTER = 11\n\n LEGEND_MAP = {\n TOP_LEFT: (-1, -1),\n BOT_LEFT: (-1, 1),\n TOP_RIGHT: ( 1, -1),\n BOT_RIGHT: ( 1, 1),\n FRONT_LEFT: (-1, 2),\n FRONT_RIGHT: ( 1, 2),\n CENTER_LEFT: (-1, 0),\n CENTER_RIGHT: ( 1, 0),\n TOP_CENTER: ( 0, -1),\n CENTER: ( 0, 0),\n BOT_CENTER: ( 0, 1),\n FRONT_CENTER: ( 0, 2),\n }\n\n def __init__(self, ux, uy, uw, uh, text=\"\", properties=None, decal=False, spacing=19.0):\n \"\"\"@todo: to be defined1. \"\"\"\n # the coordinate system\n # the offset in the coordinate system\n self._u_x = ux * 1.0\n self._u_y = uy * 1.0\n self._u_w = uw * 1.0\n self._u_h = uh * 1.0\n self._spacing = spacing\n self.decal = decal\n if properties == None:\n self.properties = KbProperties()\n else:\n self.properties = copy.copy(properties)\n self._r = self.properties.r\n self._u_rx = self.properties.rx\n self._u_ry = self.properties.ry\n\n legends = text.split(\"\\n\")\n max_legends = len(KLEKey.LEGEND_MAP)\n if len(legends) > max_legends:\n raise Exception(KLEParseError(\"Too many legends for key. 
Got {}, max is {}\"\n .format(len(legends), max_legends)))\n\n self._legends = {}\n\n for key in range(max_legends):\n if key < len(legends):\n self._legends[key] = legends[key]\n else:\n self._legends[key] = \"\"\n\n for key in self._legends.keys():\n if not key in self._legends:\n self._legends [key] = ''\n\n def get_legend(self, key):\n assert(key in KLEKey.LEGEND_MAP)\n return self._legends[key]\n\n def set_legend(self, key, value):\n assert(key in KLEKey.LEGEND_MAP)\n assert(type(value) == str)\n self._legends[key] = value\n\n def get_legend_list(self):\n result = []\n for key in range(len(KLEKey.LEGEND_MAP)):\n if self._legends[key] != \"\":\n result.append((KLEKey.LEGEND_MAP[key], self._legends[key]))\n return result\n\n def get_legend_str(self):\n result = \"\"\n for key in range(len(KLEKey.LEGEND_MAP)):\n result += self._legends[key] + \"\\n\"\n return result.strip(\"\\n\")\n\n @property\n def spacing(self):\n return self._spacing\n\n @property\n def x(self):\n return self._spacing * (self._u_x + self._u_rx)\n\n @property\n def y(self):\n return self._spacing * (self._u_y + self._u_ry)\n\n @property\n def w(self):\n return self._spacing * self._u_w\n\n @property\n def h(self):\n return self._spacing * self._u_h\n\n @property\n def r(self):\n return self._r\n\n @property\n def r_rad(self):\n return math.radians(self._r)\n\n @property\n def rx(self):\n return self._u_rx * self._spacing\n\n @property\n def ry(self):\n return self._u_ry * self._spacing\n\n def get_rect_points(self):\n pos = self.get_pos()\n pos = pos[0] + pos[1]*1j\n\n edge_w = (self.w + 0j ) * rotate_complex(self.r_rad)\n edge_h = (0 + self.h*1j) * rotate_complex(self.r_rad)\n\n v0 = pos\n v1 = pos + edge_w\n v2 = pos + edge_w + edge_h\n v3 = pos + edge_h\n\n return [\n Point(v0.real, v0.imag),\n Point(v1.real, v1.imag),\n Point(v2.real, v2.imag),\n Point(v3.real, v3.imag),\n ]\n\n def get_center(self):\n pos = self.get_pos()\n pos = pos.x + pos.y*1j\n center = pos + 1/2 * (self.w + self.h*1j) * rotate_complex(self.r_rad)\n return Point(center.real, center.imag)\n\n def set_center(self, x, y):\n new_center = x + y*1j\n corner = new_center - 1/2 * (self.w + self.h*1j) * rotate_complex(self.r_rad)\n self.set_pos(corner.real, corner.imag)\n\n def bounding_box(self):\n points = self.get_rect_points()\n xmin = math.inf\n ymin = math.inf\n xmax = -math.inf\n ymax = -math.inf\n\n for point in points:\n xmin = min(xmin, point[0])\n ymin = min(ymin, point[1])\n xmax = max(xmax, point[0])\n ymax = max(ymax, point[1])\n\n return Rect(xmin, ymin, xmax-xmin, ymax-ymin)\n\n def bounding_box_points(self):\n (x, y, w, h) = self.bounding_box()\n return [\n Point(x , y ),\n Point(x + w, y ),\n Point(x + w, y + h),\n Point(x , y + h)\n ]\n\n @property\n def spacing(self):\n return self._spacing\n\n @spacing.setter\n def spacing(self, value):\n self._spacing = value\n\n def get_pos(self):\n rpos = (self._u_rx + self._u_ry*1j)\n pos = rpos + (self._u_x + self._u_y*1j) * rotate_complex(self.r_rad)\n pos *= self._spacing\n return Point(pos.real, pos.imag)\n\n def set_pos(self, x, y):\n u_pos = Point(x, y) / self._spacing # remove scaling\n u_pos -= Point(self._u_rx, self._u_ry) # remove translation offset\n u_pos = (u_pos.x + u_pos.y*1j) * rotate_complex(-self.r_rad) # remove rotation\n self._u_x = u_pos.real # left with unit positions\n self._u_y = u_pos.imag\n\n def set_angle(self, new_r):\n pos = self.get_center()\n self._r = new_r\n self.set_center(pos.x, pos.y)\n\n @property\n def u_x(self):\n return self._u_x + self._u_rx\n\n 
@property\n def u_y(self):\n return self._u_y + self._u_ry\n\n @property\n def u_w(self):\n return self._u_w\n\n @property\n def u_h(self):\n return self._u_h\n\n def get_u_pos(self):\n return Point(self._u_x, self._u_y)\n\n def set_u_pos(self, ux, uy, r=None, u_rx=None, u_ry=None):\n self._u_x = ux\n self._u_y = uy\n if r: self._r = r\n if u_rx: self._u_rx = u_rx\n if u_ry: self._u_ry = u_ry\n\n def __str__(self):\n return \"KLEKey(legend={}, ux={}, uy={}, uw={}, uh={}, r={})\".format(\n repr(self.get_legend_text()), self._u_x, self._u_y, self._u_w, self._u_h, self._r\n )\n\n def __repr__(self):\n return str(self)\n\n def get_properties(self):\n result = KeyProperties()\n result.x = self._u_x\n result.y = self._u_y\n result.w = self._u_w\n result.h = self._u_h\n result.rx = self._u_rx\n result.ry = self._u_ry\n return result\n\n\nclass KeyProperties(object):\n def __init__(self,\n x=0, y=0,\n w=1, h=1,\n x2=None, y2=None,\n w2=None, h2=None,\n stepped=False,\n homing=False,\n decal=False):\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n\n self.x2 = None\n self.y2 = None\n self.w2 = None\n self.h2 = None\n\n self.rx = None\n self.ry = None\n\n self.stepped = False\n self.homing = False\n self.decal = False\n\n @staticmethod\n def from_json(obj):\n props = KeyProperties()\n if 'x' in obj:\n props.x = float(obj['x'])\n if 'y' in obj:\n props.y = float(obj['y'])\n if 'w' in obj:\n props.w = float(obj['w'])\n if 'h' in obj:\n props.h = float(obj['h'])\n if 'x2' in obj:\n props.x2 = float(obj['x2'])\n if 'y2' in obj:\n props.y2 = float(obj['y2'])\n if 'w2' in obj:\n props.w2 = float(obj['w2'])\n if 'h2' in obj:\n props.h2 = float(obj['h2'])\n if 'd' in obj:\n props.decal = bool(obj['d'])\n if 'l' in obj:\n props.stepped = bool(obj['l'])\n if 'n' in obj:\n props.stepped = bool(obj['n'])\n return props\n\n def to_json(self):\n result = {}\n for field in [\n 'x', 'y', 'w', 'h', 'x2', 'y2', 'w2', 'h2',\n 'rx', 'ry',\n ]:\n value = getattr(self, field)\n if value != None: result[field] = value\n return result\n\n\nclass KbProperties(object):\n \"\"\"\n KbProperties apply to all subsequent keycaps\n \"\"\"\n def __init__(self,\n keycap_color='#ffffff',\n text_color='#000000',\n ghosted=False,\n profile=None,\n text_alignment=None,\n font_primary=3,\n font_secondary=3,\n r = 0,\n rx = 0,\n ry = 0\n ):\n self.bg = keycap_color\n self.fg = text_color\n self.ghosted = False\n self.profile = None\n self.text_alignment = None\n self.font_primary = 3\n self.font_secondary = 3\n self.r = r\n self.rx = rx\n self.ry = ry\n\n def update(self, obj):\n if 'c' in obj:\n self.bg = str(obj['c'])\n if 't' in obj:\n self.fg = str(obj['t'])\n if 'g' in obj:\n self.ghosted = bool(obj['g'])\n if 'a' in obj:\n self.text_alignment = int(obj['a'])\n if 'f' in obj:\n self.font_primary = int(obj['f'])\n if 'f2' in obj:\n self.font_secondary = int(obj['f2'])\n if 'p' in obj:\n self.profile = str(obj['p'])\n if 'r' in obj:\n self.r = float(obj['r'])\n if 'rx' in obj:\n self.rx = float(obj['rx'])\n if 'ry' in obj:\n self.ry = float(obj['ry'])\n\n\nclass KLEKeyboard(object):\n \"\"\"Docstring for Keyboard. \"\"\"\n\n def __init__(self, spacing=19.0):\n \"\"\"@todo: to be defined1. 
\"\"\"\n self.keys = []\n self.col = 0\n self.row = -1\n self.global_props = KbProperties()\n self.cur_x = 0\n self.cur_y = 0\n self.spacing = spacing\n self.metadata = {}\n\n def get_keys(self):\n return iter(self.keys)\n\n def reset_pos(self, u_x, u_y):\n self.cur_x = u_x\n self.cur_y = u_y\n\n def add_row(self):\n self.cur_x = 0\n self.cur_y += 1\n self._col = 0\n self.row += 1\n\n def add_key(self, x=0, y=0, w=1, h=1, text=\"\", decal=False):\n self.cur_x += x\n self.cur_y += y\n pos_x = self.cur_x\n pos_y = self.cur_y\n key = KLEKey(pos_x, pos_y, w, h, text=text, properties=self.global_props,\n decal=decal, spacing=self.spacing)\n self.keys.append(key)\n self.cur_x += w\n self.col += 1\n\n def addFatKey(self, w=1, h=1, x=1, y=0, w2=1, h2=1, x2=1, y2=0):\n pass\n\n @staticmethod\n def from_file(file_name, spacing=19.0):\n with open(file_name) as json_file:\n json_layout = json.loads(json_file.read())\n return KLEKeyboard.from_json(json_layout, spacing=spacing)\n\n @staticmethod\n def from_json(json_layout, spacing=19.0):\n keyboard = KLEKeyboard(spacing=spacing)\n props = KeyProperties()\n pos = 0\n for row in json_layout:\n if isinstance(row, dict):\n # Copy all the fields to the metadata\n for key in row:\n keyboard.metadata[key] = row[key]\n continue\n for key in row:\n if type(key) == str:\n x = props.x\n y = props.y\n w = props.w\n h = props.h\n d = props.decal\n key_text = key\n keyboard.add_key(x, y, w, h, text=key_text, decal=d)\n # reset properties for next key\n props = KeyProperties()\n pos += 1\n elif type(key) == dict:\n props = KeyProperties.from_json(key)\n\n keyboard.global_props.update(key)\n\n if 'rx' in key:\n keyboard.cur_x = 0\n if 'ry' in key:\n keyboard.cur_y = 0\n keyboard.add_row()\n return keyboard\n\n def to_json(self):\n result = []\n\n if self.metadata != {}:\n result.append(copy.copy(self.metadata))\n\n\n\n last_key = None\n for key in self.get_keys():\n # TODO: make output cleaner.\n # TODO: handle all fields correctly\n # if last_key:\n # last_properties = key.get_properties()\n # last_key = key\n props = key.get_properties()\n row = [props.to_json(), key.get_legend_str()]\n result.append(row)\n\n return result\n\n def mirror(self, axis='x'):\n \"\"\"\n Mirror the keys in the layout\n\n Args:\n axis: the axis on which to mirror, either 'x' or 'y'\n \"\"\"\n if axis == 'x':\n mirror = Point(-1, 1)\n elif axis == 'y':\n mirror = Point(1, -1)\n # elif isinstance(axis, Point):\n # mirror = axis\n\n for key in self.get_keys():\n old_center = key.get_center()\n key.set_center(mirror.x * old_center.x, mirror.y * old_center.y)\n key.set_angle(-key.r)\n" }, { "alpha_fraction": 0.5850019454956055, "alphanum_fraction": 0.6085590720176697, "avg_line_length": 23.257143020629883, "blob_id": "e18dae6a7f0fd45888da9231133b3f5a56ffdcbc", "content_id": "cb55c29a5caa7bb2f8ce296ca7d8504db7945033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2547, "license_type": "no_license", "max_line_length": 82, "num_lines": 105, "path": "/bin/kle-view", "repo_name": "ahtn/python-kle", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2018 [email protected]\n# Licensed under the MIT license (http://opensource.org/licenses/MIT)\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tkinter\nimport argparse\nimport json\n\nimport kle\n\narg_parser = argparse.ArgumentParser(\n description='KLE test script'\n)\narg_parser.add_argument(\n 'layout', 
type=str, action='store',\n help='The layout file to test'\n),\narg_parser.add_argument(\n \"-m\", \"--mirror\",\n action = 'store_const', const = True, default = False,\n dest = \"mirror\",\n help = \"Mirror the supplied layout\",\n)\n\narg_parser.add_argument(\n \"-o\", \"--out-file\", type=str,\n help = \"The output file to write to.\",\n)\n\nargs = arg_parser.parse_args()\n\nif 0:\n leg1 = \"0\\n6\\n2\\n8\\n9\\nb\\n3\\n5\\n1\\n4\\n7\\na\"\n leg2 = \"0\\n\\n\\n\\n\\nb\\n3\"\n key1 = kle.KLEKey(0, 0, 1, 1, leg1)\n key2 = kle.KLEKey(1, 0, 1, 1, leg2)\n\n print(key1)\n print(key1.get_legend_list())\n\n print(key2)\n print(key2.get_legend_list())\n\ndef tk_draw_key(can, key, offset):\n verts = key.get_rect_points()\n bb_verts = key.bounding_box_points()\n\n trans = [(x+offset[0], y+offset[1]) for (x, y) in verts]\n trans_bb = [(x+offset[0], y+offset[1]) for (x, y) in bb_verts]\n\n # draw the bounding boxes as well\n can.create_polygon(\n trans_bb,\n fill='',\n outline=\"red\",\n )\n\n # draw the key outlines\n can.create_polygon(\n trans,\n fill=key.properties.bg,\n outline=key.properties.fg\n )\n\n # draw the center of the keys\n center = key.get_center()\n can.create_text(\n center.x + offset[0], center.y + offset[1],\n text=\"x\"\n )\n\ndef tk_draw_layout(keyboard):\n main_win = tkinter.Tk()\n can = tkinter.Canvas(main_win, width=800, height=800)\n main_win.geometry(\"+400+400\")\n main_win.title(\"kle viewer\")\n can.pack()\n\n min_x = 0\n min_y = 0\n for key in keyboard.get_keys():\n bbox = key.bounding_box_points()\n min_x = min(min_x, *[point.x for point in bbox])\n min_y = min(min_y, *[point.y for point in bbox])\n\n\n for key in keyboard.get_keys():\n # draw_key(t, key)\n tk_draw_key(can, key, (-min_x + key.spacing//2, -min_y + key.spacing//2))\n tkinter.mainloop()\n\n\nkeyboard = kle.KLEKeyboard.from_file(args.layout, spacing=50)\n\nif args.mirror:\n keyboard.mirror()\n\nif args.out_file != None:\n with open(args.out_file, 'w') as out_file:\n out_file.write(json.dumps(keyboard.to_json()))\n\ntk_draw_layout(keyboard)\n" } ]
4
Niranjan16/Pima-Indian-Tribe
https://github.com/Niranjan16/Pima-Indian-Tribe
47cfcb97d847dd665db4bec2488d678a6a54a880
8bcf74cb07a6314b6591c32e415798f486bded61
492cc94c1ad11ccf9d781dac97d582f73a0131dc
refs/heads/master
2022-08-13T03:03:28.271334
2020-05-16T10:41:27
2020-05-16T10:41:27
264,410,831
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6923766732215881, "alphanum_fraction": 0.7103139162063599, "avg_line_length": 31.787878036499023, "blob_id": "19a63a5a33923ebe45740a5dccef4c00bbf69fbc", "content_id": "baa59be6fa3b37ac0ca80bd6b0ec773d7966383c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 111, "num_lines": 33, "path": "/Code.py", "repo_name": "Niranjan16/Pima-Indian-Tribe", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import metrics\r\n\r\ndf = pd.read_csv(\"diabetes.csv\")\r\ndel df[\"SkinThickness\"]\r\n\r\nX = df.drop(columns=[\"Outcome\"])\r\ny = df.drop(columns=[\"Pregnancies\",\"Glucose\",\"BloodPressure\",\"Insulin\",\"BMI\",\"DiabetesPedigreeFunction\",\"Age\"])\r\nsplit_test_size = 0.2\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=42)\r\n\r\n\r\nprint(\"{0:0.2f}% in training set\".format((len(X_train)/len(df.index)) * 100))\r\nprint(\"{0:0.2f}% in test set\".format((len(X_test)/len(df.index)) * 100))\r\n\r\n\r\nnb_model = DecisionTreeClassifier()\r\n\r\nnb_model.fit(X_train,y_train)\r\nnb_predict_train = nb_model.predict(X_train)\r\n\r\n# Accuracy\r\nprint(\"Accuracy: {0:.4f}\".format(metrics.accuracy_score(y_train, nb_predict_train)))\r\n\r\n\r\n# predict values using the testing data\r\nnb_predict_test = nb_model.predict(X_test)\r\n\r\n# training metrics\r\nprint(\"Accuracy: {0:.4f}\".format(metrics.accuracy_score(y_test, nb_predict_test)))\r\n" }, { "alpha_fraction": 0.8415841460227966, "alphanum_fraction": 0.8415841460227966, "avg_line_length": 49.5, "blob_id": "2c7db8966145f0748bae70706a93582989523267", "content_id": "2a69504b6b4fcc62a8473ace7989159eed97efe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 101, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/Readme.md", "repo_name": "Niranjan16/Pima-Indian-Tribe", "src_encoding": "UTF-8", "text": "Diabetes Prediction on pima indian tribe using Machine Learning \nLibraries use: Numpy,Pandas,Sklearn\n" } ]
2
shaneausmus/hangman
https://github.com/shaneausmus/hangman
be5c959100bf17705a1137e667c237e5da64cca5
cd566a0bc5b6f72ff73c9b7a997cf9271ba5b2a5
49db14bba437ff8d41d6a26484b91631884eeaa1
refs/heads/master
2022-11-20T13:08:47.790085
2020-07-19T20:36:26
2020-07-19T20:36:26
280,941,883
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5553803443908691, "alphanum_fraction": 0.5624753832817078, "avg_line_length": 29.939023971557617, "blob_id": "acb81f9594855368e13fd6c7bc396eae39446b6f", "content_id": "0c14e1e5cfdf1d764bf1feca769990e5597fb32f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2537, "license_type": "no_license", "max_line_length": 84, "num_lines": 82, "path": "/hangman.py", "repo_name": "shaneausmus/hangman", "src_encoding": "UTF-8", "text": "from random import *\n\n\ndef select_word():\n\n seeds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n seed(choice(seeds))\n\n word_list = list()\n print(\"Selecting word now...\\n\")\n with open(\"ospd.txt\", \"r\") as file:\n # each line is only one word; append the word to the list\n # and strip it just in case of whitespace\n for line in file:\n word_list.append(line.strip())\n shuffle(word_list)\n hangman_word = choice(word_list)\n # can initialize a list like this out of a string, tuple, etc.\n return hangman_word\n\n\ndef verify_user_input(user_input):\n return len(user_input) == 1 and user_input.isalpha()\n\n\ndef guess_info(guess_limit, guess_count, wrong_chars, test_base):\n print(\"Guesses made: \" + str(guess_count) + \"\\n\")\n print(\"You have: \" + str(round(guess_limit - guess_count)) + \" guesses left.\\n\")\n print(\"This is what you've already entered: \" + wrong_chars)\n print(\"Here's what the word looks like: \")\n str_base = \"\"\n for i in range(len(test_base)):\n str_base += test_base[i]\n print(str_base + \"\\n\\n\")\n\n\ndef run_game():\n print(\"Welcome to Hangman!\\n\")\n word = select_word()\n wrong_chars = test_word = test_base_str = \"\"\n word_list = list(word)\n for el in word_list:\n test_base_str += \"_\"\n test_base = list(test_base_str)\n guess_count = 0\n start_len: int = len(word)\n # setting limit on guesses to 1.5 times the length of the string\n guess_limit: int = start_len * 2\n user_input: str = input(\"Pick a character: \")\n while guess_count < guess_limit and test_word != word:\n if verify_user_input(user_input):\n if word_list.count(user_input) > 0:\n for i in range(len(word)):\n if word_list[i] == user_input:\n test_base[i] = word_list[i]\n word_list[i] = \"_\"\n else:\n wrong_chars += user_input\n else:\n print(\"That guess doesn't work - try again!\")\n guess_count += 1\n guess_info(guess_limit, guess_count, wrong_chars, test_base)\n test_word = \"\"\n for i in range(len(test_base)):\n test_word += str(test_base[i])\n if test_word != word:\n user_input = input(\"Enter in another guess:\\n\")\n\n if test_word == word:\n print(\"Congrats, you won the game!\\n\")\n else:\n print(\"Sorry, you lost the game :(\\n\")\n\n print(\"The correct word was \" + word + \".\")\n\n\ndef main():\n run_game()\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
1
albertpuente/gls
https://github.com/albertpuente/gls
ccfaf7c797cb96a1ebf535ff0631ee5f07b0b49e
df4e86723c554ffc82f9314e55ab89debdfd6978
589bbcea5c3508e3f89258121e918671f19dff2d
refs/heads/master
2023-01-29T14:01:45.444411
2023-01-10T12:20:47
2023-01-10T12:20:47
60,358,336
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7178571224212646, "avg_line_length": 25.25, "blob_id": "b74df347c37840c19e8110fb4bcf4580d8c40452", "content_id": "353e00e16c4b5eba160d99334b037a2a0577460a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 840, "license_type": "permissive", "max_line_length": 158, "num_lines": 32, "path": "/README.md", "repo_name": "albertpuente/gls", "src_encoding": "UTF-8", "text": "# gls - Graphical List Directory\n\nSimple file browser for the terminal with code highlighting and basic file information.\n\nThis new version drops all the previous custom-code in favor of a simple built-in feature from [Textual: Code Browser](https://github.com/Textualize/textual).\n\n![](docs/screenshot.png)\n\nAdditional info:\n- Size.\n- File permissions.\n- Last modification timestamp.\n\n## Installation\n```bash\ngit clone https://github.com/albertpuente/gls.git && cd gls\npip install -r requirements.txt\n\n# Change python to your binary if needed (e.g. python3)\necho \"alias gls=\\\"python $PWD/gls.py\\\"\" >> ~/.bashrc && source ~/.bashrc\n```\n## Usage\n\n```bash\ngls # Uses current dir\ngls /path/to/folder # Specific dir\n```\n\n## Next (nice to have)\n- Built-in file edit (nano, vim, emacs...)\n- File management (move, copy, delete).\n- SSH support.\n" }, { "alpha_fraction": 0.5318302512168884, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 30.94915199279785, "blob_id": "5ae8bccbd07effdb254ce50bcd13f7922b896b57", "content_id": "8fc9158bb06e112093f2db64e3e79475e689391c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3770, "license_type": "permissive", "max_line_length": 103, "num_lines": 118, "path": "/gls.py", "repo_name": "albertpuente/gls", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport math\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\nfrom stat import filemode\n\nfrom rich.syntax import Syntax\nfrom rich.text import Text\nfrom rich.traceback import Traceback\nfrom textual import events\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container, Horizontal, Vertical\nfrom textual.reactive import var\nfrom textual.widgets import Button, DirectoryTree, Footer, Header, Static\n\n\ndef sizeof_fmt(num, suffix='B'):\n magnitude = int(math.floor(math.log(num, 1024)))\n val = num / math.pow(1024, magnitude)\n if magnitude > 7:\n return '{:.1f}{}{}'.format(val, 'Yi', suffix)\n return '{:3.1f}{}{}'.format(val, ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'][magnitude], suffix)\n\n\nclass GLS(App):\n \"\"\"Textual code browser app.\"\"\"\n\n CSS_PATH = \"browser.css\"\n BINDINGS = [\n (\"f\", \"toggle_files\", \"Toggle Files\"),\n # (\"d\", \"toggle_dark\", \"Toggle Dark Mode\"),\n # (\"e\", \"edit\", \"Edit\"),\n # (\"d\", \"delete\", \"Delete\"),\n (\"q\", \"quit\", \"Quit\"),\n ]\n\n show_tree = var(True)\n\n def watch_show_tree(self, show_tree: bool) -> None:\n \"\"\"Called when show_tree is modified.\"\"\"\n self.set_class(show_tree, \"-show-tree\")\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose our UI.\"\"\"\n path = \"./\" if len(sys.argv) < 2 else sys.argv[1]\n yield Header(show_clock=True)\n yield Container(\n DirectoryTree(path, id=\"tree-view\"),\n Vertical(Static(id=\"code\", expand=True), id=\"code-view\"),\n Horizontal(\n Static(\n id=\"info\",\n expand=True,\n renderable=Text.assemble(\n (\"Select 
a file to see its contents\", \"bold #0078D4\"))\n ),\n id=\"info-view\"\n ),\n )\n yield Footer()\n\n def on_mount(self, event: events.Mount) -> None:\n self.query_one(DirectoryTree).focus()\n\n def on_directory_tree_file_selected(\n self, event: DirectoryTree.FileSelected\n ) -> None:\n \"\"\"Called when the user click a file in the directory tree.\"\"\"\n event.stop()\n code_view = self.query_one(\"#code\", Static)\n try:\n syntax = Syntax.from_path(\n event.path,\n line_numbers=True,\n word_wrap=False,\n indent_guides=True,\n # theme=\"github-dark\",\n )\n except Exception:\n # code_view.update(Traceback(theme=\"github-dark\", width=None))\n code_view.update(\"Cannot display this file\")\n self.sub_title = \"ERROR\"\n else:\n code_view.update(syntax)\n self.query_one(\"#code-view\").scroll_home(animate=False)\n self.sub_title = event.path\n\n # Info view\n status = Path(event.path).stat()\n info_view = self.query_one(\"#info\", Static)\n mod_date = datetime.utcfromtimestamp(\n status.st_mtime).strftime('%Y-%m-%d %H:%M:%S')\n label_style = \"bold #0078D4\"\n info_view.update(\n Text.assemble(\n (\"Size: \", label_style),\n f\"{sizeof_fmt(status.st_size)} \",\n (\"Perms: \", label_style),\n f\"{filemode(status.st_mode)} \",\n (\"Modified: \", label_style),\n f\"{mod_date}\"\n )\n )\n\n def action_toggle_files(self) -> None:\n \"\"\"Called in response to key binding.\"\"\"\n self.show_tree = not self.show_tree\n\n # def action_toggle_dark(self) -> None:\n # \"\"\"An action to toggle dark mode.\"\"\"\n # self.dark = not self.dark\n\n\nif __name__ == \"__main__\":\n GLS().run()\n" } ]
2
bergalexandre/S5-picar-simulation-blender
https://github.com/bergalexandre/S5-picar-simulation-blender
f1093cb4007f0fd8c121bd74e5b63da0b7f470e2
6105ffcc80b826bff4316ae931ea249938576348
0d15d6d6bc9bd4b36538633a7f97efdacdfc7f4b
refs/heads/master
2023-02-05T12:29:17.541964
2020-12-22T17:41:37
2020-12-22T17:41:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.57151198387146, "alphanum_fraction": 0.5920898914337158, "avg_line_length": 31.789474487304688, "blob_id": "230b43ff454e7bf3950e83b6f25a3d8f5158fcb2", "content_id": "39185037f1fe3cabac6716afde0b3ce4b0e6c948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6877, "license_type": "no_license", "max_line_length": 124, "num_lines": 209, "path": "/bille.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "import numpy as np\nimport time\nfrom threading import Thread\n\n\n#plot le contenant\n\nclass bille2D():\n deltaH = -0.0015\n\n x = 0\n z = -0.0014999\n\n #constantes\n angle = np.radians(-4.3)\n g = -9.81\n m = 0.1 #estimé\n rayon = 0.02\n friction = 0.06\n zLimit = 0.0\n #pour le thread\n vitesseBille = 0\n deltaHFinal = deltaH\n end = False\n\n def __init__(self, x, y, z, framerate):\n self.framerate = framerate\n if(x and y and z):\n raise Exception(\"Juste 2 dimensions à True\")\n\n self.xRegister = x\n self.yRegister = y\n self.zRegister = z\n\n def nouveauDeltah(self, vitesse):\n deltah = ((vitesse**2)/(2*self.g))\n return deltah\n\n def calculeVitesse(self, deltah):\n vitesse = ((self.g*2*deltah*(1-self.friction))**0.5)\n \n if(vitesse < 0.05):\n vitesse = 0\n \n return vitesse\n\n #retourne la vitesse maximal du véhicule pour préserver la bille.\n def calculVitesseMax(self):\n vMax = np.sqrt((2*self.g*self.m*-0.0015)/(0.97*self.m)) \n return vMax\n\n #met à jour les paramètres d'accélération de la bille\n def appliqueAcceleration(self, vitesseVehicule, z):\n #retourne la vitesse de la bille selon le support du véhicule (C'est à dire un point fixe)\n self.z = z\n self.first = True\n self.vitesseBille = -vitesseVehicule\n #calcul du nouveau deltaH\n #step1: Calculer l'énergie cinétique de la bille\n #step2: Calculer l'élévation maximale que la bille aura\n self.deltaHFinal = self.nouveauDeltah(self.vitesseBille)\n self.zLimit = self.deltaH - self.deltaHFinal\n self.directionX = 1\n\n if(self.vitesseBille > 0):\n self.angle = -self.angle\n self.directionX = -self.directionX\n\n self.xLimit = -self.deltaHFinal/np.sin(self.angle)\n\n\n if((self.deltaHFinal + self.z) > 0):\n print(\"Bille pu dans le moule\")\n\n def limitX(self, lim, position, offset):\n positionFinal = position + offset\n\n if(lim < 0):\n if(lim > positionFinal):\n return lim-position\n elif(lim > 0):\n if(lim < positionFinal):\n return lim-position\n else:\n if(offset < 0):\n if(lim > positionFinal):\n return lim-position\n else:\n if(lim < positionFinal):\n return lim-position\n return offset\n\n def limitZ(self, lim, position, offset):\n positionFinal = position + offset\n if(lim != self.deltaH):\n if(lim < positionFinal):\n return lim-position\n else:\n if(lim > positionFinal):\n return lim-position\n return offset\n\n\n #Met à jour la position de la bille en fonction de la vitesse du véhicule\n def updatePosition(self):\n #position en Z de la bille\n #à l'extrémité, la bille doit se retourner dans l'autre sense.\n #self.vitesseBille = self.calculeVitesse() #calcul la vitesse actuel\n if(self.vitesseBille == 0):\n return np.array([0,0,0])\n\n hauteurMax = self.deltaH-self.deltaHFinal\n if((self.z >= hauteurMax or self.z <= self.deltaH) and self.first == False):\n if(self.z <= self.deltaH):\n self.deltaHFinal =self.nouveauDeltah(self.vitesseBille)\n hauteurMax = self.deltaH-self.deltaHFinal\n\n #recalcule la vitesse en fonction de la hauteur maximale\n vitesse = 
self.calculeVitesse(self.deltaHFinal)\n #si vitesse était positive, devient négative\n if(self.vitesseBille > 0):\n self.vitesseBille = vitesse\n self.angle = -self.angle\n else:\n self.vitesseBille = -vitesse\n self.angle = -self.angle\n\n #si self.z > deltaH, changer la direction en X\n self.zLimit = hauteurMax\n self.xLimit = (self.deltaHFinal/np.tan(self.angle))\n if(self.z>=hauteurMax):\n self.directionX = -self.directionX\n self.xLimit = 0.0\n self.zLimit = self.deltaH\n \n \n\n \n \n self.first = False # patch pour éviter un cas particulier\n viteseParFrame = self.vitesseBille/self.framerate\n deltaX = self.limitX(self.xLimit, self.x, self.directionX * viteseParFrame * np.cos(self.angle))\n deltaZ = self.limitZ(self.zLimit, self.z, viteseParFrame * np.sin(self.angle))\n self.x += deltaX\n self.z += deltaZ\n #step3: Valider que la bille est toujours dans le moule\n \n #step4: Calculer les keyframes de la bille\n return np.array([deltaX if self.xRegister else 0, deltaX if self.yRegister else 0, deltaZ if self.zRegister else 0])\n \n\nclass BilleMath():\n position = np.array([0, 0, -0.0015])\n vitesseAngulaire = 0\n\n def __init__(self, framerate):\n self.billeXZ = bille2D(True, False, True, framerate)\n self.billeYZ = bille2D(False, True, True, framerate)\n\n def appliqueAcceleration(self, X_vitesse=None, Y_vitesse=None):\n if(X_vitesse is not None):\n self.billeXZ.appliqueAcceleration(X_vitesse, self.position[2])\n if(Y_vitesse is not None):\n self.billeYZ.appliqueAcceleration(Y_vitesse, self.position[2])\n \n def appliqueRotation(self, vitesseAngulaire):\n raise Exception(\"à voir si pertinant\")\n\n def updatePosition(self):\n offset1 = self.billeXZ.updatePosition()\n offset2 = self.billeYZ.updatePosition()\n self.position = self.position+offset1+offset2\n return self.position\n\ndef test():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n\n vmax = np.sqrt(2*-9.81*-0.0015)\n\n #test\n #print(f\"vitesse vertical = {vmax*np.sin(np.radians(4.7))}\")\n #print(f\"temps de montée = {0.0015/(vmax*np.sin(np.radians(4.7)))}\")\n #print(f\"vitesse horizontal = {(vmax*np.cos(np.radians(4.7)))}\")\n #print(f\"tempps de hor = {0.02/(vmax*np.cos(np.radians(4.7)))}\")\n\n bille = BilleMath(100)\n bille.appliqueAcceleration(0, 0.20)\n position = []\n for i in range(100*10):\n position.append(bille.updatePosition())\n print(position[-1])\n\n position = np.array(position)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n #plt.plot(position[:,0])\n ax.plot(position[:,0], position[:,1], position[:,2])\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n plt.show()\n #print(f\"Vitesse en x = {vitesse[0]}\")\n #print(f\"Vitesse en y = {vitesse[1]}\")\n #print(f\"norme = {np.sqrt(vitesse[0]**2 + vitesse[1]**2)}\")\n\n#test() #pour afficher les graphiques, parcontre ça marche pas avec blender donc commenter" }, { "alpha_fraction": 0.5660630464553833, "alphanum_fraction": 0.5942320823669434, "avg_line_length": 32.494380950927734, "blob_id": "1aa3be948e735039e0614c1b50b45bc8bffe0511", "content_id": "ae149c8dc1104a143176792e98eca4d1f83f2fd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2985, "license_type": "no_license", "max_line_length": 170, "num_lines": 89, "path": "/infrasonic.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "import numpy as np\n\nclass Infrasonic():\n\n longueur = 2\n champsVisionRadar = np.radians(15)\n\n 
#Crée deux triangle pour simuler la position du capteur\n def __init__(self, positionSonar, milieuVehicule):\n #déterminer dans quel cadrant du cercle on est\n droite = False\n haut = False\n angleOffset = 0\n if(milieuVehicule[0] <= positionSonar[0]):\n droite = True\n if(milieuVehicule[1] <= positionSonar[1]):\n haut = True\n\n if(droite and haut):\n angleOffset = 0\n elif(not droite and haut):\n angleOffset = np.pi/2\n elif(not droite and not haut):\n angleOffset = np.pi\n else:\n angleOffset = 3*np.pi/2\n \n\n pentes = positionSonar - milieuVehicule\n rotationActuelle = 0\n try:\n rotationActuelle = np.arctan(pentes[1]/pentes[0])\n except Exception as e:\n rotationActuelle = pi/2\n\n rotationActuelle += angleOffset\n self.verts = np.array([\n np.array(positionSonar),\n np.array(positionSonar+[self.longueur * np.cos(rotationActuelle-self.champsVisionRadar), self.longueur * np.sin(rotationActuelle-self.champsVisionRadar), 0]),\n np.array(positionSonar+[self.longueur * np.cos(rotationActuelle+self.champsVisionRadar), self.longueur * np.sin(rotationActuelle+self.champsVisionRadar), 0]),\n np.array(positionSonar+[self.longueur * np.cos(rotationActuelle), self.longueur * np.sin(rotationActuelle), 0])\n ])\n self.aires = []\n a = self.verts[1][1] - self.verts[0][1]\n b = self.verts[1][1] - self.verts[0][1]\n c = self.verts[2][0] - self.verts[1][0]\n self.aires.append(abs(round(self.calculAireTriangle(a, b, c), 6)))\n \n a = self.verts[3][1]-self.verts[1][1]\n b = self.verts[3][1]-self.verts[2][1]\n c = self.verts[2][0]-self.verts[1][0]\n self.aires.append(abs(round(self.calculAireTriangle(a, b, c), 6)))\n\n print(self.verts)\n \n def calculNorme(self, position1, position2):\n longueur = position2-position1\n return np.sqrt(longueur**2 + longueur**2)\n \n #formule de héron\n def calculAireTriangle(self, a,b,c):\n p = (a+b+c)/2\n return np.sqrt(p*(p-a)*(p-b)*(p-c))\n\n def estDansOnde(self, position):\n return -1\n\n\n\ndef test():\n import matplotlib.pyplot as plt\n positionOrigine = np.array([0, 0, 0])\n positionCapteur = np.array([0.05, 0, 0])\n\n sonar = Infrasonic(positionCapteur, positionOrigine)\n\n plt.figure(1)\n plt.plot(positionOrigine[0], positionOrigine[1], \"rx\")\n plt.plot(positionCapteur[0], positionCapteur[1], \"bx\")\n\n plt.plot(sonar.verts[:,0], sonar.verts[:,1])\n\n for coord in [[1, 0, 0], [-4.6, 4.9, 0], [-4.9, 4.9, 0]]:\n print(f\"coord: [{coord}] est {True if sonar.estDansOnde(coord) != -1 else False}\")\n plt.plot(coord[0], coord[1], \"gx\")\n\n plt.show()\n\ntest()\n\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 62, "blob_id": "42634c5716cd4e700858118f0577159de8f8d436", "content_id": "197b5038293c61df63dbfffa1006e32258fbbea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 62, "num_lines": 1, "path": "/__init__.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "#garder pour dire à python d'importer des trucs dans ce folder" }, { "alpha_fraction": 0.7087719440460205, "alphanum_fraction": 0.7257310152053833, "avg_line_length": 38.71428680419922, "blob_id": "8e8cdc97fcfbe94eaa68d1597cfa86150dad3ef2", "content_id": "ca0a7c0580cc86f425a394d16052918163ac99cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1725, "license_type": "no_license", "max_line_length": 281, "num_lines": 42, "path": 
"/readme.md", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "# Simulation du picar S2 sous blender.\r\n\r\nMettre les fichiers blender dans le dossier \"blender\"\r\n\r\n## **pour faire un script qui affecte une scène blender:**\r\n\r\n### **setup de vscode**\r\n\r\nle python se trouve ici (pour 2.90 sur windows 10):\r\n* \"C:\\Program Files\\Blender Foundation\\Blender 2.90\\2.90\\python\\bin\"\r\n\r\nPour faire fonctionner blender avec le script, en ligne de commande:\r\n* \"C:\\Program Files\\Blender Foundation\\Blender 2.90\\2.90\\python\\bin\\python\" -m pip install matplotlib --user\r\n* \"C:\\Program Files\\Blender Foundation\\Blender 2.90\\2.90\\python\\bin\\python\" -m pip install threading --user\r\n* <span style=\"color:red\">C'est incomplet, il y a un bug avec le pip de blender. Il n'arrive pas à trouver les modules après. Pour corriger ça j'ai copier les fichier à la main dans le dossier de module du python à blender. Je me souviens pu exactement du path où il le met.</span>\r\n* Installer l'extension blender Development:\r\n\r\n **Name**: Blender Development\r\n \r\n **Id**: jacqueslucke.blender-development\r\n\r\n **Description**: Tools to simplify Blender development.\r\n\r\n **Version**: 0.0.12\r\n\r\n **Publisher**: Jacques Lucke\r\n\r\n VS Marketplace Link: https://marketplace.visualstudio.com/items?itemName=JacquesLucke.blender-development\r\n \r\n **note**: * <span style=\"color:red\">L'extension est buggé et supporte pas nos caractère français. Donc pas de suprise si é se transforme en @ quelque chose</span>\r\n\r\n* Démarer blender: ctrl+shift+p: Blender start\r\n \r\n ![blender Start](markdown/startBlender.png) \r\n\r\n ![Image of Yaktocat](markdown/debugger.png)\r\n\r\n* Allez dans le fichier python désiré puis ctrl+shift+p: blender run script\r\n\r\n\r\n\r\nProjet S5 UDES, robot suiveur de ligne, université de sherbrooke\r\n" }, { "alpha_fraction": 0.6031712889671326, "alphanum_fraction": 0.615783154964447, "avg_line_length": 36.10679626464844, "blob_id": "c0808641b58aef354877d512f31e0037cb008d33", "content_id": "cfe2f1dc9c442086e83a459535d98dfb813ca84d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19151, "license_type": "no_license", "max_line_length": 142, "num_lines": 515, "path": "/vehicule.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "import numpy as np\n#import matplotlib.image as mpimg\n#import matplotlib.pyplot as plt\nimport time\nimport bpy\nimport os\nimport time\nimport copy\nfrom pathlib import Path\nfrom threading import Thread, Lock\n\nimport sys\n#sinon il trouvera pas les modules custom si on change pas son path d'import\nsys.path.insert(0, r\"C:\\Users\\Alexandre.Bergeron\\OneDrive - USherbrooke\\university\\projet\\S5_projet_simulation\")\nfrom bille import BilleMath\nimport creationLigne\nimport infrasonic\n\n#### Tout ce qui a trait à blender ####\nblenderMutex = Lock()\nframeNb = 0\n\nclass blenderObject():\n scale = 1\n radius = 2\n name = \"undefined\" #name of the blender object\n name_2 = \"\"\n framerate = 100 #besoin de savoir ça?\n angle = 0\n _dernierePosition = np.array([0,0,0])\n\n def __init__(self, x, y, z, name=\"undefined\", scene=None, parent=None):\n self.position = np.array([x*self.scale, y*self.scale, z*self.scale], dtype=np.float)\n self.name = name\n self.scene = scene\n self.parent = parent\n if scene is not None:\n i = 1\n while(self.name + self.name_2 in 
bpy.data.objects):\n self.name_2 = \".\" + str(i).zfill(3)\n i += 1\n \n path = Path(os.getcwd()) / \"blender\" / f\"{self.name}.dae\"\n bpy.ops.wm.collada_import(filepath=str(path), import_units=False, keep_bind_info=False)\n self.blenderObj = bpy.data.objects[self.name+self.name_2]\n self.blenderObj.animation_data_clear()\n \n #place l'objet à son point de départ\n global frameNb\n self.scene.frame_set(frameNb)\n self.blenderObj.location = tuple(self.position/self.scale)\n if(parent is not None):\n self.blenderObj.parent = parent\n self.blenderObj.matrix_parent_inverse = parent.matrix_world.inverted()\n else:\n self.blenderObj.location = tuple(self.position/self.scale)\n self.blenderObj.keyframe_insert(data_path=\"location\", index=-1)\n self.blenderObj.animation_data.action.fcurves[-1].keyframe_points[-1].interpolation = 'LINEAR'\n\n #initialise la rotation à 0\n self.rotation(0)\n\n mat = bpy.data.materials.new(name=self.name+self.name_2)\n mat.diffuse_color = (1,1,1, 1)\n self.blenderObj.data.materials.append(mat)\n bpy.data.materials.get(self.name+self.name_2).keyframe_insert(data_path=\"diffuse_color\", index=-1)\n \n #pour un mouvement local, on assume que on veut avancer sur la position X par exemple mais celle \"local\". \n #en gros, elle fait toujours face à l'origine avec un certain angle. (voir blender local versus global)\n def matriceRotation(self, deltaPosition, angle):\n #matrice de rotation\n matrice_rotation = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n matrice_rotation = np.round(matrice_rotation, decimals=5)\n\n #pour info, @ = produit de matrice\n deltaPosition[:2] = deltaPosition[:2] @ matrice_rotation\n\n return deltaPosition \n\n def mouvementLocal(self, deltaPosition):\n if self.scene is not None:\n blenderMutex.acquire()\n global frameNb\n self.scene.frame_set(frameNb)\n self._dernierePosition = self.position\n self.position += deltaPosition\n self.blenderObj.location = tuple(self.position/self.scale)\n self.blenderObj.keyframe_insert(data_path=\"location\", index=-1)\n self.blenderObj.animation_data.action.fcurves[-1].keyframe_points[-1].interpolation = 'LINEAR'\n blenderMutex.release()\n \n def ajouteOffset(self, offset):\n if self.scene is not None:\n blenderMutex.acquire()\n offset = offset\n position = self.position + offset\n self.blenderObj.location = tuple(position/self.scale)\n self.blenderObj.keyframe_insert(data_path=\"location\", index=-1)\n self.blenderObj.animation_data.action.fcurves[-1].keyframe_points[-1].interpolation = 'LINEAR'\n blenderMutex.release()\n\n def show(self, fig, ax):\n ax.plot(self.position[0], self.position[1], \"xr\")\n\n def rotation(self, angle):\n if self.scene is not None:\n blenderMutex.acquire()\n self.blenderObj.rotation_euler[2] = angle\n self.blenderObj.keyframe_insert(data_path=\"rotation_euler\", index=-1)\n blenderMutex.release()\n\n def enregistreRotation(self):\n if self.scene is not None:\n blenderMutex.acquire()\n self.blenderObj.keyframe_insert(data_path=\"rotation_euler\", index=-1)\n blenderMutex.release()\n\n def couleurRouge(self):\n blenderMutex.acquire()\n global frameNb\n self.scene.frame_set(frameNb)\n bpy.data.materials.get(self.name+self.name_2).diffuse_color = (1, 0, 0, 1)\n bpy.data.materials.get(self.name+self.name_2).keyframe_insert(data_path=\"diffuse_color\", index=-1)\n blenderMutex.release()\n\n def couleurVert(self):\n blenderMutex.acquire()\n global frameNb\n self.scene.frame_set(frameNb)\n bpy.data.materials.get(self.name+self.name_2).diffuse_color = (0, 
1, 0, 1)\n bpy.data.materials.get(self.name+self.name_2).keyframe_insert(data_path=\"diffuse_color\", index=-1)\n blenderMutex.release()\n \n #compare avec la dernière position connu\n def determineAngle(self, A, B):\n\n if (B[0]-A[0] == 0 and B[1]-A[1] == 0):\n return self.angle\n droite = False\n haut = False\n angleOffset = 0\n if(A[0] <= B[0]):\n droite = True\n if(A[1] <= B[1]):\n haut = True\n\n if(droite and haut):\n angleOffset = 0\n elif(not droite and haut):\n angleOffset = np.pi/2\n elif(not droite and not haut):\n angleOffset = np.pi\n else:\n angleOffset = 3*np.pi/2\n \n longueur = B - A\n rotationActuelle = np.arctan(longueur[1]/longueur[0])\n if np.isnan(rotationActuelle): #pour la division par 0\n rotationActuelle = 0.0\n\n rotationActuelle += angleOffset\n return rotationActuelle\n\n\nclass vehicule(blenderObject):\n length = 0.15\n \n def __init__(self, x,y,z,scene):\n super().__init__(x,y,z, \"vehicule\", scene)\n global frameNb\n frameNb = 0\n self.angleVirage = 0\n self.bille = bille(x, y, z+0.035, scene, self.blenderObj)\n self.sonar = sonar(x+0.065, y, z+0.05, scene, self.blenderObj)\n self.suiveurligne = CapteurLigne(x+0.065, y, z, scene, self.blenderObj)\n #ajout du sonar à l'avant\n\n def mouvementLocal(self, deltaPosition, omega=0, t=0, rot=True):\n #deltaPosition est l'équivalent du backwheel pour le véhicule\n if(rot is True):\n deltaPosition = self.matriceRotation(deltaPosition, self.angle)\n super().mouvementLocal(deltaPosition)\n self.mouvementFrontwheel(omega, t)\n #déterminer accélération x et y pis shooter ça à bille\n #je pense que deltaposition serait une accélération en fait\n self.bille.bougeBille(deltaPosition)\n self.enregistreRotation()\n \n def mouvementFrontwheel(self, omega, t):\n fw_position = self.length*np.array([np.cos(omega*t), np.sin(omega*t), 0])\n self.ajouteOffset(self.matriceRotation(fw_position, self.angle)) #le point d'origine du vehicule blender est le milieu des frontwheels\n\n def virage(self, v, alpha, t):\n #https://math.stackexchange.com/questions/3055263/path-of-a-simple-turning-car\n #calculer par rapport à T\n omega = v/self.length*np.tan(alpha) \n vitesseAngulaire = v/omega if omega != 0.0 else 0\n position_BackWheel = vitesseAngulaire * np.array([np.sin(omega*t), 1-np.cos(omega*t), 0])\n return self.matriceRotation(position_BackWheel, self.angle), omega\n\n def avance(self, vitesse, angleRoue, t, T0):\n if(angleRoue == 0.0):\n self.angleVirage = 0\n self.mouvementLocal([vitesse, 0, 0])\n else:\n position1 = self.position[:] #[:] pour forcer une copie des valeurs (sinon la valeur de la ref va changer)\n position_BackWheel, omega = self.virage(vitesse*self.framerate, angleRoue, t)\n position2 = (position_BackWheel+T0)-self.position\n self.mouvementLocal(position2, omega=omega, t=t, rot=False)\n self.angleVirage = omega*t\n self.rotation(-self.angle + self.angleVirage)\n\n def detection(self, listeObj):\n pass\n\n \n\nclass bille(blenderObject):\n\n def __init__(self, x, y, z, scene, parent):\n super().__init__(x, y, z, \"bille\", scene, parent)\n self._vielleVitesse = np.array([0,0])\n self.billeMath = BilleMath(scene.render.fps)\n #qu'est-ce qu'on a besoin de savoir? Accélération en x et y? angle en z? 
faire le z ou pas?\n\n def bougeBille(self, deltaPosition):\n #vérifier si la vitesse à changer:\n vitesseCourante = deltaPosition\n if(vitesseCourante[0] != self._vielleVitesse[0]):\n self.billeMath.appliqueAcceleration(X_vitesse=vitesseCourante[0]*self.scene.render.fps)\n\n if(vitesseCourante[1] != self._vielleVitesse[1]):\n self.billeMath.appliqueAcceleration(Y_vitesse=vitesseCourante[1]*self.scene.render.fps)\n \n self._vielleVitesse = vitesseCourante\n positionBille = self.billeMath.updatePosition()\n self.ajouteOffset(positionBille)\n\nclass DetecteurLigne(blenderObject):\n def configureLigne(self, ligne):\n self.ligne = ligne \n\n def detection(self):\n blenderMutex.acquire()\n position = np.asarray(self.parent.location)+np.asarray(self.blenderObj.location)\n blenderMutex.release()\n detect = self.ligne.estDansLigne(position)\n if(detect == 1):\n self.couleurVert()\n else:\n self.couleurRouge()\n return detect\n\n#représente le module avec les 5 détecteurs\nclass CapteurLigne(blenderObject):\n def __init__(self, x, y, z, scene, parent):\n #super().__init__(x, y, z, \"undefined\", scene, parent)\n self.detecteurs = []\n self.detecteurs.append(DetecteurLigne(x, y-0.06, z, scene=scene, parent=parent))\n self.detecteurs.append(DetecteurLigne(x, y-0.03, z, scene=scene, parent=parent))\n self.detecteurs.append(DetecteurLigne(x, y, z, scene=scene, parent=parent))\n self.detecteurs.append(DetecteurLigne(x, y+0.03, z, scene=scene, parent=parent))\n self.detecteurs.append(DetecteurLigne(x, y+0.06, z, scene=scene, parent=parent))\n \n def mouvementLocal(self, deltaPosition):\n super().mouvementLocal(deltaPosition)\n for detecteur in self.detecteurs:\n detecteur.mouvementLocal(deltaPosition)\n\n def rotation(self, angle):\n super().rotation(angle)\n for detecteur in self.detecteurs:\n detecteur.rotation(angle)\n\n def detection(self):\n resultat = []\n for detecteur in self.detecteurs:\n resultat.append(detecteur.detection())\n return resultat\n\n def configureLigne(self, ligne):\n resultat = []\n for detecteur in self.detecteurs:\n detecteur.configureLigne(ligne)\n\n\nclass sonar(blenderObject): \n def __init__(self, x, y, z, scene, parent):\n super().__init__(x,y,z, \"undefined\", scene, parent)\n self.max_range = 4.5*self.scale #mètre\n self.angle = 30 # angle en degrées\n self.precision = 0.01*self.scale \n\n def detection(self, listeObj):\n blenderMutex.acquire()\n positionRobot = np.asarray(self.parent.location)\n positionSelf = np.asarray(self.blenderObj.location) + positionRobot\n capteur = infrasonic.Infrasonic(positionRobot, positionSelf)\n distanceList = []\n for obj in listeObj:\n D = capteur.estDansOnde(obj.position)\n if(D != -1):\n distanceList.append(D)\n blenderMutex.release()\n return min(distanceList) if len(distanceList)>0 else -1\n\n#gere la connection blender au script\n\nclass blenderManager(Thread):\n _foward_speed = 0\n _rotationServo = 0\n _distanceSonar = 0\n\n #constant\n _circonference_roue = 0.04*2*np.pi\n framerate = 100\n _step = 1/framerate #100 serait 100fps, donc 1 seconde.\n _rpsMax = 0.20/_circonference_roue # rotation par seconde à confirmer\n\n #liste d'état\n _avance = False\n _stop = True\n _recule = False\n\n #pour la sim\n turning_max = 135\n \n ## getter\n @property\n def speed(self):\n return self._foward_speed\n\n #Fonction(setter) pour ajuster la vitesse\n @speed.setter\n def speed(self, value):\n print(f\"{frameNb} set_speed({value})\")\n if(value < 0 or value > 100):\n raise Exception(f\"Vitesse invalide dans set_speed({value})\")\n 
self._foward_speed = value/100\n\n #l'argument secondes est la durée de la simulation\n def __init__(self, secondes, nomDeLigne):\n super().__init__()\n self._tempsDeSimulation = secondes\n #delete tout les trucs\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=True, confirm=False)\n #load blender scene\n scene = bpy.context.scene\n\n if scene is not None:\n scene.render.fps = self.framerate\n for material in bpy.data.materials:\n material.user_clear()\n bpy.data.materials.remove(material)\n \n for obj in bpy.data.objects:\n obj.user_clear()\n bpy.data.objects.remove(obj)\n\n self.vehicule = vehicule(0, 0, 0, scene)\n self.T0 = self.vehicule.position[:]\n bpy.context.scene.frame_end = int(secondes*self.framerate)\n \n self.listeObj = []\n self.listeObj.append(blenderObject(1, 0, 0, name=\"obstacle\", scene=scene))\n self.listeObj.append(blenderObject(3, 3, 0, name=\"obstacle\", scene=scene))\n self.listeObj.append(blenderObject(3, -3, 0, name=\"obstacle\", scene=scene))\n\n self._nombreStep = 0\n self.turn(90)\n\n #crée la ligne\n ligne = creationLigne.Ligne(nomDeLigne, getattr(creationLigne, nomDeLigne), 10)\n self.vehicule.suiveurligne.configureLigne(ligne)\n\n #lecture du capteur de ligne\n def read_digital(self):\n lecture = self.vehicule.suiveurligne.detection()\n print(f\"{frameNb} {lecture} = read_digital()\")\n return lecture\n\n #Thread qui roule poiur la durée de la simulation. Son but est de comminiquer les informations aux classes simulés.\n def run(self):\n nombreStepAvantLaFin = self.framerate*self._tempsDeSimulation\n \n while(self._nombreStep < nombreStepAvantLaFin):\n start = time.time()\n if self._avance:\n vitesse = (self._foward_speed)*self._step*self._circonference_roue*self._rpsMax\n elif self._recule:\n vitesse = -(self._foward_speed)*self._step*self._circonference_roue*self._rpsMax\n elif self._stop:\n vitesse = 0\n else:\n raise Exception(\"Aucun mode actif pour la classe blenderManager\")\n \n\n t = (self._nombreStep - self._debutVirage)/self.framerate\n self.vehicule.avance(vitesse, self._angleRoue, t, self.T0)\n global frameNb\n frameNb += 1\n self._nombreStep += 1\n tempsEcoule = time.time() - start\n if(tempsEcoule > 1/self.framerate):\n #c'est peut-être juste un breakpoint aussi, donc pas d'exception on continue\n pass\n else:\n time.sleep((5/self.framerate)-tempsEcoule)\n self.stop()\n return frameNb, nombreStepAvantLaFin\n #maintenant qu'on a la distance, le convertir en x, y et z\n\n #fonction qui permet de domir un temps X par rapport à la simulation et non au temps réel.\n def sleep(self, seconde):\n global frameNb\n target = (seconde*self.framerate)+frameNb\n while(frameNb < target):\n time.sleep(0.001)\n if(self.is_alive() is not True):\n raise Exception(\"La simulation est over\")\n\n #fonction qui permet d'avancer\n def forward(self):\n global frameNb\n print(f\"{frameNb} forward()\")\n self._avance = True\n self._stop = False\n self._recule = False\n \n #fonction qui permet de reculer\n def backward(self):\n global frameNb\n print(f\"{frameNb} backward()\")\n self._avance = False\n self._stop = False\n self._recule = True\n\n #fonction qui permet de stop les roues du véhicule\n def stop(self):\n global frameNb\n print(f\"{frameNb} stop()\")\n self._avance = False\n self._stop = True\n self._recule = False\n\n #fonction qui permet d'ajuster un angle de virage\n def turn(self, angle):\n print(f\"{frameNb} turn({angle})\")\n #raise Exception(f\"Angle invalide dans turn({angle})\")\n self.vehicule.angle -= 
self.vehicule.angleVirage\n self._angleRoue = np.radians(angle-90) #-90 pour centrer à 0\n self._debutVirage = self._nombreStep-1\n self.T0 = copy.deepcopy(self.vehicule.position)\n\n #fonction qui attend de retrouver le centre de la ligne\n def wait_tile_center(self):\n global frameNb\n print(f\"{frameNb} wait_tile_center()\")\n while True:\n lt_status = self.read_digital()\n if lt_status[2] == 1:\n break\n if(self.is_alive() is not True):\n raise Exception(\"La simulation est over\")\n \n #fonction qui remet les roues droites\n def turn_straight(self):\n global frameNb\n print(f\"{frameNb} turn_straight()\")\n self.turn(90)\n self.forward()\n\n #fonction qui tourne à gauche au maximum\n def turn_left(self):\n global frameNb\n print(f\"{frameNb} turn_left()\")\n self.forward()\n self.turn(135)\n\n #fonction qui tourne à droite au maximum\n def turn_right(self):\n global frameNb\n print(f\"{frameNb} turn_right()\")\n self.forward()\n self.turn(45)\n\n #fonction présente pour minimiser les changements dans le code du picar.\n def ready(self):\n pass\n\n #Fonction qui retourne -1 ou une distance en mètre d'un obstacle.\n def get_distance(self):\n distance = self.vehicule.sonar.detection(self.listeObj)\n global frameNb\n #print(f\"{self.frameNb} {distance} = get_distance()\")\n return distance\n \n #fonction présente pour minimiser les changements dans le code du picar.\n def setup(self):\n pass\n\n\ndef test():\n blender = blenderManager(8, \"crochet\")\n print(\"tourne de 45\", f\" temps = {frameNb}\")\n blender.turn(90)\n blender.speed = 100\n blender.start()\n blender.forward()\n blender.sleep(1)\n blender.stop()\n\n#test()" }, { "alpha_fraction": 0.5723253488540649, "alphanum_fraction": 0.6153111457824707, "avg_line_length": 20, "blob_id": "08c65999621d5ee88f378a7745452fcdc77edbbb", "content_id": "6b865d1e0d4e8f23a8d8503f83e8a57d37a36c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7332, "license_type": "no_license", "max_line_length": 112, "num_lines": 349, "path": "/demo.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''\n**********************************************************************\n* Filename : line_follower\n* Description : An example for sensor car kit to followe line\n* Author : Dream\n* Brand : SunFounder\n* E-mail : [email protected]\n* Website : www.sunfounder.com\n* Update : Dream 2016-09-21 New release\n**********************************************************************\n'''\nimport sys\n#sinon il trouvera pas les modules custom si on change pas son path d'import\nsys.path.insert(0, r\"C:\\Users\\Alexandre.Bergeron\\OneDrive - USherbrooke\\university\\projet\\S5_projet_simulation\")\nimport vehicule\n\nsim = vehicule.blenderManager(60, \"crochet\")\ntime = sim\nLine_Follower = sim\nUltrasonic_Avoidance = sim\nfront_wheels = sim\nback_wheels = sim\n\n#from lib.SunFounder_Line_Follower import Line_Follower\n#from lib.SunFounder_Ultrasonic_Avoidance import Ultrasonic_Avoidance\n#from picar import front_wheels\n#from picar import back_wheels\n#import time\nfrom os import system\nimport random\nimport threading\n\n#line follow\nREFERENCES = [20.0, 20.0, 22.0, 22.0, 19.0]\nforward_speed = 65\nbackward_speed = 65\n\ndelay = 0.0005\n\n#fw = front_wheels.Front_Wheels(db='config')\n#bw = back_wheels.Back_Wheels(db='config')\n#lf = Line_Follower.Line_Follower()\nfw = front_wheels\nbw = back_wheels\nlf = Line_Follower\nua = 
Ultrasonic_Avoidance\npicar = sim\n\nlf.references = REFERENCES\nfw.ready()\nbw.ready()\nfw.turning_max = 45\n\na_step = 3\nb_step = 10\nc_step = 30\nd_step = 45\n\n#avoid\nforce_turning = 2 # 0 = random direction, 1 = force left, 2 = force right, 3 = orderly\n\n\nturn_distance = 0.1\n\ntimeout = 10\nlast_angle = 90\nlast_dir = 0\n\nclass active_moving():\n\tinput = ''\n\tturning_angle = 40\n\toff_track_count = 0\n\tmax_off_track_count = 1000\n\n\tdef __init__(self):\n\t\tpicar.setup()\n\t\tbw.start()\n\t\tbw.speed = 0\n\n\tdef run(self):\n\t\t\n\t\tt = threading.Thread(target=self.manage_input)\n\t\tt.start()\n\n\t\twhile self.input != 'stop':\n\t\t\tif self.input == 'sortie':\n\t\t\t\tself.input = ''\n\t\t\t\tt = threading.Thread(target=self.manage_input)\n\t\t\t\tt.start()\n\t\t\t\tself.sortie()\n\t\t\t\t\n\t\t\tself.follow()\n\t\t\tself.avoid()\n\t\t\ttime.sleep(delay)\n\n\t\t\tline = lf.read_digital()\n\t\t\tif all(line):\n\t\t\t\tbw.stop()\n\t\t\t\tbreak\n\n\t\tbw.stop()\n\n\tdef manage_input(self):\n\t\tself.input = input('Instructions :')\n\n\tdef turn(self, angle):\n\t\tfw.turn(angle)\n\n\tdef accelerate(self, s):\n\t\tbw.forward()\n\t\tbw.speed = s\n\n\tdef go(self, d, f=True, v=forward_speed):\n\t\tcm_per_s = 11.5\n\t\tt = d/cm_per_s\n\n\t\tbw.speed = v\n\t\t\n\t\tif f:\n\t\t\tbw.forward()\n\t\telse:\n\t\t\tbw.backward()\n\n\t\ttime.sleep(t)\n\n\t\tbw.stop()\n\n\tdef turn90(self, R=True):\n\t\tif R:\n\t\t\tfw.turn_right()\n\t\telse:\n\t\t\tfw.turn_left()\n\n\t\tself.go(38)\n\t\tbw.stop()\n\t\tfw.turn_straight()\n\t\t\n\tdef follow(self):\n\t\tlt_status_now = lf.read_digital()\n\t\t# Angle calculate\n\t\tif\tlt_status_now == [0,0,1,0,0]:\n\t\t\tstep = 0\n\t\telif lt_status_now == [0,1,1,0,0] or lt_status_now == [0,0,1,1,0]:\n\t\t\tstep = a_step\n\t\telif lt_status_now == [0,1,0,0,0] or lt_status_now == [0,0,0,1,0]:\n\t\t\tstep = b_step\n\t\telif lt_status_now == [1,1,0,0,0] or lt_status_now == [0,0,0,1,1]:\n\t\t\tstep = c_step\n\t\telif lt_status_now == [1,0,0,0,0] or lt_status_now == [0,0,0,0,1]:\n\t\t\tstep = d_step\n\n\t\t# Direction calculate\n\t\tif\tlt_status_now == [0,0,1,0,0]:\n\t\t\tself.off_track_count = 0\n\t\t\tfw.turn(90)\n\t\t# turn right\n\t\telif lt_status_now in ([0,1,1,0,0],[0,1,0,0,0],[1,1,0,0,0],[1,0,0,0,0]):\n\t\t\tself.off_track_count = 0\n\t\t\tself.turning_angle = int(90 - step)\n\t\t# turn left\n\t\telif lt_status_now in ([0,0,1,1,0],[0,0,0,1,0],[0,0,0,1,1],[0,0,0,0,1]):\n\t\t\tself.off_track_count = 0\n\t\t\tself.turning_angle = int(90 + step)\n\t\telif lt_status_now == [0,0,0,0,0]:\n\t\t\tself.off_track_count += 1\n\t\t\tif self.off_track_count > self.max_off_track_count:\n\t\t\t\ttmp_angle = (self.turning_angle-90)/abs(90-self.turning_angle)\n\t\t\t\ttmp_angle *= fw.turning_max\n\t\t\t\tself.accelerate(backward_speed)\n\t\t\t\tbw.backward()\n\t\t\t\tfw.turn(tmp_angle)\n\n\t\t\t\tlf.wait_tile_center()\n\t\t\t\tbw.stop()\n\n\t\t\t\tfw.turn(self.turning_angle)\n\t\t\t\ttime.sleep(0.2)\n\t\t\t\tbw.forward()\n\t\t\t\tself.accelerate(forward_speed)\n\t\t\t\ttime.sleep(0.2)\n\n\t\telse:\n\t\t\tself.off_track_count = 0\n\n\t\tself.turn(self.turning_angle)\n\t\tself.accelerate(forward_speed)\n\n\tdef sortie(self):\n\t\ttime.sleep(2)\n\n\t\t#evasive maneuvers\n\t\tself.turn90(R=False)\n\t\ttime.sleep(2.5)\n\n\t\tself.turn90()\n\t\ttime.sleep(2.5)\n\n\t\t#look for the line\n\t\tfw.turn_right()\n\t\tself.go(34)\n\t\t\n\t\tbw.speed = 25\n\t\tbw.forward()\n\n\t\tline = lf.read_digital()\n\t\twhile any(line):\n\t\t\tline = 
lf.read_digital()\n\n\t\tbw.stop()\n\t\ttime.sleep(1)\n\n\tdef avoid(self):\n\n\t\tdistance = ua.get_distance()\n\t\tif distance < turn_distance: # turn\n\t\t\t\n\t\t\tbw.stop()\n\t\t\ttime.sleep(3)\n\n\t\t\tdistance = ua.get_distance()\n\t\t\tif distance < turn_distance and distance != -1:\n\n\t\t\t\ttime.sleep(2)\n\n\t\t\t\t#back up\n\t\t\t\tfw.turn_straight()\n\t\t\t\tself.go(20, f=False)\n\t\t\t\ttime.sleep(2)\n\n\t\t\t\t#evasive maneuvers\n\t\t\t\tfw.cali_left()\n\t\t\t\tself.go(30, v=15)\n\t\t\t\ttime.sleep(2)\n\n\t\t\t\tself.turn90()\n\t\t\t\tself.go(1, v=15)\n\t\t\t\ttime.sleep(3)\n\n\t\t\t\t#look for the line\n\t\t\t\tfw.turn_right()\n\t\t\t\tself.go(34, v=15)\n\t\t\t\t\n\t\t\t\tbw.speed = 25\n\t\t\t\tbw.forward()\n\n\t\t\t\tline = lf.read_digital()\n\t\t\t\twhile any(line):\n\t\t\t\t\tline = lf.read_digital()\n\n\t\t\t\tbw.stop()\n\t\t\t\ttime.sleep(1)\n\n\tdef rand_dir(self):\n\t\tglobal last_angle, last_dir\n\t\tif force_turning == 0:\n\t\t\t_dir = random.randint(0, 1)\n\t\telif force_turning == 3:\n\t\t\t_dir = not last_dir\n\t\t\tlast_dir = _dir\n\t\t\tprint('last dir %s' % last_dir)\n\t\telse:\n\t\t\t_dir = force_turning - 1\n\t\tangle = (90 - fw.turning_max) + (_dir * 2* fw.turning_max)\n\t\tlast_angle = angle\n\t\treturn angle\n\n\tdef opposite_angle(self):\n\t\tglobal last_angle\n\t\tif last_angle < 90:\n\t\t\tangle = last_angle + 2* fw.turning_max\n\t\telse:\n\t\t\tangle = last_angle - 2* fw.turning_max\n\t\tlast_angle = angle\n\t\treturn angle\n\n\tdef destroy(self):\n\t\tbw.stop()\n\t\tfw.turn(90)\n\nclass demo_AB():\n\tdef __init__(self, sim=False):\n\t\tbw.start()\n\t\tself.temps = 0\n\t\tself.temps_max = 1\n\n\tdef avoid(self):\n\t\tdistance = ua.get_distance()\n\t\tif(distance != -1 and distance < 0.1):\n\t\t\tfw.turn(45)\n\t\t\ttime.sleep(0.5)\n\t\t\tfw.turn(135)\n\t\t\ttime.sleep(0.5)\n\t\t\tfw.turn(90)\n\t\t\ttime.sleep(1)\n\t\t\tfw.turn(135)\n\t\t\ttime.sleep(0.5)\n\t\t\tfw.turn(45)\n\t\t\ttime.sleep(0.5)\n\n\tdef follow(self, angle):\n\t\tlecture = lf.read_digital()\n\t\t#if(lecture != derniereLecture):\n\t\tif(lecture[4] == 1):\n\t\t\tvariation = 10\n\t\t\tnouvelAngle = angle + variation\n\t\t\tangle = nouvelAngle if nouvelAngle <= (90+fw.turning_max) else (90+fw.turning_max)\n\t\tif(lecture[3] == 1):\n\t\t\tvariation = 5\n\t\t\tnouvelAngle = angle + variation\n\t\t\tangle = nouvelAngle if nouvelAngle <= (90+fw.turning_max) else (90+fw.turning_max)\n\t\tif(lecture[2] == 1):\n\t\t\tangle = 90\n\t\tif(lecture[1] == 1):\n\t\t\tvariation = 5\n\t\t\tnouvelAngle = angle - variation\n\t\t\tangle = nouvelAngle if nouvelAngle >= (90-fw.turning_max) else (90-fw.turning_max)\n\t\tif(lecture[0] == 1):\n\t\t\tvariation = 10\n\t\t\tnouvelAngle = angle - variation\n\t\t\tangle = nouvelAngle if nouvelAngle >= (90-fw.turning_max) else (90-fw.turning_max)\n\n\t\tif(sum(lecture) == 0):\n\t\t\tangle = 90\n\t\tif(sum(lecture) == 5):\n\t\t\t#all five sensors see the line: straighten the wheels and stop\n\t\t\tangle = 90\n\t\t\tbw.stop()\n\t\treturn angle\n\n\tdef run(self):\n\t\t\n\t\tbw.speed = 50\n\t\tbw.forward()\n\t\tangle = 90\n\t\tdernierAngle = 0\n\t\tderniereLecture = [0,0,0,0,0]\n\t\twhile(bw.is_alive()):\n\t\t\t#self.avoid()\n\t\t\tangle = self.follow(angle)\n\t\t\t\n\t\t\t#if no line is detected, keep driving straight\n\t\t\tif(angle != dernierAngle):\n\t\t\t\tfw.turn(angle) \n\t\t\t\tdernierAngle = angle\n\t\t\t\n\n#demo = active_moving()\ndemo = demo_AB()\ndemo.run()" }, { "alpha_fraction": 0.5288859009742737, "alphanum_fraction": 0.5686457753181458, "avg_line_length": 37.108909606933594, "blob_id": 
"6bcbccca4e69f0d6198bf162806a1e2a736af82e", "content_id": "ff221c88236cf0fcd9b947ffe302bf4f36e82f75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7508, "license_type": "no_license", "max_line_length": 172, "num_lines": 202, "path": "/creationLigne.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "import bpy\nimport numpy as np\nimport time\nclass Ligne():\n \n\n #attention les x et y sont inversé\n #scene: l'objet blender où ajouter le mesh\n #fonction: f(x) qui donne un y\n def __init__(self, nom, fonction, L, rotationZ=0):\n self._largeur = 0.018/2\n self.verts = []\n edges = []\n self.faces = []\n self.aires = []\n self.interval = 0.2\n\n mesh = bpy.data.meshes.new(nom)\n obj = bpy.data.objects.new(mesh.name, mesh)\n col = bpy.data.collections.get(\"Collection\")\n col.objects.link(obj)\n bpy.context.view_layer.objects.active = obj\n\n X_array = np.linspace(0, L, num=int(L*100)+1)\n y = fonction(X_array[0])\n self.fonction = fonction\n self.verts.append(np.array([X_array[0], y-self._largeur, 0.0]))\n self.verts.append(np.array([X_array[0], y+self._largeur, 0.0]))\n for x in X_array[1:]:\n y = fonction(x)\n self.verts.append(np.array([x, y-self._largeur, 0.0]))\n self.verts.append(np.array([x, y+self._largeur, 0.0]))\n taille = len(self.verts)\n self.faces.append([taille-4, taille-3, taille-2])\n a = self.calculNorme(self.verts[self.faces[-1][0]], self.verts[self.faces[-1][2]])\n b = self.calculNorme(self.verts[self.faces[-1][1]], self.verts[self.faces[-1][2]])\n c = self.calculNorme(self.verts[self.faces[-1][0]], self.verts[self.faces[-1][1]])\n self.aires.append(round(self.calculAireTriangle(a, b, c), 6))\n\n self.faces.append([taille-1, taille-2, taille-3])\n a = self.calculNorme(self.verts[self.faces[-1][0]], self.verts[self.faces[-1][2]])\n b = self.calculNorme(self.verts[self.faces[-1][1]], self.verts[self.faces[-1][2]])\n c = self.calculNorme(self.verts[self.faces[-1][0]], self.verts[self.faces[-1][1]])\n self.aires.append(round(self.calculAireTriangle(a, b, c), 6))\n\n self.verts = self.matriceRotation(self.verts, np.radians(rotationZ))\n mesh.from_pydata(tuple(map(tuple, self.verts)), edges, tuple(map(tuple, self.faces)))\n \n self.faceDict = self.classifieFaces(self.faces, 0.1, L, 0)\n for key in self.faceDict:\n self.faceDict[key] = self.classifieFaces(self.faceDict[key], 0.1, L, 1)\n #transformation en numpy arraay\n\n #arrondie vers le haut au dixième près\n def round_up(self, n):\n if(n < 0):\n return (int(n*10)-1)/10\n else:\n return (int(n*10)+1)/10\n \n #arrondie vers la bas au dixième près\n def round_down(self, n):\n if(n < 0):\n return (int(n*10)-1)/10\n else:\n return (int(n*10))/10\n\n #Cette fonction va classer les faces dans un dictionnaire pour permettre de retrouver rapidement un index X, Y donnée (sans passez au travers tout les vertex possible.)\n def classifieFaces(self, faces, interval, L, pos):\n facesDict = {}\n min_value = min(self.verts[:,pos])\n min_value = self.round_down(min_value)\n max_value = max(self.verts[:,pos])\n max_value = self.round_up(max_value)\n N = np.linspace(min_value, max_value-0.1, num=(max_value-min_value)/interval)\n for n in N:\n key = round(n, 1)\n if(key not in facesDict):\n facesDict[key] = []\n for face in faces:\n valeur = abs(self.verts[face[0]][pos])\n #2 fois l'interval pour augmenter la porté du dictionnaire à la valeur n\n if(valeur < (abs(n)+2*interval) and valeur >= (abs(n)-2*interval)):\n 
facesDict[key].append(face)\n        return facesDict\n\n\n\n    #computes the lengths of the triangle's sides\n    def determineTriangle(self, P0, P1, P2):\n        a = self.calculNorme(P0, P2)\n        b = self.calculNorme(P1, P2)\n        c = self.calculNorme(P0, P1)\n        return a, b, c\n\n    #Heron's formula\n    def calculAireTriangle(self, a,b,c):\n        p = (a+b+c)/2\n        return np.sqrt(p*(p-a)*(p-b)*(p-c))\n\n    #computes the norm between a point A and a point B\n    def calculNorme(self, position1, position2):\n        longueur = position2[:2]-position1[:2]\n        return np.sqrt(longueur[0]**2 + longueur[1]**2)\n    \n    #line test based on the generating function itself (not reliable)\n    def estDansLigne2(self, position):\n        y = self.fonction(position[0])\n        borne1 = y-self._largeur\n        borne2 = y+self._largeur\n        if(borne1 <= position[1] and position[1] <= borne2):\n            return 1\n        return 0\n\n\n    #Computes the area of each identified face that could contain the position. \n    def estDansLigne(self, position):\n        position = np.array(position)\n        keyX = self.round_down(position[0])\n        keyY = self.round_down(position[1])\n        try:\n            for face in self.faceDict[keyX][keyY]:\n                #print(f\"{self.verts[face[0]]} et {self.verts[face[1]]} et {self.verts[face[2]]}\")\n                a, b, c = self.determineTriangle(self.verts[face[0]], self.verts[face[1]], self.verts[face[2]])\n                aire = self.calculAireTriangle(a, b, c)\n                a, b, c = self.determineTriangle(self.verts[face[0]], self.verts[face[1]], position)\n                aireTotal = self.calculAireTriangle(a, b, c)\n                a, b, c = self.determineTriangle(self.verts[face[2]], self.verts[face[1]], position)\n                aireTotal += self.calculAireTriangle(a, b, c)\n                a, b, c = self.determineTriangle(self.verts[face[0]], self.verts[face[2]], position)\n                aireTotal += self.calculAireTriangle(a, b, c)\n                aireTotal = round(aireTotal, 6)\n                if (aireTotal == round(aire, 6)):\n                    return 1\n        except KeyError as e:\n            pass \n        return 0\n\n    #Rotates a matrix of vectors\n    def matriceRotation(self, vecteur, angle):\n        #rotation matrix\n        matrice_rotation = np.array([[np.cos(angle), -np.sin(angle)],\n                            [np.sin(angle), np.cos(angle)]])\n        matrice_rotation = np.round(matrice_rotation, decimals=5)\n\n        #for reference, @ = matrix product\n        vecteur = np.asarray(vecteur)\n        vecteur[:,:2] = vecteur[:,:2] @ matrice_rotation\n\n        return vecteur\n\n#forms a hook (track 1)\ndef crochet(x):\n    y = 0\n    if(x <= 2):\n        y = 0\n    else:\n        y = np.sin(x-2)\n    return y\n\n#forms an inverted hook (track 2)\ndef crochet_negatif(x):\n    y = 0\n    if(x <= 2):\n        y = 0\n    else:\n        y = -np.sin(x-2)\n    return y\n\n#track 3\ndef ligne(x):\n    return 0 #straight line along y = 0\n\ndef test(x, y, ligne):\n    print(f\"[{x}, {y}] is: {True if ligne.estDansLigne2([x, y]) == 1 else False}\")\n    start = time.time()\n    print(f\"[{x}, {y}] is: {True if ligne.estDansLigne([x, y]) == 1 else False}\") \n    stop = time.time()\n    print(f\"time took {stop-start}\")\n\n\ndef tests():\n    ligne = Ligne(\"test\", crochet, 10, 35)\n\n    test(0.005, 0.005, ligne)\n    test(-0.0105, 0.01, ligne)\n    test(-0.005, 0.005, ligne)\n    test(0.005, -0.005, ligne)\n    test(-0.005, -0.005, ligne)\n    test(0.0, 0.0, ligne)\n    test(0.011, 0.011, ligne)\n    test(0.003, 0.005, ligne)\n    test(0.012, 0.005, ligne)\n    test(0.012, 0.012, ligne)\n    test(0.0, 0.015, ligne)\n    test(1, -0.699018, ligne)\n    test(3.00988, -1.03765, ligne)\n    test(2.94736, -1.13303, ligne)\n    test(8.75612, -4.91907, ligne)\n    test(3.93757, -1.81637, ligne)\n\n#tests()" }, { "alpha_fraction": 0.5870020985603333, "alphanum_fraction": 0.6200209856033325, "avg_line_length": 26.27142906188965, "blob_id": "b2a3b352cfae52eb05e5c3e835f4ff1c667baab8", 
"content_id": "973e241d8cc451387b9b1e27678edbe68323c8b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 100, "num_lines": 70, "path": "/testVirage.py", "repo_name": "bergalexandre/S5-picar-simulation-blender", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass test():\n _nombreStep = 0\n _debutVirage = 0\n framerate = 100\n\n def virage(self, v, L, alpha):\n #https://math.stackexchange.com/questions/3055263/path-of-a-simple-turning-car\n #calculer par rapport à T\n t = (self._nombreStep-self._debutVirage)/self.framerate\n omega = v/L*np.tan(alpha)\n \n vitesseAngulaire = v/omega if omega != 0.0 else 0\n\n position_BackWheel = vitesseAngulaire * np.array([np.sin(omega*t), 1-np.cos(omega*t)])\n #position_FrontWheel = position_BackWheel + L * np.array([np.cos(omega*t), np.sin(omega*t)])\n\n return position_BackWheel, omega, t\n\n def update(self):\n self._nombreStep += 1\n\n\n #matrice de rotation\n def rotation(self, vecteur, angle):\n theta = np.radians(angle)\n matrice_rotation = np.array([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n matrice_rotation = np.round(matrice_rotation, decimals=5)\n return vecteur @ matrice_rotation\n\n\n\ntestobj = test()\n\nv = 10\nN = 2000\n\nresult = []\nresultFW = []\nangleDepart = 90\nfor n in range(N):\n bw, omega, t = testobj.virage(0.10, 0.15, np.radians(45))\n result.append(bw)\n testobj.update()\n resultFW.append(result[-1] + 0.15*np.array([np.cos(omega*t), np.sin(omega*t)]))\n\n\n\nplt.figure(1)\nresult = np.array(result)\nresultFW = np.array(resultFW)\nplt.plot(result[0][0], result[1][0], \"xr\")\nplt.plot(result[:,0], result[:,1], \"b\")\nplt.plot(resultFW[:,0], resultFW[:,1], \"r\")\n\nresult = testobj.rotation(result, 90)\nresultFW = testobj.rotation(resultFW, 90)\nplt.plot(result[0][0], result[1][0], \"xg\")\nplt.plot(result[:,0], result[:,1], \"g\")\nplt.plot(resultFW[:,0], resultFW[:,1])\n\nangle = np.arctan((resultFW[:,1]-result[:,1])/(resultFW[:,0]-result[:,0]))\nplt.figure(\"angle\")\nplt.plot(angle)\n\nplt.show()" } ]
8
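A note on the turning model in the repo above: vehicule.py and testVirage.py both follow the simple bicycle-model path from the math.stackexchange question linked in testVirage.py, where steering angle alpha and wheelbase L give a yaw rate omega = v/L * tan(alpha), and the rear axle then traces a circle of radius v/omega. The following is a minimal standalone sketch of that formula only; the function name and signature are mine, not the repo's:

```python
import numpy as np

def rear_wheel_path(v, wheelbase, alpha, t):
    """Position of the rear axle after t seconds of a constant turn.

    v: forward speed (m/s), wheelbase: axle-to-axle distance (m),
    alpha: steering angle (rad). Falls back to straight-line motion
    when alpha == 0, which the original script does not handle.
    """
    omega = v / wheelbase * np.tan(alpha)  # yaw rate (rad/s)
    if omega == 0.0:
        return np.array([v * t, 0.0])
    radius = v / omega  # turning radius of the rear axle
    return radius * np.array([np.sin(omega * t), 1.0 - np.cos(omega * t)])

# the same parameters testVirage.py sweeps: 0.10 m/s, 0.15 m wheelbase, 45 degrees
print(rear_wheel_path(0.10, 0.15, np.radians(45), t=1.0))
```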
ShooperGames/Shooper-Bingo-Tracker
https://github.com/ShooperGames/Shooper-Bingo-Tracker
1619fabc2df103f6be5af24941f58fe51d4bf882
8a429f75a7af1261d4ea3a9480254ea4c283122c
23dbf83a3cc2fa2c300fe180cea780b49e8cd92e
refs/heads/main
2023-01-24T02:49:20.432522
2020-12-02T00:59:04
2020-12-02T00:59:04
317,705,854
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5551648139953613, "alphanum_fraction": 0.5731868147850037, "avg_line_length": 27.4375, "blob_id": "8d7011ff937c8f4739af52cd0c60ce67de512eb1", "content_id": "2962151861985b3cfbf9aef88caa61fac72fbe18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2275, "license_type": "permissive", "max_line_length": 60, "num_lines": 80, "path": "/main.py", "repo_name": "ShooperGames/Shooper-Bingo-Tracker", "src_encoding": "UTF-8", "text": "import kivy\n\nfrom kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\n\n#Main\nclass BingoMain(FloatLayout):\n def __init__(self, **kwargs):\n super(BingoMain, self).__init__(**kwargs)\n #self.add_widget(BingoMenu())\n gameB = GameBoard()\n gameB.size_hint_y = 0.9\n gameB.pos_hint = {'x':0, 'y':.1}\n gameB.add_widget(Label(text=\"B\"))\n gameB.add_widget(Label(text=\"I\"))\n gameB.add_widget(Label(text=\"N\"))\n gameB.add_widget(Label(text=\"G\"))\n gameB.add_widget(Label(text=\"O\"))\n cBut = ClearBut(text=\"Clear\")\n cBut.size_hint_y = 0.1\n for i in range(1,16):\n for j in range(5):\n gameB.add_widget(NumBut(text=str(i+(j*15))))\n self.add_widget(gameB)\n self.add_widget(cBut)\n\n#UIs\nclass GameBoard(GridLayout):\n pass\n\nclass ClearBut(Button):\n always_release = False\n min_state_time = 0.1\n def on_release(self):\n hRoot = self.parent\n hRoot.clear_widgets()\n gameB = GameBoard()\n gameB.size_hint_y = 0.9\n gameB.pos_hint = {'x':0, 'y':.1}\n gameB.add_widget(Label(text=\"B\"))\n gameB.add_widget(Label(text=\"I\"))\n gameB.add_widget(Label(text=\"N\"))\n gameB.add_widget(Label(text=\"G\"))\n gameB.add_widget(Label(text=\"O\"))\n cBut = ClearBut(text=\"Clear\")\n cBut.size_hint_y = 0.1\n for i in range(1,16):\n for j in range(5):\n gameB.add_widget(NumBut(text=str(i+(j*15))))\n hRoot.add_widget(gameB)\n hRoot.add_widget(cBut)\n\nclass NumBut(Button):\n always_release = False\n min_state_time = 0.1\n background_normal = \"\"\n\n def __init__(self, **kwargs):\n super(NumBut, self).__init__(**kwargs)\n self.background_color = [0,0,0,1]\n self.selected = False\n\n def on_release(self):\n self.selected = not self.selected\n if self.selected:\n self.background_color = [0,0,0.5,1]\n else:\n self.background_color = [0,0,0,1]\n\n#App\nclass BingoApp(App):\n def build(self):\n return BingoMain()\n\n#Main\nif __name__ == '__main__':\n BingoApp().run()\n" }, { "alpha_fraction": 0.792682945728302, "alphanum_fraction": 0.792682945728302, "avg_line_length": 40, "blob_id": "fb1a3abaf944c3651af6d4d5d98c5ad0b825ba5f", "content_id": "14cd480bda25b8e321a6ad1dbcc7fa8d9dfec046", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 82, "license_type": "permissive", "max_line_length": 57, "num_lines": 2, "path": "/README.md", "repo_name": "ShooperGames/Shooper-Bingo-Tracker", "src_encoding": "UTF-8", "text": "# Shooper-Bingo-Tracker\nA board to track which numbers have been called in Bingo.\n" } ]
2
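One design note on main.py above: BingoMain.__init__ and ClearBut.on_release build the board with the same copy-pasted block. A hedged sketch of how that duplication could be factored out; build_board is my name and not part of the repo, and it assumes the same Kivy widget classes main.py defines:

```python
def build_board(root):
    """Rebuild the BINGO header, number grid, and Clear button inside root."""
    root.clear_widgets()
    game_board = GameBoard()
    game_board.size_hint_y = 0.9
    game_board.pos_hint = {'x': 0, 'y': .1}
    for letter in "BINGO":
        game_board.add_widget(Label(text=letter))
    # column-major numbering: column j holds j*15+1 .. j*15+15
    for i in range(1, 16):
        for j in range(5):
            game_board.add_widget(NumBut(text=str(i + j * 15)))
    root.add_widget(game_board)
    clear_button = ClearBut(text="Clear")
    clear_button.size_hint_y = 0.1
    root.add_widget(clear_button)
```

Both BingoMain.__init__ and ClearBut.on_release could then call build_board once, so the board layout only has to change in one place.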
bbookman/Rice-Universy-IPIP-Parts-1-and-2
https://github.com/bbookman/Rice-Universy-IPIP-Parts-1-and-2
e5354a2347b5826e88cbd97e1c6e0d4a881b14fe
2ef1149d2dbc94aa0601db51e9ded4b05046324f
c529a2831ddb90c2bddb779b23cadae114f7c3c9
refs/heads/master
2020-04-06T17:12:17.592615
2018-11-15T04:29:00
2018-11-15T04:29:00
157,650,301
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5364428162574768, "alphanum_fraction": 0.5601813197135925, "avg_line_length": 31.674697875976562, "blob_id": "671883f0998914fa43e1c9631ebe22b129508011", "content_id": "9ec58f29a33b3e2a6d0caf1c394e903577917e95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8383, "license_type": "permissive", "max_line_length": 152, "num_lines": 249, "path": "/IPIP Part2/week_6_blackjack.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# Mini-project #6 - Blackjack\r\n\r\nimport simplegui\r\nimport random\r\n\r\n# load card sprite - 936x384 - source: jfitz.com\r\nCARD_SIZE = (72, 96)\r\nCARD_CENTER = (36, 48)\r\ncard_images = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png\")\r\n\r\nCARD_BACK_SIZE = (72, 96)\r\nCARD_BACK_CENTER = (36, 48)\r\ncard_back = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png\") \r\n\r\n# initialize some useful global variables\r\nin_play = False\r\noutcome = \"\"\r\nscore = 0\r\n\r\n\r\n# define globals for cards\r\nSUITS = ('C', 'S', 'H', 'D')\r\nRANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')\r\nVALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}\r\n\r\n\r\n# define card class\r\nclass Card:\r\n def __init__(self, suit, rank):\r\n self.face_down = False\r\n if (suit in SUITS) and (rank in RANKS):\r\n self.suit = suit\r\n self.rank = rank\r\n else:\r\n self.suit = None\r\n self.rank = None\r\n print \"Invalid card: \", suit, rank\r\n\r\n def __str__(self):\r\n if self.face_down:\r\n return \"XX\"\r\n else:\r\n return self.suit + self.rank\r\n\r\n def get_suit(self):\r\n return self.suit\r\n\r\n def get_rank(self):\r\n return self.rank\r\n\r\n def draw(self, canvas, pos):\r\n \r\n if not self.face_down:\r\n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank), \r\n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))\r\n canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)\r\n else:\r\n card_loc = (CARD_BACK_CENTER[0], CARD_BACK_CENTER[1])\r\n canvas.draw_image(card_back, card_loc, CARD_BACK_SIZE, [pos[0] + CARD_BACK_CENTER[0] + 1, pos[1] + CARD_BACK_CENTER[1] + 1], CARD_BACK_SIZE)\r\n \r\n\r\n def set_face_down(self, state):\r\n self.face_down = state\r\n \r\n def is_face_down(self):\r\n return self.face_down\r\n \r\n \r\n# define hand class\r\nclass Hand:\r\n def __init__(self):\r\n self.cards = []\r\n self.value = 0\r\n\r\n def __str__(self):\r\n hand_string = \"\"\r\n for card in self.cards:\r\n hand_string += str(card) + \" \"\r\n return hand_string\r\n\r\n def add_card(self, card):\r\n self.cards.append(card)\r\n\r\n def get_value(self):\r\n self.value = 0\r\n for card in self.cards:\r\n if not card.is_face_down():\r\n self.value += VALUES[card.rank]\r\n for card in self.cards:\r\n if card.get_rank() == \"A\" and (self.value + 10) <= 21:\r\n return self.value + 10\r\n return self.value\r\n\r\n def draw(self, canvas, pos):\r\n for card in self.cards:\r\n pos[0] = pos[0] + CARD_SIZE[0] + 10\r\n card.draw(canvas, pos)\r\n \r\n def flip_cards(self):\r\n for card in self.cards:\r\n card.set_face_down(False)\r\n \r\n# define deck class \r\nclass Deck:\r\n def __init__(self):\r\n self.cards = []\r\n for suit in SUITS:\r\n for rank in RANKS:\r\n card = Card(suit, rank)\r\n self.cards.append(card)\r\n\r\n def 
shuffle(self):\r\n random.shuffle(self.cards)\r\n\r\n def deal_card(self):\r\n card = self.cards[0]\r\n self.cards.remove(card)\r\n return card\r\n \r\n def __str__(self):\r\n deck_string = \"\"\r\n for card in self.cards:\r\n deck_string += str(card) + \" \"\r\n return deck_string\r\n\r\ndeck = Deck()\r\n\r\n#define event handlers for buttons\r\ndef deal():\r\n global outcome, in_play, dealers_hand, players_hand, deck, score\r\n outcome = \"Hit or Stand?\"\r\n if in_play:\r\n outcome = \"You were already playing and now lost a point for giving up!\"\r\n print outcome\r\n score -= 1\r\n in_play = False\r\n \r\n in_play = True\r\n deck.shuffle()\r\n dealers_hand = Hand()\r\n players_hand = Hand()\t\r\n players_hand.add_card(deck.deal_card())\r\n players_hand.add_card(deck.deal_card())\r\n dealers_hand.add_card(deck.deal_card())\r\n dealers_second_card = deck.deal_card()\r\n dealers_second_card.set_face_down(True)\r\n dealers_hand.add_card(dealers_second_card)\r\n print \"Dealer's hand \" + str(dealers_hand)\r\n print \"Player's hand \" + str(players_hand) + \" Value \" + str(players_hand.get_value())\r\n \r\n if dealers_hand.get_value() == 21:\r\n dealers_hand.flip_cards()\r\n outcome = \"Dealer gets Blackjack. Dealer wins. Press the Deal button for a new game.\"\r\n score -=1\r\n in_play = False\r\n print outcome\r\n outcome = \"No hand in play. Press the Deal button for a new game.\"\r\n print outcome\r\n elif players_hand.get_value() == 21:\r\n dealers_hand.flip_cards()\r\n outcome = \"Player gets Blackjack. Dealer has chance to tie ( and win). Press Stand button to continue.\"\r\n print outcome\r\n \r\n\r\ndef hit():\r\n global in_play, score, outcome\r\n # if the hand is in play, hit the player\r\n if in_play:\r\n players_hand.add_card(deck.deal_card())\r\n print \"Dealer's hand \" + str(dealers_hand) \r\n print \"Player's hand \" + str(players_hand) + \" Value \" + str(players_hand.get_value())\r\n # if busted, assign a message to outcome, update in_play and score\r\n if players_hand.get_value() > 21 and in_play:\r\n outcome = \"Player Busted and lost a point! New Deal?\"\r\n print outcome\r\n in_play = False\r\n score -= 1\r\n elif players_hand.get_value() == 21 and in_play:\r\n outcome = \"Player gets Blackjack. Dealer has chance to tie (and win!). Press Stand button to continue.\"\r\n print outcome \r\n else:\r\n outcome = \"No hand in play. Press the Deal button for a new game.\"\r\n print outcome\r\n \r\n \r\ndef stand():\r\n global in_play, score, outcome\r\n dealers_hand.flip_cards()\r\n # if hand is in play, repeatedly hit dealer until his hand has value 17 or more\r\n while dealers_hand.get_value() < 17 and in_play:\r\n outcome = \"Dealer hits\"\r\n print outcome\r\n dealers_hand.add_card(deck.deal_card())\r\n print \"Dealer's hand \" + str(dealers_hand) + \" Value \" + str(dealers_hand.get_value())\r\n print \"Player's hand \" + str(players_hand) + \" Value \" + str(players_hand.get_value())\r\n if dealers_hand.get_value() > 21 and in_play:\r\n outcome = \"Dealer Busted! Press the Deal button for a new game.\"\r\n print outcome\r\n score +=1\r\n in_play = False\r\n else:\r\n if dealers_hand.get_value() >= players_hand.get_value() and in_play:\r\n outcome = \"Dealer wins! Press the Deal button for a new game.\"\r\n score -= 1\r\n print outcome\r\n in_play = False\r\n elif in_play:\r\n outcome = \"Player wins! Press the Deal button for a new game.\"\r\n print outcome\r\n score += 1\r\n in_play = False\r\n elif in_play == False:\r\n outcome = \"No hand in play. 
Press the Deal button for a new game.\"\r\n            print outcome\r\n# draw handler \r\ndef draw(canvas):\r\n    global outcome, players_hand, dealers_hand\r\n    canvas.draw_text(\"Blackjack\",[30,50],40,\"White\")\r\n    canvas.draw_text(\"Score \"+str(score),[300,50],40,\"White\")\r\n    \r\n    canvas.draw_text(\"Dealer\",[30,140],30,\"White\")\r\n    canvas.draw_text(\"Showing \" + str(dealers_hand.get_value()), [120, 140], 30, \"White\")\r\n    #canvas.draw_text(str(dealers_hand), [300, 100], 30, \"Red\")\r\n    dealers_hand.draw(canvas, [200,75])\r\n    \r\n    canvas.draw_text(outcome,[30,220],20,\"Yellow\")\r\n    \r\n    canvas.draw_text(\"Player\",[30,300],30,\"White\")\r\n    #canvas.draw_text(str(players_hand), [300, 200], 30, \"Red\")\r\n    canvas.draw_text(\"Showing \" + str(players_hand.get_value()), [120, 300], 30, \"White\")\r\n    players_hand.draw(canvas, [200,250])\r\n    \r\n    \r\nframe = simplegui.create_frame(\"Blackjack\", 800, 400)\r\nframe.set_canvas_background(\"Green\")\r\n\r\n#create buttons and canvas callback\r\nframe.add_button(\"Deal\", deal, 200)\r\nframe.add_button(\"Hit\", hit, 200)\r\nframe.add_button(\"Stand\", stand, 200)\r\nframe.set_draw_handler(draw)\r\n\r\n\r\n# get things rolling\r\ndeal()\r\nframe.start()\r\n\r\n\r\n# remember to review the grading rubric" }, { "alpha_fraction": 0.5782766938209534, "alphanum_fraction": 0.6134708523750305, "avg_line_length": 23.205883026123047, "blob_id": "2318d6679b50031d0dacf2f5b7cecdf2bd00c282", "content_id": "570df648809c7c51db4153b1b9756110a9273a2a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1648, "license_type": "permissive", "max_line_length": 91, "num_lines": 68, "path": "/IPIP Part1/week_3_stop_watch_game.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# template for \"Stopwatch: The Game\"\r\n\r\nimport simplegui\r\n\r\n\r\n# define global variables\r\ntime = 0 #in tenths of a second\r\ntotal_stops = 0\r\nstops_on_whole_second = 0\r\n\r\n# define helper function format that converts time\r\n# in tenths of seconds into formatted string A:BC.D\r\ndef format(t):\r\n    D = t % 10\r\n    ABC = (t - D) / 10\r\n    A = ABC / 60\r\n    BC = ABC % 60\r\n    B = BC / 10\r\n    C = BC % 10\r\n    return str(A) + ':' + str(B) + str(C) + '.' 
+ str(D)\r\n    \r\n    \r\n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\r\ndef start_handler():\r\n    timer.start()\r\n\r\ndef stop_handler():\r\n    global total_stops, time, stops_on_whole_second\r\n    total_stops += 1\r\n    timer.stop()\r\n    if time % 10 == 0:\r\n        stops_on_whole_second += 1\r\n    \r\n\r\ndef reset_handler():\r\n    global time, total_stops, stops_on_whole_second\r\n    time = 0\r\n    total_stops = 0\r\n    stops_on_whole_second = 0\r\n    timer.stop()\r\n    \r\n# define event handler for timer with 0.1 sec interval\r\ndef timer_handler():\r\n    global time\r\n    time += 1\r\n    print format(time)\r\n\r\n# define draw handler\r\n\r\ndef draw(canvas):\r\n    global time\r\n    canvas.draw_text(format(time), [100, 200], 70, 'White')\r\n    canvas.draw_text(str(stops_on_whole_second)+'/'+str(total_stops), [250, 40], 30, 'Red')\r\n\r\n# create frame\r\nframe = simplegui.create_frame('Testing', 400, 400)\r\nframe.set_draw_handler(draw)\r\n\r\n# register event handlers\r\nframe.add_button('Start', start_handler, 200)\r\nframe.add_button('Stop', stop_handler, 200)\r\nframe.add_button('Reset', reset_handler, 200)\r\ntimer = simplegui.create_timer(100, timer_handler) #100 ms per tick matches the 0.1 sec interval above\r\n\r\n\r\n# start frame\r\nframe.start()\r\n\r\n" }, { "alpha_fraction": 0.5329195857048035, "alphanum_fraction": 0.5760928392410278, "avg_line_length": 31.981651306152344, "blob_id": "97a611913a91e0f875524086d7df825410f7d362", "content_id": "f0e74152cd3ca2d031cbeb949e0f5a48941f8338", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3706, "license_type": "permissive", "max_line_length": 127, "num_lines": 109, "path": "/IPIP Part1/week_4_pong.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "import simplegui\r\nimport random\r\n\r\n# initialize globals - pos and vel encode vertical info for paddles\r\nWIDTH = 800\r\nHEIGHT = 500 \r\nBALL_RADIUS = 20\r\nPAD_WIDTH = 20\r\nPAD_HEIGHT = 100\r\nHALF_PAD_WIDTH = PAD_WIDTH / 2\r\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\r\nLEFT = False\r\nRIGHT = True\r\nSIDE = LEFT\r\nVELOCITY_MULTIPLIER = 1.1\r\n\r\ndef spawn_ball(direction):\r\n    global ball_pos, ball_vel \r\n    ball_pos = [WIDTH / 2, HEIGHT / 2]\t\r\n    if direction == RIGHT:\r\n        ball_vel = [random.randrange(120, 240) / 60, - (random.randrange(60, 180) /60)]\r\n    else:\r\n        ball_vel = [-(random.randrange(120, 240) / 60), - (random.randrange(60, 180) / 60)]\r\n\r\n\r\ndef new_game():\r\n    global score1, score2, SIDE, paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel \r\n    \r\n    paddle1_pos, paddle2_pos = (HEIGHT - PAD_HEIGHT)/2, (HEIGHT - PAD_HEIGHT)/2\r\n    paddle1_vel = paddle2_vel = 0\r\n    score1, score2 = 0, 0\r\n    SIDE = not SIDE\r\n    spawn_ball(SIDE)\r\n\r\ndef draw(canvas):\r\n    global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel\r\n    canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\r\n    canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\r\n    canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\r\n    \r\n    if ball_pos[0] <= BALL_RADIUS + PAD_WIDTH:\r\n        if paddle1_pos <= ball_pos[1] <= (paddle1_pos+PAD_HEIGHT):\r\n            ball_vel[0] = - VELOCITY_MULTIPLIER * ball_vel[0]\r\n        else:\r\n            spawn_ball(RIGHT)\r\n            score2 += 1\r\n    if ball_pos[0] >= (WIDTH - BALL_RADIUS - PAD_WIDTH):\r\n        if paddle2_pos <= ball_pos[1] <= (paddle2_pos+PAD_HEIGHT):\r\n            ball_vel[0] = - VELOCITY_MULTIPLIER * ball_vel[0]\r\n        else:\r\n            spawn_ball(LEFT)\r\n            score1 += 1\r\n    if ball_pos[1] <= 
BALL_RADIUS:\r\n        ball_vel[1] = - ball_vel[1]\r\n    if ball_pos[1] >= (HEIGHT - BALL_RADIUS):\r\n        ball_vel[1] = - ball_vel[1]\r\n    ball_pos[0] += ball_vel[0]\r\n    ball_pos[1] += ball_vel[1]\r\n    \r\n    # draw ball\r\n    canvas.draw_circle(ball_pos, BALL_RADIUS, 0.1, \"White\", \"Red\")\r\n    \r\n    if 0 <= (paddle1_pos + paddle1_vel) <= HEIGHT - PAD_HEIGHT:\r\n        paddle1_pos += paddle1_vel\r\n    if 0 <= (paddle2_pos + paddle2_vel) <= HEIGHT - PAD_HEIGHT:\r\n        paddle2_pos += paddle2_vel\r\n    \r\n    # draw paddles\r\n    canvas.draw_line([WIDTH - PAD_WIDTH / 2, paddle2_pos],[WIDTH- PAD_WIDTH / 2, paddle2_pos + PAD_HEIGHT], PAD_WIDTH, \"White\")\r\n    canvas.draw_line([PAD_WIDTH / 2, paddle1_pos],[PAD_WIDTH / 2, paddle1_pos + PAD_HEIGHT], PAD_WIDTH, \"White\")\r\n    canvas.draw_text(str(score1), (WIDTH / 2 - 30, 40), 40, \"White\")\r\n    canvas.draw_text(str(score2), (WIDTH / 2 + 30, 40), 40, \"White\")\r\n    \r\ndef keydown(key):\r\n    global paddle1_vel, paddle2_vel\r\n    vel = 4\r\n    if key == simplegui.KEY_MAP[\"s\"]:\r\n        paddle1_vel = vel\r\n    if key == simplegui.KEY_MAP[\"w\"]:\r\n        paddle1_vel = -vel \r\n    if key == simplegui.KEY_MAP[\"down\"]:\r\n        paddle2_vel = vel\r\n    if key == simplegui.KEY_MAP[\"up\"]:\r\n        paddle2_vel = -vel\r\n    \r\ndef keyup(key):\r\n    global paddle1_vel, paddle2_vel\r\n    if key == simplegui.KEY_MAP[\"s\"]:\r\n        paddle1_vel = 0\r\n    if key == simplegui.KEY_MAP[\"w\"]:\r\n        paddle1_vel = 0 \r\n    if key == simplegui.KEY_MAP[\"down\"]:\r\n        paddle2_vel = 0\r\n    if key == simplegui.KEY_MAP[\"up\"]:\r\n        paddle2_vel = 0\r\n    \r\ndef game_restart():\r\n    new_game()\r\n\r\n# create frame\r\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\r\nframe.set_draw_handler(draw)\r\nframe.set_keydown_handler(keydown)\r\nframe.set_keyup_handler(keyup)\r\nframe.add_button(\"Restart\", game_restart, 100)\r\n\r\n# start frame\r\nnew_game()\r\nframe.start()\r\n\r\n" }, { "alpha_fraction": 0.5678859353065491, "alphanum_fraction": 0.5802851915359497, "avg_line_length": 23.95161247253418, "blob_id": "dcdeeb7fecbcefb8a4aa17184fc5b74b81d64fe1", "content_id": "adf411b387dd22a5957b8db7a21a0fab8c47a970", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1613, "license_type": "permissive", "max_line_length": 76, "num_lines": 62, "path": "/IPIP Part1/week_1_rock_paper_scissors_lizzard_spock.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# Rock-paper-scissors-lizard-Spock template\r\n\r\nimport random\r\n\r\ndef name_to_number(name):\r\n    if name == \"rock\":\r\n        return 0\r\n    elif name == \"Spock\":\r\n        return 1\r\n    elif name == \"paper\":\r\n        return 2\r\n    elif name == \"lizard\":\r\n        return 3\r\n    elif name == \"scissors\":\r\n        return 4\r\n    else:\r\n        return -1\r\n    \r\ndef number_to_name(number):\r\n    if number == 0:\r\n        return \"rock\"\r\n    elif number == 1:\r\n        return \"Spock\"\r\n    elif number == 2:\r\n        return \"paper\"\r\n    elif number == 3:\r\n        return \"lizard\"\r\n    elif number == 4:\r\n        return \"scissors\"\r\n    else:\r\n        return \"ERROR \" + str(number) + \" is not a valid option.\"\r\n\r\n\r\ndef rpsls(player_choice): \r\n    print\r\n    print \"Player chooses \" + player_choice\r\n    player_number = name_to_number(player_choice)\r\n    \r\n    if player_number < 0:\r\n        print \"ERROR invalid player choice '\" + player_choice + \"'\"\r\n        return\r\n    computer_choice = random.randrange(0,5)\r\n    print \"Computer chooses \" + number_to_name(computer_choice)\r\n    result = (player_number - computer_choice) %5\r\n    if result == 0:\r\n        print \"Tie 
Game\"\r\n if result == 1 or result == 2:\r\n print \"Player wins!\"\r\n if result == 3 or result == 4:\r\n print \"Computer wins!\"\r\n\r\n # use if/elif/else to determine winner, print winner message\r\n\r\n \r\n# test your code - THESE CALLS MUST BE PRESENT IN YOUR SUBMITTED CODE\r\nrpsls(\"rock\")\r\nrpsls(\"Spock\")\r\nrpsls(\"paper\")\r\nrpsls(\"lizard\")\r\nrpsls(\"scissors\")\r\nrpsls(\"test error\")\r\n# always remember to check your completed program against the grading rubric\r\n\r\n\r\n" }, { "alpha_fraction": 0.7927461266517639, "alphanum_fraction": 0.8134714961051941, "avg_line_length": 47.25, "blob_id": "d0bb71d6b4f099aa59fd4c1469f75bdba024eaae", "content_id": "368d2ae509b46d760bc96ea3f6df9c36011ab279", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 193, "license_type": "permissive", "max_line_length": 101, "num_lines": 4, "path": "/README.md", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# Rice Universy IPIP Parts1 and 2\nAll programming mini-projects for Rice University Interactive Programming in Python Part 1 and Part 2\n\nRun each in Codesculptor at http://www.codeskulptor.org\n" }, { "alpha_fraction": 0.5834155678749084, "alphanum_fraction": 0.6357354521751404, "avg_line_length": 28.17910385131836, "blob_id": "a0df4292a8a0549e8b567dcf9e147bcbd9ba389b", "content_id": "bc606f97f22fcbc301a8a3b7586dd79391b32960", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2026, "license_type": "permissive", "max_line_length": 83, "num_lines": 67, "path": "/IPIP Part1/week_2_guess_the_number.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# template for \"Guess the number\" mini-project\r\n# input will come from buttons and an input field\r\n# all output for the game will be printed in the console\r\nimport simplegui\r\nimport math\r\nimport random\r\n\r\n\r\nsecret_number = 0\r\nchosen_range = 0\r\nguess_count = 7\r\n\r\n# helper function to start and restart the game\r\ndef new_game(chosen_range):\r\n global secret_number\r\n global guess_count\r\n if chosen_range == 0 or chosen_range == 100:\r\n print \"Starting new game with range from 0 to 100\"\r\n print \"You have 7 guesses \\n\"\r\n guess_count = 7\r\n secret_number = range100()\r\n elif chosen_range == 1000:\r\n guess_count = 10\r\n secret_number = range1000()\r\n print \"Starting new game with range from 0 to 1000\"\r\n print \"You have 10 guesses \\n\"\r\n else:\r\n print \"Error! No such range\"\r\n\r\n\r\n# define event handlers for control panel\r\ndef range100():\r\n return random.randrange(0,100)\r\n\r\ndef range1000():\r\n return random.randrange(0,1000)\r\n\r\ndef input_guess(guess):\r\n global secret_number\r\n global guess_count\r\n guess_count -=1\r\n print \"Guess was \" + str(guess)\r\n if int(guess) < secret_number:\r\n print \"Higher! You have \" + str(guess_count) + \" guesses remaining \\n\"\r\n if int(guess) > secret_number:\r\n print \"Lower! You have \" + str(guess_count) + \" guesses remaining \\n\"\r\n elif int(guess) == secret_number:\r\n print \"Correct!!! 
You got it with \" + str(guess_count) + \" guesses left \\n\"\r\n new_game(0)\r\n\r\n\r\ndef button_100_handler():\r\n global guess_count\r\n guess_count = 7\r\n new_game(100)\r\n \r\ndef button_1000_handler():\r\n global guess_count\r\n guess_count = 10\r\n new_game(1000)\r\n \r\n# create frame\r\nframe = simplegui.create_frame('Guess the number', 300, 300)\r\nbutton100 = frame.add_button('Range is 0 - 100', button_100_handler, 200)\r\nbutton1000 = frame.add_button('Range is 0 - 1000', button_1000_handler, 200)\r\nuser_guess = frame.add_input('Your guess ', input_guess, 50)\r\nnew_game(0)\r\n\r\n\r\n" }, { "alpha_fraction": 0.5786644220352173, "alphanum_fraction": 0.59111487865448, "avg_line_length": 33.0098991394043, "blob_id": "44a89726f2e6b09d41bdf3e72446e0a16e9ae6e2", "content_id": "f80daf485a2e2dc0a5f34f02ef0833a43dc9c39a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3534, "license_type": "permissive", "max_line_length": 153, "num_lines": 101, "path": "/IPIP Part2/week_5_memory.py", "repo_name": "bbookman/Rice-Universy-IPIP-Parts-1-and-2", "src_encoding": "UTF-8", "text": "# implementation of card game - Memory\r\n\r\nimport simplegui\r\nimport random\r\n\r\nWIDTH = 800\r\nHEIGHT = 100\r\nTOTAL_CARDS = 16\r\nCARD_WIDTH = WIDTH // TOTAL_CARDS\r\n\r\n\r\n# helper function to initialize globals\r\ndef new_game():\r\n global exposed_cards, state, turns, deck\r\n global index_of_first_chosen_card, index_of_second_chosen_card\r\n index_of_first_chosen_card = -1\r\n index_of_second_chosen_card = -1\r\n turns = 0\r\n state = 0 #State 0 corresponds to the start of the game. \r\n print \"Start game\"\r\n \r\n exposed_cards = []\r\n for card in range(TOTAL_CARDS):\r\n exposed_cards.append(False)\r\n deck = []\r\n for card in range(TOTAL_CARDS / 2):\r\n deck.append(card)\r\n deck += deck\r\n random.shuffle(deck)\r\n turn_label.set_text(\"Turns = \" + str(turns))\r\n\r\ndef mouseclick(pos):\r\n global exposed_cards, state, deck, turns\r\n global index_of_first_chosen_card, index_of_second_chosen_card\r\n click_position = list(pos)\r\n chosen_card_index = click_position[0] // CARD_WIDTH\r\n \r\n if exposed_cards[chosen_card_index] == False: #Ignore exposed card\r\n \r\n \r\n #In state 0 if you click on a card, that card is exposed, and you switch to state 1\r\n if state <= 0:\r\n exposed_cards[chosen_card_index] = True\r\n index_of_first_chosen_card = chosen_card_index\r\n state = 1\r\n \r\n elif state == 1:\r\n \r\n #State 1 corresponds to a single exposed unpaired card.\r\n #In state 1, if you click on an unexposed card, that card is exposed and you switch to state 2\r\n exposed_cards[chosen_card_index] = True\r\n turns += 1\r\n turn_label.set_text(\"Turns = \" + str(turns))\r\n index_of_second_chosen_card = chosen_card_index\r\n state = 2\r\n \r\n #State 2 corresponds to the end of a turn. 
In state 2, if you click on an unexposed card, that card is exposed and you switch to state 1.\r\n else:\r\n if deck[index_of_first_chosen_card] == deck[index_of_second_chosen_card]:\r\n print \"Match!!\"\r\n exposed_cards[index_of_first_chosen_card] = True\r\n exposed_cards[index_of_second_chosen_card] = True\r\n index_of_second_chosen_card = -1\r\n \r\n else:\r\n print \"No Match\"\r\n exposed_cards[index_of_first_chosen_card] = False\r\n exposed_cards[index_of_second_chosen_card] = False\r\n index_of_first_chosen_card = -1\r\n index_of_second_chosen_card = -1\r\n state = 1\r\n exposed_cards[chosen_card_index] = True\r\n index_of_first_chosen_card = chosen_card_index\r\n \r\n \r\n else:\r\n print \"That card is already face up\"\r\n\r\n\r\ndef draw(canvas):\r\n for i in range(TOTAL_CARDS):\r\n if exposed_cards[i]:\r\n canvas.draw_text(str(deck[i]), [CARD_WIDTH * i + WIDTH / 60, HEIGHT / 2], 60, \"White\")\r\n else:\r\n canvas.draw_polygon([(i*CARD_WIDTH,0),((i+1)*CARD_WIDTH,0),((i+1)*CARD_WIDTH,HEIGHT),(i*CARD_WIDTH,HEIGHT),(i*CARD_WIDTH,0)],1,\"White\",\"Red\")\r\n\r\n# create frame and add a button and labels\r\nframe = simplegui.create_frame(\"Memory\",WIDTH , HEIGHT)\r\nframe.add_button(\"Reset\", new_game)\r\nturn_label = frame.add_label(\"Turns = 0\")\r\n\r\n# register event handler\r\nframe.set_mouseclick_handler(mouseclick)\r\nframe.set_draw_handler(draw)\r\n\r\n# get things rolling\r\nnew_game()\r\nframe.start()\r\n\r\n\r\n# Always remember to review the grading rubric" } ]
7
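For the week_3 stopwatch above, the format helper packs a count of tenths of a second into the string A:BC.D (minutes, seconds, tenths). A quick worked example of the same arithmetic, restated in Python 3 (the original uses Python 2 integer division); the function name is mine:

```python
def format_tenths(t):
    # t is a count of tenths of a second
    tenths = t % 10
    seconds = (t // 10) % 60
    minutes = t // 600
    return "%d:%02d.%d" % (minutes, seconds, tenths)

assert format_tenths(0) == "0:00.0"
assert format_tenths(754) == "1:15.4"   # 75.4 seconds
assert format_tenths(3599) == "5:59.9"  # 359.9 seconds
```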
physicodes/password-cracking
https://github.com/physicodes/password-cracking
44ed8feb09fd2a1111e5812eda124b04962db61a
29c14ed393ab21be9aaea185d72abbfa71312b8f
b6faef9e5507b34b53f78f908d20a3a9eb1858e2
refs/heads/master
2020-08-06T13:16:54.553739
2019-11-11T14:45:21
2019-11-11T14:45:21
212,989,145
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6067742705345154, "alphanum_fraction": 0.6128605604171753, "avg_line_length": 27.41353416442871, "blob_id": "b784f55bb22519e8d91611ea5a5afe37c76ffc2e", "content_id": "20ebdfe8ab65bf44a7783fd864cdf02696327071", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3779, "license_type": "permissive", "max_line_length": 80, "num_lines": 133, "path": "/bf_python.py", "repo_name": "physicodes/password-cracking", "src_encoding": "UTF-8", "text": "import time\nimport itertools\nimport numpy as np\n\n\n# Create list of possible characters\nCHARS = [chr(i) for i in range(33, 127)]\n\n\ndef check_password(password, guess):\n \"\"\"Takes two string variables: the password and the guess. Returns true if\n they match, or false if they do not.\"\"\"\n return True if password == guess else False\n\n\ndef crack_password(password, permutations):\n \"\"\"Step through an iterable of possible permutations and return the password\n when it is successfully matched.\"\"\"\n # Step through permuations iterable\n for guess in permutations:\n matched = check_password(password, guess)\n if matched:\n print(\"Password cracked!\\n\"\n \"Password was \\\"{}\\\".\\n\".format(guess))\n break\n else:\n pass\n\n\ndef time_crack_password(permutations):\n \"\"\"Returns the time taken to check all possible permutations\"\"\"\n # Include all the same logic as would be needed to check properly\n for guess in permutations:\n matched = check_password('', guess)\n if matched:\n pass\n else:\n pass\n\n\ndef man_list(length):\n \"\"\"Takes integer length of the password and returns a list of all possible\n combinations of CHARS of that length.\"\"\"\n # Base case: return the list of possible characters as the list of guesses\n if length < 2:\n guess_list = CHARS\n # Append every previous guess to every possible character\n else:\n guess_list = []\n for c in CHARS:\n for g in man_list(length - 1):\n guess = \"\".join([c, g])\n guess_list.append(guess)\n return guess_list\n\n\ndef man_generator(length):\n \"\"\"Takes integer length of the password and returns a generator of all\n possible combinations of CHARS of that length.\"\"\"\n # Base case: return generator of possible characters as the generator of\n # guesses\n if length < 2:\n for c in CHARS:\n guess = c\n yield guess\n # Append every previous guess to every possible character\n else:\n for c in CHARS:\n for g in man_list(length - 1):\n guess = \"\".join([c, g])\n yield guess\n\n\ndef auto_generator(length):\n \"\"\"Takes integer length of the password and returns a generator of all\n possible combinations of CHARS of that length, making use of the\n itertools.permutations method (probably written in C).\"\"\"\n # step through itertools generator of permutation lists\n for p in itertools.permutations(CHARS, length):\n # join list t to form guess string\n guess = \"\".join(p)\n yield guess\n\n\ndef test_method(method, password_length):\n\n n_repeats = 5\n\n timings = []\n\n for i in range(n_repeats):\n\n temp = []\n\n t1 = time.process_time()\n permutations = method(password_length)\n t2 = time.process_time()\n\n t_permutations = t2 - t1\n temp.append(t_permutations)\n\n t1 = time.process_time()\n time_crack_password(permutations)\n t2 = time.process_time()\n\n t_cracking = t2 - t1\n temp.append(t_cracking)\n\n t_total = t_permutations + t_cracking\n temp.append(t_total)\n\n timings.append(temp)\n\n timings = np.array(timings)\n mean = np.mean(timings, axis=0)\n error = np.std(timings, 
axis=0)/np.sqrt(n_repeats)\n\n zipped = np.vstack([mean, error]).T\n\n return zipped\n\n\nif __name__ == '__main__':\n\n max_password_length = 1\n\n methods = [man_list, man_generator, auto_generator]\n\n for l in range(1, max_password_length+1):\n for m in methods:\n print(f\"\\nTesting {m.__name__} with \"\n f\"{l} character passwords.\\n\")\n print(test_method(method=m, password_length=l))\n" }, { "alpha_fraction": 0.7594936490058899, "alphanum_fraction": 0.7594936490058899, "avg_line_length": 18.75, "blob_id": "2f50514664f88be8fb60740a95f9dbaa6131ee54", "content_id": "ec3432db2d0f9493f782929f0a8f764fabc21880", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "permissive", "max_line_length": 37, "num_lines": 4, "path": "/bf_cython.py", "repo_name": "physicodes/password-cracking", "src_encoding": "UTF-8", "text": "import pyximport; pyximport.install()\nimport bf_ctools\n\nprint(\"Hello, world!\")\n" } ]
2
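A sense of scale for the brute force in bf_python.py above: with the 94 printable characters in CHARS, the number of length-L candidate strings grows as 94**L, while draws without repeated characters (what itertools.permutations yields) grow more slowly. A small sketch of the two counts; it assumes Python 3.8+ for math.perm:

```python
import math

CHARSET = 94  # chr(33) .. chr(126), as in bf_python.py

for length in range(1, 5):
    with_repeats = CHARSET ** length              # what a brute force must cover
    without_repeats = math.perm(CHARSET, length)  # draws with no repeated characters
    print(length, with_repeats, without_repeats)
```

Even at length 4 the full space is about 78 million guesses, which is why the script only benchmarks very short lengths.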
superpowerng/baseline-noise-detector
https://github.com/superpowerng/baseline-noise-detector
f169ccaab62808d32ff612066894412840c6d585
e19d3d8a42810baafd99e62cd37a913e6cd3bcae
c78829eba6f00ee15dc1bfa3aa5426990cd04ee2
refs/heads/master
2021-01-19T21:29:11.811479
2017-02-20T02:51:09
2017-02-20T02:51:09
82,509,530
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7890625, "alphanum_fraction": 0.7890625, "avg_line_length": 31, "blob_id": "20d463fc46e9d30fbcfff9af306206f6e261f2e8", "content_id": "2a33f35ae31c0a493776925823743ecc682427a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 128, "license_type": "no_license", "max_line_length": 57, "num_lines": 4, "path": "/README.md", "repo_name": "superpowerng/baseline-noise-detector", "src_encoding": "UTF-8", "text": "# baseline-noise-detector\n\nUse of pyaudio to detect background noise.\nWill trigger a response if level is abot a pre-set limit.\n" }, { "alpha_fraction": 0.46846845746040344, "alphanum_fraction": 0.4954954981803894, "avg_line_length": 25.428571701049805, "blob_id": "c51b4446060441ee392b93e313e9d20135e19bcf", "content_id": "6433edef2f70e9f1e76f07d77a7aa3bad17168c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/baseline2.py", "repo_name": "superpowerng/baseline-noise-detector", "src_encoding": "UTF-8", "text": "import pyaudio\nimport audioop\n\nTHRESHOLD = 500\nCHUNK = 250\n\npa = pyaudio.PyAudio()\nstream = pa.open(\n format = pyaudio.paInt16,\n channels = 1,\n rate = 4000,\n input = True,\n input_device_index = 5,\n frames_per_buffer = CHUNK)\nwhile True:\n data = stream.read(CHUNK)\n amplitude = audioop.rms(data,2)\n if amplitude > THRESHOLD:\n print(\"voice detected, amplitude = \")\n print amplitude\n break\n" } ]
2
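The threshold check in baseline2.py above compares against the root-mean-square amplitude that audioop.rms(data, 2) computes over a chunk of signed 16-bit samples. A minimal pure-Python sketch of the same quantity, useful for sanity-checking a THRESHOLD value; the helper name is mine, not part of the repo:

```python
import math
import struct

def rms_16bit(frames):
    """Root-mean-square of a buffer of little-endian signed 16-bit samples."""
    count = len(frames) // 2
    samples = struct.unpack("<%dh" % count, frames[:count * 2])
    return math.sqrt(sum(s * s for s in samples) / count)

# a 4-sample buffer: 0, 1000, -1000, 0  ->  RMS of roughly 707
buf = struct.pack("<4h", 0, 1000, -1000, 0)
print(rms_16bit(buf))
```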
Cruzzzzz/kanyinsola
https://github.com/Cruzzzzz/kanyinsola
0377a05fbe97cc2210864419afe69d96b3860c99
5ececd2e3d921b6d8e449fe4ffde1e5d92508aad
562c5fc9b7340f34e38d47faa2d50066443b22ef
refs/heads/master
2020-04-07T16:46:24.919962
2018-11-21T13:14:34
2018-11-21T13:14:34
158,542,329
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5378151535987854, "alphanum_fraction": 0.5378151535987854, "avg_line_length": 13.866666793823242, "blob_id": "d05c222cad5d66a363f1ce25aaf6d162bc49a895", "content_id": "c8775cc0b177d119cfdd1ea4fac3b5c5ba251553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 33, "num_lines": 15, "path": "/kanyinsola.py", "repo_name": "Cruzzzzz/kanyinsola", "src_encoding": "UTF-8", "text": "'''\r\n\r\ncontrol and conditional statment\r\nif condition\r\n'''\r\na =('abdul') \r\nb =('aye')\r\nif(a==b):\r\n print('yes')\r\nelif(b=='aye'):\r\n print('aye , abdul printed')\r\nelif(a=='abdul')\r\n print('abdul printed')\r\nelse:\r\n print('no')\r\n" } ]
1
mayank13/LearnPython
https://github.com/mayank13/LearnPython
80369a7a2c9df7839a36120f46466dc626082af8
9949f730b7f9f655c837d2021b432d6c920b5997
b8fae6bb9fcb09313759c671c5144011342e407c
refs/heads/master
2021-04-30T17:40:56.329271
2017-02-02T11:27:08
2017-02-02T11:27:08
80,243,523
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.42175573110580444, "alphanum_fraction": 0.4370228946208954, "avg_line_length": 29.882352828979492, "blob_id": "2b0078280f369dc4e7d14dab9236c11f6c268641", "content_id": "15066478d0d4d3b97efd3c32b0288b4343869cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 57, "num_lines": 17, "path": "/count_substrings.py", "repo_name": "mayank13/LearnPython", "src_encoding": "UTF-8", "text": "def count_substring(string, sub_string):\n count = 0\n for i in range(0, len(string) - len(sub_string) + 1):\n if(string[i] == sub_string[0]):\n incr = 0\n match = True\n for j in range(0, len(sub_string)):\n if ((string[i + incr]) != sub_string[j]):\n match = False\n break\n else:\n incr += 1\n if match:\n count += 1\n return count\n\nprint(count_substring('ABCDCDC', 'CDC'))" }, { "alpha_fraction": 0.5950177907943726, "alphanum_fraction": 0.6120996475219727, "avg_line_length": 20.615385055541992, "blob_id": "0547be5293c301f0e1da4505813b9f8bb70779c9", "content_id": "8ed2ec9af6968fe034d3e10d12bb7fc47b196074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 64, "num_lines": 65, "path": "/Tree.py", "repo_name": "mayank13/LearnPython", "src_encoding": "UTF-8", "text": "class Node:\n \"\"\"A simple Binary Node to be used in a Tree\"\"\"\n\n def __init__(self, value=-1, leftNode=None, rightNode=None):\n self.value = value\n self.leftNode = leftNode\n self.rightNode = rightNode\n\n def __str__(self):\n return ' Node value --> %s' % self.value\n\n\nclass Tree:\n \"\"\"A Binary Tree \"\"\"\n\n def __init__(self, root):\n self.root = root\n\n def preorder(self, root):\n if root is None:\n return\n print(root.value)\n self.preorder(root.leftNode)\n self.preorder(root.rightNode)\n\n def inorder(self, root):\n if root is None:\n return\n self.inorder(root.leftNode)\n print(root.value)\n self.inorder(root.rightNode)\n\n def postorder(self, root):\n if root is None:\n return\n self.postorder(root.leftNode)\n self.postorder(root.rightNode)\n print(root.value)\n\n\nnodeTest = Node()\nprint(nodeTest)\nprint(Node)\nrootNode = Node(1)\ntree = Tree(rootNode)\n\nnode2 = Node(2)\nnode3 = Node(3)\nnode4 = Node(4)\nnode5 = Node(5)\nnode6 = Node(6)\nnode7 = Node(7)\n\nrootNode.leftNode = node2\nrootNode.rightNode = node3\nnode2.leftNode = node4\nnode2.rightNode = node5\nnode3.leftNode = node6\nnode3.rightNode = node7\nprint('----Preorder----')\ntree.preorder(rootNode)\nprint('----Inorder----')\ntree.inorder(rootNode)\nprint('----Postorder----')\ntree.postorder(rootNode)\n" }, { "alpha_fraction": 0.6509695053100586, "alphanum_fraction": 0.6537396311759949, "avg_line_length": 20.294116973876953, "blob_id": "38f2817d70334113e318c7395a7073251b5195dd", "content_id": "43d9a89185959d052e02019d6b9c58369966f71e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 33, "num_lines": 17, "path": "/ch4.py", "repo_name": "mayank13/LearnPython", "src_encoding": "UTF-8", "text": "s = 'qA2'\nis_alnum = False\nis_alpha = False\nis_digit = False\nis_lower = False\nis_upper = False\nfor letter in s:\n is_alnum = (letter.isalnum())\n is_alpha = (letter.isalpha())\n is_digit = (letter.isdigit())\n is_lower = (letter.islower())\n is_upper = 
(letter.isupper())\nprint(is_alnum)\nprint(is_alpha)\nprint(is_digit)\nprint(is_lower)\nprint(is_upper)" }, { "alpha_fraction": 0.5198813080787659, "alphanum_fraction": 0.5367952585220337, "avg_line_length": 26.177419662475586, "blob_id": "de98ffd0adc39d1ec0b13cf70c8b0231edd09f65", "content_id": "5bbf46b6f7094ffbd7a7b1f601c8e6500b7558ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3370, "license_type": "no_license", "max_line_length": 60, "num_lines": 124, "path": "/BinaryTree.py", "repo_name": "mayank13/LearnPython", "src_encoding": "UTF-8", "text": "from Tree import Node\nfrom collections import deque\n\n\nclass BinaryTree:\n def __init__(self, root=None):\n self.root = root\n self.noOfLeftNodes = 0\n self.noOfRightNodes = 0\n\n def addNode(self, root, node):\n if self.root is None:\n self.root = node\n return\n if root.leftNode is None:\n root.leftNode = node\n self.noOfLeftNodes += 1\n return\n elif root.rightNode is None:\n root.rightNode = node\n self.noOfRightNodes += 1\n return\n\n else:\n if self.noOfLeftNodes - self.noOfRightNodes < 2:\n self.addNode(root.leftNode, node)\n else:\n self.addNode(root.rightNode, node)\n\n def preorder(self, root):\n if root is None:\n return\n print(root.value)\n self.preorder(root.leftNode)\n self.preorder(root.rightNode)\n\n def levelorder(self, root):\n queue = deque([])\n queue.append(root)\n while queue:\n curr = queue.popleft()\n print(curr)\n if curr.leftNode:\n queue.append(curr.leftNode)\n if curr.rightNode:\n queue.append(curr.rightNode)\n\n def add_node_inorder(self, node):\n queue = deque([])\n if self.root is None:\n self.root = node\n return\n queue.append(self.root)\n curr_node = self.root\n while queue:\n curr_node = queue.popleft()\n if curr_node.leftNode is not None:\n queue.append(curr_node.leftNode)\n else:\n curr_node.leftNode = node\n return\n if curr_node.rightNode is not None:\n queue.append(curr_node.rightNode)\n else:\n curr_node.rightNode = node\n return\n return self.root\n\n def level_wise(self, root):\n queue = deque([])\n level = 0\n queue.append(root)\n queue.append(\"EOL\")\n while queue:\n curr = queue.popleft()\n if curr == \"EOL\" :\n if queue:\n queue.append(\"EOL\")\n level += 1\n else:\n print(\"level-\" + str(level))\n print(curr)\n if curr.leftNode:\n queue.append(curr.leftNode)\n if curr.rightNode:\n queue.append(curr.rightNode)\n\n\nbt = BinaryTree()\nnodes = [1, 2, 3, 4, 5, 6, 7]\nfor i in range(len(nodes)):\n bt.addNode(bt.root, Node(nodes[i]))\nprint('---Binary Tee---')\nbt.preorder(bt.root)\n\nbt2 = BinaryTree()\nbt2.add_node_inorder(Node(1))\nbt2.add_node_inorder(Node(2))\nbt2.add_node_inorder(Node(3))\nbt2.add_node_inorder(Node(4))\nbt2.add_node_inorder(Node(5))\nbt2.add_node_inorder(Node(6))\nbt2.add_node_inorder(Node(7))\nprint('---Pre Order Traversal---')\nbt2.preorder(bt2.root)\nprint(\"***Level Order Traversal***\")\nbt2.levelorder(bt2.root)\nprint(\"###Level Wise Traversal###\")\nbt2.level_wise(bt2.root)\n\n# -- Queue Test --\n# q2 = deque([1, 2, 3])\n# print(q2.popleft())\n# print(q2.popleft())\n# print(q2.popleft())\n# el = q2.popleft()\n# print(el)\n\nbt3 = BinaryTree()\nbt3Nodes = [1, 2, 5, 3, 4, 6, 7]\nfor i in range(len(bt3Nodes)):\n bt3.add_node_inorder(Node(bt3Nodes[i]))\nprint(\"--Pre Order Traversal\")\nbt3.preorder(bt3.root)\n" } ]
4
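A note on the `count_substrings.py` file in the mayank13/LearnPython record above: the hand-rolled nested loop exists because Python's built-in `str.count` only counts non-overlapping matches, so it would report 1 for `'ABCDCDC'` / `'CDC'` where the exercise expects 2. A minimal, more idiomatic equivalent (an illustration only, not part of the dataset record):

```python
# Overlapping-substring count, equivalent to the record's count_substring().
# str.count() would return 1 here because it skips past each match; scanning
# every start index finds 'CDC' twice in 'ABCDCDC'.
def count_substring(string, sub_string):
    return sum(
        1
        for i in range(len(string) - len(sub_string) + 1)
        if string.startswith(sub_string, i)  # compare at index i without slicing
    )

print(count_substring('ABCDCDC', 'CDC'))  # 2, matching the original loop
```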
veronica-f-reyes/numpy-pandas-visualization-exercises
https://github.com/veronica-f-reyes/numpy-pandas-visualization-exercises
be30c6292d73932969f652e9b7bdbfd20aee6e37
498ab0543f87fca522ac54a795772a709ccd5685
70556f752d69c053fa87885addf216fcd4246bf8
refs/heads/main
2023-06-27T16:48:56.275599
2021-07-30T21:49:00
2021-07-30T21:49:00
386,357,938
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5375858545303345, "alphanum_fraction": 0.5835132598876953, "avg_line_length": 12.91939926147461, "blob_id": "e5c3661bcbaff53860a3da3cf2faa6f8664f6717", "content_id": "7e247f4730e4007d956b9cfe9abb357a6cad8634", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10194, "license_type": "no_license", "max_line_length": 210, "num_lines": 732, "path": "/numpy_exercises.py", "repo_name": "veronica-f-reyes/numpy-pandas-visualization-exercises", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[2]:\n\n\na = np.array([1, 2, 3])\na\n\n\n# In[3]:\n\n\nmatrix = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\nmatrix\n\n\n# In[4]:\n\n\na[0]\n\n\n# In[5]:\n\n\nprint('a == {}'.format(a))\nprint('a[0] == {}'.format(a[0]))\nprint('a[1] == {}'.format(a[1]))\nprint('a[2] == {}'.format(a[2]))\n\n\n# In[6]:\n\n\nmatrix[1, 1]\n\n\n# In[7]:\n\n\nshould_include_elements = [True, False, True]\na[should_include_elements]\n\n\n# In[8]:\n\n\noriginal_array = np.array([1, 2, 3, 4, 5])\noriginal_array + 1\n\n\n# In[9]:\n\n\nmy_array = np.array([-3, 0, 3, 16])\n\nprint('my_array == {}'.format(my_array))\nprint('my_array - 5 == {}'.format(my_array - 5))\nprint('my_array * 4 == {}'.format(my_array * 4))\nprint('my_array / 2 == {}'.format(my_array / 2))\nprint('my_array ** 2 == {}'.format(my_array ** 2))\nprint('my_array % 2 == {}'.format(my_array % 2))\n\n\n# In[10]:\n\n\nmy_array = np.array([-3, 0, 3, 16])\n\n\n# In[11]:\n\n\nmy_array[my_array > 0]\n\n\n# In[12]:\n\n\nmy_array[my_array % 2 == 0]\n\n\n# ## Numpy Exercises\n\n# 1. How many negative numbers are there?\n\n# In[134]:\n\n\na = np.array([4, 10, 12, 23, -2, -1, 0, 0, 0, -6, 3, -7])\n\n\n# In[138]:\n\n\nprint(a[a < 0])\nprint(\"Number of negative numbers: \", len(a[a < 0]))\n\n\n# In[136]:\n\n\n#Another approach\nmask = a < 0\nneg_nums = a[mask]\nneg_nums.size\n\n\n# 2. How many positive numbers are there?\n\n# In[139]:\n\n\nprint(a[a > 0])\nprint(\"Number of positive numbers: \", len(a[a > 0]))\n\n\n# In[137]:\n\n\n#Another approach\nmask = a > 0\npos_nums = a[mask]\npos_nums.size\n\n\n# 3. How many even positive numbers are there?\n\n# In[140]:\n\n\nmask = (a > 0) & (a % 2 == 0)\n\nprint(a[mask])\n\nprint(\"Number of even positive numbers: \", len(a[mask]))\n\n\n# 4. If you were to add 3 to each data point, how many positive numbers would there be?\n\n# In[52]:\n\n\nb = a + 3 \n\nprint(a)\n\nprint(b)\n\nmask = b > 0\n\nprint(b[mask])\n\nprint(\"Number of positive numbers after adding 3: \", len(b[mask]))\n\n\n# 5. If you squared each number, what would the new mean and standard deviation be?\n\n# In[141]:\n\n\nsq = a ** 2\nprint(a)\nprint(sq)\n\nm = sq.mean()\ns_d = sq.std()\n\nprint(\"New mean is : \", m)\nprint(\"New standard deviation is: \", s_d)\n\n\n# 6. A common statistical operation on a dataset is centering. This means to adjust the data such that the mean of the data is 0. This is done by subtracting the mean from each data point. Center the data set. \n\n# In[56]:\n\n\nprint(a)\n\nm = a.mean()\n\ncentered = a - m\n\nprint(\"Mean of array a is : \", m)\n\nprint(centered)\n\n\n# 7. Calculate the z-score for each data point. 
Recall that the z-score is given by:\n# \n# Z = (x − μ) / σ == Z = (x - avg(x)) / std(x)\n# \n\n# In[60]:\n\n\nprint(a)\n\nm = a.mean()\ns_d = a.std()\n\nprint(\"Mean is : \", m)\nprint(\"Standard deviation is: \", s_d)\n\nz_score = (a - m) / s_d\nprint(\"\\nZ score is: \", z_score)\n\n\n# ## More Numpy Practice\n\n# In[64]:\n\n\nimport numpy as np\n\n\n# # Life w/o numpy to life with numpy\n# \n# ### Setup 1\n\n# In[68]:\n\n\na = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n\n# ### Use python's built in functionality/operators to determine the following:\n# \n# ### Exercise 1 \n# Make a variable called sum_of_a to hold the sum of all the numbers in above list\n# \n# \n\n# In[69]:\n\n\nsum_of_a = a.sum()\nprint(sum_of_a)\n\n\n# #### Exercise 2 \n# Make a variable named min_of_a to hold the minimum of all the numbers in the above list\n# \n# \n\n# In[70]:\n\n\nmin_of_a = a.min()\nprint(min_of_a)\n\n\n# ### Exercise 3\n# Make a variable named max_of_a to hold the max number of all the numbers in the above list\n# \n# \n\n# In[71]:\n\n\nmax_of_a = a.max()\nprint(max_of_a)\n\n\n# ### Exercise 4\n# Make a variable named mean_of_a to hold the average of all the numbers in the above list\n# \n# \n\n# In[73]:\n\n\nmean_of_a = a.mean()\nprint(mean_of_a)\n\n\n# ### Exercise 5 \n# Make a variable named product_of_a to hold the product of multiplying all the numbers in the above list together\n# \n# \n\n# In[76]:\n\n\nproduct_of_a = a.prod()\nprint(product_of_a)\n\n\n# \n# ### Exercise 6 \n# Make a variable named squares_of_a. It should hold each number in a squared like [1, 4, 9, 16, 25...]\n# \n# \n\n# In[80]:\n\n\nsquares_of_a = np.square(a)\nprint('a:', a)\nprint(\"a squared: \", squares_of_a)\n\n\n# \n# ### Exercise 7 \n# Make a variable named odds_in_a. It should hold only the odd numbers\n# \n# \n\n# In[84]:\n\n\nodds_in_a = a[a % 2 != 0]\n\nprint(a)\nprint(odds_in_a)\n\n\n# ### Exercise 8\n# Make a variable named evens_in_a. It should hold only the evens.\n\n# In[86]:\n\n\nevens_in_a = a[a % 2 == 0]\n\nprint(a)\nprint(evens_in_a)\n\n\n# ## What about life in two dimensions? A list of lists is matrix, a table, a spreadsheet, a chessboard...\n# ### Setup 2: Consider what it would take to find the sum, min, max, average, sum, product, and list of squares for this list of two lists.\n# b = [\n# [3, 4, 5],\n# [6, 7, 8]\n# ]\n\n# In[87]:\n\n\nb = np.array([[3, 4, 5], [6, 7, 8] ])\n\n\n# ### Exercise 1 \n# Refactor the following to use numpy. Use sum_of_b as the variable. **Hint, you'll first need to make sure that the \"b\" variable is a numpy array**\n# sum_of_b = 0\n# for row in b:\n# sum_of_b += sum(row)\n\n# In[88]:\n\n\nsum_of_b = b.sum()\nprint(sum_of_b)\n\n\n# ### Exercise 2 \n# Refactor the following to use numpy. 
\n# min_of_b = min(b[0]) if min(b[0]) <= min(b[1]) else min(b[1])\n\n# In[89]:\n\n\nmin_of_b = b.min()\nprint(min_of_b)\n\n\n# ### Exercise 3 \n# Refactor the following maximum calculation to find the answer with numpy.\n# max_of_b = max(b[0]) if max(b[0]) >= max(b[1]) else max(b[1])\n\n# In[90]:\n\n\nmax_of_b = b.max()\nprint(max_of_b)\n\n\n# ### Exercise 4 \n# Refactor the following using numpy to find the mean of b\n# mean_of_b = (sum(b[0]) + sum(b[1])) / (len(b[0]) + len(b[1]))\n# \n# \n\n# In[91]:\n\n\nmean_of_b = b.mean()\nprint(mean_of_b)\n\n\n# ### Exercise 5 \n# Refactor the following to use numpy for calculating the product of all numbers multiplied together.\n# product_of_b = 1\n# for row in b:\n# for number in row:\n# product_of_b *= number\n\n# In[92]:\n\n\nproduct_of_b = b.prod()\nprint(product_of_b)\n\n\n# ### Exercise 6 \n# Refactor the following to use numpy to find the list of squares \n# squares_of_b = []\n# for row in b:\n# for number in row:\n# squares_of_b.append(number**2)\n\n# In[93]:\n\n\nsquares_of_b = np.square(b)\nprint(squares_of_b)\n\n\n# \n# ### Exercise 7 \n# Refactor using numpy to determine the odds_in_b\n# odds_in_b = []\n# for row in b:\n# for number in row:\n# if(number % 2 != 0):\n# odds_in_b.append(number)\n\n# In[95]:\n\n\nodds_in_b = b[b % 2 != 0]\n\nprint(b)\nprint('\\n',odds_in_b)\n\n\n# ### Exercise 8\n# Refactor the following to use numpy to filter only the even numbers\n# evens_in_b = []\n# for row in b:\n# for number in row:\n# if(number % 2 == 0):\n# evens_in_b.append(number)\n\n# In[98]:\n\n\nevens_in_b = b[b % 2 == 0]\n\nprint(b)\nprint('\\n',evens_in_b)\n\n\n# ### Exercise 9 \n# Print out the shape of the array b.\n\n# In[99]:\n\n\nprint(b.shape)\n\n\n# \n# ### Exercise 10 \n# Transpose the array b.\n\n# In[100]:\n\n\nprint(b, '\\n')\nprint(b.transpose())\n\n\n# ### Exercise 11 \n# Reshape the array b to be a single list of 6 numbers. (1 x 6)\n\n# In[101]:\n\n\nprint(b, '\\n')\nprint(b.reshape(1,6))\n\n\n# ### Exercise 12 \n# Reshape the array b to be a list of 6 lists, each containing only 1 number (6 x 1)\n# \n# \n\n# In[102]:\n\n\nprint(b, '\\n')\nprint(b.reshape(6,1))\n\n\n# \n# ## Setup 3\n# c = [\n# [1, 2, 3],\n# [4, 5, 6],\n# [7, 8, 9]\n# ]\n# \n# ### HINT, you'll first need to make sure that the \"c\" variable is a numpy array prior to using numpy array methods.\n# \n# \n\n# In[103]:\n\n\nc = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])\n\n\n# ### Exercise 1\n# Find the min, max, sum, and product of c.\n\n# In[105]:\n\n\nmin_of_c = c.min()\nprint(min_of_c)\n\nmax_of_c = c.max()\nprint('\\n',max_of_c)\n\nsum_of_c = c.sum()\nprint('\\n',sum_of_c)\n\n\n# ### Exercise 2 \n# Determine the standard deviation of c.\n\n# In[107]:\n\n\nc_s_d = c.std()\n\nprint(\"Standard deviation is: \", c_s_d)\n\n\n# ### Exercise 3 \n# Determine the variance of c.\n\n# In[109]:\n\n\nc_var = c.var()\n\nprint(\"Variance is: \", c_var)\n\n\n# ### Exercise 4 \n# Print out the shape of the array c\n\n# In[110]:\n\n\nprint(c.shape)\n\n\n# ### Exercise 5 \n# Transpose c and print out transposed result.\n\n# In[111]:\n\n\nprint(c, '\\n')\nprint(c.transpose())\n\n\n# ### Exercise 6 \n# Get the dot product of the array c with c. \n# \n# \n\n# In[114]:\n\n\ndot = c * c\nprint(c)\nprint('\\n', dot)\n\n\n# ### Exercise 7\n# Write the code necessary to sum up the result of c times c transposed. 
Answer should be 261\n# \n# \n\n# In[119]:\n\n\nc_transposed = c.transpose()\nres = c * c_transposed\nsum_c_and_ctransposed = res.sum() \nprint(sum_c_and_ctransposed)\n\n\n# ### Exercise 8\n# Write the code necessary to determine the product of c times c transposed. Answer should be 131681894400.\n# \n# \n# \n\n# In[120]:\n\n\nc_transposed = c.transpose()\nres = c * c_transposed\nprod_c_and_ctransposed = res.prod() \nprint(prod_c_and_ctransposed)\n\n\n# ## Setup 4\n# d = [\n# [90, 30, 45, 0, 120, 180],\n# [45, -90, -30, 270, 90, 0],\n# [60, 45, -45, 90, -45, 180]\n# ]\n# \n# \n\n# In[121]:\n\n\nd = np.array( [ [90, 30, 45, 0, 120, 180], [45, -90, -30, 270, 90, 0], [60, 45, -45, 90, -45, 180] ])\n\n\n# ### Exercise 1 - Find the sine of all the numbers in d\n\n# In[123]:\n\n\nd_sin = np.sin(d)\nprint(d_sin)\n\n\n# ### Exercise 2 \n# Find the cosine of all the numbers in d\n\n# In[124]:\n\n\nd_cos = np.cos(d)\nprint(d_cos)\n\n\n# ### Exercise 3\n# Find the tangent of all the numbers in d\n\n# In[125]:\n\n\nd_tan = np.tan(d)\nprint(d_tan)\n\n\n# ### Exercise 4 \n# Find all the negative numbers in d\n\n# In[129]:\n\n\n\nprint(\"Number of negative numbers: \", len(d[d < 0]))\n\n\n# ### Exercise 5\n# Find all the positive numbers in d\n\n# In[130]:\n\n\nprint(\"Number of positive numbers: \", len(d[d > 0]))\n\n\n# \n# ### Exercise 6 \n# Return an array of only the unique numbers in d.\n\n# In[131]:\n\n\nd_unique = np.unique(d)\nprint(d_unique)\n\n\n# ### Exercise 7 \n# Determine how many unique numbers there are in d.\n\n# In[132]:\n\n\nd_unique = np.unique(d)\nprint(len(d_unique))\n\n\n# ### Exercise 8 \n# Print out the shape of d.\n\n# In[126]:\n\n\nprint(d.shape)\n\n\n# ### Exercise 9 \n# Transpose and then print out the shape of d.\n\n# In[127]:\n\n\nprint(d.transpose())\nprint(d.shape)\n\n\n# ### Exercise 10 \n# Reshape d into an array of 9 x 2\n\n# In[128]:\n\n\nprint(d, '\\n')\nprint(d.reshape(9,2))\n\n" } ]
1
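A note on the `numpy_exercises.py` record above: Setup 3, Exercise 6 asks for the "dot product" but the recorded solution computes the element-wise product (`c * c`), and the stated answers in Exercises 7 and 8 (261 and 131681894400) only come out of the element-wise form. A short sketch contrasting the two operations (an illustration only, not part of the record; 64-bit integer arrays assumed so the product does not overflow):

```python
import numpy as np

c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64)

# Element-wise product: what the recorded solutions compute with `*`.
elementwise = c * c.transpose()    # entry (i, j) is c[i, j] * c[j, i]
print(elementwise.sum())           # 261,          matches Exercise 7
print(elementwise.prod())          # 131681894400, matches Exercise 8

# A true matrix (dot) product is a different operation with different totals.
print(c.dot(c.transpose()).sum())  # 693, not 261
```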
xuxiaoyu89/crypotography
https://github.com/xuxiaoyu89/crypotography
1648855d7551aa4ccac52193f2e38c3e84e96018
746b294b526267673c1d8f821b88578c562eb732
06f4ebaf5ee9cbc1b35a7f6be5311f07f1313960
refs/heads/master
2020-06-03T19:11:43.689219
2015-09-21T15:31:39
2015-09-21T15:31:39
42,875,124
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.584511399269104, "alphanum_fraction": 0.6183159351348877, "avg_line_length": 17.078651428222656, "blob_id": "f746a49502937499763695d0fcca50855e591944", "content_id": "08003c588e820614bc31993fd4b1c13165bdfae5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1627, "license_type": "no_license", "max_line_length": 66, "num_lines": 89, "path": "/main.py", "repo_name": "xuxiaoyu89/crypotography", "src_encoding": "UTF-8", "text": "# generate two prime numbers (7 bits long)\nfrom helper import *\n\n\ndef createKeyPair():\n\tp = generatePrime()\n\tq = generatePrime()\n\twhile True:\n\t\tif p == q:\n\t\t\tq = generatePrime()\n\t\t\tcontinue\n\t\telse: break\n\n\t# create the public key\n\tn = p*q\n\tphi = (p-1)*(q-1)\n\n\t# find a relative prime of phi and its reverse\n\tkeyPair = extendEuclidean(phi)\n\t#print keyPair\n \tkeyPair.append(n)\n \treturn keyPair\n\n\n\naliceKey = createKeyPair()\ntrentKey = createKeyPair()\n\n\n#########################################\n# create a certificate for Alice\n# consist of pair r, and a signature\n\nr = '00000000'\nname = \"Alice\"\nfor c in list(name):\n\tr += intToBinaryString(ord(c), 8)\n\nn = aliceKey[2]\nr += intToBinaryString(n, 32)\nr += intToBinaryString(aliceKey[1], 32)\n\n\n\nhash_r = myHash(r)\nprint trentKey[0]\ns = fastExponentiation(int(hash_r, 2), trentKey[0], trentKey[0]-1)\n\n#########################################\n# doing authenticates\n\nnString = intToBinaryString(n, 32)\nuString = \"\"\nk,i = 31, 31\nwhile i >= 0:\n\tif nString[31-i] == \"1\":\n\t\tk = i\n\t\tbreak\n\telse:\n\t\tuString += \"0\"\n\t\ti -= 1\n\nprint k\n\nuString = uString + \"01\"\ni -= 2\nwhile i >= 0:\n\trandBit = getRandomBit()\n\tuString = uString + str(randBit)\n\ti -= 1\n\nprint nString\nprint uString\n\n\n# alice compute h(u) and decrypt it\n# hash_u = myHash(uString)\nhash_u = \"1010101\"\nv = fastExponentiation(int(hash_u, 2), aliceKey[0], aliceKey[2])\nv_encrypted = fastExponentiation(v, aliceKey[1], aliceKey[2])\n\n\nprint \"hash_u: \" + str(int(hash_u,2))\nprint \"v: \" + str(v)\nprint \"aliceKey: \" + str(aliceKey[0]) + \", \" + str(aliceKey[1])\nprint \"v_encrypted: \" + str(v_encrypted)\n\n\nprint printText\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5059744715690613, "alphanum_fraction": 0.5533580780029297, "avg_line_length": 15.595890045166016, "blob_id": "5db103b68e5ac46e43007f27fc0fda2af0f08dac", "content_id": "ff5c71763265fd03c14493a42ebc0844d541c61a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2427, "license_type": "no_license", "max_line_length": 54, "num_lines": 146, "path": "/helper.py", "repo_name": "xuxiaoyu89/crypotography", "src_encoding": "UTF-8", "text": "from random import *\nfrom math import *\n\ndef getRandomBit():\n\tr = randint(0,2**20)\n\tresult = 1 if r%2 == 1 else 0\n\tprint \"random number: \", r, \", last bit: \", result\n\treturn result\n\ndef multModM(x, y, m):\n\treturn (x*y)%m\n\ndef raiseModM():\n\treturn\n\ndef isPrime(n):\n\t# use Miller Rabin Algorithm to check if n is a prime\n\ta = randint(1,n+1)\n\ty = 1\n\tk = int(log(n,2))\n\tremain = n-1\n\tfor i in xrange(k, -1, -1):\n\t\txi = remain/(2**i)\n\t\t#print remain, xi\n\t\tremain = remain%(2**i)\n\t\tz = y\n\t\ty = multModM(y,y,n)\n\t\tif y==1 and z!= 1 and z!=(n-1):\n\t\t\treturn False\n\t\tif xi == 1:\n\t\t\ty = multModM(y, a, n)\n\t\t#print i, xi, z, y\n\n\tif y != 1:\n\t\treturn False\n\treturn 
True\n\ndef inverse(e):\n\t# find the multiplicatve inverse of e\n\treturn\n\ndef generateCandidate():\n\tn = 65\n\tfor i in xrange(1,6):\n\t\trBit = getRandomBit()\n\t\tn += rBit*(2**i)\n\tprint \"genereated: \", n\n\treturn n\n\ndef generatePrime():\n\twhile True:\n\t\tn = generateCandidate()\n\t\t# test if n is prime\n\t\tflag = True\n\t\tfor i in xrange(21):\n\t\t\ttemp = isPrime(n)\n\t\t\tflag = flag and temp\n\t\tif flag: return n\n\t\telse: continue\nq = generatePrime();\n\n\ndef extendEuclidean(phi): \n\tphi, e = phi, 3\n\td = 0\n\twhile True:\n\t\tr1, r2 = phi, e\n\t\tq = r1/r2\n\t\tq_1 = q\n\t\tq_2 = q\n\t\tr3 = r1%r2\n\t\ts1, t1 = 1, 0\n\t\ts2, t2 = 0, 1\n\t\ts, t = 1, 0\n\t\ti = 1\n\t\twhile r3!=0:\n\t\t\tr1, r2 = r2, r3\n\t\t\tq = r1/r2\n\t\t\tr3 = r1%r2\n\t\t\tif i == 1:\n\t\t\t\ts1, t1 = s,t\n\t\t\t\ts, t = 0, 1\n\t\t\t\tq_1 = q\n\t\t\telse:\n\t\t\t\ts2, t2 = s1, t1\n\t\t\t\ts1, t1 = s, t \n\t\t\t\ts = s2 - q_2*s1\n\t\t\t\tt = t2 - q_2*t1\n\t\t\t\tq_2 = q_1\n\t\t\t\tq_1 = q\n\n\t\t\tprint q, r1, r2, r3, s, t \n\t\t\ti += 1\n\n\t\tif r2 == 1:\n\t\t\t# e is relative prime of phi\n\t\t\t# find the inverse\n\t\t\td = t1 - q_2*t\n\t\t\tif d < 0: d += phi\n\t\t\treturn [d, e]\n\t\telse:\n\t\t\t# continue looking \n\t\t\te += 1\n\t\t\tcontinue\n\n\ndef intToBinaryString(n, length):\n\tdigits = []\n\tr = n\n\twhile r > 0:\n\t\trightBit = str(r%2)\n\t\tdigits.append(rightBit)\n\t\tr = (r-r%2)/2\n\twhile len(digits) < length:\n\t\tdigits.append('0')\n\n\tdigits.reverse()\n\treturn \"\".join(digits)\n\n\ndef fastExponentiation(a, x, n):\n\ty = 1\n\tk = int(log(x,2))\n\tremain = x;\n\tfor i in xrange(k, -1, -1):\n\t\ty = multModM(y, y, n)\n\t\txi = remain/(2**i)\n\t\tremain = remain%(2**i)\n\t\tif xi == 1:\n\t\t\ty = multModM(a, y, n)\n\treturn y\n\ndef stringXOR(s1, s2):\n\tresult = []\n\tfor i in xrange(len(s1)):\n\t\tresult.append(str(int(s1[i]) ^ int(s2[i])))\n\treturn \"\".join(result)\n\n\ndef myHash(s):\n\tresult = s[0:8]\n\ti = 8\n\twhile i < len(s):\n\t\tresult = stringXOR(result, s[i:i+8])\n\t\ti += 8\n\treturn result\n\n\n\n\n" } ]
2
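A note on the xuxiaoyu89/crypotography record above: `fastExponentiation(a, x, n)` in `helper.py` is square-and-multiply, i.e. it computes a^x mod n, and the certificate signature in `main.py` is that operation applied with the private exponent. A round-trip sketch of the same idea using Python's built-in `pow`; the primes and public exponent below are illustrative choices, not values from the record, and `pow(e, -1, phi)` needs Python 3.8+:

```python
# Toy RSA sign/verify round trip (textbook RSA, no padding), mirroring what
# helper.py's fastExponentiation() is used for. All key values here are
# assumptions for illustration, not taken from the record.
p, q = 97, 127              # toy 7-bit primes, as generatePrime() produces
n, phi = p * q, (p - 1) * (q - 1)
e = 5                       # public exponent, coprime with phi
d = pow(e, -1, phi)         # private exponent (the role of extendEuclidean)

h = 0b1010101               # the hard-coded hash_u value from main.py
sig = pow(h, d, n)          # sign:   h^d mod n
assert pow(sig, e, n) == h  # verify: sig^e mod n recovers the hash
print(sig)
```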
mike88macedon/image_to_audio_to_image
https://github.com/mike88macedon/image_to_audio_to_image
ebf4937fd10a497120b73b3dd0db0ff5efff278b
58aaba3cd4b5ea3d3a689ac4b6717bf821c29e8c
cd731c2591e43be03f75844cd8b72a4e129b0ecf
refs/heads/master
2021-01-20T08:41:09.018567
2017-05-03T18:05:54
2017-05-03T18:05:54
90,177,948
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.7874564528465271, "avg_line_length": 30.88888931274414, "blob_id": "28872a1b8f3af3602fe865abb8f5b833237effcc", "content_id": "7b0e19f0853c931c64f7453f0878eab6ad5b171c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 287, "license_type": "no_license", "max_line_length": 151, "num_lines": 9, "path": "/README.md", "repo_name": "mike88macedon/image_to_audio_to_image", "src_encoding": "UTF-8", "text": "# image_to_AUDIO_to_image\n\n\nDESCRIPTION:\n\nTAKES REALTIME IMAGES FROM WEBCAM CONVERTS THE IMAGE TO AUDIO NOISE WRITTEN INTO .WAV AUDIO FILE FORMAT AND RECONSTRUCTS THE IMAGE FROM THE AUDIO DATA.\n\n\n![image](https://github.com/mike88macedon/image_to_audio_to_image/blob/master/preview.png)\n" }, { "alpha_fraction": 0.6135792136192322, "alphanum_fraction": 0.6429170370101929, "avg_line_length": 31.243244171142578, "blob_id": "40b87172e20bda9e9a3db0c354adcdbbf4110bbc", "content_id": "2863eb065161607826548f4fbc49cbded57f0d83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 119, "num_lines": 37, "path": "/main.py", "repo_name": "mike88macedon/image_to_audio_to_image", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport scipy.io.wavfile\n\n\ndef start_video(cam_num):\n cap = cv2.VideoCapture(cam_num)\n while True:\n # grab frames\n ret, frame = cap.read()\n # get numpy array\n img_vector = np.asarray(frame)\n # write image pixels to audio\n scale_to_audio(img_vector)\n # continuous image render from camera\n cv2.imshow(\"CAMERA WINDOW\", frame)\n # wait for keyboard interrupt\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()\n\n\ndef scale_to_audio(data):\n # create vector from image matrix\n flatten_arr = data.flatten()\n # persist the vector uint8 pixel data to wav format\n scipy.io.wavfile.write('test.wav', 44100, flatten_arr)\n # read persisted data from the wav back to numpy array\n rate, data_wav = scipy.io.wavfile.read('test.wav')\n # reshape the vector to 640x480 matrix with 3 channels [[1,2,3],[1,3,4]] (reconstructing the image from audio data)\n reshaped_arr = np.reshape(data_wav, (480, 640, 3))\n # display the reconstructed image\n cv2.imshow(\"RECREATED IMAGE FROM SOUND\", reshaped_arr)\n\n\nif __name__ == \"__main__\":\n start_video(0)\n" } ]
2
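A note on the mike88macedon/image_to_audio_to_image record above: the round trip works because a WAV file is just a header plus raw PCM samples, so a `uint8` pixel vector written with `scipy.io.wavfile.write` comes back byte-identical from `scipy.io.wavfile.read`, and `reshape` restores the image dimensions. A self-contained sketch with a random frame standing in for the webcam capture (an illustration only, not part of the record):

```python
import numpy as np
import scipy.io.wavfile

# Stand-in for a 640x480 BGR webcam frame (3 channels of uint8 pixels).
frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

# Flatten to a 1-D sample vector and persist as 8-bit PCM, as main.py does.
scipy.io.wavfile.write('roundtrip.wav', 44100, frame.flatten())
rate, samples = scipy.io.wavfile.read('roundtrip.wav')

# Reshaping the recovered samples reconstructs the image exactly.
restored = samples.reshape(480, 640, 3)
assert (restored == frame).all()   # pixel-for-pixel identical
print(rate, restored.shape)
```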
ilnanny/Inkscape-addons
https://github.com/ilnanny/Inkscape-addons
0b1ba7780a1f6a6c2cbceeb9bbe6065a4140af0d
a30cdde2093fa2da68b90213e057519d0304433f
b628efe4b4f0327b6bf2af455a9b5ef8eec1bd53
refs/heads/master
2020-04-27T19:31:33.302450
2019-03-09T21:29:46
2019-03-09T21:29:46
174,622,581
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.7150537371635437, "alphanum_fraction": 0.7204301357269287, "avg_line_length": 15.818181991577148, "blob_id": "922506e341994610580c2368cc0cba82ad951092", "content_id": "b454b2811652c77ac223003e55e3ea73e65dfd4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "permissive", "max_line_length": 48, "num_lines": 11, "path": "/svg2ico-002/svg2ico-README.md", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "# svg2ico\n\nInkscape extension to save image to WinIco file.\n\n## How to install\n\nCopy all files under Inkscape extesions folder.\n\n## About\n\n* Author: [Maurizio Aru](https://github.com/ginopc)\n\n" }, { "alpha_fraction": 0.5702479481697083, "alphanum_fraction": 0.5736914873123169, "avg_line_length": 37.89285659790039, "blob_id": "df01c4dddca541ecf50aacbba8588822ebb5bbb7", "content_id": "786de210064608ef219d7a2578760c92b471fc0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4356, "license_type": "permissive", "max_line_length": 147, "num_lines": 112, "path": "/inkscape-export-layers-master/export_layers.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\nimport sys\nsys.path.append('/usr/share/inkscape/extensions')\nimport inkex\nimport os\nimport subprocess\nimport tempfile\nimport shutil\nimport copy\n\n\nclass PNGExport(inkex.Effect):\n def __init__(self):\n \"\"\"init the effetc library and get options from gui\"\"\"\n inkex.Effect.__init__(self)\n self.OptionParser.add_option(\"--path\", action=\"store\", type=\"string\", dest=\"path\", default=\"~/\", help=\"\")\n self.OptionParser.add_option('-f', '--filetype', action='store', type='string', dest='filetype', default='jpeg', help='Exported file type')\n self.OptionParser.add_option(\"--crop\", action=\"store\", type=\"inkbool\", dest=\"crop\", default=False)\n self.OptionParser.add_option(\"--dpi\", action=\"store\", type=\"float\", dest=\"dpi\", default=90.0)\n\n def effect(self):\n output_path = os.path.expanduser(self.options.path)\n curfile = self.args[-1]\n layers = self.get_layers(curfile)\n counter = 1\n\n for (layer_id, layer_label, layer_type) in layers:\n if layer_type == \"fixed\":\n continue\n\n show_layer_ids = [layer[0] for layer in layers if layer[2] == \"fixed\" or layer[0] == layer_id]\n\n if not os.path.exists(os.path.join(output_path)):\n os.makedirs(os.path.join(output_path))\n\n with tempfile.NamedTemporaryFile() as fp_svg:\n layer_dest_svg_path = fp_svg.name\n self.export_layers(layer_dest_svg_path, show_layer_ids)\n\n if self.options.filetype == \"jpeg\":\n with tempfile.NamedTemporaryFile() as fp_png:\n self.exportToPng(layer_dest_svg_path, fp_png.name)\n layer_dest_jpg_path = os.path.join(output_path, \"%s_%s.jpg\" % (str(counter).zfill(3), layer_label))\n self.convertPngToJpg(fp_png.name, layer_dest_jpg_path)\n else:\n layer_dest_png_path = os.path.join(output_path, \"%s_%s.png\" % (str(counter).zfill(3), layer_label))\n self.exportToPng(layer_dest_svg_path, layer_dest_png_path)\n\n counter += 1\n\n def export_layers(self, dest, show):\n \"\"\"\n Export selected layers of SVG to the file `dest`.\n :arg str dest: path to export SVG file.\n :arg list hide: layers to hide. each element is a string.\n :arg list show: layers to show. 
each element is a string.\n \"\"\"\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)\n\n def get_layers(self, src):\n svg_layers = self.document.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS)\n layers = []\n\n for layer in svg_layers:\n label_attrib_name = \"{%s}label\" % layer.nsmap['inkscape']\n if label_attrib_name not in layer.attrib:\n continue\n\n layer_id = layer.attrib[\"id\"]\n layer_label = layer.attrib[label_attrib_name]\n\n if layer_label.lower().startswith(\"[fixed] \"):\n layer_type = \"fixed\"\n layer_label = layer_label[8:]\n elif layer_label.lower().startswith(\"[export] \"):\n layer_type = \"export\"\n layer_label = layer_label[9:]\n else:\n continue\n\n layers.append([layer_id, layer_label, layer_type])\n\n return layers\n\n def exportToPng(self, svg_path, output_path):\n area_param = '-D' if self.options.crop else 'C'\n command = \"inkscape %s -d %s -e \\\"%s\\\" \\\"%s\\\"\" % (area_param, self.options.dpi, output_path, svg_path)\n\n p = subprocess.Popen(command.encode(\"utf-8\"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n\n def convertPngToJpg(self, png_path, output_path):\n command = \"convert \\\"%s\\\" \\\"%s\\\"\" % (png_path, output_path)\n p = subprocess.Popen(command.encode(\"utf-8\"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n\n\ndef _main():\n e = PNGExport()\n e.affect()\n exit()\n\nif __name__ == \"__main__\":\n _main()\n" }, { "alpha_fraction": 0.543739914894104, "alphanum_fraction": 0.5557615756988525, "avg_line_length": 35.35248565673828, "blob_id": "4a9fa9dc0909802ceaabf87ba712c6b92faa5b94", "content_id": "6be58c88979b602f80b8efd4afb65da78e18865f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65798, "license_type": "no_license", "max_line_length": 232, "num_lines": 1810, "path": "/InkscapeShapeReco_vY9sMm8/shapereco.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''\nCopyright (C) 2017 , Pierre-Antoine Delsart\n\nThis file is part of InkscapeShapeReco.\n\nInkscapeShapeReco is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with InkscapeShapeReco; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\n\nQuick description:\nThis extension uses all selected path, ignoring all other selected objects.\nIt tries to regularize hand drawn paths BY :\n - evaluating if the path is a full circle or ellipse\n - else finding sequences of aligned points and replacing them by a simple segment.\n - changing the segments angles to the closest remarkable angle (pi/2, pi/3, pi/6, etc...)\n - eqalizing all segments lengths which are close to each other\n - replacing 4 segments paths by a rectangle object if this makes sens (giving the correct rotation to the rectangle). \n\nRequires numpy.\n\n'''\n\nimport sys\nsys.path.append('/usr/share/inkscape/extensions')\nimport inkex\nimport simplepath\nimport gettext\n_ = gettext.gettext\n\n\n\n\nimport numpy\nnumpy.set_printoptions(precision=3)\n# *************************************************************\n# a list of geometric helper functions \ndef toArray(parsedList):\n \"\"\"Interprets a list of [(command, args),...]\n where command is a letter coding for a svg path command\n args are the argument of the command\n \"\"\"\n interpretCommand = {\n 'C' : lambda x, prevL : x[-2:], # bezier curve. Ignore the curve.\n 'L' : lambda x, prevL : x[0:2],\n 'M' : lambda x, prevL : x[0:2],\n 'Z' : lambda x, prevL : prevL[0],\n }\n\n points =[]\n for i,(c, arg) in enumerate(parsedList):\n #debug('toArray ', i, c , arg)\n newp = interpretCommand[c](arg, points)\n points.append( newp)\n a=numpy.array( points )\n\n # Some times we have points *very* close to each other\n # these do not bring any meaning full info, so we remove them\n #\n x,y, w,h = computeBox(a)\n sizeC = 0.5*(w+h)\n #deltas = numpy.zeros((len(a),2) )\n deltas = a[1:] - a[:-1] \n #deltas[-1] = a[0] - a[-1]\n deltaD = numpy.sqrt(numpy.sum( deltas**2, 1 ))\n sortedDind = numpy.argsort(deltaD)\n # expand longuest segments\n nexp = int(len(deltaD)*0.9)\n newpoints=[ None ]*len(a)\n medDelta = deltaD[sortedDind[len(deltaD)/2] ]\n for i,ind in enumerate(sortedDind):\n if deltaD[ind]/sizeC<0.005: continue\n if i>nexp:\n np = int(deltaD[ind]/medDelta)\n pL = [a[ind]]\n #print i,'=',ind,'adding ', np,' _ ', deltaD[ind], a[ind], a[ind+1]\n for j in range(np-1):\n f = float(j+1)/np\n #print '------> ', (1-f)*a[ind]+f*a[ind+1]\n pL.append( (1-f)*a[ind]+f*a[ind+1] )\n newpoints[ind] = pL\n else:\n newpoints[ind]=[a[ind]]\n if(D(a[0],a[-1])/sizeC > 0.005 ) :\n newpoints[-1]=[a[-1]]\n\n points = numpy.concatenate([p for p in newpoints if p!=None] )\n ## print ' medDelta ', medDelta, deltaD[sortedDind[-1]]\n ## print len(a) ,' ------> ', len(points)\n\n rel_norms = numpy.sqrt(numpy.sum( deltas**2, 1 )) / sizeC\n keep = numpy.concatenate([numpy.where( rel_norms >0.005 )[0],numpy.array([len(a)-1])])\n\n #return a[keep] , [ parsedList[i] for i in keep]\n #print len(a),' ',len(points)\n return points , []\n\nrotMat = numpy.matrix( [[1,-1],[1,1]] )/numpy.sqrt(2)\nunrotMat = numpy.matrix( [[1,1],[-1,1]] )/numpy.sqrt(2)\n\ndef setupKnownAngles():\n pi = numpy.pi\n #l = [ i*pi/8 for i in range(0, 9)] +[ i*pi/6 for i in [1,2,4,5,] ]\n l = [ i*pi/8 for i in range(0, 9)] +[ i*pi/6 for i in [1,2,4,5,] ] + [i*pi/12 for i in (1,5,7,11)]\n knownAngle = numpy.array( l )\n return numpy.concatenate( [-knownAngle[:0:-1], knownAngle ])\nknownAngle = setupKnownAngles()\n\n_twopi = 
2*numpy.pi\n_pi = numpy.pi\n\ndef deltaAngle(a1,a2):\n d = a1 - a2 \n return d if d > -_pi else d+_twopi\n\ndef closeAngleAbs(a1,a2):\n d = abs(a1 - a2 )\n return min( abs(d-_pi), abs( _twopi - d) , d)\n\ndef deltaAngleAbs(a1,a2):\n return abs(in_mPi_pPi(a1 - a2 ))\n\ndef in_mPi_pPi(a):\n if(a>_pi): return a-_twopi\n if(a<-_pi): return a+_twopi\n return a\nvec_in_mPi_pPi = numpy.vectorize(in_mPi_pPi)\nfrom numpy import sqrt\n\ndef D2(p1, p2):\n return ((p1-p2)**2).sum()\n\ndef D(p1, p2):\n return sqrt(D2(p1,p2) )\n\ndef norm(p):\n return sqrt( (p**2).sum() )\n\ndef computeBox(a):\n \"\"\"returns the bounding box enclosing the array of points a\n in the form (x,y, width, height) \"\"\"\n xmin , ymin = a[:,0].min(), a[:,1].min()\n xmax , ymax = a[:,0].max(), a[:,1].max()\n\n return xmin, ymin, xmax-xmin, ymax-ymin\n\ndef dirAndLength(p1,p2):\n #l = max(D(p1, p2) ,1e-4)\n l = D(p1,p2)\n uv = (p1-p2)/l\n return l,uv\n\ndef length(p1,p2):\n return sqrt( D2(p1,p2) )\n\ndef barycenter(points):\n \"\"\"\n \"\"\"\n return points.sum(axis=0)/len(points)\n\n\n# *************************************************************\n# debugging \ndef void(*l):\n pass\ndef debug_on(*l):\n sys.stderr.write(' '.join(str(i) for i in l) +'\\n') \ndebug = void\n#debug = debug_on\n\n# *************************************************************\n# Internal Objects\nclass Path(object):\n \"\"\"Private representation of a sequence of points.\n A SVG node of type 'path' is splitted in several of these Path objects.\n \"\"\"\n next = None # next Path in the sequence of path corresponding to a SVG node\n prev = None # previous Path in the sequence of path corresponding to a SVG node\n sourcepoints = None # the full list of points from which this path is a subset\n\n normalv = None # normal vector to this Path \n \n def __init__(self, points):\n \"\"\"points an array of points \"\"\"\n self.points = points\n self.init()\n\n def init(self):\n self.effectiveNPoints = len(self.points)\n if self.effectiveNPoints>1:\n self.length , self.univ = dirAndLength(self.points[0], self.points[-1])\n else:\n self.length , self.univ = 0, numpy.array([0,0])\n if self.effectiveNPoints>0:\n self.pointN=self.points[-1]\n self.point1=self.points[0]\n \n def isSegment(self):\n return False\n\n def quality(self):\n return 1000 \n\n def dump(self):\n n = len(self.points)\n if n>0:\n return 'path at '+str(self.points[0])+ ' to '+ str(self.points[-1])+' npoints=%d / %d (eff)'%(n,self.effectiveNPoints)\n else:\n return 'path Void !'\n\n def setNewLength(self, l):\n self.newLength = l\n \n def removeLastPoints(self,n):\n self.points = self.points[:-n]\n self.init()\n def removeFirstPoints(self,n):\n self.points = self.points[n:]\n self.init()\n\n def costheta(self,seg):\n return self.unitv.dot(seg.unitv)\n\n def translate(self, tr):\n \"\"\"Translate this path by tr\"\"\"\n self.points = self.points + tr\n\n def asSVGCommand(self, firstP=False):\n svgCommands = []\n com = 'M' if firstP else 'L'\n for p in self.points:\n svgCommands.append( [com, [p[0], p[1]] ] )\n com='L'\n return svgCommands\n\n\n def setIntersectWithNext(self, next=None):\n pass\n\n def mergedWithNext(self, newPath=None):\n \"\"\" Returns the combination of self and self.next.\n sourcepoints has to be set\n \"\"\"\n if newPath is None: newPath = Path( numpy.concatenate([self.points, self.next.points]) )\n\n newPath.sourcepoints = self.sourcepoints\n newPath.prev = self.prev\n if self.prev : newPath.prev.next = newPath\n newPath.next = self.next.next\n if newPath.next:\n 
newPath.next.prev = newPath\n return newPath\n\n# *************************************************************\n# \nclass Segment(Path):\n \"\"\" A segment. Defined by its line equation ax+by+c=0 and the points from orignal paths\n it is ensured that a**2+b**2 = 1\n \"\"\"\n QUALITYCUT = 0.9\n \n newAngle = None # temporary angle set during the \"parralelization\" step\n newLength = None # temporary lenght set during the \"parralelization\" step\n\n # Segment Builders\n @staticmethod\n def from2Points( p1, p2, refPoints = None):\n dirV = p2-p1\n center = 0.5*(p2+p1)\n return Segment.fromCenterAndDir(center, dirV, refPoints)\n\n @staticmethod\n def fromCenterAndDir( center, dirV, refPoints=None):\n b = dirV[0]\n a = -dirV[1]\n c = - (a*center[0]+b*center[1])\n\n if refPoints is None:\n refPoints = numpy.array([ center-0.5*dirV, center+0.5*dirV] )\n s = Segment( a, b, c, refPoints)\n return s\n\n \n def __init__(self, a,b,c, points, doinit=True):\n \"\"\"a,b,c: the line parameters.\n points : the array of 2D points represented by this Segment\n doinit : if true will compute additionnal parameters to this Segment (first/last points, unit vector,...)\n \"\"\"\n self.a = a\n self.b = b\n self.c = c\n \n self.points = points\n d = numpy.sqrt(a**2+b**2)\n if d != 1. :\n self.a /= d\n self.b /= d\n self.c /= d\n\n if doinit :\n self.init()\n\n\n def init(self):\n a,b,c = self.a, self.b, self.c\n x,y = self.points[0]\n self.point1 = numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )\n x,y = self.points[-1]\n self.pointN = numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )\n uv = self.computeDirLength()\n self.distancesToLine = self.computeDistancesToLine(self.points)\n self.normalv = numpy.array( [ a, b ])\n\n self.angle = numpy.arccos( uv[0] )*numpy.sign(uv[1] )\n\n\n def computeDirLength(self):\n \"\"\"re-compute and set unit vector and length \"\"\"\n self.length , uv = dirAndLength(self.pointN, self.point1)\n self.unitv = uv\n return uv\n\n def isSegment(self):\n return True\n\n def recomputeEndPoints(self):\n a,b,c = self.a, self.b, self.c\n x,y = self.points[0]\n self.point1 = numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )\n x,y = self.points[-1]\n self.length = numpy.sqrt( D2(self.pointN, self.point1) )\n\n def projectPoint(self,p):\n \"\"\" return the point projection of p onto this segment\"\"\"\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] ) \n \n\n def intersect(self, seg):\n \"\"\"Returns the intersection of this line with the line seg\"\"\"\n nu, nv = self.normalv, seg.normalv\n u = numpy.array([[-self.c],[-seg.c]])\n doRotation = min(nu.min(),nv.min()) <1e-4\n if doRotation:\n # rotate to avoid numerical issues\n nu = numpy.array(rotMat.dot(nu))[0]\n nv = numpy.array(rotMat.dot(nv))[0]\n m = numpy.matrix( (nu, nv) ) \n\n i = (m**-1).dot(u) \n i=numpy.array( i).swapaxes(0,1)[0]\n debug(' intersection ' ,nu, nv, self.angle, seg.angle, ' --> ',i)\n if doRotation:\n i = unrotMat.dot(i).A1\n debug(' ' ,i)\n \n \n return i\n\n def setIntersectWithNext(self, next=None):\n \"\"\"Modify self such as self.pointN is the intersection with next segment \"\"\"\n if next is None:\n next = self.next\n if next and next.isSegment():\n if abs(self.normalv.dot(next.unitv)) < 1e-3:\n return\n debug(' Intersect',self, next, ' from ', self.point1, self.pointN, ' to ' ,next.point1, next.pointN,)\n inter = self.intersect(next)\n debug(' --> ', inter, ' d=', D(self.pointN, inter) )\n next.point1 = inter\n self.pointN = inter\n 
self.computeDirLength()\n next.computeDirLength()\n \n def computeDistancesToLine(self, points):\n \"\"\"points: array of points.\n returns the array of distances to this segment\"\"\"\n return abs(self.a*points[:,0]+self.b*points[:,1]+self.c)\n\n\n def distanceTo(self,point):\n return abs(self.a*point[0]+self.b*point[1]+self.c) \n\n def inverse(self):\n \"\"\"swap all x and y values. \"\"\"\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return\n\n def dumpShort(self):\n return 'seg '+' '+str(self.point1 )+'to '+str(self.pointN)+ ' npoints=%d | angle,offset=(%.2f,%.2f )'%(len(self.points),self.angle, self.c)+' ',self.normalv\n\n def dump(self):\n v = self.variance()\n n = len(self.points)\n return 'seg '+str(self.point1 )+' , '+str(self.pointN)+ ' v/l=%.2f / %.2f = %.2f r*sqrt(n)=%.2f npoints=%d | angle,offset=(%.2f,%.2f )'%(v, self.length, v/self.length,v/self.length*numpy.sqrt(n) ,n , self.angle, self.c)\n \n def variance(self):\n d = self.distancesToLine\n return numpy.sqrt( (d**2).sum()/len(d) )\n\n def quality(self):\n n = len(self.points)\n return min(self.variance()/self.length*numpy.sqrt(n) , 1000)\n\n def formatedSegment(self, firstP=False):\n return self.asSVGCommand(firstP)\n \n def asSVGCommand(self, firstP=False):\n\n if firstP: \n segment = [ ['M',[self.point1[0],self.point1[1] ] ],\n ['L',[self.pointN[0],self.pointN[1] ] ]\n ]\n else:\n segment = [ ['L',[self.pointN[0],self.pointN[1] ] ] ]\n #debug(\"Segment, format : \", segment)\n return segment\n \n def replaceInList(self, startPos, fullList):\n code0 = fullList[startPos][0]\n segment = [ [code0,[self.point1[0],self.point1[1] ] ],\n ['L',[self.pointN[0],self.pointN[1] ] ]\n ]\n l = fullList[:startPos]+segment+fullList[startPos+len(self.points):]\n return l\n\n\n\n\n def mergedWithNext(self, doRefit=True):\n \"\"\" Returns the combination of self and self.next.\n sourcepoints has to be set\n \"\"\"\n spoints = numpy.concatenate([self.points,self.next.points])\n\n if doRefit:\n newSeg = fitSingleSegment(spoints)\n else:\n newSeg = Segment.fromCenterAndDir(barycenter(spoints), self.unitv, spoints)\n \n newSeg = Path.mergedWithNext(self, newSeg)\n return newSeg\n\n \n\n def center(self):\n return 0.5*(self.point1+self.pointN)\n\n def box(self):\n return computeBox(self.points)\n\n\n def translate(self, tr):\n \"\"\"Translate this segment by tr \"\"\"\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr\n \n def adjustToNewAngle(self): \n \"\"\"reset all parameters so that self.angle is change to self.newAngle \"\"\"\n\n self.a,self.b,self.c = parametersFromPointAngle( 0.5*(self.point1+self.pointN), self.newAngle)\n\n #print 'adjustToNewAngle ', self, self.angle, self.newAngle\n self.angle = self.newAngle\n self.normalv = numpy.array( [ self.a, self.b ])\n self.unitv = numpy.array( [ self.b, -self.a ])\n if abs(self.angle) > numpy.pi/2 :\n if self.b > 0: self.unitv *= -1\n elif self.b<0 : self.unitv *= -1\n\n self.point1 = self.projectPoint(self.point1) # reset point1 \n if self.next is None or not self.next.isSegment():\n # move the last point (no intersect with next)\n\n pN = self.projectPoint(self.pointN)\n dirN = pN - self.point1 \n lN = length(pN, self.point1)\n self.pointN = dirN/lN*self.length + self.point1\n #print 
' ... adjusting last seg angle ',p.dump() , ' normalv=', p.normalv, 'unitv ', p.unitv\n else:\n self.setIntersectWithNext()\n\n def adjustToNewDistance(self):\n self.pointN = self.newLength* self.unitv + self.point1\n self.length = self.newLength\n\n def tempLength(self):\n if self.newLength : return self.newLength\n else : return self.length\n\n def tempAngle(self):\n if self.newAngle: return self.newAngle\n return self.angle\n\n\n\n\n# *************************************************************\n# *************************************************************\n# Groups of Path\n#\nclass PathGroup(object):\n \"\"\"A group of Path representing one SVG node.\n - a list of Path\n - a list of SVG commands describe the full node (=SVG path element)\n - a reference to the inkscape node object\n \n \"\"\"\n listOfPaths = []\n refSVGPathList = []\n isClosing = False\n refNode = None\n \n def __init__(self, listOfPaths, refSVGPathList, refNode=None, isClosing=False):\n self.refNode = refNode\n self.listOfPaths = listOfPaths\n self.refSVGPathList = refSVGPathList\n self.isClosing=isClosing\n \n def addToNode(self, node):\n newList = reformatList( self.listOfPaths) \n ele = addPath( newList , node)\n debug(\"PathGroup \", newList)\n return ele\n\n def setNodeStyle(self,ele, node):\n style = node.get('style')\n ele.set('style', style)\n \n\n\n @staticmethod\n def toSegments(points, refSVGPathList, refNode, isClosing=False):\n \"\"\"\n \"\"\"\n segs = [ Segment.from2Points(p, points[i+1], points[i:i+2] ) for (i,p) in enumerate(points[:-1]) ]\n resetPrevNextSegment(segs)\n return PathGroup( segs, refSVGPathList, refNode , isClosing)\n\nclass TangentEnvelop(PathGroup):\n \"\"\"Specialization where the Path objects are all Segments and represent tangents to a curve \"\"\"\n def addToNode(self, node):\n newList = [ ]\n for s in self.listOfPaths:\n newList += s.asSVGCommand(firstP=True)\n debug(\"TangentEnvelop \", newList)\n ele = addPath( newList , node)\n return ele\n\n def setNodeStyle(self,ele, node):\n style = node.get('style')+';marker-end:url(#Arrow1Lend)'\n style\n ele.set('style', style)\n\n\nclass Circle(PathGroup):\n \"\"\"Specialization where the list of Path objects\n is to be replaced by a Circle specified by a center and a radius.\n\n If an other radius 'rmax' is given than the object represents an ellipse.\n \"\"\"\n isClosing= True\n def __init__(self, center, rad, refNode=None, rmax=None, angle=0.):\n self.listOfPaths = []\n self.refNode = refNode\n self.center = numpy.array(center)\n self.radius = rad\n if rmax:\n self.type ='ellipse'\n else:\n self.type = 'circle'\n self.rmax = rmax\n self.angle = angle\n \n def addToNode(self, refnode):\n \"\"\"Add a node in the xml structure corresponding to this rect\n refnode : xml node used as a reference, new point will be inserted a same level\"\"\"\n ele = inkex.etree.Element('{http://www.w3.org/2000/svg}'+self.type)\n\n ele.set('cx',str(self.center[0]))\n ele.set('cy',str(self.center[1]))\n if self.rmax:\n ele.set('ry',str(self.radius))\n ele.set('rx',str(self.rmax))\n ele.set('transform', 'rotate(%3.2f,%f,%f)'%(numpy.degrees(self.angle),self.center[0],self.center[1]))\n else:\n ele.set('r',str(self.radius))\n refnode.xpath('..')[0].append(ele)\n return ele\n\n \nclass Rectangle(PathGroup):\n \"\"\"Specialization where the list of Path objects\n is to be replaced by a Rectangle specified by a center and size (w,h) and a rotation angle.\n\n \"\"\"\n def __init__(self, center, size, angle, listOfPaths, refNode=None):\n self.listOfPaths = 
listOfPaths\n self.refNode = refNode\n self.center = center\n self.size = size\n self.bbox = size\n self.angle = angle\n pos = self.center - numpy.array( size )/2\n if angle != 0. :\n cosa = numpy.cos(angle)\n sina = numpy.sin(angle) \n self.rotMat = numpy.matrix( [ [ cosa, sina], [-sina, cosa] ] )\n self.rotMatstr = 'matrix(%1.7f,%1.7f,%1.7f,%1.7f,0,0)'%(cosa, sina, -sina, cosa)\n\n\n #debug(' !!!!! Rotated rectangle !!', self.size, self.bbox, ' angles ', a, self.angle ,' center',self.center)\n else :\n self.rotMatstr = None\n self.pos = pos\n debug(' !!!!! Rectangle !!', self.size, self.bbox, ' angles ', self.angle ,' center',self.center)\n\n def addToNode(self, refnode):\n \"\"\"Add a node in the xml structure corresponding to this rect\n refnode : xml node used as a reference, new point will be inserted a same level\"\"\"\n ele = inkex.etree.Element('{http://www.w3.org/2000/svg}rect')\n self.fill(ele)\n refnode.xpath('..')[0].append(ele)\n return ele\n \n def fill(self,ele):\n w, h = self.size\n ele.set('width',str(w))\n ele.set('height',str(h))\n w, h = self.bbox\n ele.set('x',str(self.pos[0]))\n ele.set('y',str(self.pos[1]))\n if self.rotMatstr:\n ele.set('transform', 'rotate(%3.2f,%f,%f)'%(numpy.degrees(self.angle),self.center[0],self.center[1]))\n #ele.set('transform', self.rotMatstr)\n\n @staticmethod\n def isRectangle( pathGroup):\n \"\"\"Check if the segments in pathGroups can form a rectangle.\n Returns a Rectangle or None\"\"\"\n #print 'xxxxxxxx isRectangle',pathGroups\n if isinstance(pathGroup, Circle ): return None\n segmentList = [p for p in pathGroup.listOfPaths if p.isSegment() ]#or p.effectiveNPoints >0]\n if len(segmentList) != 4:\n debug( 'rectangle Failed at length ', len(segmentList))\n return None\n a,b,c,d = segmentList\n\n if length(a.point1, d.pointN)> 0.2*(a.length+d.length)*0.5:\n debug('rectangle test failed closing ', length(a.point1, d.pointN), a.length, d.length)\n return None\n \n Aac , Abd = closeAngleAbs(a.angle,c.angle), closeAngleAbs(b.angle , d.angle)\n if min(Aac,Abd) > 0.07 or max(Aac, Abd) >0.27 :\n debug( 'rectangle Failed at angles', Aac, Abd)\n return None\n notsimilarL = lambda d1,d2: abs(d1-d2)>0.20*min(d1,d2)\n\n pi , twopi = numpy.pi,2*numpy.pi\n angles = numpy.array( [p.angle for p in segmentList] )\n minAngleInd = numpy.argmin( numpy.minimum( abs(angles), abs( abs(angles)-pi), abs( abs(angles)-twopi) ) )\n rotAngle = angles[minAngleInd]\n width = (segmentList[minAngleInd].length + segmentList[(minAngleInd+2)%4].length)*0.5\n height = (segmentList[(minAngleInd+1)%4].length + segmentList[(minAngleInd+3)%4].length)*0.5\n # set rectangle center as the bbox center\n x,y,w,h = computeBox( numpy.concatenate( [ p.points for p in segmentList]) )\n r = Rectangle( numpy.array( [x+w/2, y+h/2]), (width, height), rotAngle, pathGroup.listOfPaths, pathGroup.refNode)\n \n debug( ' found a rectangle !! 
', a.length, b.length, c.length, d.length )\n return r\n\n\n# *************************************************************\n# Object manipulation functions\n\ndef toRemarkableShape( group ):\n \"\"\"Test if PathGroup instance 'group' looks like a remarkable shape (ex: Rectangle).\n if so returns a new shape instance else returns group unchanged\"\"\"\n r = Rectangle.isRectangle( group )\n if r : return r\n return group\n\n\ndef resetPrevNextSegment(segs):\n for i, seg in enumerate(segs[:-1]):\n s = segs[i+1]\n seg.next = s\n s.prev = seg \n return segs\n\n\ndef fitSingleSegment(a):\n xmin,ymin,w,h = computeBox(a)\n inverse = w<h\n if inverse:\n a = numpy.roll(a,1,axis=1)\n\n seg = regLin(a)\n if inverse:\n seg.inverse()\n #a = numpy.roll(a,1,axis=0)\n return seg\n \ndef regLin(a , returnOnlyPars=False):\n \"\"\"perform a linear regression on 2dim array a. Creates a segment object in return \"\"\"\n sumX = a[:,0].sum()\n sumY = a[:,1].sum()\n sumXY = (a[:,1]*a[:,0]).sum()\n a2 = a*a\n sumX2 = a2[:,0].sum()\n sumY2 = a2[:,1].sum()\n N = a.shape[0]\n\n pa = (N*sumXY - sumX*sumY)/ ( N*sumX2 - sumX*sumX)\n pb = (sumY - pa*sumX) /N\n if returnOnlyPars:\n return pa,-1, pb\n return Segment(pa, -1, pb, a)\n\n\ndef smoothArray(a, n=2):\n count = numpy.zeros(a.shape)\n smootha = numpy.array(a)\n for i in range(n):\n count[i]=n+i+1\n count[-i-1] = n+i+1\n count[n:-n] = n+n+1\n #debug('smooth ', len(smooth[:-2]) [)\n for i in range(1,n+1):\n smootha[:-i] += a[i:]\n smootha[i:] += a[:-i]\n return smootha/count\n\ndef buildTangents( points , averaged=True, isClosing=False):\n \"\"\"build tangent vectors to the curve 'points'.\n if averaged==True, the tangents are averaged with their direct neighbours (use case : smoother tangents)\"\"\"\n tangents = numpy.zeros( (len(points),2) )\n i=1\n tangents[:-i] += points[i:] - points[:-i] # i <- p_i+1 - p_i \n tangents[i:] += points[i:] - points[:-i] # i <- p_i - p_i-1\n if isClosing:\n tangents[0] += tangents[0] - tangents[-1]\n tangents[-1] += tangents[0] - tangents[-1]\n tangents *= 0.5\n if not isClosing:\n tangents[0] *=2\n tangents[-1] *=2\n\n\n ## debug('points ', points)\n ## debug('buildTangents --> ', tangents )\n \n if averaged:\n # average over neighbours\n avTan = numpy.array(tangents)\n avTan[:-1] += tangents[1:]\n avTan[1:] += tangents[:-1]\n if isClosing:\n tangents[0]+=tangents[-1]\n tangents[1]+=tangents[0]\n avTan *= 1./3\n if not isClosing:\n avTan[0] *=1.5\n avTan[-1] *=1.5\n\n return avTan\n\n\ndef clusterAngles(array, dAng=0.15):\n \"\"\"Cluster together consecutive angles with similar values (within 'dAng').\n array : flat array of angles\n returns [ ..., (indi_0, indi_1),...] 
where each tuple are indices of cluster i\n \"\"\"\n N = len(array)\n\n closebyAng = numpy.zeros( (N,4) , dtype=int)\n\n for i,a in enumerate(array):\n cb = closebyAng[i]\n cb[0] =i\n cb[2]=i\n cb[3]=i\n c=i-1\n # find number of angles within dAng in nearby positions\n while c>-1: # indices below i\n d=closeAngleAbs(a,array[c])\n if d>dAng:\n break\n cb[1]+=1 \n cb[2]=c\n c-=1\n c=i+1\n while c<N-1:# indices above i\n d=closeAngleAbs(a,array[c])\n if d>dAng:\n break\n cb[1]+=1 \n cb[3]=c\n c+=1\n closebyAng= closebyAng[numpy.argsort(closebyAng[:,1]) ]\n\n clusteredPos = numpy.zeros(N, dtype=int)\n clusters = []\n for cb in reversed(closebyAng):\n if clusteredPos[cb[0]]==1:\n continue\n # try to build a cluster\n minI = cb[2]\n while clusteredPos[minI]==1:\n minI+=1\n maxI = cb[3]\n while clusteredPos[maxI]==1:\n maxI-=1\n for i in range(minI, maxI+1):\n clusteredPos[i] = 1\n clusters.append( (minI, maxI) )\n\n return clusters\n \n \n \n\ndef adjustAllAngles(paths):\n for p in paths:\n if p.isSegment() and p.newAngle is not None:\n p.adjustToNewAngle()\n # next translate to fit end points\n tr = numpy.zeros(2)\n for p in paths[1:]:\n if p.isSegment() and p.prev.isSegment():\n tr = p.prev.pointN - p.point1\n debug(' translating ',p,' prev is', p.prev, ' ',tr, )\n p.translate(tr)\n\ndef adjustAllDistances(paths):\n for p in paths:\n if p.isSegment() and p.newLength is not None: \n p.adjustToNewDistance()\n # next translate to fit end points\n tr = numpy.zeros(2)\n for p in paths[1:]:\n if p.isSegment() and p.prev.isSegment():\n tr = p.prev.pointN - p.point1\n p.translate(tr)\n\n\ndef mergeConsecutiveParralels(segments):\n ignoreNext=False\n newList=[]\n for s in segments:\n if ignoreNext:\n ignoreNext=False\n continue\n if not s.isSegment():\n newList.append(s)\n continue\n if s.next is None:\n newList.append(s)\n continue\n if not s.next.isSegment():\n newList.append(s)\n continue\n d = closeAngleAbs(s.angle ,s.next.angle)\n if d<0.001:\n debug(\"merging \", s.angle ,s.next.angle )\n snew = s.mergedWithNext(doRefit=False)\n ignoreNext=True\n newList.append(snew)\n else:\n newList.append(s)\n if len(segments)>len(newList):\n debug(\"merged parallel \", segments , '-->', newList)\n return newList\n\n\n\n##**************************************\n## \nclass SegmentExtender:\n \"\"\"Extend Segments part of a list of Path by aggregating points from neighbouring Path objects.\n\n There are 2 concrete subclasses for extending forward and backward (due to technical reasons).\n \"\"\"\n\n def __init__(self, relD, fitQ):\n self.relD = relD\n self.fitQ = fitQ\n \n def nextPaths(self,seg):\n pL = []\n p = self.getNext(seg) # prev or next\n while p :\n if p.isSegment(): break\n if p.mergedObj is None: break\n pL.append(p)\n p = self.getNext(p)\n if pL==[]:\n return []\n return pL\n\n def extend(self,seg):\n nextPathL = self.nextPaths(seg)\n debug('extend ',self.extDir, seg , nextPathL, seg.length , len(nextPathL))\n if nextPathL==[]: return seg\n pointsToTest = numpy.concatenate( [p.points for p in nextPathL] )\n mergeD = seg.length*self.relD\n #print seg.point1 , seg.pointN, pointsToTest\n pointsToFit, addedPoints = self.pointsToFit(seg,pointsToTest , mergeD)\n if len(pointsToFit)==0:\n return seg\n newseg = fitSingleSegment(pointsToFit)\n if newseg.quality()>self.fitQ: # fit failed\n return seg\n debug( ' EXTENDING ! 
', len(seg.points), len(addedPoints) )\n        self.removePath(seg, newseg, nextPathL, addedPoints )\n        newseg.points = pointsToFit\n        seg.mergedObj = newseg\n        newseg.sourcepoints = seg.sourcepoints\n\n        return newseg\n\n    @staticmethod\n    def extendSegments(segmentList, relD=0.03, qual=0.5):\n        \"\"\"Perform segment extension on the Path objects in segmentList.\n        Returns the updated list of Path objects.\"\"\"\n        fwdExt = FwdExtender(relD, qual)\n        bwdExt = BwdExtender(relD, qual)\n        # tag all objects with an attribute pointing to the extended object\n        for seg in segmentList:\n            seg.mergedObj = seg # by default the extended object is self\n        # extend each segment, starting with the longest\n        for seg in sorted(segmentList, key = lambda s : s.length, reverse=True):\n            if seg.isSegment():\n                newseg = fwdExt.extend(seg)\n                seg.mergedObj = bwdExt.extend(newseg)\n        # the extension procedure has marked as None the mergedObj of paths\n        # which have been swallowed by an extension : filter them out\n        updatedSegs = [seg.mergedObj for seg in segmentList if seg.mergedObj]\n        return updatedSegs\n\n\nclass FwdExtender(SegmentExtender):\n    extDir = 'Fwd'\n    def getNext(self, seg):\n        return seg.next\n    def pointsToFit(self, seg, pointsToTest, mergeD):\n        distancesToLine = abs(seg.a*pointsToTest[:,0]+seg.b*pointsToTest[:,1]+seg.c)\n        # scan backward for the last point still within mergeD of the fitted line ;\n        # every point up to (and including) it is aggregated to the segment\n        goodInd = -1\n        for i,d in reversed(list(enumerate(distancesToLine))):\n            if d<mergeD: goodInd=i; break\n        addedPoints = pointsToTest[:goodInd+1]\n        #debug( ' ++ pointsToFit ' , mergeD, len(pointsToTest), addedPoints , seg.points )\n        return numpy.concatenate([seg.points, addedPoints]), addedPoints\n    def removePath(self, seg, newseg, nextPathL, addedPoints):\n        npoints = len(addedPoints)\n        acc = 0\n        newseg.prev = seg.prev\n        for p in nextPathL:\n            if (acc+len(p.points))<=npoints:\n                p.mergedObj = None\n                acc += len(p.points)\n            else:\n                newseg.next = p\n                p.points = p.points[:(npoints-acc-len(p.points))]\n                break\n\nclass BwdExtender(SegmentExtender):\n    extDir = 'Bwd'\n    def getNext(self, seg):\n        return seg.prev\n    def pointsToFit(self, seg, pointsToTest, mergeD):\n        distancesToLine = abs(seg.a*pointsToTest[:,0]+seg.b*pointsToTest[:,1]+seg.c)\n        # scan forward for the first point within mergeD of the fitted line ;\n        # every point from it onward is aggregated to the segment\n        goodInd = len(pointsToTest)\n        for i,d in enumerate(distancesToLine):\n            if d<mergeD: goodInd=i; break\n        addedPoints = pointsToTest[goodInd:]\n        #debug( ' ++ pointsToFit ' , mergeD, len(pointsToTest), addedPoints , seg.points )\n        return numpy.concatenate([addedPoints, seg.points]), addedPoints\n    def removePath(self, seg, newseg, nextPathL, addedPoints):\n        npoints = len(addedPoints)\n        acc = 0\n        newseg.next = seg.next\n        for p in reversed(nextPathL):\n            if (acc+len(p.points))<=npoints:\n                p.mergedObj = None\n                acc += len(p.points)\n            else:\n                newseg.prev = p\n                p.points = p.points[(npoints-acc-len(p.points)):]\n                break
\n\n\n\n# merge consecutive segments with close angle\n\ndef mergeConsecutiveCloseAngles( segList , mangle=0.25 , q=0.5):\n\n    def toMerge(seg):\n        l = [seg]\n        setattr(seg, 'merged', True)\n        if seg.next and seg.next.isSegment() :\n            debug('merging segs ', seg.angle, ' with : ', seg.next.point1, seg.next.pointN, ' ang=', seg.next.angle)\n            if deltaAngleAbs( seg.angle, seg.next.angle) < mangle:\n                l += toMerge(seg.next)\n        return l\n\n    updatedSegs = []\n    for i,seg in enumerate(segList[:-1]):\n        if not seg.isSegment() :\n            updatedSegs.append(seg)\n            continue\n        if hasattr(seg, 'merged'):\n            continue\n        debug(i, ' inspect merge : ', seg.point1, '-', seg.pointN, seg.angle, ' q=', seg.quality())\n        mList = toMerge(seg)\n        debug('  --> tomerge ', len(mList))\n        if len(mList)<2:\n            delattr(seg, 'merged')\n            updatedSegs.append(seg)\n            continue\n        points = numpy.concatenate( [p.points for p in mList] )\n        newseg = fitSingleSegment(points)\n        if newseg.quality()>q:\n            delattr(seg, 'merged')\n            updatedSegs.append(seg)\n            continue\n        for p in mList:\n            setattr(p, 'merged', True)\n        newseg.sourcepoints = seg.sourcepoints\n        debug('  --> post merge qual = ', newseg.quality(), seg.pointN, ' --> ', newseg.pointN, newseg.angle)\n        newseg.prev = mList[0].prev\n        newseg.next = mList[-1].next\n        updatedSegs.append(newseg)\n    if not hasattr(segList[-1], 'merged') : updatedSegs.append( segList[-1])\n    return updatedSegs\n\n\n\n\ndef parametersFromPointAngle(point, angle):\n    unitv = numpy.array([ numpy.cos(angle), numpy.sin(angle) ])\n    ortangle = angle+numpy.pi/2\n    normal = numpy.array([ numpy.cos(ortangle), numpy.sin(ortangle) ])\n    genOffset = -normal.dot(point)\n    a, b = normal\n    return a, b, genOffset\n\n\n\ndef addPath(newList, refnode):\n    \"\"\"Add a node in the xml structure corresponding to the content of newList\n    newList : list of Segment or Path\n    refnode : xml node used as a reference, the new node will be inserted at the same level\"\"\"\n    ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')\n    ele.set('d', simplepath.formatPath(newList))\n    refnode.xpath('..')[0].append(ele)\n    return ele\n\ndef reformatList( listOfPaths):\n    \"\"\"Returns a SVG paths list (same format as simplepath.parsePath) from a list of Path objects\n    - Segments in paths are added to the new list\n    - simple Path are retrieved from the original refSVGPathList and put in the new list (thus preserving original bezier curves)\n    \"\"\"\n    newList = []\n    first = True\n    for seg in listOfPaths:\n        newList += seg.asSVGCommand(first)\n        first = False\n    return newList\n\n\ndef clusterValues( values, relS=0.1 , refScaleAbs='range' ):\n    \"\"\"form clusters of similar quantities from input 'values'.\n    Clustered values are not necessarily contiguous in the input array.
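    The clustering is agglomerative : each value starts as its own cluster, clusters that are adjacent once sorted are repeatedly combined, and a combination is kept only while the resulting cluster size stays below the threshold.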
\n Clusters size (that is max-min) is < relS*cluster_average \"\"\"\n if len(values)==0:\n return []\n if len(values.shape)==1:\n sortedV = numpy.stack([ values , numpy.arange(len(values))] ,1)\n else:\n # Assume value.shape = (N,2) and index are ok\n sortedV = values \n sortedV = sortedV[ numpy.argsort(sortedV[:,0]) ]\n\n sortedVV = sortedV[:,0]\n refScale = sortedVV[-1]-sortedVV[0]\n #sortedVV += 2*min(sortedVV)) # shift to avoid numerical issues around 0\n\n #print sortedVV\n class Cluster:\n def __init__(self, delta, sum, indices):\n self.delta = delta\n self.sum = sum\n self.N=len(indices)\n self.indices = indices\n def size(self):\n return self.delta/refScale\n \n def combine(self, c):\n #print ' combine ', self.indices[0], c.indices[-1], ' -> ', sortedVV[c.indices[-1]] - sortedVV[self.indices[0]]\n newC = Cluster(sortedVV[c.indices[-1]] - sortedVV[self.indices[0]],\n self.sum+c.sum,\n self.indices+c.indices)\n return newC\n\n def originIndices(self):\n return tuple(int(sortedV[i][1]) for i in self.indices)\n\n def size_local(self):\n return self.delta / sum( sortedVV[i] for i in self.indices) *len(self.indices)\n def size_range(self):\n return self.delta/refScale\n def size_abs(self):\n return self.delta\n\n if refScaleAbs=='range':\n Cluster.size = size_range\n elif refScaleAbs=='local':\n Cluster.size = size_local\n elif refScaleAbs=='abs':\n Cluster.size = size_abs\n \n class ClusterPair:\n next=None\n prev=None\n def __init__(self, c1, c2 ):\n self.c1=c1\n self.c2=c2\n self.refresh()\n def refresh(self):\n self.potentialC =self.c1.combine(self.c2)\n self.size = self.potentialC.size()\n def setC1(self, c1):\n self.c1=c1\n self.refresh()\n def setC2(self, c2):\n self.c2=c2\n self.refresh()\n \n #ave = 0.5*(sortedVV[1:,0]+sortedV[:-1,0])\n #deltaR = (sortedV[1:,0]-sortedV[:-1,0])/ave\n\n cList = [Cluster(0,v,(i,)) for (i,v) in enumerate(sortedVV) ]\n cpList = [ ClusterPair( c, cList[i+1] ) for (i,c) in enumerate(cList[:-1]) ]\n resetPrevNextSegment( cpList )\n\n #print cpList\n def reduceCL( cList ):\n if len(cList)<=1:\n return cList\n cp = min(cList, key=lambda cp:cp.size) \n #print '==', cp.size , relS, cp.c1.indices , cp.c2.indices, cp.potentialC.indices\n\n while cp.size < relS:\n if cp.next:\n cp.next.setC1(cp.potentialC)\n cp.next.prev = cp.prev\n if cp.prev:\n cp.prev.setC2(cp.potentialC)\n cp.prev.next = cp.next\n cList.remove(cp)\n if len(cList)<2:\n break\n cp = min(cList, key=lambda cp:cp.size) \n #print ' -----> ', [ (cp.c1.indices , cp.c2.indices) for cp in cList]\n return cList\n\n cpList = reduceCL(cpList)\n if len(cpList)==1:\n cp = cpList[0]\n if cp.potentialC.size()<relS:\n return [ cp.potentialC.originIndices() ]\n #print cpList\n if cpList==[]:\n return []\n finalCL = [ cp.c1.originIndices() for cp in cpList ]+[ cpList[-1].c2.originIndices() ]\n return finalCL\n\n\n\n\n# *************************************************************\n# The inkscape extension\n# *************************************************************\nclass ShapeReco(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n self.OptionParser.add_option(\"--title\")\n self.OptionParser.add_option(\"-k\", \"--keepOrigin\", dest=\"keepOrigin\", default=False,\n action=\"store\", type=\"inkbool\", \n help=\"Do not replace path\")\n\n self.OptionParser.add_option( \"--MainTabs\")\n #self.OptionParser.add_option( \"--Basic\")\n\n self.OptionParser.add_option( \"--segExtensionDtoSeg\", dest=\"segExtensionDtoSeg\", default=0.03,\n action=\"store\", type=\"float\", \n help=\"max distance 
from point to segment\")\n self.OptionParser.add_option( \"--segExtensionQual\", dest=\"segExtensionQual\", default=0.5,\n action=\"store\", type=\"float\", \n help=\"segment extension fit quality\")\n self.OptionParser.add_option( \"--segExtensionEnable\", dest=\"segExtensionEnable\", default=True,\n action=\"store\", type=\"inkbool\", \n help=\"Enable segment extension\")\n\n\n self.OptionParser.add_option( \"--segAngleMergeEnable\", dest=\"segAngleMergeEnable\", default=True,\n action=\"store\", type=\"inkbool\", \n help=\"Enable merging of almost aligned consecutive segments\")\n\n self.OptionParser.add_option( \"--segRemoveSmallEdge\", dest=\"segRemoveSmallEdge\", default=True,\n action=\"store\", type=\"inkbool\", \n help=\"Enable removing very small segments\")\n\n self.OptionParser.add_option( \"--doUniformization\", dest=\"doUniformization\", default=True,\n action=\"store\", type=\"inkbool\", \n help=\"Preform angles and distances uniformization\")\n\n for opt in [\"doParrallelize\", \"doKnownAngle\", \"doEqualizeDist\" , \"doEqualizeRadius\" , \"doCenterCircOnSeg\"]:\n self.OptionParser.add_option( \"--\"+opt, dest=opt, default=True,\n action=\"store\", type=\"inkbool\", \n help=opt)\n\n \n def effect(self):\n\n rej='{http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd}type'\n paths = []\n for id, node in self.selected.iteritems():\n if node.tag == '{http://www.w3.org/2000/svg}path' and rej not in node.keys(): \n paths.append(node)\n\n shapes = self.extractShapes(paths)\n # add new shapes in SVG document\n self.addShapesToDoc( shapes )\n\n\n def removeSmallEdge(self, paths, wTot,hTot):\n \"\"\"Remove small Path objects which stand between 2 Segments (or at the ends of the sequence).\n Small means the bbox of the path is less then 5% of the mean of the 2 segments.\"\"\"\n if len(paths)<2:\n return\n def getdiag(points):\n xmin,ymin,w,h = computeBox(points)\n return sqrt(w**2+h**2), w, h\n removeSeg=[]\n def remove(p):\n removeSeg.append(p)\n if p.next : p.next.prev = p.prev\n if p.prev: p.prev.next = p.next\n p.effectiveNPoints =0\n debug(' --> remove !', p, p.length , len(p.points))\n for p in paths:\n if len(p.points)==0 :\n remove(p)\n continue\n # select only path between 2 segments\n next, prev = p.next, p.prev\n if next is None: next = prev\n if prev is None: prev = next\n if not next.isSegment() or not prev.isSegment() : continue\n #diag = getdiag(p.points)\n diag ,w, h = getdiag(p.points)\n\n debug(p, p.pointN, ' removing edge diag = ', diag, p.length, ' l=',next.length+prev.length , 'totDim ', (wTot,hTot))\n debug( ' ---> ',prev, next)\n\n\n # remove last or first very small in anycase\n doRemove = prev==next and (diag < 0.05*(wTot+hTot)*0.5 )\n if not doRemove:\n # check if this small\n isLarge = diag > (next.length+prev.length)*0.1 # check size relative to neighbour\n isLarge = isLarge or w > 0.2*wTot or h > 0.2*hTot # check size w.r.t total size\n \n # is it the small side of a long rectangle ?\n dd = prev.distanceTo(next.pointN)\n rect = abs(prev.unitv.dot(next.unitv))>0.98 and diag > dd*0.5\n doRemove = not( isLarge or rect )\n\n if doRemove:\n remove(p)\n\n if next != prev:\n prev.setIntersectWithNext(next)\n debug('removed Segments ', removeSeg)\n for p in removeSeg:\n paths.remove(p)\n\n\n \n \n \n def prepareParrallelize(self,segs):\n \"\"\"Group Segment by their angles (segments are grouped together if their deltAangle is within 0.15 rad)\n The 'newAngle' member of segments in a group are then set to the mean angle of the group (where angles are all\n considered 
in [-pi, pi])\n\n        segs : list of segments\n        \"\"\"\n\n        angles = numpy.array([s.angle for s in segs ])\n        angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n        clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n        for cl in clList:\n            meanA = angles[list(cl)].mean()\n            for i in cl:\n                seg = segs[i]\n                seg.newAngle = meanA if seg.angle>=0. else meanA-_pi\n\n\n    def prepareDistanceEqualization(self, segs, relDelta=0.1):\n        \"\"\"Input segments are grouped according to their length :\n        - for each length L, find all other lengths within L*relDelta of L.\n        - find the largest such subgroup.\n        - repeat the procedure on the remaining lengths until none is left.\n        Each length in a group is set to the mean length of the group.\n\n        segs : a list of segments\n        relDelta : float, relative tolerance used to group lengths together.\n        \"\"\"\n\n        lengths = numpy.array( [x.tempLength() for x in segs] )\n        clusters = clusterValues(lengths, relDelta)\n\n        if len(clusters)==1:\n            # deal with special case with low num of segments\n            # --> don't let a single segment alone\n            if len(clusters[0])+1==len(segs):\n                clusters[0] = range(len(segs)) # all\n\n        allDist = []\n        for cl in clusters:\n            dmean = sum( lengths[i] for i in cl ) / len(cl)\n            allDist.append(dmean)\n            for i in cl:\n                segs[i].setNewLength(dmean)\n                debug( i, ' set newLength ', dmean, segs[i].length, segs[i].dumpShort())\n\n        return allDist\n\n\n    def prepareRadiusEqualization(self, circles, otherDists, relSize=0.2):\n        \"\"\"Group circle radii and other distances into clusters.\n        Then set each circle radius according to the mean of the cluster it belongs to.\"\"\"\n        ncircles = len(circles)\n        lengths = numpy.array( [c.radius for c in circles]+otherDists )\n        indices = numpy.array( range(ncircles+len(otherDists) ) )\n        clusters = clusterValues(numpy.stack([ lengths, indices ], 1 ), relSize, refScaleAbs='local' )\n\n        debug('prepareRadiusEqualization radius ', repr(lengths))\n        debug('prepareRadiusEqualization clusters ', clusters)\n        allDist = []\n        for cl in clusters:\n            dmean = sum( lengths[i] for i in cl ) / len(cl)\n            allDist.append(dmean)\n            if len(cl)==1:\n                continue\n            for i in cl:\n                if i < ncircles:\n                    circles[i].radius = dmean\n        debug(' post radius ', [c.radius for c in circles] )\n        return allDist\n\n\n    def centerCircOnSeg(self, circles, segments, relSize=0.18):\n        \"\"\"Move the center of each circle onto a segment if it is close enough.\"\"\"\n        for circ in circles:\n            circ.moved = False\n        for seg in segments:\n            for circ in circles:\n                d = seg.distanceTo(circ.center)\n                if d < circ.radius*relSize and not circ.moved :\n                    circ.center = seg.projectPoint(circ.center)\n                    circ.moved = True\n\n\n    def adjustToKnownAngle(self, paths):\n        \"\"\"Check current angle against remarkable angles. If close enough, change it.\n        paths : a list of segments\"\"\"\n        for seg in paths:\n            a = seg.tempAngle()\n            i = (abs(vec_in_mPi_pPi(knownAngle - a) )).argmin()\n            seg.newAngle = knownAngle[i]\n            debug( ' Known angle ', seg, seg.tempAngle(), ' -> ', knownAngle[i])\n\n\n    def checkForCircle(self, points, tangents):\n        \"\"\"Determine if the points and their tangents represent a circle\n\n        The difficulty is to recognize ellipses while ignoring small fluctuations of the path\n        and avoiding false positives due to badly drawn rectangles or non-convex closed curves.\n\n        Method : we consider the angle of the tangent as a function of the length along the path.\n        For circles these are : angle = c1 x length + c0. 
(c1 ~1)\n\n We calculate dadl = d(angle)/d(length) and compare to c1.\n We use 3 criteria :\n * num(dadl > 6) : number of sharp angles\n * length(dadl<0.3)/totalLength : lengths of straight lines within the path.\n * totalLength/(2pi x radius) : fraction of lenght vs a plain circle\n\n Still failing to recognize elongated ellipses...\n \n \"\"\"\n if len(points)<10:\n return False, 0\n\n if all(points[0]==points[-1]): # last exactly equals the first.\n # Ignore last point for this check\n points = points[:-1]\n tangents = tangents[:-1]\n #print 'Removed last ', points\n xmin,ymin, w, h = computeBox( points)\n diag2=(w*w+h*h)\n \n diag = sqrt(diag2)*0.5\n norms = numpy.sqrt(numpy.sum( tangents**2, 1 ))\n\n angles = numpy.arctan2( tangents[:,1], tangents[:,0] ) \n #debug( 'angle = ', repr(angles))\n N = len(angles)\n \n deltas = points[1:] - points[:-1] \n deltasD = numpy.concatenate([ [D(points[0],points[-1])/diag], numpy.sqrt(numpy.sum( deltas**2, 1 )) / diag] )\n\n # locate and avoid the point when swicthing\n # from -pi to +pi. The point is around the minimum\n imin = numpy.argmin(angles)\n debug(' imin ',imin)\n angles = numpy.roll(angles, -imin)\n deltasD = numpy.roll(deltasD, -imin)\n n=int(N*0.1)\n # avoid fluctuations by removing points around the min\n angles=angles[n:-n]\n deltasD=deltasD[n:-n]\n deltasD = deltasD.cumsum()\n N = len(angles)\n\n # smooth angles to avoid artificial bumps\n angles = smoothArray(angles, n=max(int(N*0.03),2) )\n\n deltaA = angles[1:] - angles[:-1]\n deltasDD = (deltasD[1:] -deltasD[:-1])\n deltasDD[numpy.where(deltasDD==0.)] = 1e-5*deltasD[0]\n dAdD = abs(deltaA/deltasDD)\n belowT, count = True,0\n for v in dAdD:\n if v>6 and belowT:\n count+=1\n belowT = False\n belowT= (v<6)\n\n self.temp = (deltasD,angles, tangents, dAdD )\n fracStraight = numpy.sum(deltasDD[numpy.where(dAdD<0.3)])/(deltasD[-1]-deltasD[0])\n curveLength = deltasD[-1]/3.14\n #print \"SSS \",count , fracStraight\n if curveLength> 1.4 or fracStraight>0.4 or count > 6:\n isCircle =False\n else: \n isCircle= (count < 4 and fracStraight<=0.3) or \\\n (fracStraight<=0.1 and count<5)\n\n if not isCircle:\n return False, 0\n \n # It's a circle !\n radius = points - numpy.array([xmin+w*0.5,ymin+h*0.5])\n radius_n = numpy.sqrt(numpy.sum( radius**2, 1 )) # normalize\n\n mini = numpy.argmin(radius_n) \n rmin = radius_n[mini]\n maxi = numpy.argmax(radius_n) \n rmax = radius_n[maxi]\n # void points around maxi and mini to make sure the 2nd max is found\n # on the \"other\" side\n n = len(radius_n)\n radius_n[maxi]=0 \n radius_n[mini]=0 \n for i in range(1,n/8+1):\n radius_n[(maxi+i)%n]=0\n radius_n[(maxi-i)%n]=0\n radius_n[(mini+i)%n]=0\n radius_n[(mini-i)%n]=0\n radius_n_2 = [ r for r in radius_n if r>0]\n rmax_2 = max(radius_n_2)\n rmin_2 = min(radius_n_2) # not good !!\n anglemax = numpy.arccos( radius[maxi][0]/rmax)*numpy.sign(radius[maxi][1])\n return True, (xmin+w*0.5,ymin+h*0.5, 0.5*(rmin+rmin_2), 0.5*(rmax+rmax_2), anglemax)\n\n\n\n\n def tangentEnvelop(self, svgCommandsList, refNode):\n a, svgCommandsList = toArray(svgCommandsList)\n tangents = buildTangents(a)\n\n newSegs = [ Segment.fromCenterAndDir( p, t ) for (p,t) in zip(a,tangents) ]\n debug(\"build envelop \", newSegs[0].point1 , newSegs[0].pointN)\n clustersInd = clusterAngles( [s.angle for s in newSegs] )\n debug(\"build envelop cluster: \", clustersInd)\n\n return TangentEnvelop( newSegs, svgCommandsList, refNode)\n\n\n def segsFromTangents(self,svgCommandsList, refNode):\n \"\"\"Finds segments part in a list of points 
represented by svgCommandsList.\n\n The method is to build the (averaged) tangent vectors to the curve.\n Aligned points will have tangent with similar angle, so we cluster consecutive angles together\n to define segments.\n Then we extend segments to connected points not already part of other segments.\n Then we merge consecutive segments with similar angles.\n \n \"\"\"\n sourcepoints, svgCommandsList = toArray(svgCommandsList)\n\n d = D(sourcepoints[0],sourcepoints[-1])\n x,y,wTot,hTot = computeBox(sourcepoints)\n aR = min(wTot/hTot, hTot/wTot)\n maxDim = max(wTot, hTot)\n isClosing = aR*0.2 > d/maxDim\n debug('isClosing ', isClosing, maxDim, d)\n if d==0:\n # then we remove the last point to avoid null distance\n # in other calculations\n sourcepoints = sourcepoints[:-1]\n svgCommandsList = svgCommandsList[:-1]\n\n if len(sourcepoints) < 4:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=isClosing)\n \n tangents = buildTangents(sourcepoints, isClosing=isClosing)\n\n # global quantities :\n\n # Check if circle -----------------------\n if isClosing:\n if len(sourcepoints)<9:\n return PathGroup.toSegments(sourcepoints, svgCommandsList, refNode, isClosing=True)\n isCircle, res = self.checkForCircle( sourcepoints, tangents) \n debug(\"Is Circle = \", isCircle )\n if isCircle:\n x,y,rmin, rmax,angle = res\n debug(\"Circle -> \", rmin, rmax,angle )\n if rmin/rmax>0.7:\n circ = Circle((x,y),0.5*(rmin+rmax), refNode )\n else:\n circ = Circle((x,y),rmin, refNode, rmax=rmax, angle=angle)\n circ.points = sourcepoints\n return circ\n # -----------------------\n \n\n\n # cluster points by angle of their tangents -------------\n tgSegs = [ Segment.fromCenterAndDir( p, t ) for (p,t) in zip(sourcepoints,tangents) ]\n clustersInd = clusterAngles( [s.angle for s in tgSegs] )\n clustersInd.sort( )\n debug(\"build envelop cluster: \", clustersInd)\n\n # build Segments from clusters \n newSegs = []\n for imin, imax in clustersInd:\n if imin+1< imax: # consider clusters with more than 3 points\n seg = fitSingleSegment(sourcepoints[imin:imax+1])\n elif imin+1==imax: # 2 point path : we build a segment\n seg = Segment.from2Points(sourcepoints[imin], sourcepoints[imax] , sourcepoints[imin:imax+1])\n else:\n seg = Path( sourcepoints[imin:imax+1] )\n seg.sourcepoints = sourcepoints\n newSegs.append( seg )\n resetPrevNextSegment( newSegs )\n debug(newSegs)\n # -----------------------\n\n\n # -----------------------\n # Merge consecutive Path objects \n updatedSegs=[]\n def toMerge(p):\n l=[p]\n setattr(p, 'merged', True)\n if p.next and not p.next.isSegment():\n l += toMerge(p.next)\n return l\n \n for i,seg in enumerate(newSegs[:-1]):\n if seg.isSegment():\n updatedSegs.append( seg) \n continue\n if hasattr(seg,'merged'): continue\n mergeList = toMerge(seg)\n debug('merging ', mergeList)\n p = Path(numpy.concatenate([ p.points for p in mergeList]) )\n debug('merged == ', p.points)\n updatedSegs.append(p)\n\n if not hasattr(newSegs[-1],'merged'): updatedSegs.append( newSegs[-1]) \n debug(\"merged path\", updatedSegs)\n newSegs = resetPrevNextSegment( updatedSegs )\n\n\n # Extend segments -----------------------------------\n if self.options.segExtensionEnable:\n newSegs = SegmentExtender.extendSegments( newSegs, self.options.segExtensionDtoSeg, self.options.segExtensionQual )\n debug(\"extended segs\", newSegs)\n newSegs = resetPrevNextSegment( newSegs )\n debug(\"extended segs\", newSegs)\n\n # ----------------------------------------\n \n\n # 
---------------------------------------\n # merge consecutive segments with close angle\n updatedSegs=[]\n\n if self.options.segAngleMergeEnable:\n newSegs = mergeConsecutiveCloseAngles( newSegs , mangle=0.2 )\n newSegs=resetPrevNextSegment(newSegs)\n debug(' __ 2nd angle merge')\n newSegs = mergeConsecutiveCloseAngles( newSegs, mangle=0.35 ) # 2nd pass\n newSegs=resetPrevNextSegment(newSegs)\n debug('after merge ', len(newSegs), newSegs)\n # Check if first and last also have close angles.\n if isClosing and len(newSegs)>2 :\n first ,last = newSegs[0], newSegs[-1]\n if first.isSegment() and last.isSegment():\n if closeAngleAbs( first.angle, last.angle) < 0.1:\n # force merge\n points= numpy.concatenate( [ last.points, first.points] )\n newseg = fitSingleSegment(points)\n newseg.next = first.next\n last.prev.next = None\n newSegs[0]=newseg\n newSegs.pop()\n\n # -----------------------------------------------------\n # remove negligible Path/Segments between 2 large Segments\n if self.options.segRemoveSmallEdge:\n self.removeSmallEdge(newSegs , wTot, hTot)\n newSegs=resetPrevNextSegment(newSegs)\n\n debug('after remove small ', len(newSegs),newSegs)\n # -----------------------------------------------------\n\n # -----------------------------------------------------\n # Extend segments to their intersections\n for p in newSegs:\n if p.isSegment() and p.next:\n p.setIntersectWithNext()\n # -----------------------------------------------------\n \n return PathGroup(newSegs, svgCommandsList, refNode, isClosing)\n\n\n\n def extractShapesFromID( self, *nids, **options ):\n \"\"\"for debugging purpose \"\"\"\n eList = []\n for nid in nids:\n el = self.getElementById(nid)\n if el is None:\n print \"Cant find \", nid\n return\n eList.append(el)\n class tmp:\n pass\n\n self.options = self.OptionParser.parse_args()[0]\n self.options._update_careful(options)\n nodes=self.extractShapes(eList)\n self.shape = nodes[0]\n\n\n def buildShape(self, node):\n def rotationAngle(tr):\n if tr and tr.startswith('rotate'):\n # retrieve the angle :\n return float(tr[7:-1].split(','))\n else:\n return 0.\n \n if node.tag.endswith('path'):\n parsedSVGCommands = node.get('d')\n g = self.segsFromTangents(simplepath.parsePath(parsedSVGCommands), node)\n #g = self.tangentEnvelop(simplepath.parsePath(parsedSVGCommands), node)\n elif node.tag.endswith('rect'):\n tr = node.get('transform',None)\n if tr and tr.startswith('matrix'):\n return None # can't deal with scaling\n recSize = numpy.array([node.get('width'),node.get('height')])\n recCenter = numpy.array([node.get('x'),node.get('y')]) + recSize/2\n angle=rotationAngle(tr)\n g = Rectangle( recSize, recCenter, 0 , [], node)\n elif node.tag.endswith('circle'):\n g = Circle(node.get('cx'),node.get('cy'), node.get('r'), [], node )\n elif node.tag.endswith('ellipse'):\n if tr and tr.startswith('matrix'):\n return None # can't deal with scaling\n angle=rotationAngle(tr)\n rx = node.get('rx')\n ry = node.get('ry')\n g = Circle(node.get('cx'),node.get('cy'), ry, rmax=rx , angle=angle, refNode=node )\n\n return g\n \n def extractShapes( self, nodes ):\n \"\"\"The main function.\n nodes : a list of nodes\"\"\"\n analyzedNodes = []\n\n # convert nodes to list of segments (PathGroup) or Circle\n for n in nodes :\n g = self.buildShape(n)\n if g :\n analyzedNodes.append( g )\n\n # uniformize shapes\n if self.options.doUniformization:\n analyzedNodes = self.uniformizeShapes(analyzedNodes)\n\n return analyzedNodes\n\n\n def uniformizeShapes(self, pathGroupList):\n allSegs = [ p for g in 
pathGroupList for p in g.listOfPaths if p.isSegment() ]\n\n if self.options.doParrallelize:\n self.prepareParrallelize(allSegs)\n if self.options.doKnownAngle:\n self.adjustToKnownAngle(allSegs)\n\n adjustAng = self.options.doKnownAngle or self.options.doParrallelize\n for group in pathGroupList:\n # first pass : independently per path\n if adjustAng:\n adjustAllAngles(group.listOfPaths)\n group.listOfPaths[:] = mergeConsecutiveParralels(group.listOfPaths)\n if self.options.doEqualizeDist:\n self.prepareDistanceEqualization([p for p in group.listOfPaths if p.isSegment()], 0.12)\n adjustAllDistances(group.listOfPaths) \n ## # then 2nd global pass, with tighter criteria\n if self.options.doEqualizeDist:\n allShapeDist=self.prepareDistanceEqualization(allSegs, 0.05)\n for group in pathGroupList:\n adjustAllDistances(group.listOfPaths)\n else:\n allShapeDist = []\n \n for g in pathGroupList: \n if g.isClosing and not isinstance(g,Circle):\n debug('Closing intersec ', g.listOfPaths[0].point1, g.listOfPaths[0].pointN )\n g.listOfPaths[-1].setIntersectWithNext(g.listOfPaths[0]) \n\n\n circles=[ group for group in pathGroupList if isinstance(group, Circle)]\n if self.options.doEqualizeRadius:\n self.prepareRadiusEqualization(circles, allShapeDist)\n if self.options.doCenterCircOnSeg:\n self.centerCircOnSeg(circles, allSegs)\n\n pathGroupList = [toRemarkableShape(g) for g in pathGroupList]\n return pathGroupList\n \n \n def addShapesToDoc(self, pathGroupList):\n for group in pathGroupList: \n debug(\"final \", group.listOfPaths, group.refNode )\n # change to Rectangle if possible :\n #finalshape = toRemarkableShape( group )\n ele = group.addToNode( group.refNode)\n group.setNodeStyle(ele, group.refNode)\n if not self.options.keepOrigin:\n group.refNode.xpath('..')[0].remove(group.refNode)\n\n\n \nif __name__ == '__main__':\n e = ShapeReco()\n e.affect()\n" }, { "alpha_fraction": 0.49728021025657654, "alphanum_fraction": 0.5034788250923157, "avg_line_length": 34.60360336303711, "blob_id": "4ea9f9038a622d2ae7a45e5e8e6706d6396bda52", "content_id": "7adc62f94d1a9c15bea035caffbf9d539110085f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7905, "license_type": "no_license", "max_line_length": 112, "num_lines": 222, "path": "/streaks/streaks.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# These two lines are only needed if you don't put the script directly into\n# the installation directory\nimport sys\nsys.path.append('/usr/share/inkscape/extensions')\n\nimport random\nrr = random.randint(1,10)\n\n# We will use the inkex module with the predefined Effect base class.\nimport inkex\n# The simplestyle module provides functions for style parsing.\nfrom simplestyle import *\n\nclass StreaksEffect(inkex.Effect):\n \"\"\"\n Fill a box with vertical streaks.\n \"\"\"\n def __init__(self):\n # Call the base class constructor.\n inkex.Effect.__init__(self)\n\n self.OptionParser.add_option('-b', '--blur', action = 'store',\n type = 'int', dest = 'blur', default = 2,\n help = 'No help')\n\n self.OptionParser.add_option('-l', '--linno', action = 'store',\n type = 'int', dest = 'linno', default = 50,\n help = 'No help')\n\n self.OptionParser.add_option('-x', '--xrand', action = 'store',\n type = 'inkbool', dest = 'xrand', default = True,\n help = 'No help')\n\n self.OptionParser.add_option('-p', '--pagep', action = 'store',\n type = 'inkbool', dest = 'pagep', default = True,\n help = 'No help')\n\n 
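# note : when 'pagep' is False, effect() falls back to the cusx/cusy values below as the width/height of the area to fill.\n        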
self.OptionParser.add_option('-X', '--cusx', action = 'store',\n type = 'int', dest = 'cusx', default = 500,\n help = 'No help')\n\n self.OptionParser.add_option('-Y', '--cusy', action = 'store',\n type = 'int', dest = 'cusy', default = 500,\n help = 'No help')\n\n self.OptionParser.add_option('-s', '--segLen', action = 'store',\n type = 'int', dest = 'segLen', default = 8,\n help = 'No help')\n\n self.OptionParser.add_option('-y', '--yrand', action = 'store',\n type = 'inkbool', dest = 'yrand', default = True,\n help = 'No help')\n\n self.OptionParser.add_option('-u', '--dashp', action = 'store',\n type = 'inkbool', dest = 'dashp', default = True,\n help = 'No help')\n\n self.OptionParser.add_option('-v', '--blankp', action = 'store',\n type = 'inkbool', dest = 'blankp', default = True,\n help = 'No help')\n\n self.OptionParser.add_option('-w', '--dotp', action = 'store',\n type = 'inkbool', dest = 'dotp', default = True,\n help = 'No help')\n\n self.OptionParser.add_option('-d', '--dits', action = 'store',\n type = 'int', dest = 'dits', default = 100,\n help = 'No help')\n\n self.OptionParser.add_option('', '--strokeColour', action = 'store',\n type = 'str', dest = 'strokeColour', default = 255,\n help = 'No help')\n\n self.OptionParser.add_option('', '--strokeWidth', action = 'store',\n type = 'int', dest = 'strokeWidth', default = 2,\n help = 'No help')\n\n self.OptionParser.add_option(\"\", \"--Nmain\", action=\"store\",\n type=\"string\", dest=\"active_tab\", default='title',\n help=\"Active tab.\")\n\n def effect(self):\n \"\"\"\n Effect behaviour.\n Overrides base class' method.\n \"\"\"\n blur = int( self.options.blur )\n linno = int( self.options.linno )\n xrand = bool( self.options.xrand )\n pagep = bool( self.options.pagep )\n cusx = int( self.options.cusx )\n cusy = int( self.options.cusy )\n segLen = int( self.options.segLen )\n yrand = bool( self.options.yrand )\n dashp = bool( self.options.dashp )\n blankp = bool( self.options.blankp )\n dotp = bool( self.options.dotp )\n dits = int( self.options.dits )\n strokeColour = int( self.options.strokeColour )\n strokeWidth = int( self.options.strokeWidth )\n\n # Get access to main SVG document element and get its dimensions.\n svg = self.document.getroot()\n\n if pagep :\n try :\n width = inkex.unittouu(svg.get('width'))\n height = inkex.unittouu(svg.attrib['height'])\n except AttributeError :\n width = self.unittouu(svg.get('width'))\n height = self.unittouu(svg.attrib['height'])\n# inkex.errormsg(\"Page size %d %d\" % (width, height))\n else :\n width = cusx\n height = cusy\n\n \n # Find defs node.\n for child in svg :\n if -1 != child.tag.find(\"defs\") :\n break\n else:\n inkex.errormsg(\"No defs child found\")\n defs = child\n\n if blur :\n filter = inkex.etree.SubElement(defs, \"filter\")\n filter.set(inkex.addNS('collect', 'inkscape'), 'always' )\n filname = self.uniqueId( 'filter' )\n filter.set('id' , filname)\n\n finfo = inkex.etree.SubElement(filter, 'feGaussianBlur' )\n finfo.set(inkex.addNS('collect', 'inkscape'), 'always' )\n finfo.set( 'stdDeviation', str( blur ) )\n\n \"\"\" Debug\n for i in xrange( len(svg)) :\n k = svg[i].attrib \n for ky in k :\n inkex.errormsg(ky)\n\n # Clean any old layers\n flag = False\n for i in xrange( len(svg)) :\n dic = svg[i].attrib\n for key in dic:\n if -1 != key.find(\"label\") :\n if 'Streak Layer' == dic[key] :\n del svg[i]\n flag = True\n if flag :\n inkex.errormsg(\"Found old Streak layer\")\n else:\n inkex.errormsg(\"Clean\")\n\"\"\" \n # Create a new layer.\n layer = 
inkex.etree.SubElement(svg, 'g')\n layer.set(inkex.addNS('label', 'inkscape'), 'Streak Layer' )\n layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')\n\n # Create path element\n path = inkex.etree.Element(inkex.addNS('path','svg'))\n\n alpha = strokeColour & 255\n color = ( strokeColour >> 8 ) & int( 'ffffff', 16 )\n style = {\n 'stroke' : '#%06X' % color,\n 'stroke-width' : str(strokeWidth),\n }\n# inkex.errormsg(\"Colour %s\" % strokeColour)\n\n if blur : style['filter'] = 'url(#' + filname +')'\n\n\n path.set('style', formatStyle(style))\n \n pathstring = ''\n seglim = int(height / segLen)\n ditlen = int(height / dits)\n\n\n xco = 0\n while xco < width :\n y = 0\n flag = random.randint(0, 2)\n while y < height :\n if yrand :\n yinc = random.randint(1, seglim)\n else :\n yinc = seglim\n if flag == 1 and dashp: #Draw dash\n pathstring += ' M '+str(xco)+','+str(y)+' L '+str(xco)+','+str(min( y + yinc, height))\n y += yinc + ditlen\n elif flag == 2 and dotp: #Draw dots\n ylim = min( y + yinc, height )\n while y < ylim :\n pathstring += ' M '+str(xco)+','+str(y)+' L '+str(xco)+','+str(min( y + ditlen, height))\n y += 2*ditlen\n elif flag == 0 and blankp :\n y += yinc #Adding blank space \n elif not (dashp or dotp or blankp) : #Squiggle if user turns them off\n sdit = str(2*ditlen)+' '\n pathstring += ' M '+str(xco)+','+str(y)+' q '+ 2*sdit + '0 ' +sdit\n for i in xrange(int(height/(2*ditlen))) :\n pathstring += 't 0 '+sdit\n y = height\n flag = (flag + 1)%3\n if xrand :\n xco += random.randint(0, int(2 * width / linno) )\n else :\n xco += width / linno\n path.set('d', pathstring )\n\n # Connect elements together.\n layer.append(path)\n\n# Create effect instance and apply it.\neffect = StreaksEffect()\neffect.affect()\nsys.exit( 0 )\n\n" }, { "alpha_fraction": 0.5382165312767029, "alphanum_fraction": 0.5558209419250488, "avg_line_length": 38.52447509765625, "blob_id": "23cb67afa141bd88510d8ad9e72bd08f8cd0ed9a", "content_id": "47257aaf874b6d0a8d9098a048c8e0d4602d1193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11304, "license_type": "no_license", "max_line_length": 152, "num_lines": 286, "path": "/randompathalongpath/randompathalongpath.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n'''\nCopyright (C) 2018 Philipp G. Haselwarter, [email protected]\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\[email protected]\n\nBased on 'pathalongpath.py' by Jean-Francois Barraud.\n\nQuick description:\nThis script randomly places and deforms an object (the pattern)\nalong other paths (skeletons).\nThe first selected object is the pattern.\nThe last selected ones are the skeletons.\n\nImagine a straight horizontal line l in the middle of the bounding box of the pattern.\nConsider the normal bundle of l: the collection of all the vertical lines meeting l.\nConsider this as the initial state of the plane; in particular, think of the pattern\nas painted on these lines.\n\nNow move and bend l to make it fit a skeleton, and see what happens to the normals:\nthey move and rotate, deforming the pattern.\n'''\n# standard library\nimport copy\nimport random\n# local library\nimport inkex\nimport cubicsuperpath\nimport bezmisc\nimport pathmodifier\nimport simpletransform\n\n\ndef flipxy(path):\n for pathcomp in path:\n for ctl in pathcomp:\n for pt in ctl:\n tmp=pt[0]\n pt[0]=-pt[1]\n pt[1]=-tmp\n\ndef offset(pathcomp,dx,dy):\n for ctl in pathcomp:\n for pt in ctl:\n pt[0]+=dx\n pt[1]+=dy\n\ndef stretch(pathcomp,xscale,yscale,org):\n for ctl in pathcomp:\n for pt in ctl:\n pt[0]=org[0]+(pt[0]-org[0])*xscale\n pt[1]=org[1]+(pt[1]-org[1])*yscale\n\ndef linearize(p,tolerance=0.001):\n '''\n This function receives a component of a 'cubicsuperpath' and returns two things:\n The path subdivided in many straight segments, and an array containing the length of each segment.\n\n We could work with bezier path as well, but bezier arc lengths are (re)computed for each point\n in the deformed object. 
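(Subdivision is adaptive : a Bezier piece is split at t=0.5 until its control-polygon length and its chord length agree to within the given tolerance, and the average of the two is then used as the segment length.)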
For complex paths, this might take a while.\n '''\n zero=0.000001\n i=0\n d=0\n lengths=[]\n while i<len(p)-1:\n box = bezmisc.pointdistance(p[i ][1],p[i ][2])\n box += bezmisc.pointdistance(p[i ][2],p[i+1][0])\n box += bezmisc.pointdistance(p[i+1][0],p[i+1][1])\n chord = bezmisc.pointdistance(p[i][1], p[i+1][1])\n if (box - chord) > tolerance:\n b1, b2 = bezmisc.beziersplitatt([p[i][1],p[i][2],p[i+1][0],p[i+1][1]], 0.5)\n p[i ][2][0],p[i ][2][1]=b1[1]\n p[i+1][0][0],p[i+1][0][1]=b2[2]\n p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])\n else:\n d=(box+chord)/2\n lengths.append(d)\n i+=1\n new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]\n new.append(p[-1][1])\n lengths=[l for l in lengths if l>zero]\n return(new,lengths)\n\nclass PathAlongPath(pathmodifier.Diffeo):\n def __init__(self):\n random.seed(None)\n pathmodifier.Diffeo.__init__(self)\n self.OptionParser.add_option(\"--title\")\n self.OptionParser.add_option(\"-n\", \"--noffset\",\n action=\"store\", type=\"float\", \n dest=\"noffset\", default=0.0, help=\"normal offset\")\n self.OptionParser.add_option(\"--random_toffset_min\",\n action=\"store\", type=\"float\",\n dest=\"random_toffset_min\", default=0.0, help=\"minimal random tangential offset\")\n self.OptionParser.add_option(\"--random_toffset_max\",\n action=\"store\", type=\"float\",\n dest=\"random_toffset_max\", default=100.0, help=\"maximum random tangential offset\")\n self.OptionParser.add_option(\"--number_repetitions\",\n action=\"store\", type=\"int\",\n dest=\"number_repetitions\", default=1, help=\"number of random repetitions\")\n self.OptionParser.add_option(\"-k\", \"--kind\",\n action=\"store\", type=\"string\", \n dest=\"kind\", default=True,\n help=\"choose between wave or snake effect\")\n self.OptionParser.add_option(\"-c\", \"--copymode\",\n action=\"store\", type=\"string\", \n dest=\"copymode\", default=True,\n help=\"repeat the path to fit deformer's length\")\n self.OptionParser.add_option(\"-v\", \"--vertical\",\n action=\"store\", type=\"inkbool\", \n dest=\"vertical\", default=False,\n help=\"reference path is vertical\")\n self.OptionParser.add_option(\"-d\", \"--duplicate\",\n action=\"store\", type=\"inkbool\", \n dest=\"duplicate\", default=False,\n help=\"duplicate pattern before deformation\")\n self.OptionParser.add_option(\"--tab\",\n action=\"store\", type=\"string\",\n dest=\"tab\",\n help=\"The selected UI-tab when OK was pressed\")\n\n def prepareSelectionList(self):\n\n idList=self.options.ids\n idList=pathmodifier.zSort(self.document.getroot(),idList)\n id = idList[-1]\n self.patterns={id:self.selected[id]}\n\n## ##first selected->pattern, all but first selected-> skeletons\n## id = self.options.ids[-1]\n## self.patterns={id:self.selected[id]}\n\n if self.options.duplicate:\n self.patterns=self.duplicateNodes(self.patterns)\n self.expandGroupsUnlinkClones(self.patterns, True, True)\n self.objectsToPaths(self.patterns)\n del self.selected[id]\n\n self.skeletons=self.selected\n self.expandGroupsUnlinkClones(self.skeletons, True, False)\n self.objectsToPaths(self.skeletons)\n\n def lengthtotime(self,l):\n '''\n Recieves an arc length l, and returns the index of the segment in self.skelcomp\n containing the corresponding point, to gether with the position of the point on this segment.\n\n If the deformer is closed, do computations modulo the toal length.\n '''\n if self.skelcompIsClosed:\n l=l % sum(self.lengths)\n if l<=0:\n return 0,l/self.lengths[0]\n i=0\n while (i<len(self.lengths)) and 
(self.lengths[i]<=l):\n l-=self.lengths[i]\n i+=1\n t=l/self.lengths[min(i,len(self.lengths)-1)]\n return i, t\n\n def applyDiffeo(self,bpt,vects=()):\n '''\n The kernel of this stuff:\n bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.\n '''\n s=bpt[0]-self.skelcomp[0][0]\n i,t=self.lengthtotime(s)\n if i==len(self.skelcomp)-1:\n x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)\n dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]\n dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]\n else:\n x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)\n dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]\n dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]\n\n vx=0\n vy=bpt[1]-self.skelcomp[0][1]\n if self.options.wave:\n bpt[0]=x+vx*dx\n bpt[1]=y+vy+vx*dy\n else:\n bpt[0]=x+vx*dx-vy*dy\n bpt[1]=y+vx*dy+vy*dx\n\n for v in vects:\n vx=v[0]-self.skelcomp[0][0]-s\n vy=v[1]-self.skelcomp[0][1]\n if self.options.wave:\n v[0]=x+vx*dx\n v[1]=y+vy+vx*dy\n else:\n v[0]=x+vx*dx-vy*dy\n v[1]=y+vx*dy+vy*dx\n\n def effect(self):\n if len(self.options.ids)<2:\n inkex.errormsg(_(\"This extension requires two selected paths.\"))\n return\n self.prepareSelectionList()\n self.options.wave = (self.options.kind==\"Ribbon\")\n if self.options.copymode==\"Single\":\n self.options.repeat =False\n self.options.stretch=False\n elif self.options.copymode==\"Single, stretched\":\n self.options.repeat =False\n self.options.stretch=True\n\n bbox = simpletransform.computeBBox(self.patterns.values())\n\n if self.options.vertical:\n #flipxy(bbox)...\n bbox=(-bbox[3],-bbox[2],-bbox[1],-bbox[0])\n\n width=bbox[1]-bbox[0]\n\n for id, node in self.patterns.iteritems():\n if node.tag == inkex.addNS('path','svg') or node.tag == 'path':\n d = node.get('d')\n p0 = cubicsuperpath.parsePath(d)\n if self.options.vertical:\n flipxy(p0)\n\n newp = []\n\n for i in range(0, self.options.number_repetitions):\n\n for skelnode in self.skeletons.itervalues():\n\n self.curSkeleton = cubicsuperpath.parsePath(skelnode.get('d'))\n\n if self.options.vertical:\n flipxy(self.curSkeleton)\n\n for comp in self.curSkeleton:\n p = copy.deepcopy(p0)\n self.skelcomp, self.lengths = linearize(comp)\n #!!!!>----> TODO: really test if path is closed! 
end point==start point is not enough!\n self.skelcompIsClosed = (self.skelcomp[0] == self.skelcomp[-1])\n\n length = sum(self.lengths)\n tangential_offset = (random.uniform(self.options.random_toffset_min, self.options.random_toffset_max) / 100.0) * length\n\n xoffset = self.skelcomp[0][0] - bbox[0] + tangential_offset\n yoffset = self.skelcomp[0][1] - (bbox[2]+bbox[3]) / 2 - self.options.noffset\n\n for sub in p:\n offset(sub,xoffset,yoffset)\n\n if self.options.stretch:\n if not width:\n exit(_(\"The 'stretch' option requires that the pattern must have non-zero width :\\nPlease edit the pattern width.\"))\n for sub in p:\n stretch(sub,length/width,1,self.skelcomp[0])\n\n for sub in p:\n for ctlpt in sub:\n self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))\n\n if self.options.vertical:\n flipxy(p)\n newp += p\n\n node.set('d', cubicsuperpath.formatPath(newp))\n return\n\nif __name__ == '__main__':\n e = PathAlongPath()\n e.affect()\n\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99\n" }, { "alpha_fraction": 0.48049673438072205, "alphanum_fraction": 0.5228466987609863, "avg_line_length": 45.96183395385742, "blob_id": "4a3eb78340a3a8fbd6ec7106b9c455b2e831ad5b", "content_id": "dacaaf6f2bbe36a891c337b462027b3f5ebb617d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6281, "license_type": "no_license", "max_line_length": 236, "num_lines": 131, "path": "/ids_to_text_Inkscape/ids_to_text.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\n# We will use the inkex module with the predefined Effect base class.\r\nimport inkex\r\nimport simpletransform\r\nimport cubicsuperpath\r\nimport bezmisc\r\n# The simplestyle module provides functions for style parsing.\r\nimport simplestyle\r\n\r\n# third party\r\ntry:\r\n import numpy\r\nexcept:\r\n inkex.errormsg(_(\"Failed to import the numpy modules. These modules are required by this extension. Please install them and try again. 
On a Debian-like system this can be done with the command, sudo apt-get install python-numpy.\"))\r\n exit()\r\n\r\nmat_area = numpy.matrix([[ 0, 2, 1, -3],[ -2, 0, 1, 1],[ -1, -1, 0, 2],[ 3, -1, -2, 0]])\r\nmat_cofm_0 = numpy.matrix([[ 0, 35, 10,-45],[-35, 0, 12, 23],[-10,-12, 0, 22],[ 45,-23,-22, 0]])\r\nmat_cofm_1 = numpy.matrix([[ 0, 15, 3,-18],[-15, 0, 9, 6],[ -3, -9, 0, 12],[ 18, -6,-12, 0]])\r\nmat_cofm_2 = numpy.matrix([[ 0, 12, 6,-18],[-12, 0, 9, 3],[ -6, -9, 0, 15],[ 18, -3,-15, 0]])\r\nmat_cofm_3 = numpy.matrix([[ 0, 22, 23,-45],[-22, 0, 12, 10],[-23,-12, 0, 35],[ 45,-10,-35, 0]])\r\n\r\ndef csparea(csp):\r\n area = 0.0\r\n for sp in csp:\r\n if len(sp) < 2: continue\r\n for i in range(len(sp)): # calculate polygon area\r\n area += 0.5*sp[i-1][1][0]*(sp[i][1][1] - sp[i-2][1][1])\r\n for i in range(1, len(sp)): # add contribution from cubic Bezier\r\n vec_x = numpy.matrix([sp[i-1][1][0], sp[i-1][2][0], sp[i][0][0], sp[i][1][0]])\r\n vec_y = numpy.matrix([sp[i-1][1][1], sp[i-1][2][1], sp[i][0][1], sp[i][1][1]])\r\n area += 0.15*(vec_x*mat_area*vec_y.T)[0,0]\r\n return -area # require positive area for CCW\r\ndef cspcofm(csp):\r\n area = csparea(csp)\r\n xc = 0.0\r\n yc = 0.0\r\n if abs(area) < 1.e-8:\r\n inkex.errormsg(_(\"Area is zero, cannot calculate Center of Mass\"))\r\n return 0, 0\r\n for sp in csp:\r\n for i in range(len(sp)): # calculate polygon moment\r\n xc += sp[i-1][1][1]*(sp[i-2][1][0] - sp[i][1][0])*(sp[i-2][1][0] + sp[i-1][1][0] + sp[i][1][0])/6\r\n yc += sp[i-1][1][0]*(sp[i][1][1] - sp[i-2][1][1])*(sp[i-2][1][1] + sp[i-1][1][1] + sp[i][1][1])/6\r\n for i in range(1, len(sp)): # add contribution from cubic Bezier\r\n vec_x = numpy.matrix([sp[i-1][1][0], sp[i-1][2][0], sp[i][0][0], sp[i][1][0]])\r\n vec_y = numpy.matrix([sp[i-1][1][1], sp[i-1][2][1], sp[i][0][1], sp[i][1][1]])\r\n vec_t = numpy.matrix([(vec_x*mat_cofm_0*vec_y.T)[0,0], (vec_x*mat_cofm_1*vec_y.T)[0,0], (vec_x*mat_cofm_2*vec_y.T)[0,0], (vec_x*mat_cofm_3*vec_y.T)[0,0]])\r\n xc += (vec_x*vec_t.T)[0,0]/280\r\n yc += (vec_y*vec_t.T)[0,0]/280\r\n return -xc/area, -yc/area\r\n\r\nclass IdsToText(inkex.Effect):\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Constructor.\r\n \"\"\"\r\n \r\n # Call the base class constructor.\r\n inkex.Effect.__init__(self)\r\n\r\n self.OptionParser.add_option('-s', '--fontsize', action = 'store',\r\n type = 'int', dest = 'fontsize', default = '10',\r\n help = 'Font Size')\r\n self.OptionParser.add_option('-c', '--color', action = 'store',\r\n type = 'string', dest = 'color', default = '#000000',\r\n help = 'Color')\r\n self.OptionParser.add_option('-f', '--font', action = 'store',\r\n type = 'string', dest = 'font', default = 'Roboto',\r\n help = 'Font Family')\r\n self.OptionParser.add_option('-w', '--fontweight', action = 'store',\r\n type = 'string', dest = 'fontweight', default = 'bold',\r\n help = 'Font Weight')\r\n self.OptionParser.add_option('-r', '--replaced', action = 'store',\r\n type = 'string', dest = 'replaced', default = '',\r\n help = 'Text to replace')\r\n self.OptionParser.add_option('-q', '--replacewith', action = 'store',\r\n type = 'string', dest = 'replacewith', default = '',\r\n help = 'Replace with this text')\r\n self.OptionParser.add_option('-a', '--angle', action = 'store',\r\n type = 'float', dest = 'angle', default = 0,\r\n help = 'Rotation angle')\r\n self.OptionParser.add_option('-p', '--capitals', action = 'store',\r\n type = 'inkbool', dest = 'capitals', default = False,\r\n help = 'Capitalize')\r\n self.OptionParser.add_option(\"\", 
\"--active-tab\",\r\n action=\"store\", type=\"string\",\r\n dest=\"active_tab\", default='title',\r\n help=\"Active tab.\")\r\n\r\n def effect(self):\r\n \"\"\"\r\n Effect behaviour.\r\n \"\"\"\r\n # Get script's \"--what\" option value.\r\n fontsize = str(self.options.fontsize) + 'px'\r\n color = self.options.color\r\n font = self.options.font\r\n fontweight = self.options.fontweight\r\n replaced = self.options.replaced\r\n replacewith = self.options.replacewith\r\n angle = -int(self.options.angle)\r\n capitals = self.options.capitals\r\n\r\n if len(self.selected) == 0:\r\n inkex.errormsg(_(\"Please select some paths first.\"))\r\n exit()\r\n\r\n for id, node in self.selected.iteritems():\r\n mat = simpletransform.composeParents(node, [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\r\n p = cubicsuperpath.parsePath(node.get('d'))\r\n simpletransform.applyTransformToPath(mat, p)\r\n self.group = inkex.etree.SubElement(node.getparent(),inkex.addNS('text','svg'))\r\n tx, ty = cspcofm(p)\r\n new = inkex.etree.SubElement(self.group, inkex.addNS('tspan','svg'), {inkex.addNS('role','sodipodi'): 'line'})\r\n s = {'text-align': 'center', 'vertical-align': 'bottom',\r\n 'text-anchor': 'middle', 'font-size': fontsize,\r\n 'font-weight': fontweight, 'font-style': 'normal', 'font-family': font, 'fill': color}\r\n new.set('style', simplestyle.formatStyle(s))\r\n if capitals:\r\n id = id.upper()\r\n new.text = id.replace(replaced, replacewith)\r\n self.group.set('x', str(tx))\r\n self.group.set('y', str(ty))\r\n self.group.set('transform', 'rotate(%s, %s, %s)' % (angle, tx, ty))\r\n\r\n# Create effect instance and apply it.\r\neffect = IdsToText()\r\neffect.affect()" }, { "alpha_fraction": 0.46192893385887146, "alphanum_fraction": 0.4706308841705322, "avg_line_length": 35.51655578613281, "blob_id": "d9b23287318d4323cee45347a113dcad1b4e5818", "content_id": "184ef1b0a89f580b3fccb270190f94433e079e0c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5516, "license_type": "permissive", "max_line_length": 107, "num_lines": 151, "path": "/Apollonian-master/apollon_inx.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!$HOME/anaconda/bin/python\n# -*- coding: utf-8 -*-\n'''\nRipped from template.py \n- makes an apollonian gasket\n'''\n\nimport inkex # Required\nimport simplestyle # will be needed here for styles support\nimport ag\n\n__version__ = '0.0'\n\ninkex.localize()\n\n\n### Your helper functions go here\n\n\ndef cplxs2pts(zs):\n tt = []\n for z in zs:\n tt.extend([z.real,z.imag])\n return tt\n\n\ndef draw_SVG_circle(parent, r, cx, cy, name):\n \" structre an SVG circle entity under parent \"\n circ_attribs = { 'cx': str(cx), 'cy': str(cy), \n 'r': str(r),\n inkex.addNS('label','inkscape'): name}\n \n \n circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )\n \n \nclass Myextension(inkex.Effect): # choose a better name\n \n def __init__(self):\n \" define how the options are mapped from the inx file \"\n inkex.Effect.__init__(self) # initialize the super class\n \n \n # list of parameters defined in the .inx file\n self.OptionParser.add_option(\"-d\", \"--depth\",\n action=\"store\", type=\"int\",\n dest=\"depth\", default=3,\n help=\"command line help\")\n \n self.OptionParser.add_option(\"\", \"--c1\",\n action=\"store\", type=\"float\",\n dest=\"c1\", default=2.0,\n help=\"command line help\")\n \n self.OptionParser.add_option(\"\", \"--c2\",\n action=\"store\", type=\"float\",\n 
dest=\"c2\", default=3.0,\n help=\"command line help\")\n \n self.OptionParser.add_option(\"\", \"--c3\",\n action=\"store\", type=\"float\",\n dest=\"c3\", default=3.0,\n help=\"command line help\")\n \n \n self.OptionParser.add_option(\"-x\", \"--shrink\",\n action=\"store\", type=\"inkbool\", \n dest=\"shrink\", default=True,\n help=\"command line help\")\n \n # here so we can have tabs - but we do not use it directly - else error\n self.OptionParser.add_option(\"\", \"--active-tab\",\n action=\"store\", type=\"string\",\n dest=\"active_tab\", default='title', # use a legitmate default\n help=\"Active tab.\")\n \n \n \n def calc_unit_factor(self):\n \"\"\" return the scale factor for all dimension conversions.\n - The document units are always irrelevant as\n everything in inkscape is expected to be in 90dpi pixel units\n \"\"\"\n # namedView = self.document.getroot().find(inkex.addNS('namedview', 'sodipodi'))\n # doc_units = self.getUnittouu(str(1.0) + namedView.get(inkex.addNS('document-units', 'inkscape')))\n unit_factor = self.getUnittouu(str(1.0) + self.options.units)\n return unit_factor\n\n\n### -------------------------------------------------------------------\n### Main function and is called when the extension is run.\n\n \n def effect(self):\n\n #set up path styles\n path_stroke = '#DD0000' # take color from tab3\n path_fill = 'none' # no fill - just a line\n path_stroke_width = 1. # can also be in form '0.6mm'\n page_id = self.options.active_tab # sometimes wrong the very first time\n \n style_curve = { 'stroke': path_stroke,\n 'fill': 'none',\n 'stroke-width': path_stroke_width }\n\n \n # This finds center of current view in inkscape\n t = 'translate(%s,%s)' % (self.view_center[0], self.view_center[1] )\n \n # add a group to the document's current layer\n #all the circles inherit style from this group\n g_attribs = { inkex.addNS('label','inkscape'): 'zengon' + \"_%d\"%(self.options.depth),\n inkex.addNS('transform-center-x','inkscape'): str(0),\n inkex.addNS('transform-center-y','inkscape'): str(0),\n 'transform': t,\n 'style' : simplestyle.formatStyle(style_curve),\n 'info':'N: '}\n topgroup = inkex.etree.SubElement(self.current_layer, 'g', g_attribs )\n \n \n circles = ag.main(c1=self.options.c1,\n c2=self.options.c2,\n c3=self.options.c3,\n depth=self.options.depth)\n \n #shrink the circles so they don't touch\n #useful for laser cutting\n \n if self.options.shrink:\n circles = circles[1:]\n for cc in circles:\n cc.r = abs(cc.r)\n if cc.r >.5:\n cc.r -= .1\n else:\n cc.r *= .9\n \n scale_factor = 200\n for c in circles: \n cx, cy, r = c.m.real, c.m.imag, abs(c.r)\n \n #rescale and add circle to document\n cx, cy, r = scale_factor*cx , scale_factor*cy, scale_factor*r\n draw_SVG_circle(topgroup,r,cx,cy,'apo') \n \n \n\n\nif __name__ == '__main__':\n e = Myextension()\n e.affect()\n\n\n" }, { "alpha_fraction": 0.5984848737716675, "alphanum_fraction": 0.6054292917251587, "avg_line_length": 35.83720779418945, "blob_id": "a1e80360b5b1754b5f519d6ba9d8acb1fe08b016", "content_id": "3cb46b1de4b51cf4d037817a63478f076b1d5708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3168, "license_type": "no_license", "max_line_length": 78, "num_lines": 86, "path": "/graphviz_ext/graphviz.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/python\n# -*- coding: cp1252 -*-\n\"\"\"\ngraphviz.py\nThis extension renders DOT files to SVG paths.\n\nCopyright (C) 2017 Thomas Flynn <[email protected]>\n\nBased on EqTexSvg by Julien Vitard <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA\n\n\"\"\"\n\nimport inkex, os, tempfile, sys, xml.dom.minidom\n\nclass DOTSVG(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n # The one option is 'dotfile', the filename that is passed to dot.\n self.OptionParser.add_option(\"-f\", \"--dotfile\",\n action=\"store\", type=\"string\",\n dest=\"dotfile\", default=\"\",\n help=\"DOT file\")\n \n #effect: this function provides the main feature of the plugin.\n #it calls dot with the users' file and imports the resulting svg\n #onto the inkscape canvas.\n def effect(self):\n base_dir = tempfile.mkdtemp(\"\", \"inkscape-\");\n svg_file = os.path.join(base_dir, \"eq.svg\") #svg output from DOT\n out_file = os.path.join(base_dir, \"eq.out\") #stdout messages from DOT\n err_file = os.path.join(base_dir, \"eq.err\") #stderr messages from DOT\n\n # Clean: remove temporary files generated during plugin execution.\n def clean():\n if os.path.exists(svg_file):\n os.remove(svg_file)\n if os.path.exists(out_file):\n os.remove(out_file)\n if os.path.exists(err_file):\n os.remove(err_file)\n os.rmdir(base_dir)\n\n # Execute the dot command, to generate the svg from the users file.\n os.system('dot \"%s\" -Tsvg:cairo -o \"%s\" > \"%s\" 2> \"%s\"' \\\n % (self.options.dotfile, svg_file, out_file, err_file))\n \n #if there was no output svg_file, tell the user\n try:\n os.stat(svg_file)\n except OSError:\n print >>sys.stderr, \"Invalid DOT input:\"\n print >>sys.stderr, self.options.dotfile\n print >>sys.stderr, \"temporary files were left in:\", base_dir\n sys.exit(1)\n\n if os.path.exists(err_file):\n err_stream = open(err_file, 'r')\n for line in err_stream:\n sys.stderr.write(line + '\\n')\n err_stream.close()\n \n # Attempt to open the svg and place it on the inkscape canvas.\n doc = inkex.etree.parse(svg_file)\n svg = doc.getroot()\n self.current_layer.append(svg)\n\n # Finish up\n clean()\n\nif __name__ == '__main__':\n e = DOTSVG()\n e.affect()\n" }, { "alpha_fraction": 0.5980139970779419, "alphanum_fraction": 0.6086796522140503, "avg_line_length": 34.77631759643555, "blob_id": "82c877fd9e5869cfdadf8d1630eb4151d1f23d8e", "content_id": "371a2bff88ac624e88ab3969c1ff4355854d0bc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2719, "license_type": "no_license", "max_line_length": 73, "num_lines": 76, "path": "/string_us_object/string_to_object.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n'''\nThis extension get the \"outersvg\" of selected elements.\n\nCopyright (C) 2012 Jabiertxo Arraiza, [email protected]\n\nVersion 
0.2\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n'''\n\nimport inkex,sys\nfrom lxml import etree\n\nclass objectToString(inkex.Effect):\n    def __init__(self):\n        inkex.Effect.__init__(self)\n        self.OptionParser.add_option('--useId', action = 'store', \n            type = 'string', dest = 'useId', default = 'false', \n            help = 'ID to overwrite')\n        self.OptionParser.add_option('--outerSvg', action = 'store', \n            type = 'string', dest = 'outerSvg', default = 'false', \n            help = 'Data to replace')\n        self.OptionParser.add_option(\"-d\", \"--useSelection\",\n            action=\"store\", type=\"inkbool\", \n            dest=\"useSelection\", default=False,\n            help=\"Use current selection\")\n\n    def effect(self):\n        saveout = sys.stdout\n        sys.stdout = sys.stderr\n        svg = self.document.getroot()\n        if self.options.useSelection == True:\n            if len(self.selected) > 1 or len(self.selected) == 0:\n                print \"Select one\"\n                sys.stdout = saveout\n                return;\n            for id, node in self.selected.iteritems():\n                parentEl = node.getparent()\n                parsedData = etree.fromstring(self.options.outerSvg)\n                c = 0\n                for child in parentEl:\n                    if child == node:\n                        parentEl.remove(child)\n                        parentEl.insert(c, parsedData);\n                    c = c+1\n        else:\n            xpathStr = '//svg:*[@id=\"' + self.options.useId + '\"]'\n            el = svg.xpath(xpathStr, namespaces=inkex.NSS)\n            if el == []:\n                print \"This ID doesn't exist\"\n                sys.stdout = saveout\n            else:\n                parentEl = el[0].getparent()\n                parsedData = etree.fromstring(self.options.outerSvg)\n                c = 0\n                for child in parentEl:\n                    if child == el[0]:\n                        parentEl.remove(child)\n                        parentEl.insert(c, parsedData);\n                    c = c+1\n        sys.stdout = saveout\nc = objectToString()\nc.affect()\n" }, { "alpha_fraction": 0.5654205679893494, "alphanum_fraction": 0.5749695301055908, "avg_line_length": 37.453125, "blob_id": "de4ca7f7da5f346aa4be914bf04fd35a28e4ec1e", "content_id": "4225fcb983131e944f3acbfb462c06c6916d3ccc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4922, "license_type": "no_license", "max_line_length": 90, "num_lines": 128, "path": "/paste_length/paste_length.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\nInkscape extension to copy length of the source path to the selected \ndestination path(s)\n\nCopyright (C) 2018 Shrinivas Kulkarni\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License along\nwith this program; if not, write to the Free Software Foundation, Inc.,\n51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n'''\n\nimport inkex, cubicsuperpath, bezmisc, simpletransform, sys\n\nclass PasteLengthEffect(inkex.Effect):\n\n def __init__(self):\n\n inkex.Effect.__init__(self)\n\n self.OptionParser.add_option('-s', '--scale', action = 'store',\n type = 'float', dest = 'scale', default = '1',\n help = 'Additionally scale the length by')\n\n self.OptionParser.add_option('-f', '--scaleFrom', action = 'store',\n type = 'string', dest = 'scaleFrom', default = 'center',\n help = 'Scale Path From')\n\n self.OptionParser.add_option('-p', '--precision', action = 'store',\n type = 'int', dest = 'precision', default = '5',\n help = 'Number of significant digits')\n\n self.OptionParser.add_option(\"--tab\", action=\"store\", \n type=\"string\", dest=\"tab\", default=\"sampling\", help=\"Tab\") \n\n def scaleCubicSuper(self, cspath, scaleFactor, scaleFrom):\n\n xmin, xmax, ymin, ymax = simpletransform.refinedBBox(cspath)\n \n if(scaleFrom == 'topLeft'):\n oldOrigin= [xmin, ymin]\n elif(scaleFrom == 'topRight'):\n oldOrigin= [xmax, ymin]\n elif(scaleFrom == 'bottomLeft'):\n oldOrigin= [xmin, ymax]\n elif(scaleFrom == 'bottomRight'):\n oldOrigin= [xmax, ymax]\n else: #if(scaleFrom == 'center'):\n oldOrigin= [xmin + (xmax - xmin) / 2., ymin + (ymax - ymin) / 2.]\n \n newOrigin = [oldOrigin[0] * scaleFactor , oldOrigin[1] * scaleFactor ]\n \n for subpath in cspath:\n for bezierPt in subpath:\n for i in range(0, len(bezierPt)):\n \n bezierPt[i] = [bezierPt[i][0] * scaleFactor, \n bezierPt[i][1] * scaleFactor]\n \n bezierPt[i][0] += (oldOrigin[0] - newOrigin[0])\n bezierPt[i][1] += (oldOrigin[1] - newOrigin[1])\n \n def getPartsFromCubicSuper(self, cspath):\n parts = []\n for subpath in cspath:\n part = []\n prevBezPt = None \n for i, bezierPt in enumerate(subpath):\n if(prevBezPt != None):\n seg = [prevBezPt[1], prevBezPt[2], bezierPt[0], bezierPt[1]]\n part.append(seg)\n prevBezPt = bezierPt\n parts.append(part)\n return parts\n \n def getLength(self, cspath, tolerance):\n parts = self.getPartsFromCubicSuper(cspath)\n curveLen = 0\n \n for i, part in enumerate(parts):\n for j, seg in enumerate(part):\n curveLen += bezmisc.bezierlengthSimpson((seg[0], seg[1], seg[2], seg[3]), \n tolerance = tolerance)\n \n return curveLen\n\n def effect(self):\n scale = self.options.scale\n scaleFrom = self.options.scaleFrom\n \n tolerance = 10 ** (-1 * self.options.precision)\n \n printOut = False\n selections = self.selected \n pathNodes = self.document.xpath('//svg:path',namespaces=inkex.NSS)\n outStrs = [str(len(pathNodes))]\n\n paths = [(pathNode.get('id'), cubicsuperpath.parsePath(pathNode.get('d'))) \\\n for pathNode in pathNodes if (pathNode.get('id') in selections.keys())]\n\n if(len(paths) > 1):\n srcPath = paths[-1][1]\n srclen = self.getLength(srcPath, tolerance)\n paths = paths[:len(paths)-1]\n for key, cspath in paths:\n curveLen = self.getLength(cspath, tolerance)\n \n self.scaleCubicSuper(cspath, scaleFactor = scale * (srclen / curveLen), \\\n scaleFrom = scaleFrom)\n selections[key].set('d', cubicsuperpath.formatPath(cspath))\n else:\n inkex.errormsg(_(\"Please select at least two paths, with the path whose \\\n length is to be copied at the top. 
You may have to convert the shape \\\n            to path with path->Object to Path.\"))\n\neffect = PasteLengthEffect()\neffect.affect()\n" }, { "alpha_fraction": 0.7486209869384766, "alphanum_fraction": 0.7533490657806396, "avg_line_length": 49.7599983215332, "blob_id": "ec9a097c348b5bd6fac3cd11443c1140fbbe107e", "content_id": "8a67d6203616c20aa8301650b0f66591b7259e1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1269, "license_type": "permissive", "max_line_length": 172, "num_lines": 25, "path": "/inkscape-export-layers-master/README.MD", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "# Inkscape Export Layers\n\nInkscape extension to export SVG layers as (JPG, PNG) images. The exported images can be a combination of multiple layers.\n\n## How it works\nTo export your Inkscape file as an image, open an Inkscape file with multiple layers (otherwise you can simply use the built-in export tool 'Ctrl + E').\n\nThere are two options for your layers when exporting:\n- **Fixed**: If a layer label starts with \"[fixed]\", this layer will always be exported and combined with other layers. It is very useful for backgrounds or fixed elements.\n- **Export**: If a layer label starts with \"[export]\", this layer will be exported along with any [Fixed] layer and combined in a single image.\n\nCheck this example:\n\n![Layer export example](http://i.imgur.com/StUpSd1.png)\n\nThe layer **[fixed] background** is a fixed layer and will always be exported combined with the **[export]** layers.\n\nIf we make this export, we will get two images: the combination of background + version1 and background + version2.\n\n## Using it\nTo use the extension once your layers are ready:\n\n1. Go to **Extensions > Export > Export layers**\n2. Choose the path you want to save your file to (Inkscape does not allow using a file explorer, sorry)\n3. Choose the format of your export (JPG, PNG)\n" }, { "alpha_fraction": 0.5109597444534302, "alphanum_fraction": 0.5212787389755249, "avg_line_length": 47.930694580078125, "blob_id": "480247df7eabdbd5a969edbb0c9a527daaf87303", "content_id": "fba7d121feac103d398d9a29b8dcdaf17e3bc168", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14827, "license_type": "permissive", "max_line_length": 113, "num_lines": 303, "path": "/inkscape-Zoetrope-master/zoetrope.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!
/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nZoetrope maker.\n- prints disk of given diameter and number of images around the outside.\nAlso includes a pulse trigger ring to trigger a strobe.\n- Width and phase of the pulse can be defined.\nPrints a distorted and undistorted image reference sizes\n- for use in a paint program to distort the source inages to fit onto the Disk.\n\nNeon22 - github 2016\nMIT license\n'''\n\nimport inkex # Required\nimport simplestyle # will be needed here for styles support\n\nfrom math import cos, sin, radians, pi\n\n__version__ = '0.2'\n\ninkex.localize()\n\n### Helper functions\ndef point_on_circle(radius, angle):\n \" return xy coord of the point at distance radius from origin at angle \"\n x = radius * cos(angle)\n y = radius * sin(angle)\n return (x, y)\n\ndef draw_SVG_circle(parent, r, cx, cy, name, style):\n \" structre an SVG circle entity under parent \"\n circ_attribs = {'style': simplestyle.formatStyle(style),\n 'cx': str(cx), 'cy': str(cy), \n 'r': str(r),\n inkex.addNS('label','inkscape'): name}\n circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )\n\n\nBlack = '#000000'\n\n\nclass Zoetrope(inkex.Effect): \n \n def __init__(self):\n \" define how the options are mapped from the inx file \"\n inkex.Effect.__init__(self) # initialize the super class\n\n # Define your list of parameters defined in the .inx file\n self.OptionParser.add_option(\"-u\", \"--units\",\n action=\"store\", type=\"string\",\n dest=\"units\", default='mm',\n help=\"Units this dialog is using\")\n\n self.OptionParser.add_option(\"-d\", \"--diameter\",\n action=\"store\", type=\"float\",\n dest=\"diameter\", default=1.0,\n help=\"Diameter of disk\")\n\n self.OptionParser.add_option(\"-n\", \"--divisions\",\n action=\"store\", type=\"int\",\n dest=\"divisions\", default=24,\n help=\"Number of divisions\")\n\n self.OptionParser.add_option(\"-i\", \"--height\",\n action=\"store\", type=\"float\",\n dest=\"height\", default=1.0,\n help=\"Image height\")\n\n self.OptionParser.add_option(\"-t\", \"--trigger\",\n action=\"store\", type=\"inkbool\", \n dest=\"trigger\", default=False,\n help=\"Trigger\")\n\n self.OptionParser.add_option(\"-q\", \"--triggerradius\",\n action=\"store\", type=\"float\",\n dest=\"triggerradius\", default=1.0,\n help=\"Height of trigger line\")\n\n self.OptionParser.add_option(\"-e\", \"--thick\",\n action=\"store\", type=\"float\",\n dest=\"thick\", default=1.0,\n help=\"Thickness of trigger line\")\n\n self.OptionParser.add_option(\"-r\", \"--ratio\",\n action=\"store\", type=\"float\",\n dest=\"ratio\", default=0.5,\n help=\"Ratio of trigger pulse\")\n\n self.OptionParser.add_option(\"-p\", \"--phase\",\n action=\"store\", type=\"float\",\n dest=\"phase\", default=0,\n help=\"Delay of trigger pulse\")\n \n self.OptionParser.add_option(\"-w\", \"--stroke_width\",\n action=\"store\", type=\"float\",\n dest=\"stroke_width\", default=0.1,\n help=\"Line thickness\")\n\n self.OptionParser.add_option(\"-m\", \"--template\",\n action=\"store\", type=\"inkbool\", \n dest=\"template\", default=False,\n help=\"Show Image Distortion template\")\n\n self.OptionParser.add_option(\"-k\", \"--dpi\",\n action=\"store\", type=\"int\",\n dest=\"dpi\", default=300,\n help=\"To calculate useful image size\")\n\n # here so we can have tabs - but we do not use it directly - else error\n self.OptionParser.add_option(\"\", \"--active-tab\",\n action=\"store\", type=\"string\",\n dest=\"active_tab\", default='',\n help=\"Active tab. 
Not used now.\")\n \n def getUnittouu(self, param):\n \" for 0.48 and 0.91 compatibility \"\n try:\n return inkex.unittouu(param)\n except AttributeError:\n return self.unittouu(param)\n \n def calc_unit_factor(self):\n \"\"\" return the scale factor for all dimension conversions.\n - Everything in inkscape is expected to be in 90dpi pixel units\n \"\"\"\n unit_factor = self.getUnittouu(str(1.0) + self.options.units)\n return unit_factor\n\n def polar_to_cartesian(self, cx, cy, radius, angle):\n \" So we can make arcs in the 'A' svg syntax. \"\n angle_radians = radians(angle)\n return (cx + (radius * cos(angle_radians)),\n cy + (radius * sin(angle_radians)))\n \n def build_arc(self, x,y, start_angle, end_angle, radius, reverse=True):\n \" Make a filled arc \"\n # Not using internal arc rep - instead construct path A in svg style directly\n # so we can append lines to make single path\n start = self.polar_to_cartesian(x, y, radius, end_angle)\n end = self.polar_to_cartesian(x, y, radius, start_angle)\n arc_flag = 0 if reverse else 1\n sweep = 0 if (end_angle-start_angle) <=180 else 1\n path = 'M %s,%s' % (start[0], start[1])\n path += \" A %s,%s 0 %d %d %s %s\" % (radius, radius, sweep, arc_flag, end[0], end[1])\n return path\n \n def build_trigger_arc(self, angle, radius1, radius2):\n \"\"\" return path \n - using -ve angles to get pulse on CCW side of division line\n \"\"\"\n path = self.build_arc(0,0, -angle, 0, radius1)\n # shorten and reverse second arc to connect\n path += \" L\"+self.build_arc(0,0, 0, -angle, radius2, False)[1:]\n path += \" Z\" # close\n return path\n \n \n \n### -------------------------------------------------------------------\n### This is the main function and is called when the extension is run.\n \n def effect(self):\n \"\"\" Calculate Zoetrope from inputs.\n - Make gropups for each drawn entity type. 
\n - add explanatory text\n - Show trigger pulse ring, distortion and image templates\n \"\"\"\n # convert import options\n unit_factor = self.calc_unit_factor()\n path_stroke_width = self.options.stroke_width * unit_factor\n diameter = self.options.diameter * unit_factor\n divisions = self.options.divisions \n image_height = self.options.height * unit_factor\n triggerradius = self.options.triggerradius * unit_factor\n thick = self.options.thick * unit_factor\n cross = diameter/50\n \n # This finds center of current view in inkscape\n t = 'translate(%s,%s)' % (self.view_center[0], self.view_center[1] )\n # Make a nice useful name\n g_attribs = { inkex.addNS('label','inkscape'): 'Zoetrope',\n 'transform': t,\n 'info':'N: '+str(divisions)+';' }\n # add the group to the document's current layer\n topgroup = inkex.etree.SubElement(self.current_layer, 'g', g_attribs )\n # Group for pulse triggers\n g_attr = { inkex.addNS('label','inkscape'): 'Pulse track'}\n pulsegroup = inkex.etree.SubElement(topgroup, 'g', g_attr )\n # Group for Labels\n t = 'translate(%s,%s)' % (0, diameter/1.9 )\n g_attr = { inkex.addNS('label','inkscape'): 'Label', 'transform': t }\n labelgroup = inkex.etree.SubElement(topgroup, 'g', g_attr )\n\n # Center cross\n line_style = { 'stroke': Black, 'fill': 'none', 'stroke-width': path_stroke_width }\n fill_style = { 'stroke': 'none', 'fill': Black, 'stroke-width': 'none' }\n d = 'M {0},0 L {1},0 M 0,{0} L 0,{1}'.format(-cross,cross)\n cross_attribs = { inkex.addNS('label','inkscape'): 'Center cross',\n 'style': simplestyle.formatStyle(line_style), 'd': d }\n cross_path = inkex.etree.SubElement(topgroup, inkex.addNS('path','svg'), cross_attribs )\n \n # Main Disk\n draw_SVG_circle(topgroup, diameter/2, 0, 0, 'outer_ring', line_style)\n draw_SVG_circle(topgroup, diameter/2-image_height, 0, 0, 'image_ring', line_style)\n # radials\n trigger_angle = (360.0/divisions) * self.options.ratio\n angle = 360.0/divisions\n angle_radians = radians(angle)\n arc_path = self.build_trigger_arc(trigger_angle, triggerradius, triggerradius + thick)\n for i in range(divisions):\n startpt = point_on_circle(cross*2, angle_radians*i)\n if self.options.trigger:\n endpt = point_on_circle(triggerradius, angle_radians*i)\n else:\n endpt = point_on_circle(diameter/2, angle_radians*i)\n path = \"M%s,%s L%s,%s\"%(startpt[0], startpt[1], endpt[0], endpt[1])\n radial_attr = {inkex.addNS('label','inkscape'): 'radial',\n 'style': simplestyle.formatStyle(line_style), 'd': path }\n inkex.etree.SubElement(topgroup, inkex.addNS('path','svg'), radial_attr )\n # second part of radial line (and trigger ring) if needed\n if self.options.trigger:\n # radial lines\n startpt = point_on_circle(triggerradius + thick, angle_radians*i)\n endpt = point_on_circle(diameter/2, angle_radians*i)\n path = \"M%s,%s L%s,%s\"%(startpt[0], startpt[1], endpt[0], endpt[1])\n radial_attr = {inkex.addNS('label','inkscape'): 'radial',\n 'style': simplestyle.formatStyle(line_style), 'd': path }\n inkex.etree.SubElement(topgroup, inkex.addNS('path','svg'), radial_attr )\n # add the arcs # CCW rotation\n arc_offset = angle*i - (angle-trigger_angle)*self.options.phase\n t = 'rotate(%s)' % (arc_offset) \n attribs = { inkex.addNS('label','inkscape'): 'trigger',\n 'style': simplestyle.formatStyle(fill_style), 'd': arc_path , 'transform': t,}\n inkex.etree.SubElement(pulsegroup, inkex.addNS('path','svg'), attribs )\n # Add animation of bouncing ball\n # Add pale grid on each image so can draw directly on template\n \n #\n if self.options.trigger:\n 
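            # Editor's note (added comment, not in the original source): the two
            # draw_SVG_circle calls just below sketch the pulse track as an annulus.
            # Assuming, for illustration, triggerradius = 40 and thick = 5, the
            # inner edge of the track sits at radius 40 and the outer edge at 45,
            # and the filled trigger arcs appended above span that 5-unit ring.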
draw_SVG_circle(pulsegroup, triggerradius, 0, 0, 'trigger_ring', line_style)\n draw_SVG_circle(pulsegroup, triggerradius + thick, 0, 0, 'trigger_ring', line_style)\n \n # text Label\n font_height = min(32, max( 8, int(diameter/50.0)))\n text_style = { 'font-size': str(font_height),\n 'font-family': 'sans-serif',\n 'text-anchor': 'middle',\n 'text-align': 'center',\n 'fill': Black }\n text_atts = {'style':simplestyle.formatStyle(text_style),\n 'x': '0', 'y': '0' }\n text = inkex.etree.SubElement(labelgroup, 'text', text_atts)\n text.text = \"Zoetrope\"\n text_atts = {'style':simplestyle.formatStyle(text_style),\n 'x': '0', 'y': str(font_height*1.2) }\n text = inkex.etree.SubElement(labelgroup, 'text', text_atts)\n text.text = \"Diameter = %4.2f%s. Divisions = %d\" % (self.options.diameter, self.options.units, divisions)\n text_atts = {'style':simplestyle.formatStyle(text_style),\n 'x': '0', 'y': str(font_height*2.4) }\n if self.options.trigger:\n text = inkex.etree.SubElement(labelgroup, 'text', text_atts)\n text.text = \"Pulse Duty = %4.2f, Phase = %4.2f\" % (self.options.ratio, self.options.phase)\n \n # Distortion pattern\n if self.options.template:\n # Group for Labels\n t = 'translate(%s,%s)' % (0, -image_height-font_height*5 )\n g_attr = { inkex.addNS('label','inkscape'): 'Template', 'transform': t }\n templategroup = inkex.etree.SubElement(topgroup, 'g', g_attr )\n # Draw template\n arc_path = self.build_trigger_arc(angle, diameter/2, diameter/2-image_height)\n t = 'rotate(%s)' % (-90+angle/2)\n attribs = { inkex.addNS('label','inkscape'): 'distorted image',\n 'style': simplestyle.formatStyle(line_style), 'd': arc_path , 'transform': t}\n image = inkex.etree.SubElement(templategroup, inkex.addNS('path','svg'), attribs )\n # Draw Image info\n image_width = pi*diameter/divisions\n ystart = -diameter/2.0 + image_height\n image_ratio = image_width / image_height\n text_atts = {'style':simplestyle.formatStyle(text_style),\n 'x': '0', 'y': str(ystart + font_height*2) }\n text = inkex.etree.SubElement(templategroup, 'text', text_atts)\n text.text = \"Aspect ratio=1:%4.2f\" % (image_ratio)\n # template rect\n attr = {'x':str(-image_width*1.8), 'y':str(-diameter/2),\n 'width':str(image_width),\n 'height':str(image_height),\n 'style':simplestyle.formatStyle(line_style)}\n template_sq = inkex.etree.SubElement(templategroup, 'rect', attr)\n # suggested sizes\n # image_height is in 90dpi pixels\n dpi_factor = self.getUnittouu('1in')/float(self.options.dpi)\n h = int(image_height / float(dpi_factor))\n w = int(h*image_ratio)\n text_atts = {'style':simplestyle.formatStyle(text_style),\n 'x': '0', 'y': str(ystart + font_height*3.2) }\n text = inkex.etree.SubElement(templategroup, 'text', text_atts)\n text.text = \"At %d dpi. Image = %d x %d pixels\" % (self.options.dpi, w, h)\n\nif __name__ == '__main__':\n e = Zoetrope()\n e.affect()\n\n" }, { "alpha_fraction": 0.4594348967075348, "alphanum_fraction": 0.4944872558116913, "avg_line_length": 35.30559539794922, "blob_id": "e4090f1eddcb2d8287b111d86facf5508840c426", "content_id": "b5f05b1f0e111c765a7596cc21ab8f90ff727f0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25305, "license_type": "no_license", "max_line_length": 240, "num_lines": 697, "path": "/Guilloche/guilloche_contour.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n'''\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nQuick description:\n\n'''\n# standard library\nfrom math import *\nfrom copy import deepcopy\n# local library\nimport inkex\nimport pathmodifier\nimport cubicsuperpath\nimport bezmisc\nimport simplepath\nimport simpletransform\n\ndef getColorAndOpacity(longColor):\n '''\n Convert the long into a #rrggbb color value\n Conversion back is A + B*256^1 + G*256^2 + R*256^3\n '''\n longColor = long(longColor)\n \n if longColor < 0:\n longColor = longColor & 0xFFFFFFFF\n \n hexColor = hex(longColor)\n \n hexOpacity = hexColor[-3:-1]\n hexColor = '#' + hexColor[2:-3].rjust(6, '0')\n \n return (hexColor, hexOpacity)\n \ndef setColorAndOpacity(style, color, opacity):\n declarations = style.split(';')\n strokeOpacityInStyle = False\n newOpacity = round((int(opacity, 16) / 255.0), 8)\n \n for i,decl in enumerate(declarations):\n parts = decl.split(':', 2)\n \n if len(parts) == 2:\n (prop, val) = parts\n prop = prop.strip().lower()\n \n if (prop == 'stroke' and val != color):\n declarations[i] = prop + ':' + color\n \n if prop == 'stroke-opacity':\n if val != newOpacity:\n declarations[i] = prop + ':' + str(newOpacity)\n \n strokeOpacityInStyle = True\n \n if not strokeOpacityInStyle:\n declarations.append('stroke-opacity' + ':' + str(newOpacity))\n \n return \";\".join(declarations)\n\ndef getSkeletonPath(d, offs):\n '''\n Recieves a current skeleton path and offset specified by the user if it's line.\n Calculates new skeleton path to use for creating contour with given offset.\n '''\n if offs != 0:\n comps = d.split()\n \n if ((comps[2] == 'h' or comps[2] == 'H') and len(comps) == 4):\n startPt = comps[1].split(',')\n startX = float(startPt[0])\n startY = float(startPt[1])\n \n finalX = float(comps[3]) if comps[2] == 'H' else startX + float(comps[3])\n \n if startX < finalX:\n startY -= offs\n else:\n startY += offs\n \n comps[1] = startPt[0] + ',' + str(startY)\n elif ((comps[2] == 'v' or comps[2] == 'V') and len(comps) == 4):\n startPt = comps[1].split(',')\n startX = float(startPt[0])\n startY = float(startPt[1])\n \n finalY = float(comps[3]) if comps[2] == 'V' else startY + float(comps[3])\n \n if startY < finalY:\n startX += offs\n else:\n startX -= offs\n \n comps[1] = str(startX) + ',' + startPt[1]\n elif (comps[0] == 'M' and len(comps) == 3):\n startPt = comps[1].split(',')\n startX = float(startPt[0])\n startY = float(startPt[1])\n \n finalPt = comps[2].split(',')\n finalX = float(finalPt[0])\n finalY = float(finalPt[1])\n \n if startX < finalX:\n if (startY > finalY):\n startX -= offs\n finalX -= offs\n else:\n startX += offs\n finalX += offs\n startY -= offs\n finalY -= offs\n else:\n if startY > finalY:\n startX -= offs\n finalX -= offs\n else:\n startX += offs\n finalX += offs\n startY += offs\n finalY += offs\n \n comps[1] = str(startX) + ',' + str(startY)\n comps[2] = str(finalX) 
+ ',' + str(finalY)\n elif (comps[0] == 'm' and len(comps) == 3):\n startPt = comps[1].split(',')\n startX = float(startPt[0])\n startY = float(startPt[1])\n \n finalPt = comps[2].split(',')\n dx = float(finalPt[0])\n dy = float(finalPt[1])\n finalX = startX + dx\n finalY = startY + dy\n \n if startX < finalX:\n if startY > finalY:\n startX -= offs\n else:\n startX += offs\n startY -= offs\n else:\n if startY > finalY:\n startX -= offs\n else:\n startX += offs\n startY += offs\n \n comps[1] = str(startX) + ',' + str(startY)\n comps[2] = str(dx) + ',' + str(dy)\n \n return cubicsuperpath.parsePath(' '.join(comps))\n \n return cubicsuperpath.parsePath(d)\n\ndef modifySkeletonPath(skelPath):\n resPath = []\n l = len(skelPath)\n resPath += skelPath[0]\n \n if l > 1:\n for i in range(1, l):\n if skelPath[i][0][1] == resPath[-1][1]:\n skelPath[i][0][0] = resPath[-1][0]\n del resPath[-1]\n \n resPath += skelPath[i]\n \n return resPath\n\ndef linearize(p, tolerance=0.001):\n '''\n This function receives a component of a 'cubicsuperpath' and returns two things:\n The path subdivided in many straight segments, and an array containing the length of each segment.\n '''\n zero = 0.000001\n i = 0\n d = 0\n lengths=[]\n \n while i < len(p) - 1:\n box = bezmisc.pointdistance(p[i][1], p[i][2])\n box += bezmisc.pointdistance(p[i][2], p[i+1][0])\n box += bezmisc.pointdistance(p[i+1][0], p[i+1][1])\n chord = bezmisc.pointdistance(p[i][1], p[i+1][1])\n \n if (box - chord) > tolerance:\n b1, b2 = bezmisc.beziersplitatt([p[i][1], p[i][2], p[i + 1][0], p[i + 1][1]], 0.5)\n p[i][2][0], p[i][2][1] = b1[1]\n p[i + 1][0][0], p[i + 1][0][1] = b2[2]\n p.insert(i + 1, [[b1[2][0], b1[2][1]], [b1[3][0], b1[3][1]], [b2[1][0], b2[1][1]]])\n else:\n d = (box + chord) / 2\n lengths.append(d)\n i += 1\n\n new = [p[i][1] for i in range(0, len(p) - 1) if lengths[i] > zero]\n new.append(p[-1][1])\n lengths = [l for l in lengths if l > zero]\n \n return (new, lengths)\n\ndef isSkeletonClosed(sklCmp):\n cntOfDgts = 2\n \n if (round(sklCmp[0][0], cntOfDgts) != round(sklCmp[-1][0], cntOfDgts) or round(sklCmp[0][1], cntOfDgts) != round(sklCmp[-1][1], cntOfDgts)):\n return False\n \n return True\n\ndef getPolygonCentroid(polygon):\n x = 0\n y = 0\n n = len(polygon)\n \n for vert in polygon:\n x += vert[0]\n y += vert[1]\n \n x = x / n\n y = y / n\n \n return [x, y]\n\ndef getPoint(p1, p2, x, y):\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n \n a = (y1 - y2) / (x1 - x2)\n b = y1 - a * x1\n \n if x == None:\n x = (y - b) / a\n else:\n y = a * x + b\n \n return [x, y]\n\ndef getPtOnSeg(p1, p2, segLen, l):\n if p1[0] == p2[0]:\n return [p2[0], p2[1] - l] if p2[1] < p1[1] else [p2[0], p2[1] + l]\n \n if p1[1] == p2[1]:\n return [p2[0] - l, p2[1]] if p2[0] < p1[0] else [p2[0] + l, p2[1]]\n \n dy = abs(p1[1] - p2[1]) \n angle = asin(dy / segLen)\n dx = l * cos(angle)\n x = p1[0] - dx if p1[0] > p2[0] else p1[0] + dx\n \n return getPoint(p1, p2, x, None)\n\ndef drawfunction(nodes, width, fx):\n # x-bounds of the plane\n xstart = 0.0\n xend = 2 * pi\n # y-bounds of the plane\n ybottom = -1.0\n ytop = 1.0\n # size and location of the plane on the canvas\n height = 2\n left = 15\n bottom = 15 + height\n \n # function specified by the user\n try:\n if fx != \"\":\n f = eval('lambda x: ' + fx.strip('\"'))\n except SyntaxError:\n return []\n \n scalex = width / (xend - xstart)\n xoff = left\n # conver x-value to coordinate\n coordx = lambda x: (x - xstart) * scalex + xoff\n \n scaley = height / (ytop - ybottom)\n yoff = bottom\n # conver 
y-value to coordinate\n coordy = lambda y: (ybottom - y) * scaley + yoff\n \n # step is the distance between nodes on x\n step = (xend - xstart) / (nodes - 1)\n third = step / 3.0\n # step used in calculating derivatives\n ds = step * 0.001\n \n # initialize function and derivative for 0;\n # they are carried over from one iteration to the next, to avoid extra function calculations. \n x0 = xstart\n y0 = f(xstart)\n \n # numerical derivative, using 0.001*step as the small differential\n x1 = xstart + ds # Second point AFTER first point (Good for first point)\n y1 = f(x1)\n \n dx0 = (x1 - x0) / ds\n dy0 = (y1 - y0) / ds\n \n # path array\n a = []\n # Start curve\n a.append(['M ', [coordx(x0), coordy(y0)]])\n\n for i in range(int(nodes - 1)):\n x1 = (i + 1) * step + xstart\n x2 = x1 - ds # Second point BEFORE first point (Good for last point)\n y1 = f(x1)\n y2 = f(x2)\n \n # numerical derivative\n dx1 = (x1 - x2) / ds\n dy1 = (y1 - y2) / ds\n \n # create curve\n a.append([' C ', [coordx(x0 + (dx0 * third)), coordy(y0 + (dy0 * third)), \n coordx(x1 - (dx1 * third)), coordy(y1 - (dy1 * third)),\n coordx(x1), coordy(y1)]])\n \n # Next segment's start is this segment's end\n x0 = x1\n y0 = y1\n # Assume the function is smooth everywhere, so carry over the derivative too\n dx0 = dx1\n dy0 = dy1\n \n return a\n\ndef offset(pathComp, dx, dy):\n for ctl in pathComp:\n for pt in ctl:\n pt[0] += dx\n pt[1] += dy\n\ndef stretch(pathComp, xscale, yscale, org):\n for ctl in pathComp:\n for pt in ctl:\n pt[0] = org[0] + (pt[0] - org[0]) * xscale\n pt[1] = org[1] + (pt[1] - org[1]) * yscale\n\nclass GuillocheContour(pathmodifier.PathModifier):\n def __init__(self):\n pathmodifier.PathModifier.__init__(self)\n self.OptionParser.add_option(\"--tab\",\n action=\"store\", type=\"string\",\n dest=\"tab\", default=\"contour\",\n help=\"Active tab\")\n self.OptionParser.add_option(\"--contourFunction\",\n action=\"store\", type=\"string\",\n dest=\"contourFunction\", default=\"sin\",\n help=\"Function of the contour\")\n self.OptionParser.add_option(\"--frequency\",\n action=\"store\", type=\"int\",\n dest=\"frequency\", default=10,\n help=\"Frequency of the function\")\n self.OptionParser.add_option(\"--amplitude\",\n action=\"store\", type=\"int\",\n dest=\"amplitude\", default=1,\n help=\"Amplitude of the function\")\n self.OptionParser.add_option(\"--phaseOffset\",\n action=\"store\", type=\"int\",\n dest=\"phaseOffset\", default=0,\n help=\"Phase offset of the function\")\n self.OptionParser.add_option(\"--offset\",\n action=\"store\", type=\"int\",\n dest=\"offset\", default=0,\n help=\"Offset of the function\")\n self.OptionParser.add_option(\"--nodes\",\n action=\"store\", type=\"int\",\n dest=\"nodes\", default=20,\n help=\"Count of nodes\")\n self.OptionParser.add_option(\"--remove\",\n action=\"store\", type=\"inkbool\",\n dest=\"remove\", default=False,\n help=\"If True, control object will be removed\")\n self.OptionParser.add_option(\"--strokeColor\",\n action=\"store\", type=\"string\",\n dest=\"strokeColor\", default=255,\n help=\"The line's color\")\n self.OptionParser.add_option(\"--amplitude1\",\n action=\"store\", type=\"float\",\n dest=\"amplitude1\", default=0.0,\n help=\"Amplitude of first harmonic\")\n self.OptionParser.add_option(\"--phase1\",\n action=\"store\", type=\"int\",\n dest=\"phase1\", default=0,\n help=\"Phase offset of first harmonic\")\n self.OptionParser.add_option(\"--amplitude2\",\n action=\"store\", type=\"float\",\n dest=\"amplitude2\", default=0.0,\n help=\"Amplitude of 
second harmonic\")\n self.OptionParser.add_option(\"--phase2\",\n action=\"store\", type=\"int\",\n dest=\"phase2\", default=0,\n help=\"Phase offset of second harmonic\")\n self.OptionParser.add_option(\"--amplitude3\",\n action=\"store\", type=\"float\",\n dest=\"amplitude3\", default=0.0,\n help=\"Amplitude of third harmonic\")\n self.OptionParser.add_option(\"--phase3\",\n action=\"store\", type=\"int\",\n dest=\"phase3\", default=0,\n help=\"Phase offset of third harmonic\")\n self.OptionParser.add_option(\"--amplitude4\",\n action=\"store\", type=\"float\",\n dest=\"amplitude4\", default=0.0,\n help=\"Amplitude of fourth harmonic\")\n self.OptionParser.add_option(\"--phase4\",\n action=\"store\", type=\"int\",\n dest=\"phase4\", default=0,\n help=\"Phase offset of fourth harmonic\")\n self.OptionParser.add_option(\"--amplitude5\",\n action=\"store\", type=\"float\",\n dest=\"amplitude5\", default=0.0,\n help=\"Amplitude of fifth harmonic\")\n self.OptionParser.add_option(\"--phase5\",\n action=\"store\", type=\"int\",\n dest=\"phase5\", default=0,\n help=\"Phase offset of fifth harmonic\")\n\n def prepareSelectionList(self):\n self.skeletons = self.selected\n self.expandGroupsUnlinkClones(self.skeletons, True, False)\n self.objectsToPaths(self.skeletons)\n \n def linearizePath(self, skelPath, offs):\n comps, lengths = linearize(skelPath)\n \n self.skelCompIsClosed = isSkeletonClosed(comps)\n \n if (self.skelCompIsClosed and offs != 0):\n centroid = getPolygonCentroid(comps)\n \n for i in range(len(comps)):\n pt1 = comps[i]\n dist = bezmisc.pointdistance(centroid, pt1)\n \n comps[i] = getPtOnSeg(centroid, pt1, dist, dist + offs)\n \n if i > 0:\n lengths[i - 1] = bezmisc.pointdistance(comps[i - 1], comps[i])\n \n return (comps, lengths)\n \n def getFunction(self, func):\n res = ''\n \n presetAmp1 = presetAmp2 = presetAmp3 = presetAmp4 = presetAmp5 = 0.0\n presetPhOf1 = presetPhOf2 = presetPhOf3 = presetPhOf4 = presetPhOf5 = presetOffs = 0\n \n if (func == 'sin' or func == 'cos'):\n return '(' + str(self.options.amplitude) + ') * ' + func + '(x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + '))'\n \n if func == 'env1':\n presetAmp1 = presetAmp3 = 0.495\n elif func == 'env2':\n presetAmp1 = presetAmp3 = 0.65\n presetPhOf1 = presetPhOf3 = 25\n elif func == 'env3':\n presetAmp1 = 0.75\n presetPhOf1 = 25\n presetAmp3 = 0.24\n presetPhOf3 = -25\n elif func == 'env4':\n presetAmp1 = 1.105\n presetAmp3 = 0.27625\n presetPhOf3 = 50\n elif func == 'env5':\n presetAmp1 = 0.37464375\n presetPhOf1 = 25\n presetAmp2 = 0.5655\n presetAmp3 = 0.37464375\n presetPhOf3 = -25\n elif func == 'env6':\n presetAmp1 = 0.413725\n presetPhOf1 = 25\n presetAmp2 = 0.45695\n presetPhOf2 = 50\n presetAmp3 = 0.494\n presetPhOf3 = -25\n elif func == 'env7':\n presetAmp1 = 0.624\n presetPhOf1 = 25\n presetAmp2 = 0.312\n presetAmp3 = 0.624\n presetPhOf3 = 25\n elif func == 'env8':\n presetAmp1 = 0.65\n presetPhOf1 = 50\n presetAmp2 = 0.585\n presetAmp3 = 0.13\n elif func == 'env9':\n presetAmp1 = 0.07605\n presetPhOf1 = 25\n presetAmp2 = 0.33345\n presetPhOf2 = 50\n presetAmp3 = 0.468\n presetPhOf3 = -25\n presetAmp4 = 0.32175\n elif func == 'env10':\n presetAmp1 = 0.3575\n presetPhOf1 = -25\n presetAmp2 = 0.3575\n presetAmp3 = 0.3575\n presetPhOf3 = 25\n presetAmp4 = 0.3575\n presetPhOf4 = 50\n elif func == 'env11':\n presetAmp1 = 0.65\n presetPhOf1 = 25\n presetAmp2 = 0.13\n presetPhOf2 = 50\n presetAmp3 = 0.26\n presetPhOf3 = 25\n presetAmp4 = 0.39\n elif func == 'env12':\n presetAmp1 = 0.5525\n presetPhOf1 
= -25\n presetAmp2 = 0.0414375\n presetPhOf2 = 50\n presetAmp3 = 0.15884375\n presetPhOf3 = 25\n presetAmp4 = 0.0966875\n presetAmp5 = 0.28315625\n presetPhOf5 = -25\n \n harm1 = '(' + str(self.options.amplitude * (presetAmp1 + self.options.amplitude1)) + ') * cos(1 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf1 + self.options.phase1) / 100.0 * 2 * pi) + '))'\n harm2 = '(' + str(self.options.amplitude * (presetAmp2 + self.options.amplitude2)) + ') * cos(2 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf2 + self.options.phase2) / 100.0 * 2 * pi) + '))'\n harm3 = '(' + str(self.options.amplitude * (presetAmp3 + self.options.amplitude3)) + ') * cos(3 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf3 + self.options.phase3) / 100.0 * 2 * pi) + '))'\n harm4 = '(' + str(self.options.amplitude * (presetAmp4 + self.options.amplitude4)) + ') * cos(4 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf4 + self.options.phase4) / 100.0 * 2 * pi) + '))'\n harm5 = '(' + str(self.options.amplitude * (presetAmp5 + self.options.amplitude5)) + ') * cos(5 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf5 + self.options.phase5) / 100.0 * 2 * pi) + '))'\n \n res = harm1 + ' + ' + harm2 + ' + ' + harm3 + ' + ' + harm4 + ' + ' + harm5\n \n return res\n\n def lengthToTime(self, l):\n '''\n Recieves an arc length l, and returns the index of the segment in self.skelComp \n containing the corresponding point, together with the position of the point on this segment.\n\n If the deformer is closed, do computations modulo the total length.\n '''\n if self.skelCompIsClosed:\n l = l % sum(self.lengths)\n \n if l <= 0:\n return 0, l / self.lengths[0]\n \n i = 0\n \n while (i < len(self.lengths)) and (self.lengths[i] <= l):\n l -= self.lengths[i]\n i += 1\n \n t = l / self.lengths[min(i, len(self.lengths) - 1)]\n \n return (i, t)\n\n def applyDiffeo(self, bpt, vects=()):\n '''\n The kernel of this stuff:\n bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.\n '''\n s = bpt[0] - self.skelComp[0][0]\n i, t = self.lengthToTime(s)\n \n if i == len(self.skelComp) - 1:\n x, y = bezmisc.tpoint(self.skelComp[i - 1], self.skelComp[i], t + 1)\n dx = (self.skelComp[i][0] - self.skelComp[i - 1][0]) / self.lengths[-1]\n dy = (self.skelComp[i][1] - self.skelComp[i - 1][1]) / self.lengths[-1]\n else:\n x, y = bezmisc.tpoint(self.skelComp[i], self.skelComp[i + 1], t)\n dx = (self.skelComp[i + 1][0] - self.skelComp[i][0]) / self.lengths[i]\n dy = (self.skelComp[i + 1][1] - self.skelComp[i][1]) / self.lengths[i]\n\n vx = 0\n vy = bpt[1] - self.skelComp[0][1]\n bpt[0] = x + vx * dx - vy * dy\n bpt[1] = y + vx * dy + vy * dx\n\n for v in vects:\n vx = v[0] - self.skelComp[0][0] - s\n vy = v[1] - self.skelComp[0][1]\n v[0] = x + vx * dx - vy * dy\n v[1] = y + vx * dy + vy * dx\n\n def effect(self):\n if len(self.options.ids) < 1:\n inkex.errormsg(_(\"This extension requires one selected path.\"))\n return\n \n self.prepareSelectionList()\n \n for skeleton in self.skeletons.itervalues():\n resPath = []\n pattern = inkex.etree.Element(inkex.addNS('path','svg'))\n \n self.options.strokeHexColor, self.strokeOpacity = getColorAndOpacity(self.options.strokeColor)\n \n # Copy style of skeleton with setting color and opacity\n s = skeleton.get('style')\n \n if s:\n pattern.set('style', setColorAndOpacity(s, self.options.strokeHexColor, 
self.strokeOpacity))\n \n skeletonPath = modifySkeletonPath(getSkeletonPath(skeleton.get('d'), self.options.offset))\n \n self.skelComp, self.lengths = self.linearizePath(skeletonPath, self.options.offset)\n \n length = sum(self.lengths)\n patternWidth = length / self.options.frequency\n selectedFunction = self.getFunction(self.options.contourFunction)\n \n pattern.set('d', simplepath.formatPath(drawfunction(self.options.nodes, patternWidth, selectedFunction)))\n \n # Add path into SVG structure\n skeleton.getparent().append(pattern)\n \n if self.options.remove:\n skeleton.getparent().remove(skeleton)\n \n # Compute bounding box\n bbox = simpletransform.computeBBox([pattern])\n \n width = bbox[1] - bbox[0]\n dx = width\n \n if dx < 0.01:\n exit(_(\"The total length of the pattern is too small.\"))\n \n patternPath = cubicsuperpath.parsePath(pattern.get('d'))\n curPath = deepcopy(patternPath)\n \n xoffset = self.skelComp[0][0] - bbox[0]\n yoffset = self.skelComp[0][1] - (bbox[2] + bbox[3]) / 2\n \n patternCopies = max(1, int(round(length / dx)))\n width = dx * patternCopies\n \n newPath = []\n \n # Repeat pattern to cover whole skeleton\n for subPath in curPath:\n for i in range(0, patternCopies, 1):\n newPath.append(deepcopy(subPath))\n offset(subPath, dx, 0)\n \n curPath = newPath\n \n # Offset pattern to the first node of the skeleton\n for subPath in curPath:\n offset(subPath, xoffset, yoffset)\n \n # Stretch pattern to whole skeleton\n for subPath in curPath:\n stretch(subPath, length / width, 1, self.skelComp[0])\n \n for subPath in curPath:\n for ctlpt in subPath:\n self.applyDiffeo(ctlpt[1], (ctlpt[0], ctlpt[2]))\n \n # Check if there is a need to close path manually\n if self.skelCompIsClosed:\n firstPtX = round(curPath[0][0][1][0], 8)\n firstPtY = round(curPath[0][0][1][1], 8)\n finalPtX = round(curPath[-1][-1][1][0], 8)\n finalPtY = round(curPath[-1][-1][1][1], 8)\n \n if (firstPtX != finalPtX or firstPtY != finalPtY):\n curPath[-1].append(curPath[0][0])\n \n resPath += curPath\n \n pattern.set('d', cubicsuperpath.formatPath(resPath))\n\nif __name__ == '__main__':\n e = GuillocheContour()\n e.affect()\n\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99\n" }, { "alpha_fraction": 0.5570613145828247, "alphanum_fraction": 0.5806639790534973, "avg_line_length": 33.41609191894531, "blob_id": "72dcd9da2e2480d7110480a8276440e523ff2594", "content_id": "3b584845ecb2ce90ee0529fed1b6ea6b2beecb07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15422, "license_type": "permissive", "max_line_length": 120, "num_lines": 435, "path": "/Inkscape-Rounded-Corners-master/RoundedCorners.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \r\n\r\n\"\"\"\r\n==============================================================================================\r\n Rounded Corners Version 1.3 by Chris Hawley\r\n \r\n This effect is designed to round off the corners on shapes made with the pencil tool.\r\n It will round off corners made of straight edges, while ignoring bezier lines.\r\n Corners that are too short for the desired radius will also be ignored.\r\n It does not work on Inkscape objects like the rectangle or star - \r\n the user must convert objects to paths before using this extension.\r\n \r\n Usage: \r\n 1. Select or create a polygon in Inkscape using the pencil tool \r\n 2. Click Extensions->Modify Path->Rounded Corners. \r\n 3. 
Fill out the options in the dialog that pop up\r\n\t\t- Choose the radius for rounding\r\n\t\t- Pick units (Inches, centimeters, etc.)\r\n\t\t- pick whether you want left corners, right corners, or both to be rounded\r\n\t\t- select \"Live preview\" to see how your selecttions affect the shape before applying\r\n 4. Click Apply.\r\n==============================================================================================\r\n\"\"\"\r\n\r\n\"\"\"\r\nThe MIT License (MIT)\r\nCopyright (c) 2019 Chris Hawley\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in\r\nall copies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\nTHE SOFTWARE.\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n==============================================================================================\r\nVector class from https://gist.github.com/mcleonard/5351452\r\nThe MIT License (MIT)\r\nCopyright (c) 2015 Mat Leonard\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in\r\nall copies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\nTHE SOFTWARE.\r\n\"\"\"\r\n\r\n\"\"\"\r\n================================================================================================\r\nbegin Vector Class by Mat Leonard\t\r\n================================================================================================\r\n\"\"\"\t\r\nclass Vector(object):\r\n def __init__(self, *args):\r\n \"\"\" Create a vector, example: v = Vector(1,2) \"\"\"\r\n if len(args)==0: self.values = (0,0)\r\n else: self.values = args\r\n \r\n def norm(self):\r\n \"\"\" Returns the norm (length, magnitude) of the vector \"\"\"\r\n return math.sqrt(sum( comp**2 for comp in self ))\r\n \r\n def argument(self):\r\n \"\"\" Returns the argument of the vector, the angle clockwise from +y.\"\"\"\r\n arg_in_rad = math.acos(Vector(0,1)*self/self.norm())\r\n arg_in_deg = math.degrees(arg_in_rad)\r\n arg_in_deg = math.degrees(arg_in_rad)\r\n if self.values[0]<0: return 360 - arg_in_deg\r\n else: return arg_in_deg\r\n\r\n def normalize(self):\r\n \"\"\" Returns a normalized unit vector \"\"\"\r\n norm = self.norm()\r\n normed = tuple( comp/norm for comp in self )\r\n return Vector(*normed)\r\n \r\n def rotate(self, *args):\r\n \"\"\" Rotate this vector. If passed a number, assumes this is a \r\n 2D vector and rotates by the passed value in degrees. Otherwise,\r\n assumes the passed value is a list acting as a matrix which rotates the vector.\r\n \"\"\"\r\n if len(args)==1 and type(args[0]) == type(1) or type(args[0]) == type(1.):\r\n # So, if rotate is passed an int or a float...\r\n if len(self) != 2:\r\n raise ValueError(\"Rotation axis not defined for greater than 2D vector\")\r\n return self._rotate2D(*args)\r\n elif len(args)==1:\r\n matrix = args[0]\r\n if not all(len(row) == len(v) for row in matrix) or not len(matrix)==len(self):\r\n raise ValueError(\"Rotation matrix must be square and same dimensions as vector\")\r\n return self.matrix_mult(matrix)\r\n \r\n def _rotate2D(self, theta):\r\n \"\"\" Rotate this vector by theta in degrees.\r\n \r\n Returns a new vector.\r\n \"\"\"\r\n theta = math.radians(theta)\r\n # Just applying the 2D rotation matrix\r\n dc, ds = math.cos(theta), math.sin(theta)\r\n x, y = self.values\r\n x, y = dc*x - ds*y, ds*x + dc*y\r\n return Vector(x, y)\r\n \r\n def matrix_mult(self, matrix):\r\n \"\"\" Multiply this vector by a matrix. Assuming matrix is a list of lists.\r\n \r\n Example:\r\n mat = [[1,2,3],[-1,0,1],[3,4,5]]\r\n Vector(1,2,3).matrix_mult(mat) -> (14, 2, 26)\r\n \r\n \"\"\"\r\n if not all(len(row) == len(self) for row in matrix):\r\n raise ValueError('Matrix must match vector dimensions') \r\n \r\n # Grab a row from the matrix, make it a Vector, take the dot product, \r\n # and store it as the first component\r\n product = tuple(Vector(*row)*self for row in matrix)\r\n \r\n return Vector(*product)\r\n \r\n def inner(self, other):\r\n \"\"\" Returns the dot product (inner product) of self and other vector\r\n \"\"\"\r\n return sum(a * b for a, b in zip(self, other))\r\n \r\n def __mul__(self, other):\r\n \"\"\" Returns the dot product of self and other if multiplied\r\n by another Vector. 
If multiplied by an int or float,\r\n multiplies each component by other.\r\n \"\"\"\r\n if type(other) == type(self):\r\n return self.inner(other)\r\n elif type(other) == type(1) or type(other) == type(1.0):\r\n product = tuple( a * other for a in self )\r\n return Vector(*product)\r\n \r\n def __rmul__(self, other):\r\n \"\"\" Called if 4*self for instance \"\"\"\r\n return self.__mul__(other)\r\n \r\n def __div__(self, other):\r\n if type(other) == type(1) or type(other) == type(1.0):\r\n divided = tuple( a / other for a in self )\r\n return Vector(*divided)\r\n \r\n def __add__(self, other):\r\n \"\"\" Returns the vector addition of self and other \"\"\"\r\n added = tuple( a + b for a, b in zip(self, other) )\r\n return Vector(*added)\r\n \r\n def __sub__(self, other):\r\n \"\"\" Returns the vector difference of self and other \"\"\"\r\n subbed = tuple( a - b for a, b in zip(self, other) )\r\n return Vector(*subbed)\r\n \r\n def __iter__(self):\r\n return self.values.__iter__()\r\n \r\n def __len__(self):\r\n return len(self.values)\r\n \r\n def __getitem__(self, key):\r\n return self.values[key]\r\n \r\n def __repr__(self):\r\n\t\treturn str(self.values)\r\n\t\r\n\"\"\"\r\n================================================================================================\r\nend Vector Class by Mat Leonard\t\r\n================================================================================================\r\n\"\"\"\t\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n================================================================================================\r\nBegin Rounded Corners By Chris Hawley\r\n================================================================================================\r\n\"\"\"\t\r\nimport inkex\r\nimport simplepath\r\nimport re\r\nimport math\r\nimport sys\r\n\r\nclass RoundedCorners(inkex.Effect):\r\n\tdef __init__(self):\r\n\t\tinkex.Effect.__init__(self)\r\n\t\tself.OptionParser.add_option(\"--RCradius\",\r\n\t\t\t\taction=\"store\", type=\"float\", \r\n\t\t\t\tdest=\"RCradius\", default=1.0,\r\n\t\t\t\thelp=\"Radius for rounded corners\")\r\n\t\tself.OptionParser.add_option(\"--units\", action=\"store\",\r\n type=\"string\", dest=\"units\",\r\n default=\"25.4/96\") # Inches\r\n\t\t\r\n\t\t\r\n\t\tself.OptionParser.add_option(\"--LeftCorners\", action=\"store\",\r\n\t\t\t\t\t\t\t type=\"string\", dest=\"LeftCorners\",\r\n\t\t\t\t\t\t\t default=True) \r\n\t\t\t\t\t\t\t \r\n\t\tself.OptionParser.add_option(\"--RightCorners\", action=\"store\",\r\n\t\t\t\t\t\t\t type=\"string\", dest=\"RightCorners\",\r\n\t\t\t\t\t\t\t default=True) \t\t\t\t\t\t\t \r\n\t\t\t\t\t\t\t \r\n\t\tself.newPath = []\r\n\r\n\r\n\tdef effect(self):\r\n\t\tfoundPath = False\r\n\t\tsucceededRounding = False\r\n\t\tselection = self.selected\r\n\t\tif (selection):\r\n\t\t\tfor id, node in selection.iteritems():\r\n\t\t\t\tif node.tag == inkex.addNS('path','svg'):\r\n\t\t\t\t\tsucceededRounding = self.MakeRound(node) or succeededRounding\r\n\t\t\t\t\tfoundPath = True\r\n\t\t\tif foundPath == False:\r\n\t\t\t\tinkex.errormsg(\"Suitable path not found. Try converting to path first using the menu item 'Path->Object to Path'.\")\r\n\t\t\telif not succeededRounding:\r\n\t\t\t\tinkex.errormsg(\"Couldn't find any suitable corners to round. 
Try decreasing the Rounded Corner Radius.\")\r\n\t\telse:\r\n\t\t\tinkex.errormsg(\"Please select an object.\")\r\n\t\r\n\t\r\n\tdef RoundCorner(self, cursorIndex, line1Index, arcIndex, line2Index):\r\n\t\t#\r\n\t\t# p2 _________ p3\r\n\t\t# |\r\n\t\t# | p0\r\n\t\t# p1 |\r\n\t\t\r\n\t\t#sys.stderr.write(str((cursorIndex, line1Index, arcIndex, line2Index)))\r\n\t\t\r\n\t\t\r\n\t\t#radius. defined by user dialog\r\n\t\tr=self.options.RCradius\r\n\t\t\r\n\t\troundLeft = self.options.LeftCorners.lower() == 'true'\r\n\t\troundRight = self.options.RightCorners.lower() == 'true'\r\n\t\t\r\n\t\tif (not roundLeft) and (not roundRight):\r\n\t\t\treturn True\r\n\t\t\r\n\t\tscale = eval(self.options.units)\r\n\t\tif not scale:\r\n\t\t\tscale = 25.4/96 \r\n\t\tscale /= self.unittouu('1px')\r\n\t\tr /= scale\r\n\r\n\t\tp1 = Vector( self.newPath[cursorIndex][1][-2], self.newPath[cursorIndex][1][-1] )\r\n\t\tp2 = Vector( self.newPath[line1Index][1][-2], self.newPath[line1Index][1][-1] )\r\n\t\tp3 = Vector( self.newPath[line2Index][1][-2], self.newPath[line2Index][1][-1] )\r\n\r\n\t\tif (p1-p2).norm() < .0001:\r\n\t\t\treturn\r\n\t\tif (p2-p3).norm() < .0001:\r\n\t\t\treturn\r\n\t\t\r\n\t\t\r\n\t\t#L20 = bisecting line. 320 and 120 are right triangles\r\n\t\t\r\n\t\t#point on line L12 that is 1 unit away from p2 (normalized)\r\n\t\tn1 = (p1 - p2).normalize() + p2\r\n\t\tn3 = (p3 - p2).normalize() + p2\r\n\t\t\r\n\t\t\r\n\t\tp0 = n1 + n3\r\n\t\tp0 = p0 / 2.0\r\n\t\t\r\n\r\n\t\t#V vectors are normalized lines translated to the origin\r\n\t\tV12 = (p2 - p1).normalize()\r\n\t\tV12angle = math.atan2(V12[1], V12[0])\r\n\t\tV12PerpRightx = math.cos(V12angle + math.pi/2)\r\n\t\tV12PerpRighty = math.sin(V12angle + math.pi/2)\r\n\r\n\r\n\t\tV23 = (p3 - p2).normalize()\r\n\t\tV23angle = math.atan2(V23[1], V23[0])\r\n\t\tV23PerpRightx = math.cos(V23angle + math.pi/2)\r\n\t\tV23PerpRighty = math.sin(V23angle + math.pi/2)\r\n\r\n\t\tV13 = p3 - p1\r\n\r\n\t\t#dot product of V13 and the vector perpendicular to V12\r\n\t\tfacingRight = (V12PerpRightx * V13[0] + V12PerpRighty * V13[1]) > 0\r\n\r\n\t\t#distance from point p0 to line L12\r\n\t\tp0x = p0[0]\r\n\t\tp0y = p0[1]\r\n\t\tp1x = p1[0]\r\n\t\tp1y = p1[1]\r\n\t\tp2x = p2[0]\r\n\t\tp2y = p2[1]\r\n\t\t\r\n\t\tp0Dist = abs((p2y - p1y)*p0x - (p2x - p1x)*p0y + p2x * p1y - p2y*p1x)/math.sqrt((p2y-p1y)**2 + (p2x-p1x)**2)\r\n\t\tmultiplier = r/p0Dist\r\n\r\n\t\t#p5 on the line L20, but the correct distance so that it is r away from the other lines\r\n\t\t#it is the center point for the circle of radius r that touches the other 2 lines\r\n\t\tp5 = (p0-p2)*multiplier + p2\r\n\r\n\t\t#Tangent1: where circle with radius r, centered on p5 touches the line L12. 
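		# Editor's sketch (added comment; hypothetical numbers, not from the source):
		# with r = 2, p5 = (10, 10) and the first edge running along +x, we get
		# V12PerpRight = (0, 1), so a right-facing corner yields
		# Tangent1 = p5 - V12PerpRight * r = (10, 8).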
Tangent 2 is where circle touches L23\r\n\t\tV12PerpRight = Vector(V12PerpRightx, V12PerpRighty)\r\n\t\tV23PerpRight = Vector(V23PerpRightx, V23PerpRighty)\r\n\t\t\r\n\t\tif facingRight:\r\n\t\t\tTangent1 = p5 - V12PerpRight * r\r\n\t\t\tTangent2 = p5 - V23PerpRight * r\r\n\t\t\r\n\t\telse: \r\n\t\t\tTangent1 = p5 + V12PerpRight * r\r\n\t\t\tTangent2 = p5 + V23PerpRight * r\r\n\r\n\t\t#check that [tangent points are closer to p2] than p1 and p3 are\r\n\t\t#if so, replace line from p1 to p2, and line from p2 to p3 with:\r\n\t\t#arc, starting on tangent1, centered at p5, radius r, ending on tangent2\r\n\t\t#A rx ry x-axis-rotation large-arc-flag sweep-flag x y\r\n\t\t\r\n\t\tallowed = True\r\n\t\t\r\n\t\tif facingRight and (not roundRight):\r\n\t\t\tallowed = False;\r\n\t\t\t\r\n\t\tif (not facingRight) and (not roundLeft):\r\n\t\t\tallowed = False;\r\n\t\t\r\n\t\tif ((Tangent1 - p2).norm() > (p1 - p2).norm() + 0.001) or ((Tangent2 - p2).norm() > (p3 - p2).norm() + 0.001):\r\n\t\t\tallowed = False\r\n\t\t\t\r\n\t\tif allowed:\r\n\t\t\t\r\n\t\t\t#line from p1 to tangent 1\r\n\t\t\tself.newPath[line1Index] = (\"L\", [Tangent1[0], Tangent1[1]])\r\n\t\t\tif facingRight:\r\n\t\t\t\tself.newPath[arcIndex] = (\"A\", [ r,r,0,0,1,Tangent2[0],Tangent2[1]])\r\n\t\t\telse:\r\n\t\t\t\tself.newPath[arcIndex] = (\"A\", [ r,r,0,0,0,Tangent2[0],Tangent2[1]])\r\n\t\t\t\t\r\n\t\treturn allowed\r\n\t\r\n\tdef MakeRound(self, node):\r\n\t\tdArr = simplepath.parsePath(node.get('d'))\r\n\t\tdLen = len(dArr)\r\n\t\t\r\n\t\tif dLen < 3:\r\n\t\t\treturn \r\n\t\t\r\n\t\tself.newPath = []\r\n\t\tisClosed = False\r\n\t\t\r\n\t\t#make a new list of tuples. each tuple is a letter string, and a list of parameters\r\n\t\t#every other tuple will start blank to make room for adding possible arcs as curved corners\r\n\t\t\r\n\t\tfor i in range(dLen):\r\n\t\t\tcmd, params = dArr[i]\r\n\t\t\tif cmd != \"Z\":\r\n\t\t\t\tself.newPath.append((cmd, params))\r\n\t\t\t\tself.newPath.append((\"\",[]))\r\n\t\t\telse:\r\n\t\t\t\tisClosed = True\r\n\t\t\t\r\n\t\tif isClosed:\r\n\t\t\tendIndex = len(self.newPath) - 1\r\n\t\telse:\r\n\t\t\tendIndex = len(self.newPath) - 4\r\n\t\t\r\n\t\tnpLen = len(self.newPath)\r\n\t\t\r\n\t\tsucceeded = False\r\n\t\t\r\n\t\tdebuglog = \"\"\r\n\t\t\r\n\t\tfor n in range(0, endIndex, 2):\r\n\t\t\t#get first point of possible corner\r\n\t\t\tif self.newPath[(n+1) % npLen][0] != '':\r\n\t\t\t\tcursorIndex = (n+1) % npLen\r\n\t\t\telse:\r\n\t\t\t\tcursorIndex = n\r\n\t\t\r\n\t\t\t#check if other 2 points are L, M, or Z commands.
if so we have a corner\r\n\t\t\tif self.newPath[(n+2) % npLen][0] in \"LMZ\" and self.newPath[(n+4) % npLen][0] in \"LMZ\":\r\n\t\t\t\t\r\n\t\t\t\tsucceeded = self.RoundCorner(cursorIndex, (n+2) % npLen, (n+3) % npLen, (n+4) % npLen) or succeeded\r\n\t\t\t\r\n\t\tfirstCommand, firstArgs = self.newPath[0]\r\n\t\tself.newPath[0] = ('M', [firstArgs[-2], firstArgs[-1]]);\r\n\t\t\r\n\t\t\r\n\t\tnewDString = \"\"\r\n\t\t\r\n\t\tfor cmd, params in self.newPath:\r\n\t\t\tif cmd != '':\r\n\t\t\t\tnewDString += (cmd + \" \")\r\n\t\t\t\tnewDString += \",\".join(map (str, params))\r\n\t\t\t\tnewDString += \"\\n\"\r\n\t\t\r\n\t\tif isClosed:\r\n\t\t\tnewDString += \"Z\\n\"\r\n\t\t\r\n\t\tif succeeded:\r\n\t\t\tnode.set('d', newDString)\r\n\r\n\t\treturn succeeded\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\te = RoundedCorners()\r\n\te.affect()\r\n\r\n\t\r\n\t\r\n\t\r\n\t\r\n\r\n" }, { "alpha_fraction": 0.6860628724098206, "alphanum_fraction": 0.7006100416183472, "avg_line_length": 25.962024688720703, "blob_id": "65a74830f801b9fbbaab2d6578193225a6755916", "content_id": "ea2de8378786cd83915583d84bc85ee8cec63203", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2131, "license_type": "permissive", "max_line_length": 81, "num_lines": 79, "path": "/svg2ico-002/svg2ico.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n#\n# svg2ico.py\n# Create Win ico files easily.\n#\n# Copyright (C) 2008 Maurizio Aru <ginopc(a)tiscali.it>\n# \n# Based on icon_generator code extesion by David R. Damerell ([email protected])\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n__version__ = \"0.2\"\n\nimport os\nimport sys\nimport subprocess\nimport argparse\n\n#\n# Sgv2Ico Main Class\n#\nclass Svg2Ico:\n\t\n\twidth = 16\n\theigth = 16\n\tifName = 'in.png'\n\tofName = 'out.ico'\n\t\n\tdef __init__(self, size, iName):\n\t\tself.width = size\n\t\tself.heigth = size\n\t\tself.ifName = iName\n\t \n\tdef createPPM(pnfFileName, isAlphaChannel):\n\t\treturn 0\n\t\t\n\tdef resizeImage(self, iFile, oFile, width):\n\t return 0\n\t \n\tdef convertToIco(self, iFile):\n\t\treturn 0\n\t \n\tdef saveToFilename(self, fName):\n\t\tself.ofName = fName\n\t\tprint(\"[DEBUG] Width: %d, Heigth: %d\" % (self.width, self.heigth))\n\t\tprint(\"[DEBUG] iFile: %s, oFile: %s\" % (self.ifName, self.ofName))\n\t\tfrom PIL import Image\n\t\timg = Image.open(self.ifName)\n\t\timg.save(fName)\n\n#\n# Check command line parameters\n# \ndef parseOptions():\n\tparser = argparse.ArgumentParser(description='Convert svg to ico')\n\tparser.add_argument('--s', type=int, default=16, help='Icon Size')\n\tparser.add_argument('--o', default=\"file.ico\", help='Output FileName')\n\tparser.add_argument('iFile', default=\"file.png\", help='Input FileName')\n\targs = parser.parse_args()\n\treturn args\n\n\t\t\nif __name__ == '__main__': #pragma: no cover\n\targs = parseOptions()\n\te = Svg2Ico(args.s, args.iFile)\n\te.saveToFilename(args.o)\n\n" }, { "alpha_fraction": 0.5471827387809753, "alphanum_fraction": 0.5797319412231445, "avg_line_length": 48.74149703979492, "blob_id": "08de94d812cf2ca8c35e159a112bc2a778d4c3e4", "content_id": "71bbc76f07a335cbfd84ae305bffe403d5324740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7312, "license_type": "no_license", "max_line_length": 167, "num_lines": 147, "path": "/eraser_layer_4/eraser_layer.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n'''\nThis extension Create a eraser layer\n\nCopyright (C) 2012 Jabiertxo Arraiza, [email protected]\n\nVersion 0.4 - Eraser\n\nTODO:\nComment Better!!!\n\nCHANGE LOG \n0.1 Start 30/07/2012\n0.3 fix bug with borders\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n'''\n\nimport inkex, sys, simplestyle\nfrom lxml import etree\n\nclass EraserLayer(inkex.Effect):\n\n def __init__(self):\n inkex.Effect.__init__(self) \n\n #inserta el flltro que pasa a negro la mascara\n def insertFilter(self, svg):\n xpathStr = '//filter[@id=\"Decolorize_0001\"]'\n filterElement = svg.xpath(xpathStr, namespaces=inkex.NSS)\n if filterElement == []:\n xpathStr = '//svg:defs'\n defs = svg.xpath(xpathStr, namespaces=inkex.NSS)\n flt = inkex.etree.SubElement(defs[0],inkex.addNS('filter','svg'))\n for k, v in [('id', 'Decolorize_0001'), ('color-interpolation-filters', 'sRGB'),\n ('height', '100'), ('width', '100'),\n ('x', '-50'), ('y', '-50')]:\n flt.set(k, v)\n fltColorMatrix = inkex.etree.SubElement(flt,inkex.addNS('feColorMatrix','svg'))\n for k, v in [('id', 'feColorMatrix_0001'),('values','1'), ('in', 'SourceGraphic'),\n ('type', 'saturate'), ('result', 'result2')]:\n fltColorMatrix.set(k, v)\n fltFlood = inkex.etree.SubElement(flt,inkex.addNS('feFlood','svg'))\n for k, v in [('id', 'feFlood_0001'),('flood-color','rgb(255,255,255)'), ('flood-opacity', '1'),\n ('result', 'result1')]:\n fltFlood.set(k, v)\n fltComposite = inkex.etree.SubElement(flt,inkex.addNS('feComposite','svg'))\n for k, v in [('id', 'feComposite_0001'),('operator', 'atop'),('in2', 'SourceGraphic')\n ,('k2', '1'),('result', 'result4')]:\n fltComposite.set(k, v)\n \n fltInverse = inkex.etree.SubElement(defs[0],inkex.addNS('filter','svg'))\n for k, v in [('id', 'Inverse_0001'), ('color-interpolation-filters', 'sRGB'),\n ('height', '100'), ('width', '100'),\n ('x', '-50'), ('y', '-50')]:\n fltInverse.set(k, v)\n fltColorMatrixInverse = inkex.etree.SubElement(fltInverse,inkex.addNS('feColorMatrix','svg'))\n for k, v in [('id', 'feColorMatrixInverse_0001'),('values','1'),\n ('type', 'saturate'), ('result', 'fbSourceGraphic')]:\n fltColorMatrixInverse.set(k, v)\n fltColorMatrixInverse = inkex.etree.SubElement(fltInverse,inkex.addNS('feColorMatrix','svg'))\n for k, v in [('id', 'feColorMatrixInverse_0002'),('in','fbSourceGraphic'),\n ('values', '-1 0 0 0 1 0 -1 0 0 1 0 0 -1 0 1 0 0 0 1 0 ')]:\n fltColorMatrixInverse.set(k, v)\n\n #Inserta la mascara desde el grupo eraser\n def insertMask(self, idLayer, svg):\n xpathStr = '//mask[@id=\"MaskEraser_' + idLayer + '\"]'\n maskElement = svg.xpath(xpathStr, namespaces=inkex.NSS)\n if maskElement == []:\n xpathStr = '//svg:defs'\n defs = svg.xpath(xpathStr, namespaces=inkex.NSS)\n msk = inkex.etree.SubElement(defs[0],inkex.addNS('mask','svg'))\n for k, v in [('id', 'Eraser_' + idLayer), ('maskUnits', 'userSpaceOnUse')]:\n msk.set(k, v)\n use = inkex.etree.SubElement(msk,inkex.addNS('use','svg'))\n for k, v in [('id', 'Use_Eraser_' + idLayer), ('{http://www.w3.org/1999/xlink}href',\n \"#EraserLayer_\" + idLayer),('style',\"filter:url(#Inverse_0001)\")]:\n use.set(k, v)\n rct = inkex.etree.SubElement(msk,inkex.addNS('rect','svg'))\n for k, v in [('id', 'Background_' + idLayer), ('x', '-25000000'),\n ('y', '-25000000'), ('width', '50000000'), ('height', '50000000'),\n ('style', 'fill:#FFFFFF;fill-opacity:1')]:\n rct.set(k, v)\n\n\n ##crea el grupo contenedor que hara de borrador\n def createEraserLayer(self, layer,idLayer):\n container = etree.Element(\"g\")\n container.set(\"id\",\"ContainerEraserLayer_\" + 
idLayer)\n        container.set('style',\"opacity:0.000000000000000000000000000000000000000000000001\")\n        container.set(\"{http://www.inkscape.org/namespaces/inkscape}groupmode\",\"layer\")\n        container.set(\"{http://www.inkscape.org/namespaces/inkscape}label\", \"[container eraser] \" + layer[0].get(\"{http://www.inkscape.org/namespaces/inkscape}label\"))\n        for position, item in enumerate(layer[0].getparent().getchildren()):\n            if item == layer[0]:\n                break;\n        layer[0].getparent().insert(position+1,container)\n        eraser = etree.Element(\"g\")\n        eraser.set(\"id\",\"EraserLayer_\" + idLayer)\n        eraser.set('style',\"filter:url(#Decolorize_0001)\")\n        eraser.set('transform',\"rotate(360)\")\n        eraser.set(\"{http://www.inkscape.org/namespaces/inkscape}groupmode\",\"layer\") \n        eraser.set(\"{http://www.inkscape.org/namespaces/inkscape}label\", \"[eraser] \" + layer[0].get(\"{http://www.inkscape.org/namespaces/inkscape}label\")) \n        layer[0].set('mask',\"url(#Eraser_\" + idLayer + \")\")\n        selected = []\n        for id, node in self.selected.iteritems():\n            eraser.insert(1,node)\n        container.insert(1,eraser)\n\n    def effect(self):\n        saveout = sys.stdout\n        sys.stdout = sys.stderr\n        svg = self.document.getroot()\n        xpathStr = '//sodipodi:namedview'\n        namedview = svg.xpath(xpathStr, namespaces=inkex.NSS)\n        idLayer = namedview[0].get('{http://www.inkscape.org/namespaces/inkscape}current-layer');\n        if idLayer.startswith(\"[mask] \") == False and idLayer.startswith(\"[eraser] \") == False:\n            xpathStr = '//svg:g[@id=\"EraserLayer_'+idLayer+'\"]'\n            filterElement = svg.xpath(xpathStr, namespaces=inkex.NSS)\n            if filterElement == []:\n                xpathStr = '//svg:g[@id=\"'+idLayer+'\"]'\n                layer = svg.xpath(xpathStr, namespaces=inkex.NSS)\n                if layer[0] is not None and layer[0].get(\"{http://www.inkscape.org/namespaces/inkscape}label\") is not None:\n                    self.insertFilter(svg)\n                    self.insertMask(idLayer, svg)\n                    self.createEraserLayer(layer,idLayer)\n                else:\n                    print \"Layer not found. Maybe it is inside a group?\"\n            else:\n                for id, node in self.selected.iteritems():\n                    filterElement[0].insert(1,node)\n        sys.stdout = saveout\n        \neffect = EraserLayer()\neffect.affect()\n" },
{ "alpha_fraction": 0.7140381336212158, "alphanum_fraction": 0.7383015751838684, "avg_line_length": 36.225807189941406, "blob_id": "426f95145270a3c3b3983c1f4c2e8d6a0e0b8f44", "content_id": "a9150f7d0d9f41c1b924d18605fb3efcc55e8203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 103, "num_lines": 31, "path": "/README.md", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "<strong>Inkscape Addons</strong>\n<hr align=\"left\" size=\"1\" width=\"300\" color=\"red\" noshade>\n<b>Container of various addons for Inkscape software</b>\n<hr align=\"left\" size=\"1\" width=\"300\" color=\"red\" noshade>\n<img src=\"https://github.com/ilnanny/Inkscape-addons/blob/master/preview.png?raw=true\" alt=\"Inkscape Addons\">\n\n<hr align=\"left\" size=\"1\" width=\"300\" color=\"red\" noshade>\n All of these addons are distributed under the GPL, X11, and Public Domain licenses.\n Most of these addons are published on GitLab, GitHub, and the Inkscape website.\n \nIf you redistribute them, please give CREDIT by linking back to the creators!\n\n\n<hr align=\"left\" size=\"1\" width=\"300\" color=\"red\" noshade>\nGenerally the extension folder can be found here:\n\n-> Linux:\n\n~/.config/inkscape/extensions/\n/usr/share/inkscape/extensions/\n<hr align=\"left\" size=\"1\" width=\"80\" color=\"red\" noshade>\n-> Windows:\n\ntype the following into the address line on a File Browser window:\n%appdata%/inkscape/extensions\n<hr align=\"left\" size=\"1\" width=\"80\" color=\"red\" noshade>\n-> Mac OS X:\n\n/Applications/Inkscape.app/Contents/Resources/share/inkscape/extensions/\n\n<hr align=\"left\" size=\"1\" width=\"300\" color=\"red\" noshade>\n" },
{ "alpha_fraction": 0.5471827387809753, "alphanum_fraction": 0.5692434906959534, "avg_line_length": 48.74149703979492, "blob_id": "08de94d812cf2ca8c35e159a112bc2a778d4c3e4", "content_id": "71bbc76f07a335cbfd84ae305bffe403d5324740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7312, "license_type": "no_license", "max_line_length": 167, "num_lines": 147, "path": "/guillotine_plus/guillotine_plus.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n'''\nguillotine_plus.py\n\nCopyright (C) 2010 Craig Marshall, craig9 [at] gmail.com\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n-----------------------\n\nThis script slices an inkscape drawing along the guides, similarly to\nthe GIMP plugin called \"guillotine\". It can optionally export to the\nsame directory as the SVG file with the same name, but with a number\nsuffix. 
e.g.\n\n/home/foo/drawing.svg\n\nwill export to:\n\n/home/foo/drawing0.png\n/home/foo/drawing1.png\n/home/foo/drawing2.png\n/home/foo/drawing3.png\n\netc.\n\n'''\n# standard library\nimport locale\nimport os\nimport sys\ntry:\n from subprocess import Popen, PIPE\n bsubprocess = True\nexcept:\n bsubprocess = False\n# local library\nimport inkex\nimport simplestyle\n\nlocale.setlocale(locale.LC_ALL, '')\n\ndef float_sort(a, b):\n '''\n This is used to sort the horizontal and vertical guide positions,\n which are floating point numbers, but which are held as text.\n '''\n return cmp(float(a), float(b))\n\nclass Guillotine(inkex.Effect):\n \"\"\"Exports slices made using guides\"\"\"\n def __init__(self):\n inkex.Effect.__init__(self)\n self.OptionParser.add_option(\"--directory\", action=\"store\",\n type=\"string\", dest=\"directory\",\n default=None, help=\"\")\n\n self.OptionParser.add_option(\"--image\", action=\"store\",\n type=\"string\", dest=\"image\",\n default=None, help=\"\")\n\n self.OptionParser.add_option(\"--dpi\", action=\"store\",\n type=\"string\", dest=\"dpi\",\n default=\"90\", help=\"\")\n\n self.OptionParser.add_option(\"--ignore\", action=\"store\",\n type=\"inkbool\", dest=\"ignore\",\n default=None, help=\"\")\n\n def get_guides(self):\n '''\n Returns all guide elements as an iterable collection\n '''\n root = self.document.getroot()\n guides = []\n xpath = self.document.xpath(\"//sodipodi:guide\",\n namespaces=inkex.NSS)\n for g in xpath:\n guide = {}\n (x, y) = g.attrib['position'].split(',')\n if g.attrib['orientation'][:2] == '0,':\n guide['orientation'] = 'horizontal'\n guide['position'] = y\n guides.append(guide)\n elif g.attrib['orientation'][-2:] == ',0':\n guide['orientation'] = 'vertical'\n guide['position'] = x\n guides.append(guide)\n return guides\n\n def get_all_horizontal_guides(self):\n '''\n Returns all horizontal guides as a list of floats stored as\n strings. Each value is the position from 0 in pixels.\n '''\n guides = []\n for g in self.get_guides():\n if g['orientation'] == 'horizontal':\n guides.append(g['position'])\n return guides\n\n def get_all_vertical_guides(self):\n '''\n Returns all vertical guides as a list of floats stored as\n strings. Each value is the position from 0 in pixels.\n '''\n guides = []\n for g in self.get_guides():\n if g['orientation'] == 'vertical':\n guides.append(g['position'])\n return guides\n\n def get_horizontal_slice_positions(self):\n '''\n Make a sorted list of all horizontal guide positions,\n including 0 and the document height, but not including\n those outside of the canvas\n '''\n root = self.document.getroot()\n horizontals = ['0']\n height = self.unittouu(root.attrib['height'])\n for h in self.get_all_horizontal_guides():\n if h >= 0 and float(h) <= float(height):\n horizontals.append(h)\n horizontals.append(height)\n horizontals.sort(cmp=float_sort)\n return horizontals\n\n def get_vertical_slice_positions(self):\n '''\n Make a sorted list of all vertical guide positions,\n including 0 and the document width, but not including\n those outside of the canvas.\n '''\n root = self.document.getroot()\n verticals = ['0']\n width = self.unittouu(root.attrib['width'])\n for v in self.get_all_vertical_guides():\n if v >= 0 and float(v) <= float(width):\n verticals.append(v)\n verticals.append(width)\n verticals.sort(cmp=float_sort)\n return verticals\n\n def get_slices(self):\n '''\n Returns a list of all \"slices\" as denoted by the guides\n on the page. 
Each slice is really just a 4 element list of\n floats (stored as strings), consisting of the X and Y start\n position and the X and Y end position.\n '''\n hs = self.get_horizontal_slice_positions()\n vs = self.get_vertical_slice_positions()\n slices = []\n for i in range(len(hs)-1):\n for j in range(len(vs)-1):\n slices.append([vs[j], hs[i], vs[j+1], hs[i+1]])\n return slices\n\n def get_filename_parts(self):\n '''\n Attempts to get directory and image as passed in by the inkscape\n dialog. If the boolean ignore flag is set, then it will ignore\n these settings and try to use the settings from the export\n filename.\n '''\n\n if self.options.ignore == False:\n if self.options.image == \"\" or self.options.image is None:\n inkex.errormsg(\"Please enter an image name\")\n sys.exit(0)\n return (self.options.directory, self.options.image)\n else:\n '''\n First get the export-filename from the document, if the\n document has been exported before (TODO: Will not work if it\n hasn't been exported yet), then uses this to return a tuple\n consisting of the directory to export to, and the filename\n without extension.\n '''\n svg = self.document.getroot()\n att = '{http://www.inkscape.org/namespaces/inkscape}export-filename'\n try:\n export_file = svg.attrib[att]\n except KeyError:\n inkex.errormsg(\"To use the export hints option, you \" +\n \"need to have previously exported the document. \" +\n \"Otherwise no export hints exist!\")\n sys.exit(-1)\n dirname, filename = os.path.split(export_file)\n filename = filename.rsplit(\".\", 1)[0] # Without extension\n return (dirname, filename)\n\n def check_dir_exists(self, dir):\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n def get_localised_string(self, str):\n return locale.format(\"%.f\", float(str), 0)\n\n def export_slice(self, s, filename):\n '''\n Runs inkscape's command line interface and exports the image\n slice from the 4 coordinates in s, and saves as the filename\n given.\n '''\n svg_file = self.args[-1]\n command = \"inkscape -d \" + self.options.dpi + \" -a %s:%s:%s:%s -e \\\"%s\\\" \\\"%s\\\" \" % (self.get_localised_string(s[0]), self.get_localised_string(s[1]), self.get_localised_string(s[2]), self.get_localised_string(s[3]), filename, svg_file)\n if bsubprocess:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n return_code = p.wait()\n f = p.stdout\n err = p.stderr\n else:\n _, f, err = os.open3(command)\n f.close()\n\n def export_slices(self, slices):\n '''\n Takes the slices list and passes each one with a calculated\n filename/directory into export_slice.\n '''\n dirname, filename = self.get_filename_parts()\n output_files = list()\n if dirname == '' or dirname == None:\n dirname = './'\n\n dirname = os.path.expanduser(dirname)\n dirname = os.path.expandvars(dirname)\n dirname = os.path.abspath(dirname)\n if dirname[-1] != os.path.sep:\n dirname += os.path.sep\n self.check_dir_exists(dirname)\n i = 0\n for s in slices:\n f = dirname + filename + str(i) + \".png\"\n output_files.append(f)\n self.export_slice(s, f)\n i += 1\n inkex.errormsg(_(\"The sliced bitmaps have been saved as:\") + \"\\n\\n\" + \"\\n\".join(output_files))\n\n def effect(self):\n slices = self.get_slices()\n self.export_slices(slices)\n\nif __name__ == \"__main__\":\n e = Guillotine()\n e.affect()\n" }, { "alpha_fraction": 0.5623683333396912, "alphanum_fraction": 0.5714954137802124, "avg_line_length": 33.459678649902344, "blob_id": "4fb3c5131181645752f5949fe0fc3018eafc98b9", "content_id": "9e34b3a5b687498c2f1bb84f77ba0a3941f4acec", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4273, "license_type": "no_license", "max_line_length": 174, "num_lines": 124, "path": "/jitter_gradients/jitter_gradients.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport inkex\nimport simplestyle\nimport re\nimport random\n\nclass JitterGradients(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n self.OptionParser.add_option(\n '-j', \n '--jitter_amount', \n action='store', \n type='int', \n dest='jitterAmount',\n default=10, \n help='Relative to distance between gradient nodes'\n )\n\n def getUrlFromString(self, text):\n pattern = re.compile(r\"url\\(#([a-zA-Z0-9_-]+)\\)\")\n result = re.search(pattern, text)\n if (result):\n return result.group(1)\n else:\n return 0;\n\n def getFill(self, element):\n if(element.get('fill') and self.getUrlFromString(element.get('fill'))):\n return self.getUrlFromString(element.get('fill'))\n elif (element.get('style') and simplestyle.parseStyle(element.get('style'))['fill'] and self.getUrlFromString(simplestyle.parseStyle(element.get('style'))['fill'])):\n return self.getUrlFromString(simplestyle.parseStyle(element.get('style'))['fill'])\n else:\n return None\n\n def getGradientFromId(self, elementId):\n #svg = self.document.getroot()\n element = self.getElementById(elementId)\n #inkex.debug(element.tag)\n if (element is not None and element.tag.find(\"linearGradient\") >= 0):\n return element\n else:\n return None\n\n def effect(self):\n option = self.options.jitterAmount\n \n self._main_function(option)\n\n def _main_function(self, amount):\n for id, node in self.selected.iteritems():\n fillId = self.getFill(node)\n if (fillId is None): \n continue\n \n gradient = self.getGradientFromId(fillId)\n if (gradient is None): \n continue\n \n x1 = self.unittouu(gradient.get(\"x1\"))\n y1 = self.unittouu(gradient.get(\"y1\"))\n x2 = self.unittouu(gradient.get(\"x2\"))\n y2 = self.unittouu(gradient.get(\"y2\"))\n \n x1 += random.uniform(-amount, amount)\n y1 += random.uniform(-amount, amount)\n x2 += random.uniform(-amount, amount)\n y2 += random.uniform(-amount, amount)\n \n gradient.set('x1', str(self.uutounit(x1, self.getDocumentUnit())) + self.getDocumentUnit())\n gradient.set('y1', str(self.uutounit(y1, self.getDocumentUnit())) + self.getDocumentUnit())\n gradient.set('x2', str(self.uutounit(x2, self.getDocumentUnit())) + self.getDocumentUnit())\n gradient.set('y2', str(self.uutounit(y2, self.getDocumentUnit())) + self.getDocumentUnit())\n\nif __name__ == '__main__':\n ExtenObj = JitterGradients()\n ExtenObj.affect()\n\n'''\n def effect(self):\n #self.duplicateNodes(self.selected)\n self.expandGroupsUnlinkClones(self.selected, True)\n self.expandGroups(self.selected, True)\n self.objectsToPaths(self.selected, True)\n self.bbox=computeBBox(self.selected.values())\n for id, node in self.selected.iteritems():\n if node.tag == inkex.addNS('path','svg') or node.tag=='path':\n d = node.get('d')\n p = cubicsuperpath.parsePath(d)\n\n for sub in p:\n for ctlpt in sub:\n self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))\n\n node.set('d',cubicsuperpath.formatPath(p))\n'''\n\n'''\nimport inkex\n\nclass C(coloreffect.ColorEffect):\n def __init__(self):\n coloreffect.ColorEffect.__init__(self)\n #self.OptionParser.add_option(\"-j\", \"--jitter_amount\", action=\"store\", type=\"int\", dest=\"jitter_amount\", default=\"10\", help=\"Relative to distance between 
gradient nodes\")\n \n def colmod(self,r,g,b):\n this_color = '%02x%02x%02x' % (r, g, b)\n\n fir = self.options.first_color.strip('\"').replace('#', '').lower()\n sec = self.options.second_color.strip('\"').replace('#', '').lower()\n \n #inkex.debug(this_color+\"|\"+fir+\"|\"+sec)\n if this_color == fir:\n return sec\n elif this_color == sec:\n return fir\n else:\n return this_color\n\nc = C()\nc.affect()\n'''\n" }, { "alpha_fraction": 0.5414406061172485, "alphanum_fraction": 0.5692434906959534, "avg_line_length": 41.812904357910156, "blob_id": "016656f676019354491f3276fcb177e7b493cd71", "content_id": "78c47925d61e0a57cec3fc3726a9ae48e5db77b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13272, "license_type": "no_license", "max_line_length": 202, "num_lines": 310, "path": "/render_gear_pair/render_gear_pair.py", "repo_name": "ilnanny/Inkscape-addons", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n'''\nAbe Karnik 2017 (\"Do what you like with it, no liability\" license)\nBased on Aaron Spike (aaron @ ekips.org) and Tavmjong Bah (tavmjong @ free.fr) 'Render Gear'\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n'''\n\nimport inkex\nimport simplestyle, sys\nfrom math import *\nimport string\nfrom fractions import Fraction\n\ndef involute_intersect_angle(Rb, R):\n Rb, R = float(Rb), float(R)\n return (sqrt(R**2 - Rb**2) / (Rb)) - (acos(Rb / R))\n\ndef point_on_circle(radius, angle):\n x = radius * cos(angle)\n y = radius * sin(angle)\n return (x, y)\n\ndef points_to_svgd(p):\n f = p[0]\n p = p[1:]\n svgd = 'M%.5f,%.5f' % f\n for x in p:\n svgd += ' L%.5f,%.5f' % x\n svgd += 'z'\n return svgd\n \ndef make_gear_path(pitch, angle, teeth):\n two_pi = 2.0 * pi\n\n # Pitch (circular pitch): Length of the arc from one tooth to the next)\n # Pitch diameter: Diameter of pitch circle.\n pitch_diameter = float( teeth ) * pitch / pi\n pitch_radius = pitch_diameter / 2.0\n\n # Base Circle\n base_diameter = pitch_diameter * cos( radians( angle ) )\n base_radius = base_diameter / 2.0\n\n # Diametrial pitch: Number of teeth per unit length.\n pitch_diametrial = float( teeth )/ pitch_diameter\n\n # Addendum: Radial distance from pitch circle to outside circle.\n addendum = 1.0 / pitch_diametrial\n\n # Outer Circle\n outer_radius = pitch_radius + addendum\n outer_diameter = outer_radius * 2.0\n\n # Tooth thickness: Tooth width along pitch circle.\n tooth = ( pi * pitch_diameter ) / ( 2.0 * float( teeth ) )\n\n # Undercut?\n undercut = (2.0 / ( sin( radians( angle ) ) ** 2))\n needs_undercut = teeth < undercut\n\n\n # Clearance: Radial distance between top of tooth on one gear to bottom of gap on another.\n clearance = 0.0\n\n # Dedendum: Radial distance from pitch circle to root diameter.\n dedendum = addendum + clearance\n\n # Root diameter: Diameter of bottom of tooth spaces. 
\n root_radius = pitch_radius - dedendum\n root_diameter = root_radius * 2.0\n\n half_thick_angle = two_pi / (4.0 * float( teeth ) )\n pitch_to_base_angle = involute_intersect_angle( base_radius, pitch_radius )\n pitch_to_outer_angle = involute_intersect_angle( base_radius, outer_radius ) - pitch_to_base_angle\n\n centers = [(x * two_pi / float( teeth) ) for x in range( teeth ) ]\n\n points = []\n\n for c in centers:\n\n # Angles\n pitch1 = c - half_thick_angle\n base1 = pitch1 - pitch_to_base_angle\n outer1 = pitch1 + pitch_to_outer_angle\n\n pitch2 = c + half_thick_angle\n base2 = pitch2 + pitch_to_base_angle\n outer2 = pitch2 - pitch_to_outer_angle\n\n # Points\n b1 = point_on_circle( base_radius, base1 )\n p1 = point_on_circle( pitch_radius, pitch1 )\n o1 = point_on_circle( outer_radius, outer1 )\n\n b2 = point_on_circle( base_radius, base2 )\n p2 = point_on_circle( pitch_radius, pitch2 )\n o2 = point_on_circle( outer_radius, outer2 )\n\n if root_radius > base_radius:\n pitch_to_root_angle = pitch_to_base_angle - involute_intersect_angle(base_radius, root_radius )\n root1 = pitch1 - pitch_to_root_angle\n root2 = pitch2 + pitch_to_root_angle\n r1 = point_on_circle(root_radius, root1)\n r2 = point_on_circle(root_radius, root2)\n p_tmp = [r1,p1,o1,o2,p2,r2]\n else:\n r1 = point_on_circle(root_radius, base1)\n r2 = point_on_circle(root_radius, base2)\n p_tmp = [r1,b1,p1,o1,o2,p2,b2,r2]\n\n points.extend( p_tmp )\n\n path = points_to_svgd( points )\n return path\n\ndef ratioMatch(pair1, pair2, gR): #smallest positive number\n pair1R= (gR-(pair1[1]/pair1[0]))\n pair2R= (gR-(pair2[1]/pair2[0]))\n #print(pair1R, pair2R)\n if(0 <= pair1R):\n if(pair1R <= pair2R):\n return pair1\n else:\n if(pair2R < 0):\n return pair1\n else:\n return pair2\n else:\n return pair2\n return pair1\n \n \ndef add_gear_path_to_sketch(group, gear_idx, teeth, path, centerdiameter, units):\n #t = 'translate(' + str( self.view_center[0] ) + ',' + str( self.view_center[1] ) + ')'\n \n #group g contains one gear each\n #g = inkex.etree.SubElement(current_layer, 'g'+str(gear_idx), g_attribs)\n\n if gear_idx == 1:\n fillGear = '#ffff00'\n else:\n fillGear = '#00ff00'\n \n \n # Create SVG Path for gear, since it is to be laser cut, the stroke is fine\n style = { 'stroke': '#000000', 'fill': fillGear, 'stroke-width': units }\n gear_attribs = {'style':simplestyle.formatStyle(style), 'd':path}\n #add the gear path to the group 'g'\n gear = inkex.etree.SubElement(group, inkex.addNS('path','svg'), gear_attribs )\n styleHole = { 'stroke': '#000000', 'fill': '#ff0000', 'stroke-width': units }\n if(centerdiameter > 0.0):\n center_attribs = {'style':simplestyle.formatStyle(styleHole), \n inkex.addNS('cx','sodipodi') :'0.0',\n inkex.addNS('cy','sodipodi') :'0.0',\n inkex.addNS('rx','sodipodi') :str(centerdiameter/2),\n inkex.addNS('ry','sodipodi') :str(centerdiameter/2),\n inkex.addNS('type','sodipodi') :'arc'\n }\n center = inkex.etree.SubElement(group, inkex.addNS('path','svg'), center_attribs )\n return 0\n\nclass Gears(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n #self.OptionParser.add_option(\"-t\", \"--teeth\",\n # action=\"store\", type=\"int\",\n # dest=\"teeth\", default=24,\n # help=\"Number of teeth\")\n self.OptionParser.add_option(\"-t\", \"--teethG1\",\n action=\"store\", type=\"int\",\n dest=\"teethG1\", default=24,\n help=\"Gear 1 tooth count\")\n self.OptionParser.add_option(\"-y\", \"--teethG2\",\n action=\"store\", type=\"int\",\n dest=\"teethG2\", default=24,\n help=\"Gear 2 tooth count\")\n 
self.OptionParser.add_option(\"-d\", \"--distance\",\n action=\"store\", type=\"float\",\n dest=\"distance\", default=24,\n help=\"Center to center distance\")\n self.OptionParser.add_option(\"-p\", \"--pitch\",\n action=\"store\", type=\"float\",\n dest=\"pitch\", default=3.0,\n help=\"Circular Pitch (length of arc from one tooth to next)\")\n self.OptionParser.add_option(\"-a\", \"--angle\",\n action=\"store\", type=\"float\",\n dest=\"angle\", default=20.0,\n help=\"Pressure Angle (common values: 14.5, 20, 25 degrees)\")\n self.OptionParser.add_option(\"-c\", \"--centerdiameterG1\",\n action=\"store\", type=\"float\",\n dest=\"centerdiameterG1\", default=10.0,\n help=\"Diameter of central hole - 0.0 for no hole\")\n self.OptionParser.add_option(\"-v\", \"--centerdiameterG2\",\n action=\"store\", type=\"float\",\n dest=\"centerdiameterG2\", default=10.0,\n help=\"Diameter of central hole - 0.0 for no hole\")\n self.OptionParser.add_option(\"-b\", \"--basis\",\n action=\"store\", type=\"string\",\n dest=\"calculationBasis\", default=\"dx\",\n help=\"Fixed CP or fixed distance\")\n self.OptionParser.add_option(\"-u\", \"--unit\",\n action=\"store\", type=\"string\",\n dest=\"unit\", default=\"mm\",\n help=\"unit of measure for circular pitch and center diameter\")\n def effect(self):\n\n #start modification\n \n #capture G1 params\n teethG1 = self.options.teethG1\n centerdiameterG1 = self.unittouu( str(self.options.centerdiameterG1) + self.options.unit)\n \n #capture G2 params\n teethG2 = self.options.teethG2\n centerdiameterG2 = self.unittouu( str(self.options.centerdiameterG2) + self.options.unit)\n \n #capture common params\n #pitch = self.unittouu( str(self.options.pitch) + self.options.unit)\n pitch = self.options.pitch\n angle = self.options.angle # Angle of tangent to tooth at circular pitch wrt radial line.\n distance = self.options.distance\n calculationBasis = self.options.calculationBasis\n \n #gear ratio (GR): N2/N1\n #diametral pitch (dP) is fixed (and related to CP)\n #pitch diameter (PD) = N/dP\n # PD1 + PD2 = distance * 2\n # N1/dP1 + N2/dP2 = distance * 2\n # N1/dP + GR*N1/dP = distance * 2\n # (1+GR)*N1/dP = distance * 2\n # dP = (1+GR)*N1/(distance * 2)\n # CP = pi/dP\n # CP = pi*(distance*2)/((1+GR)*N1)\n # OR\n # distance = CP*(1+GR)*N1/(2*PI)\n # OR\n # N1 = D*2*PI/(CP*(1+GR)) # N2 = GR*N1\n \n gearRatio = teethG2/(teethG1*1.0)\n if(calculationBasis == 'dx'): #CP can be changed, distance is as is\n pitchUU = self.unittouu( str((pi * 2 * distance)/((1+gearRatio)*teethG1)) + self.options.unit)\n distanceUU = self.unittouu(str(distance)+self.options.unit)\n elif (calculationBasis == 'gr'):\n pitchUU = self.unittouu(str(pitch)+self.options.unit)\n distanceUU = self.unittouu(str(distance)+self.options.unit)\n tempT1 = distanceUU*2*pi/(pitchUU*(1+gearRatio))\n intT1 = int(tempT1)\n tempT2 = gearRatio*tempT1\n intT2 = int(tempT2)\n gr1 = ratioMatch((intT1,intT2),(intT1,intT2+1),gearRatio)\n gr2 = ratioMatch((intT1-1,intT2),(intT1-1,intT2+1),gearRatio)\n grFinal = ratioMatch(gr1,gr2, gearRatio)\n teethG1 = grFinal[0]\n teethG2 = grFinal[1]\n gearRatioNew = teethG2/(teethG1*1.0)\n else:\n distanceUU = self.unittouu(str(pitch*(1+gearRatio)*teethG1/(2*pi))+self.options.unit)\n pitchUU = self.unittouu(str(pitch)+self.options.unit)\n # print >>sys.stderr, \"Teeth: %s\\n\" % teeth\n \n pathG1 = make_gear_path(pitchUU, angle, teethG1)\n pathG2 = make_gear_path(pitchUU, angle, teethG2)\n \n t1 = 'translate(' + str( self.view_center[0] ) + ',' + str( self.view_center[1] ) + ')'\n 
g1_attribs = {inkex.addNS('label','inkscape'):'Gear1' + str( teethG1 ), 'transform':t1 }\n \n t2 = 'translate(' + str( self.view_center[0] + distanceUU ) + ',' + str(self.view_center[1]) + ')'\n g2_attribs = {inkex.addNS('label','inkscape'):'Gear1' + str( teethG2 ), 'transform':t2 }\n \n gear1Grp = inkex.etree.SubElement(self.current_layer, 'g', g1_attribs)\n gear2Grp = inkex.etree.SubElement(self.current_layer, 'g', g2_attribs)\n \n ret1 = add_gear_path_to_sketch(gear1Grp, 1, teethG1, pathG1, centerdiameterG1,str(self.unittouu('0.001 mm')))\n ret2 = add_gear_path_to_sketch(gear2Grp, 2, teethG2, pathG2, centerdiameterG2,str(self.unittouu('0.001 mm')))\n \n #add text indicating cp value\n t3 = 'translate(' + str( self.view_center[0] ) + ',' + str(self.view_center[1] + distanceUU) + ')'\n t3_attribs = {inkex.addNS('label','inkscape'):'CPText', 'transform':t3 }\n \n t3Text = inkex.etree.SubElement(self.current_layer, inkex.addNS('text','svg'),t3_attribs)\n txt_attribs = {'font-family':'Courier New', 'font-weight':'bold', 'font-style':'normal','font-size':'10 px','fill':'#0000ff'}\n \n tAdd = inkex.etree.SubElement(t3Text, inkex.addNS('tspan','svg'), txt_attribs )\n if(calculationBasis == 'dx'):\n tAdd.text = \"CP=\" + str('%.3f'%self.uutounit(pitchUU, self.options.unit)) + self.options.unit\n elif(calculationBasis == 'gr'):\n tAdd.text = \"GR(new)=\" + str('%.2f'%gearRatioNew) +\" GR=\" + str('%.2f'%gearRatio) \n #t3 = 'translate(' + str( 0 ) + ',' + str(self.unittouu(\"15px\")) + ')'\n txt_attribs2 = {'font-family':'Courier New', 'font-weight':'bold', 'font-style':'normal','font-size':'10 px','fill':'#0000ff','x':str(self.unittouu(\"0 px\")),'dy':str(self.unittouu(\"15 px\"))}\n tAdd2 = inkex.etree.SubElement(t3Text, inkex.addNS('tspan','svg'), txt_attribs2 )\n tAdd2.text = \"T1=\" + str(teethG1) + \" T2=\" + str(teethG2) + \" Err=\" + str('%.1f'%(100*(gearRatioNew-gearRatio)/gearRatio)) +\"%\" \n else:\n tAdd.text = \"Distance=\" + str('%.2f'%self.uutounit(distanceUU, self.options.unit)) + self.options.unit\n \nif __name__ == '__main__':\n e = Gears()\n e.affect()\n\n\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99\n" } ]
20
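The RoundedCorners extension in the record above replaces each eligible corner with a circular arc whose center lies on the corner's angle bisector, at exactly the distance that makes a circle of the requested radius tangent to both edges. The following is a minimal standalone sketch of that fillet construction, assuming plain (x, y) tuples for points; the helper names are illustrative and are not part of the extension:

```python
import math

def unit(a, b):
    # unit vector pointing from point a toward point b (assumes a != b)
    dx, dy = b[0] - a[0], b[1] - a[1]
    n = math.hypot(dx, dy)
    return dx / n, dy / n

def fillet(p1, p2, p3, r):
    """Arc of radius r tangent to edges p2->p1 and p2->p3 (corner at p2).

    Returns (t1, center, t2): the two tangent points and the arc center.
    Assumes the three points are not collinear.
    """
    u1 = unit(p2, p1)
    u3 = unit(p2, p3)
    # bisector direction at the corner
    bx, by = u1[0] + u3[0], u1[1] + u3[1]
    bn = math.hypot(bx, by)
    bx, by = bx / bn, by / bn
    # the center sits r / sin(half corner angle) along the bisector
    cos_half = u1[0] * bx + u1[1] * by
    sin_half = math.sqrt(max(0.0, 1.0 - cos_half * cos_half))
    cx = p2[0] + bx * r / sin_half
    cy = p2[1] + by * r / sin_half
    # tangent points are the feet of the perpendiculars from the center onto the edges
    s1 = (cx - p2[0]) * u1[0] + (cy - p2[1]) * u1[1]
    s3 = (cx - p2[0]) * u3[0] + (cy - p2[1]) * u3[1]
    t1 = (p2[0] + u1[0] * s1, p2[1] + u1[1] * s1)
    t2 = (p2[0] + u3[0] * s3, p2[1] + u3[1] * s3)
    return t1, (cx, cy), t2

print(fillet((0, 10), (0, 0), (10, 0), 2.0))
# approximately ((0.0, 2.0), (2.0, 2.0), (2.0, 0.0)) for this right-angle corner
```

As in RoundCorner above, a full implementation must also reject the corner when a tangent point falls beyond p1 or p3, since the radius is then too large for the adjoining segments.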
wahyutirta/aksara-cnn-nodejs
https://github.com/wahyutirta/aksara-cnn-nodejs
7cf243735b466c9975707baf32add1a6e60f5cf4
518bb8fcd4cb7847a0c45d1f1dbcce7ed8ba9e9d
da8ff8c4892c12817ceaf461ae88eef7776f80a9
refs/heads/main
2023-08-31T14:43:58.405378
2021-10-12T11:01:46
2021-10-12T11:01:46
405154042
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7736842036247253, "alphanum_fraction": 0.7836257219314575, "avg_line_length": 47.85714340209961, "blob_id": "ba10520575534d981671550d5b244f6fa2c94ddb", "content_id": "72008e23bb7032933bd3dcad9b0f2f43ec897c48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1712, "license_type": "permissive", "max_line_length": 423, "num_lines": 35, "path": "/README.md", "repo_name": "wahyutirta/aksara-cnn-nodejs", "src_encoding": "UTF-8", "text": "# Balinese Script Recognition (Pengenalan Aksara Bali)\nThis is a web-based application for recognizing and writing Balinese script: it can recognize the script characters a user draws as input and classify them into 18 basic letter classes or 5 pengangge suara (vowel diacritics). It was built with the goal of learning how to implement a machine learning model (more specifically, deep learning) and deploy that model as an application.\n\n## Tech Stack\n\n - Node.js\n - Express.js\n - Bootstrap\n - body-parser\n - jQuery\n - Ajax\n - npm\n - Tensorflow.js\n - Tensorflow\n - Keras\n ## Prerequisites\n \n - npm\n - git\n - python\n - node.js\n\n ## Instructions \n - if no CNN model is found in the application, use the provided notebook to build and train a new CNN deep learning model\n - to run this project locally on your machine, follow these steps.&nbsp;\n 1. Clone this repository to your machine.&nbsp;\n 2. Open the project directory you downloaded/cloned.&nbsp;\n 3. Run the `npm install` command to install the required libraries.&nbsp;\n 4. Run the `npm run` command; the project will then automatically run locally at http://localhost:3000/.&nbsp;\n 4.1. 
you can also use the nodemon library from npm via `nodemon app.js` to run the project locally on your machine at http://localhost:3000/ &nbsp;\n ## Acknowledgements\n use [Google Colab](https://colab.research.google.com), a free Jupyter notebook environment, to build and train ML models in the cloud.\n\n## License\nThis project is licensed under the MIT License.\n" },
{ "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.710010290145874, "avg_line_length": 20.065217971801758, "blob_id": "e27fd3bf69035ad928abbde09c8bfa4fbf930391", "content_id": "049c1af450a8732c5aa5ecc36d8237464683ef4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "permissive", "max_line_length": 93, "num_lines": 46, "path": "/testimage.py", "repo_name": "wahyutirta/aksara-cnn-nodejs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 6 22:52:51 2021\n\n@author: ASUS\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nfrom tqdm import tqdm\nfrom einops import rearrange, reduce, repeat\nimport matplotlib.pyplot as plt\n\nimgpath = \"C:/Users/ASUS/Documents/project/Digit-Recognizer-master/datasetAksara/0/ha_20.jpg\"\nimg = cv2.imread(imgpath, cv2.IMREAD_COLOR)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimg = cv2.resize(gray, (32,32), interpolation = cv2.INTER_AREA)\n\n#print(img)\n\n#cv2.imshow(\"img\", img)\n\n#cv2.waitKey(0)\n \n#cv2.destroyAllWindows()\n\nthresh, bw = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)\n\ntemp = bw == 0\n\nswap = 255 * (bw==0).astype(int)\nswap = swap.reshape((32,32,1))\n\nimgplot = plt.imshow(swap)\nprint(swap.shape)\n\n#cv2.imshow(\"img\", swap)\n\n#cv2.waitKey(0)\n \n#cv2.destroyAllWindows()\n\naksarapath = \"C:/Pictures/download.png\"\naksara = cv2.imread(aksarapath, cv2.IMREAD_COLOR)\naksara = cv2.resize(aksara, (64,64), interpolation = cv2.INTER_AREA)\n" },
{ "alpha_fraction": 0.6000351309776306, "alphanum_fraction": 0.6093447804450989, "avg_line_length": 23.623376846313477, "blob_id": "c2c4e8bd3397d97aa0edc74a9f372a81ae86e55f", "content_id": "56971c91b170a5dd640919cd8e8e87cd7cb79e4c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5693, "license_type": "permissive", "max_line_length": 185, "num_lines": 231, "path": "/public/js/app.js", "repo_name": "wahyutirta/aksara-cnn-nodejs", "src_encoding": "UTF-8", "text": "\n\n//Global Variables\n\nvar canvas, ctx;\nvar base_url = window.location.origin;\n//Canvas Variables\nvar canvas, ctx;\nvar mouseX, mouseY, mouseDown = 0;\nvar touchX, touchY;\n\nvar base_url = window.location.origin;\n\nconst aksara = [\"ha/a\", \"na\", \"ca\", \"ra\", \"ka\", \"da\", \"ta\", \"sa\", \"wa\", \"la\", \"ma\", \"ga\", \"ba\", \"nga\", \"pa\", \"ja\", \"ya\", \"nya\", \"ulu\", \"suku\", \"taling\", \"tedong\", \"pepet\"];\nvar rand = null;\n//console.log(aksara);\n\nfunction getRndInteger(min, max) {\n    return Math.floor(Math.random() * (max - min)) + min;\n}\n\ndocument.getElementById('generate-aksara').addEventListener(\"click\", async function () {\n\n    rand = getRndInteger(0, 22);\n\n    const tempAksara = aksara[rand];\n\n    document.getElementById('aksara-heading').innerHTML = \"Tuliskan Aksara \" + tempAksara;\n\n    ctx.clearRect(0, 0, canvas.width, canvas.height);\n    ctx.fillStyle = \"black\";\n    ctx.fillRect(0, 0, canvas.width, canvas.height);\n\n    document.getElementById('result').innerHTML = \"-\";\n    
document.getElementById('confidence').innerHTML = \"-\";\n\n\n});\n\n\nfunction draw(ctx, x, y, size, isDown) {\n\n if (isDown) {\n ctx.beginPath();\n ctx.strokeStyle = \"white\";\n ctx.lineWidth = '5';\n ctx.lineJoin = ctx.lineCap = 'round';\n ctx.moveTo(lastX, lastY);\n ctx.lineTo(x, y);\n ctx.closePath();\n ctx.stroke();\n }\n lastX = x; lastY = y;\n\n}\n\ndocument.getElementById('clear-canvas').addEventListener(\"click\", function () {\n console.log('clear clicked');\n ctx.clearRect(0, 0, canvas.width, canvas.height);\n ctx.fillStyle = \"black\";\n ctx.fillRect(0, 0, canvas.width, canvas.height);\n document.getElementById('result').innerHTML = \"-\";\n document.getElementById('confidence').innerHTML = \"-\";\n});\n\nfunction sketchpad_mouseDown() {\n mouseDown = 1;\n draw(ctx, mouseX, mouseY, 12, false);\n}\n\nfunction sketchpad_mouseUp() {\n mouseDown = 0;\n}\n\nfunction sketchpad_mouseMove(e) {\n\n getMousePos(e);\n if (mouseDown == 1) {\n draw(ctx, mouseX, mouseY, 12, true);\n }\n}\n\nfunction getMousePos(e) {\n if (!e)\n var e = event;\n\n if (e.offsetX) {\n mouseX = e.offsetX;\n mouseY = e.offsetY;\n }\n else if (e.layerX) {\n mouseX = e.layerX;\n mouseY = e.layerY;\n }\n}\n\nfunction sketchpad_touchStart() {\n\n getTouchPos();\n draw(ctx, touchX, touchY, 12, false);\n event.preventDefault();\n}\n\nfunction sketchpad_touchMove(e) {\n\n getTouchPos(e);\n draw(ctx, touchX, touchY, 12, true);\n event.preventDefault();\n}\n\nfunction getTouchPos(e) {\n if (!e)\n var e = event;\n\n if (e.touches) {\n if (e.touches.length == 1) {\n var touch = e.touches[0];\n touchX = touch.pageX - touch.target.offsetLeft;\n touchY = touch.pageY - touch.target.offsetTop;\n }\n }\n}\n\nfunction init() {\n\n canvas = document.getElementById('canvas-box');\n ctx = canvas.getContext('2d');\n ctx.fillStyle = \"black\";\n ctx.fillRect(0, 0, canvas.width, canvas.height);\n\n if (ctx) {\n\n canvas.addEventListener('mousedown', sketchpad_mouseDown, false);\n canvas.addEventListener('mousemove', sketchpad_mouseMove, false);\n window.addEventListener('mouseup', sketchpad_mouseUp, false);\n\n canvas.addEventListener('touchstart', sketchpad_touchStart, false);\n canvas.addEventListener('touchmove', sketchpad_touchMove, false);\n }\n\n // inisiasi modal part \n const viewBtn = document.querySelector(\".view-modal\"),\n popup = document.querySelector(\".popup\"),\n close = popup.querySelector(\".close\"),\n inputresult = popup.querySelector(\".input-result\");\n viewBtn.onclick = () => {\n popup.classList.toggle(\"show\");\n }\n close.onclick = () => {\n viewBtn.click();\n }\n}\n\n\n\n\n// Model Loader\nvar model;\n(async function () {\n console.log(\"Model Loading.....\");\n model = await tf.loadLayersModel(\"cnn_model/modeljs/model.json\");\n console.log(\"Model Loaded.....\");\n\n})();\n//let model;\n\n\nfunction preprocessCanvas(image) {\n // resize the input image to target size of (1, 28, 28)\n let tensor = tf.browser.fromPixels(image,1)\n .resizeNearestNeighbor([32, 32])\n .mean(2)\n .expandDims(2)\n .expandDims()\n .toFloat();\n console.log(tensor.shape);\n //tensor.print();\n // tf.reshape(tensor, shape)\n return tensor.div(255.0);\n}\n\n//Bounding Box\n\n// function bound/\n\ndocument.getElementById('predict-canvas').addEventListener(\"click\", async function () {\n\n console.log('predict clicked');\n var imageData = canvas.toDataURL(\"image/png\").replace(\"image/png\", \"image/octet-stream\");;\n //console.log(imageData);\n\n \n\n let tensor = preprocessCanvas(canvas);\n\n let predictions = await 
model.predict(tensor).data();\n\n let results = Array.from(predictions);\n\n //console.log(results);\n\n displayLabel(results);\n\n console.log(results);\n\n\n\n\n});\n\n//var first_time = 0;\n//Display chart with updated drawing from canvas\n\nfunction displayLabel(data) {\n var max = data[0];\n var maxIndex = 0;\n\n for (var i = 1; i < data.length; i++) {\n if (data[i] > max) {\n maxIndex = i;\n max = data[i];\n }\n }\n var hasil = \"salah\"\n if (rand === maxIndex){\n hasil = \"benar\"\n }\n\n console.log(maxIndex);\n document.getElementById('result').innerHTML = aksara[maxIndex];\n document.getElementById('confidence').innerHTML = \"Confidence: \" + (max * 100).toFixed(3) + \"%\";\n document.getElementById('input-result').innerHTML = \"Aksara yang anda tuliskan \" + hasil + \". Anda menuliskan \" + aksara[maxIndex] + \" yang seharusnya dituliskan \" + aksara[rand] ;\n}\n\n\n\n" }, { "alpha_fraction": 0.5325481295585632, "alphanum_fraction": 0.5440729260444641, "avg_line_length": 38.06435775756836, "blob_id": "17796ced501fe67f67bde9a7dfbbf9d915ba12bf", "content_id": "524d412249c37d61e498856db3a00e9fddc9d2b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7896, "license_type": "permissive", "max_line_length": 147, "num_lines": 202, "path": "/Data.py", "repo_name": "wahyutirta/aksara-cnn-nodejs", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport os\nimport math\nfrom tqdm import tqdm\nfrom einops import rearrange, reduce, repeat\nfrom skimage.feature import greycomatrix, greycoprops\nimport matplotlib.pyplot as plt \nimport pickle\n\nclass Data:\n def __init__(self, workPath, imagePath):\n self.dataPath = os.path.join(workPath[0],imagePath) #image path\n self.imagePath = imagePath\n \n \"\"\"\n DOKUMENTASI \n \n \"\"\" \n @staticmethod\n def unison_shuffled_copies_4( a , b, c):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p] , c[p]\n \n @staticmethod\n def unison_shuffled_copies_2( a , b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n \n def loadLabel(self):\n \"\"\"\n DOKUMENTASI \n \n \"\"\"\n arr_Namelabel = []\n self.count = 0\n for i, (dirpath, dirnames, filenames) in tqdm(enumerate(os.walk(self.imagePath)), desc= \"Loading Image Data\"):\n #print('{} {} {}'.format(repr(dirpath), repr(dirnames), repr(filenames)))\n #print(i)\n if dirpath is not self.imagePath:\n dirpath_components = dirpath.split(\"/\")\n listImageTrain = []\n listLabelTrain = []\n listImageTest = []\n listLabelTest = []\n testFName = []\n trainFName = []\n semantic_label = dirpath_components[-1]\n \n _, label = os.path.split(semantic_label)\n\n #print(\"\\nProcessing {}, {}\".format(semantic_label,i))\n arr_Namelabel.append(label)\n labelArray = np.array(arr_Namelabel)\n return labelArray\n \n def load(self,trainRatio=0.8,testRatio=0.2):\n \n \"\"\"\n DOKUMENTASI \n \n \"\"\"\n \n temp_mod = math.ceil(trainRatio/testRatio)\n #arr_img = []\n #arr_label = []\n arr_Namelabel = []\n self.count = 0\n for i, (dirpath, dirnames, filenames) in tqdm(enumerate(os.walk(self.imagePath)), desc= \"Loading Image Data\"):\n #print('{} {} {}'.format(repr(dirpath), repr(dirnames), repr(filenames)))\n #print(i)\n if dirpath is not self.imagePath:\n dirpath_components = dirpath.split(\"/\")\n listImageTrain = []\n listLabelTrain = []\n listImageTest = []\n listLabelTest = []\n testFName = []\n trainFName = []\n semantic_label = dirpath_components[-1]\n \n _, label = 
os.path.split(semantic_label)\n\n #print(\"\\nProcessing {}, {}\".format(semantic_label,i))\n arr_Namelabel.append(label)\n self.count = 0\n train = 0\n test = 0\n\n for f in filenames:\n #load images\n file_path = os.path.join(dirpath, f)\n #print(file_path)\n img = cv2.imread(file_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, (32,32), interpolation = cv2.INTER_AREA)\n thresh, bw = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)\n \n\n swap = 255 * (bw==0).astype(int)\n #print(swap)\n #break\n swap = swap.reshape((1,32,32))\n ##img = rearrange(swap, ' h w c -> c h w ')\n\n #arr_label.append(i-1)\n # if mod append to test\n names = semantic_label.split(\"\\\\\")\n #print(str(names[-1]))\n if self.count % temp_mod == 0:\n \n listImageTest.append(swap)\n listLabelTest.append(names[-1])\n testFName.append(f)\n test+= 1\n # if not mod append to train\n else:\n listImageTrain.append(swap)\n listLabelTrain.append(names[-1])\n trainFName.append(f)\n \n train+= 1\n \n self.count+=1\n #print(\"blaaaaaaaaa\"+ str(len(listImageTest)))\n arrayImageTest = np.array(listImageTest, dtype='float64') /255\n arrayImageTrain = np.array(listImageTrain, dtype='float64') /255\n #print(np.array(arr_img).shape)\n arrayLabelTest = np.array(listLabelTest)\n arrayLabelTrain = np.array(listLabelTrain)\n \n arrayFNameTest = np.array(testFName)\n arrayFNameTrain = np.array(trainFName)\n \n self.labelName = np.array(arr_Namelabel)\n self.jum_kelas = len(self.labelName)\n\n if not hasattr(self, 'testSet'):\n self.trainSet = arrayImageTrain\n self.trainLabel = arrayLabelTrain\n self.testSet = arrayImageTest\n self.testLabel = arrayLabelTest\n \n self.arrayFNameTrain = arrayFNameTrain\n self.arrayFNameTest = arrayFNameTest\n else:\n self.trainSet = np.concatenate((self.trainSet, arrayImageTrain), axis = 0)\n self.trainLabel = np.concatenate((self.trainLabel, arrayLabelTrain), axis = 0)\n self.testSet = np.concatenate((self.testSet, arrayImageTest), axis = 0)\n self.testLabel = np.concatenate((self.testLabel, arrayLabelTest), axis = 0)\n self.arrayFNameTest = np.concatenate((self.arrayFNameTest, arrayFNameTest), axis = 0)\n self.arrayFNameTrain = np.concatenate((self.arrayFNameTrain, arrayFNameTrain), axis = 0)\n\n #print(self.arrayFNameTest)\n #self.trainSet, self.trainLabel, self.arrayFNameTrain = self.unison_shuffled_copies_4(self.trainSet, self.trainLabel, self.arrayFNameTrain)\n #self.testSet, self.testLabel, self.arrayFNameTest = self.unison_shuffled_copies_4(self.testSet, self.testLabel, self.arrayFNameTest)\n #print(self.arrayFNameTest)\n return self.trainSet, self.trainLabel, self.testSet, self.testLabel\n \n\n#mainPath = os.path.dirname(os.path.abspath(__file__)) #file path main.py\n#workPath = os.path.split(mainPath) #path working folder (whole file project)\n#imagePath = \"data_jepun\"\n#data = Data(workPath,imagePath)\n#trainSet, trainLabel, testSet, testLabel = data.load(trainRatio=0.8,testRatio=0.2)\n\n#print(\"ts\",trainSet.shape)\n#print(\"tl\",trainLabel.shape)\n#print(\"tts\",testSet.shape)\n#print(\"ttl\",testLabel.shape)\n\n\n\nmainPath = os.path.dirname(os.path.abspath(__file__)) #file path main.py\nworkPath = os.path.split(mainPath) #path working folder (whole file project)\nimagePath = \"datasetAksara\"\ndata = Data(workPath,imagePath)\ntrainSet, trainLabelSet, testSet, testLabelSet = data.load(trainRatio=0.8,testRatio=0.2)\ntest1 = trainSet[0]\n\nwith open('x-train.pickle', 'wb') as handle:\n pickle.dump(trainSet, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith 
open('y-train.pickle', 'wb') as handle:\n pickle.dump(trainLabelSet, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('x-test.pickle', 'wb') as handle:\n pickle.dump(testSet, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('y-test.pickle', 'wb') as handle:\n pickle.dump(testLabelSet, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n#trainSet, trainLabelSet = data.unison_shuffled_copies_2(trainSet, trainLabelSet)\n\nfor x in range(0, 1500, 100):\n plt.imshow(trainSet[x,0,:,:])\n print(trainLabelSet[x])\n plt.show()\n\nimg = trainSet[100]\n\n\n\n\n\n" } ]
4
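Data.py in the record above binarizes each grayscale sample at a gray threshold of 200, inverts the result so ink becomes the foreground, reshapes it to channel-first layout, and scales it to [0, 1]; app.js mirrors the resize-and-normalize step in the browser before calling model.predict. Below is a minimal sketch of the training-side preprocessing, assuming OpenCV and NumPy are installed; the helper name and the sample path are hypothetical:

```python
import cv2
import numpy as np

def preprocess(path):
    """Load one aksara sample as a (1, 32, 32) float array with ink = 1.0."""
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (32, 32), interpolation=cv2.INTER_AREA)
    # pixels darker than the 200 threshold count as ink strokes
    _, bw = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    # invert the binary map: ink -> 1.0, background -> 0.0, channel-first layout
    return (bw == 0).astype(np.float64).reshape(1, 32, 32)

# x = preprocess("datasetAksara/0/ha_20.jpg")  # hypothetical sample path
```

This collapses into one step the `255 * (bw == 0)` followed by division by 255 that Data.py performs separately.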
maidnaut/ALiCEBOT
https://github.com/maidnaut/ALiCEBOT
643d09513c66e98016709de8f5facf679f3254af
7defc987e912494c3bcce20fc6cc7bccba499fe2
048c5bc5053adde1a52d536ab8f50173663e4551
refs/heads/master
2021-01-24T22:34:57.621748
2016-08-11T03:01:33
2016-08-11T03:01:33
65450281
1
1
null
2016-08-11T07:57:34
2016-08-11T03:03:33
2016-08-11T03:30:08
null
[ { "alpha_fraction": 0.4955889880657196, "alphanum_fraction": 0.5687597393989563, "avg_line_length": 17.89215660095215, "blob_id": "0735621992bb9d355b0e15f0eb760ec6645b1cb8", "content_id": "5c2cb9ab5e7f903c37a7f0c8673d1be222e6b193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1927, "license_type": "no_license", "max_line_length": 86, "num_lines": 102, "path": "/insult.py", "repo_name": "maidnaut/ALiCEBOT", "src_encoding": "UTF-8", "text": "import random\n\ndef isprime(n):\n\tif n == 2:\n\t\treturn 1\n\tif n % 2 == 0:\n\t\treturn 0\n\tmax = n**0.5+1\n\ti = 3\n\twhile i <= max:\n\t\tif n % i == 0:\n\t\t\treturn 0\n\t\ti+=2\n\treturn 1\n\ndef asl():\n\ta = random.randint(1,9001)\n\tif isprime(a):a=12;\n\ts = {\n\t\t1:'m',\n\t\t2:'f',\n\t\t3:'robot',\n\t\t4:'raptor',\n\t\t5:'demigod',\n\t\t6:'neckbeard',\n\t\t7:'plant',\n\t\t8:'shoe',\n\t\t9:'flounder',\n\t\t10:'dolphin',\n\t\t11:'omni',\n\t\t12:'anti-spiral',\n\t\t13:'hokage',\n\t\t14:'ninja',\n\t\t15:'cyborg',\n\t\t16:'pirate',\n\t\t17:'slut',\n\t\t18:'dog',\n\t\t19:'cat',\n\t\t20:'squid',\n\t\t21:'kid',\n\t\t22:'your favorite anime character',\n\t\t23:'ur waifu',\n\t\t24:'slime',\n\t\t25:'doge',\n\t\t26:'living meme',\n\t\t27:'stand',\n\t\t28:'undead',\n\t\t29:'burn victom',\n\t\t30:'b8',\n\t\t31:'gr8 b8',\n\t\t32:'yokai',\n\t\t33:'shrine maiden',\n\t\t34:'loli',\n\t\t35:'2hu',\n\t\t36:'edge lord',\n\t\t\n\t}\n\tl = {\n\t\t1:'cali',\n\t\t2:'space',\n\t\t3:'dark side of the moon',\n\t\t4:'bottom of the ocean',\n\t\t5:'ur moms box',\n\t\t6:'behind you',\n\t\t7:'bathroom',\n\t\t8:'ur gf',\n\t\t9:'fuck you',\n\t\t10:'a coffin',\n\t\t11:'in bed',\n\t\t12:'in traffic',\n\t\t13:'girls only bus',\n\t\t14:'your favorite anime',\n\t\t15:'your least favorite anime',\n\t\t16:'ur ex',\n\t\t17:'gensokyo',\n\t\t18:'a well',\n\t}\n\treturn str(a)+'/'+s.get(random.randint(1,len(s)))+'/'+l.get(random.randint(1,len(l)))\n\n\t\t\n\ndef comeback():\n\ty = {\n\t\t1:'you dirty gook',\n\t\t2:'because you touch yourself at night',\n\t\t3:'maybe if you weren\\'t such a fgt',\n\t\t4:'ur mom',\n\t\t5:'cause I\\'m not a little bitch',\n\t\t6:'cause you\\'re a little bitch',\n\t\t7:'ur just mad cuz\\' I\\'m stylin\\' on u',\n\t\t8:'maybe you\\'d know if you\\'d stop bouncing on yo daddy\\'s dick and read a book',\n\t\t9:'ur waifu is trash',\n\t\t10:'we\\'re all dumber for hearing that',\n\t\t11:'ask ur mom',\n\t\t12:'you\\'re why your father left',\n\t\t13:'jet fuel can\\'t melt steel beams but your fat ass can bend them',\n\t\t14:': 7/11 was an inside job but you were an accident',\n\t\t15:'you\\'re tacky and I hate you',\n\t\t\n\t\t\n\t}\n\treturn y.get(random.randint(1,len(y)), 'Ur mom')\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 14, "blob_id": "071502c5a58de9b06c462c68037361754c5790e8", "content_id": "fb3ab4f689888e114e803866500cf856859d57ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/README.md", "repo_name": "maidnaut/ALiCEBOT", "src_encoding": "UTF-8", "text": "# ALiCEBOT\nshitty Discord bot\n" }, { "alpha_fraction": 0.5924476981163025, "alphanum_fraction": 0.6076433062553406, "avg_line_length": 26.61979103088379, "blob_id": "89e78fe0bf05987fd2b86077d75d66b31ad8443d", "content_id": "0dcaf376ca01700bb8f6bbe5f890fabcce33eb23", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 10990, "license_type": "no_license", "max_line_length": 111, "num_lines": 384, "path": "/ALiCE.py", "repo_name": "maidnaut/ALiCEBOT", "src_encoding": "UTF-8", "text": "import os\r\nimport importlib\r\nimport json\r\nimport time\r\nimport discord\r\nimport random\r\nimport requests\r\nimport threading\r\nimport asyncio\r\nfrom asyncio import coroutines, futures\r\nimport insult\r\nimport requests.packages.urllib3\r\nimport urllib3\r\nimport youtube_dl\r\nclient = discord.Client()\r\n\r\nvoice = ''\r\nplayer = ''\r\nplaylist = []\r\n\r\nusr = '<email>'\r\npassword = '<password>'\r\nowner = 177733171553632257\r\nme = 195350693832294400\r\nbot_banned = 195941375437438976\r\nsafe = True\r\naplay = False\r\n\r\n# wtf is with url shortening\r\n\r\n\r\n\r\[email protected]\r\nasync def on_message(message):\r\n\r\n\tglobal usr\r\n\tglobal password\r\n\tglobal owner\r\n\tglobal me\r\n\tglobal bot_banned\r\n\tglobal safe\r\n\tglobal playlist\r\n\tglobal aplay\r\n\t\r\n\tglobal player\r\n\tglobal voice\r\n\tpv = message.channel.is_private\r\n\tauthor = message.author\r\n\r\n\tif pv is False:#defines rolelist\r\n\t\trolelist = message.server.roles\r\n\t\tmemroles = []\r\n\thighlight = '<@' + author.id + '>'\r\n\ts = message.content.split()\r\n\targs = len(s)-1\r\n\r\n\t############################################################\r\n\t# Functions \r\n\t############################################################\r\n\r\n\t# shortening channel output because fuck me\r\n\tdef send(string):\r\n\t\treturn client.send_message(message.channel, string)\r\n\r\n\tdef dm(string):\r\n\t\treturn client.send_message(author, string)\r\n\r\n\r\n\t# should determine the video service from url string\r\n\t# then return properly formated video url for youtube-dl\r\n\tdef v_url(url):\r\n\t\ttry:\r\n\t\t\tif url.find('youtu.be') != -1:\r\n\t\t\t\tx = url.split('youtu.be/')\r\n\t\t\t\treturn 'https://youtube.com/watch?v='+x[1]\r\n\t\t\telif url.find('www.youtube.com') != -1 or url.find('youtube.com') != -1:\t\r\n\t\t\t\tx = url.split('?')\r\n\t\t\t\ty = x[1].split('&')\r\n\t\t\t\tz = {}\r\n\t\t\t\tfor i in range(len(y)):\r\n\t\t\t\t\tz[y[i].split('=')[0]] = y[i].split('=')[1]\r\n\r\n\t\t\t\treturn 'https://youtube.com/watch?v='+z['v']\r\n\t\t\tif\t url.find('youtu.be') != -1:\r\n\t\t\t\tx = url.split('youtu.be/')\r\n\t\t\t\treturn 'https://youtube.com/watch?v='+x[1]\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t#https://soundcloud.com/sweatsonklank/sweatson-klank-empty-your-soul?in=sweatsonklank/sets/upcoming-releases\r\n\t\t\t\r\n\t\t\telif url.find('soundcloud.com') != -1:\r\n\t\t\t\tx = url.split('soundcloud.com/')\r\n\t\t\t\ty = url[1].split('/')\r\n\t\t\t\tif y[1] == 'sets':\r\n\t\t\t\t\tsend('Cannot be a playlist!')\r\n\t\t\t\telif y[1].find('?') != -1:\r\n\t\t\t\t\ty[1] = y[i].split('?')[0]\r\n\t\t\t\tz = 'https://soundcloud.com/'+y[0]+'/'+y[1]+'/'\r\n\t\t\t\tprint(z)\r\n\t\t\t\treturn z\r\n\t\t\telse:\r\n\t\t\t\tsend('Bad url! - else')\r\n\t\texcept:\r\n\t\t\tsend('Bad url! 
- exception')\r\n\r\n\tasync def p(url):\r\n\t\tglobal player\r\n\t\tglobal voice\r\n\t\tglobal safe\r\n\r\n\t\tif not client.is_voice_connected(author.server):\r\n\t\t\t# joins voice server\r\n\t\t\tsafe = False\r\n\t\t\tvoice = await client.join_voice_channel(author.voice_channel)\r\n\t\t\tplayer = await voice.create_ytdl_player(url)\r\n\t\t\tplayer.start()\r\n\t\t\ttime.sleep(5)\r\n\t\t\tsafe = True\r\n\t\telif player == '' or not player.is_playing():\r\n\t\t\t# plays video cleanly\r\n\t\t\tsafe = False\r\n\t\t\tplayer = await voice.create_ytdl_player(url)\r\n\t\t\tplayer.start()\r\n\t\t\ttime.sleep(5)\r\n\t\t\tsafe = True\r\n\t\telse:\r\n\t\t\t# restarts stream with new link\r\n\t\t\tsafe = False\r\n\t\t\tplayer.stop()\r\n\t\t\tplayer = await voice.create_ytdl_player(url)\r\n\t\t\tplayer.start()\r\n\t\t\ttime.sleep(5)\r\n\t\t\tsafe = True\r\n\r\n\tasync def autoplay():\r\n\t\tglobal player\r\n\t\tglobal voice\r\n\t\tglobal safe\r\n\t\tglobal playlist\r\n\t\tglobal aplay\r\n\r\n\t\tplrange = len(playlist)\r\n\t\taplay = True\r\n\t\twhile plrange > 0:\r\n\t\t\ttime.sleep(1)\r\n\t\t\tif aplay == True:\r\n\t\t\t\tif not player.is_playing():\r\n\t\t\t\t\tif plrang > 1 and safe == True:\r\n\t\t\t\t\t\tplaylist.pop(0)\r\n\r\n\t\t\t\t\t\tsafe = False\r\n\t\t\t\t\t\tplayer.stop()\r\n\t\t\t\t\t\tplayer = await voice.create_ytdl_player(url)\r\n\t\t\t\t\t\tplayer.start()\r\n\t\t\t\t\t\ttime.sleep(5)\r\n\t\t\t\t\t\tsafe = True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tplaylist.pop(0)\r\n\t\taplay = False\r\n\r\n\tif pv is False:\r\n\t\tfor i in range(len(author.roles)):\r\n\t\t\tmemroles.append(author.roles[i].id)\r\n\r\n\t#ban check\r\n\tif (pv is False) and (str(bot_banned) in memroles) and (s[0][0] == '!'):\r\n\t\tawait send(highlight+' '+insult.comeback())\r\n\t# output help\r\n\telif s[0] == '!help':\r\n\t\tawait client.delete_message(message)\r\n\t\tawait send('```'+\r\n\t\t\t' !c !v creates a text/voice channel\\n'+\r\n\t\t\t' !g !i !w !y, creates a google/wiki/youtube link for topic\\n'+\r\n\t\t\t' Example: !i horse cocks\\n\\n'+\r\n\r\n\t\t\t' !p <[url]|add [url]|current|list|play|pause|skip|stop|disconnect>\\n'+\r\n\t\t\t' examples: !p url\\n'+\r\n\t\t\t' !p add url\\n'+\r\n\t\t\t' if having problems, try leaving and joining the voice channel.\\n'+\r\n\t\t\t'```')\r\n\t# join invite channel\r\n\telif s[0] == '!j' and args > 0 and (int(author.id) == owner):#join given channel\r\n\t\tif pv is False:await client.delete_message(message)\r\n\t\tawait client.accept_invite(s[1])\r\n\t# leave message.server\r\n\telif s[0] == '!d' and args == 1 and (int(author.id) == owner):#join given channel\r\n\t\tawait client.delete_message(message)\r\n\t\tif s[1] == 'y':\r\n\t\t\tawait client.leave_server(message.server)\r\n\t# finish this later\r\n\telif 'a/s/l' in s or 'asl' in s:\r\n\t\tawait send(highlight+' '+insult.asl())\r\n\t# insult on bot mention\r\n\telif ('<@'+str(me)+'>' in s) and (int(author.id) != me):\r\n\t\tawait send(highlight+' '+insult.comeback())\r\n\t# !g <string>\r\n\telif s[0] == '!g' and args > 0:\r\n\t\tawait client.delete_message(message)\r\n\t\tawait send(highlight+' '+'https://www.google.com/?gws_rd=ssl#q='+'+'.join(s[1:]))\r\n\t# !i <string>\r\n\telif s[0] == '!i' and args > 0:\r\n\t\tawait client.delete_message(message)\r\n\t\tawait send(highlight+' '+'https://www.google.com/search?tbm=isch&q='+'+'.join(s[1:]))\r\n\t# !y <string>\r\n\telif s[0] == '!y' and args > 0:\r\n\t\tawait client.delete_message(message)\r\n\t\tawait send(highlight+' 
'+'https://www.youtube.com/results?search_query='+'+'.join(s[1:]))\r\n\t# !w <string>\r\n\t# formats search results to properly display information on discord link preview\r\n\telif s[0] == '!w' and args > 0:\r\n\t\tawait client.delete_message(message)\r\n\t\tx=['is', 'of', 'a', 'and', 'or']\r\n\t\tfor i in range(len(s)):\r\n\t\t\tif i > 0 and s[i] not in x:\r\n\t\t\t\ts[i]=s[i].capitalize()\r\n\t\tfor x in s[2:]:\r\n\t\t\tx=s\r\n\t\tawait send(highlight+' '+'https://en.wikipedia.org/wiki/'+'_'.join(s[1:]))\r\n\r\n\t# create text channel\r\n\telif s[0] == '!c' and pv is False:\r\n\t\tawait client.delete_message(message)\r\n\t\tif args > 0:\r\n\t\t\tawait client.create_channel(message.server, s[1] , type=discord.ChannelType.text)\r\n\t\t\tawait client.send_message(message.channel, '#'+s[1]+\" created.\")\r\n\t\telse:\r\n\t\t\tawait send(\"Too many arguments! Channels can only be one word.\")\r\n\r\n\t# create voice channel\r\n\telif s[0] == '!v' and pv is False:\r\n\t\tawait client.delete_message(message)\r\n\t\tawait client.create_channel(message.server, \" \".join(s[1:]), type=discord.ChannelType.voice)\r\n\t\tawait client.send_message(message.channel, \" \".join(s[1:])+' voice channel created.')\r\n\r\n\r\n\t# \"\"\"\r\n\t# This whole secion of code is broke\r\n\t# uses the youtube-dl library to play music through discord\r\n\r\n\t# having trouble handling clean disconnections on new video calls\r\n\t# and leaving at the end of videos\r\n\t# conveniently, discord.py has half the work done already, but now\r\n\t# the trouble lies in handling discord channel actions correctly\r\n\r\n\t# TODO: seeking, playlist\r\n\r\n\telif s[0] == '!p' and pv is False:\r\n\t\tplrange = len(playlist)\r\n\r\n\t\tawait client.delete_message(message)\r\n\t\t# should check to see if already in room before running\r\n\t\t# plays the yt video linked in s[1], and if already playing,\r\n\t\t# stops and plays new yt link\r\n\r\n\t\t# SHOULDN'T SEGFAULT ANYMORE \\O/\r\n\r\n\t\tif args == 0:\r\n\t\t\tawait send('Not enough arguments!\\n'+\r\n\t\t\t\t\t\t'!p (stop|pause|play|url|disconnect)')\r\n\t\telif s[1] == 'stop': \r\n\t\t\tif not client.is_voice_connected(author.server):\r\n\t\t\t\t# checks if playing\r\n\t\t\t\tawait send(\"Not playing!\")\r\n\t\t\telse:\r\n\t\t\t\t# Stop\r\n\t\t\t\taplay = False\r\n\t\t\t\tplayer.stop()\r\n\t\t\t\tplaylist.pop(0)\r\n\t\telif s[1] == 'pause' or s[1] == 'play':\r\n\t\t\tif not client.is_voice_connected(author.server):\r\n\t\t\t\t# checks if playing\r\n\t\t\t\tawait send(\"Not playing!\")\r\n\t\t\telif player == '' or not player.is_playing():\r\n\t\t\t\t# Play\r\n\t\t\t\tplayer.resume()\r\n\t\t\telse:\r\n\t\t\t\t# Pause\r\n\t\t\t\tplayer.pause()\r\n\t\telif s[1] == 'disconnect':\r\n\t\t\tif not client.is_voice_connected(author.server):\r\n\t\t\t\tawait send(\"Not in a room!\")\r\n\t\t\telse:\t\t\t\t\r\n\t\t\t\t# Leaves current voice room and cleanly closes player\r\n\t\t\t\tawait voice.disconnect()\r\n\t\telif s[1] == 'add' and args >= 2:\r\n\t\t\tif s[2].find('http://') != -1 or s[2].find('https://') != -1:\r\n\t\t\t\tif plrange == 0:\r\n\t\t\t\t\tawait send('No playlist active!')\r\n\t\t\t\telse:\r\n\t\t\t\t\tplaylist.append(v_url(s[2]))\r\n\t\t\telse:\r\n\t\t\t\tawait send('Improper syntax!')\t\t\r\n\t\telif s[1] == 'skip':\r\n\t\t\tif len(playlist) > 1:\r\n\t\t\t\tif safe == False:\r\n\t\t\t\t\tawait send(\"Not yet!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tplaylist.pop(0)\r\n\t\t\t\t\tawait p(playlist[0])\r\n\t\t\telse:\r\n\t\t\t\tawait send('Nothing next!')\r\n\t\telif s[1] == 
'list':\r\n\t\t\tif len(playlist) > 1:\r\n\t\t\t\toutput = 'Current playing: '+playlist[0]+' , then \\n'\r\n\t\t\t\tfor i in range(plrange):\r\n\t\t\t\t\tif i == 3:break\r\n\t\t\t\t\tif i > 0:\r\n\t\t\t\t\t\toutput = output+str(i+1)+': '+playlist[i]+'\\n'\r\n\t\t\t\tawait dm(output)\r\n\t\telif s[1] == 'current':\r\n\t\t\tif plrange == 0:\r\n\t\t\t\tawait dm('Playlist empty!')\r\n\t\t\telse:\r\n\t\t\t\tawait dm('Currently playing: '+playlist[0])\t\t\t\t\t\r\n\t\telif s[1].find('http://') != -1 or s[1].find('https://') != -1:\t\r\n\t\t\tif safe == True:\r\n\t\t\t\tif plrange > 0:\r\n\t\t\t\t\tplaylist.pop(0)\r\n\t\t\t\t\tplaylist.insert(0, v_url(s[1]))\r\n\t\t\t\t\tawait p(playlist[0])\r\n\t\t\t\telse:\r\n\t\t\t\t\tplaylist.append(v_url(s[1]))\r\n\t\t\t\t\r\n\t\t\t\t\tawait p(playlist[0])\r\n#\t\t\t\tif aplay is False:\r\n#\t\t\t\t\tthreading.Tread(autoplay())\r\n\t\t\telse:\r\n\t\t\t\tawait send(\"Not yet!\")\r\n\t\telse:\r\n\t\t\tawait send(\"Improper syntax!\")\r\n\t# End of god forsaken youtube-dl stuff\r\n\t# \"\"\"\t\r\n\r\n\r\n\t# refresh insult dictionary\r\n\telif s[0] == '!l' and pv is False and (int(author.id) == owner):\r\n\t\tawait client.delete_message(message)\r\n\t\timportlib.reload(insult)\r\n\t\tprint(\"insult.py reloaded\")\r\n\r\n\telif s[0] == '!r' and (int(author.id) == owner):\r\n\t\tawait client.delete_message(message)\r\n\t\tawait send('Bye!')\r\n\t\tclient.close()\r\n\t\ttime.sleep(5)\r\n\t\tclient.run(usr, password)\r\n\telif s[0] == 'Hello!' and (int(author.id) == me):\r\n\t\tawait client.delete_message(message)\r\n\telse:\r\n\t\t# randomly insult someone\r\n\t\t# moved to else so it runs only on actual text and not on commands\r\n\t\tif (random.randint(1,30) is 1) and (int(author.id) != me):\r\n\t\t\tprint(author.id)\r\n\t\t\tawait send(highlight+' '+insult.comeback())\r\n\r\n\r\n\t# Now for some easter eggs\r\n\r\n\t# hash-check for nhentai joke\r\n\tfor i in range(len(s)):\r\n\t\tif pv is True and '#' in s[i][0]:\r\n\t\t\tawait send('https://nhentai.net/search/?q='+s[i][1:])\r\n\t\telif (pv is False) and ('#' in s[i][0]) and (len(s[i]) > 1) and (s[i] not in message.server.channels):\r\n\t\t\tawait send('https://nhentai.net/search/?q='+s[i][1:])\r\n\r\n\r\n\r\[email protected]\r\nasync def on_server_join(server):\r\n\tawait client.send_message(server.default_channel, \"Hello!\")\r\n\t\r\[email protected]\r\nasync def on_ready():\r\n\tif client.is_logged_in:\r\n\t\tprint('Log in successful.')\r\n\tclient.join_voice_channel(\"discordurl\")\r\n\tss = client.servers\r\n\tfor s in ss:\r\n\t\tprint(s.default_channel)\r\n\t\tawait client.send_message(s.default_channel, \"Hello!\")\r\n\tprint('Join channel succesful.')\r\n\r\nclient.run(usr, password)\r\nclient.close()\r\n" } ]
3
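The ALiCE.py blob in the row above serializes all player state through module-level globals (`safe`, `player`, `playlist`) and blocking `time.sleep` calls, which is exactly the race-prone pattern its own comments complain about ("This whole secion of code is broke"). A common alternative is to hand playlist items to a single consumer task through `asyncio.Queue`. The sketch below is a minimal, self-contained illustration of that pattern only; `play_one` is a hypothetical stand-in for the bot's `voice.create_ytdl_player` call, not discord.py API.

```python
import asyncio

# Hypothetical stand-in for starting a stream; in the real bot this would
# wrap voice.create_ytdl_player(url) and wait for playback to finish.
async def play_one(url):
    print(f"playing {url}")
    await asyncio.sleep(0.1)  # simulate playback without blocking the event loop

async def player_loop(queue):
    # Single consumer: only this task touches the player, so no 'safe' flag
    # or global state is needed to guard against concurrent restarts.
    while True:
        url = await queue.get()
        if url is None:  # sentinel used to shut the loop down
            break
        await play_one(url)
        queue.task_done()

async def main():
    queue = asyncio.Queue()
    consumer = asyncio.create_task(player_loop(queue))
    for url in ["https://youtube.com/watch?v=a", "https://youtube.com/watch?v=b"]:
        await queue.put(url)  # a '!p add <url>' command would simply enqueue here
    await queue.put(None)
    await consumer

asyncio.run(main())
```

Because one coroutine owns playback, "skip" becomes "stop the current item and let the loop pull the next one", with no `plrange`/`aplay` bookkeeping.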
jensiepoo/Multithreaded-Python-Scraper
https://github.com/jensiepoo/Multithreaded-Python-Scraper
49ea8061aa29c2eeb809cce6db54eb5af92af4eb
05b4a1817fec827f364568df910b0a288654a76b
e77494a5f5d74847e78a975baa6021d59490e50d
refs/heads/master
2016-09-06T18:48:07.007526
2015-07-21T18:18:39
2015-07-21T18:18:39
39,117,498
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7686375379562378, "alphanum_fraction": 0.7763496041297913, "avg_line_length": 29, "blob_id": "99523c48b6d695fe8fb1e3cadb3d486531778811", "content_id": "d79337b7cf1f5d721877e031bfe4f1f1d0eeedef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 389, "license_type": "no_license", "max_line_length": 126, "num_lines": 13, "path": "/ReadMe.txt", "repo_name": "jensiepoo/Multithreaded-Python-Scraper", "src_encoding": "UTF-8", "text": "Problem:\n\nScrape the Top 500 Free Apps from Google Play (https://play.google.com/store/apps/collection/topselling_free?hl=en) and \nlocally store as much detailed information about each application as possible using a (ideally multi-threaded) python scraper.\n\n \nSort results by:\n\n- Number of comments (highest to lowest)\n\n- Release date (newest to oldest)\n\n- Star rating (highest to lowest)" }, { "alpha_fraction": 0.6534216403961182, "alphanum_fraction": 0.6545253992080688, "avg_line_length": 26.42424201965332, "blob_id": "cff9b21110cb2b1931309016f4417279884a195d", "content_id": "a6d9fa970fa6914fe92be53d41559bf23b443a11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 51, "num_lines": 33, "path": "/scraper/scraper/items.py", "repo_name": "jensiepoo/Multithreaded-Python-Scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass ScraperItem(scrapy.Item):\n name = scrapy.Field()\n url = scrapy.Field()\n developer = scrapy.Field()\n categorie = scrapy.Field()\n inapp_purchases = scrapy.Field()\n link = scrapy.Field()\n age = scrapy.Field()\n description = scrapy.Field()\n rating = scrapy.Field()\n rating_count = scrapy.Field()\n cover_image = scrapy.Field()\n screenshots = scrapy.Field()\n preview_vid = scrapy.Field()\n badge_title = scrapy.Field()\n star_rating = scrapy.Field()\n top_comments = scrapy.Field()\n recent_changes = scrapy.Field()\n additional_information = scrapy.Field()\n see_more = scrapy.Field()\n similar_app = scrapy.Field()\n more_from_developer = scrapy.Field()\n name = scrapy.Field()\n\n" }, { "alpha_fraction": 0.5802118182182312, "alphanum_fraction": 0.583570122718811, "avg_line_length": 49.907894134521484, "blob_id": "b657df21e88697d13dc6ea50566484b45c0fe1e6", "content_id": "cd69626a78201684a7828f537f5ce60407e777a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3871, "license_type": "no_license", "max_line_length": 171, "num_lines": 76, "path": "/scraper/scraper/spiders/google_spider.py", "repo_name": "jensiepoo/Multithreaded-Python-Scraper", "src_encoding": "UTF-8", "text": "import scrapy\nfrom .. 
import scraper\nfrom scraper.items import *\nimport locale\n\nURL = \"https://play.google.com/store/apps/collection/topselling_free?hl=en\"\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\nclass GoogleSpider(scrapy.Spider):\n name = \"scraper\"\n allowed_domains = [\"play.google.com\"]\n start_urls = [\"https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftDMHM\"]\n\n def parse(self, response):\n item = ScraperItem()\n item['name'] = response.xpath('//div[@class = \"document-title\"]/div/text()').extract_first()\n item['url'] = response.url\n item['developer'] = response.xpath('//span[@itemprop = \"name\"]/text()').extract()\n item['categorie'] = response.xpath('//span[@itemprop = \"genre\"]/text()').extract()\n item['inapp_purchases'] = response.xpath('//div[@class = \"inapp-msg\"]/text()').extract_first(default = 'No in-app purchase.')\n item['age'] = response.xpath('//div[@class = \"document-subtitle content-rating-title\"]/text()').extract_first(default = \"No age specification.\")\n item['rating'] = locale.atof(response.xpath('//div[@class = \"score\"]/text()').extract_first())\n item['rating_count'] = locale.atoi(response.xpath('//div[@class = \"reviews-stats\"]/span/text()').extract_first())\n item['cover_image'] = response.xpath('//div[@class = \"cover-container\"]//@src').extract()\n item['badge_title'] = response.xpath('//span[@class = \"badge-title\"]/text()').extract_first(default = \"No badge title.\") \n\n string = \"\"\n string += response.xpath('//div[@class = \"id-app-orig-desc\"]/text()').extract_first()\n for node in response.xpath('//div[@class = \"id-app-orig-desc\"]/p'):\n string += node.xpath('text()').extract_first()\n item['description'] = string\n\n \n lst = []\n for node in response.xpath('//div[@class = \"thumbnails\"]/img'):\n lst.append(str(node.xpath('@src').extract_first()))\n item['screenshots'] = lst \n\n item['preview_vid'] = response.xpath('//div[@class = \"thumbnails\"]/span//@data-video-url').extract_first(default = \"No trailer video.\")\n \n collection = []\n review = {}\n for node in response.xpath('//div[@class = \"single-review\"]'):\n author = node.xpath('div[1]/div[@class = \"review-info\"]/span[@class = \"author-name\"]/a/text()').extract_first()\n date = node.xpath('div[1]/div[@class = \"review-info\"]/span[@class = \"review-date\"]/text()').extract_first()\n stars = int(node.xpath('div[1]/div[@class = \"review-info\"]/div[@class = \"review-info-star-rating\"]//@style').extract_first().split(\" \")[1].split('%')[0])/100.0\n title = node.xpath('div[2]/span[@class = \"review-title\"]/text()').extract_first()\n comment = node.xpath('div[2]/text()').extract()[1]\n review['author'] = author\n review['date'] = date\n review['rating'] = stars\n review['title'] = title\n review['comment'] = comment\n collection.append(review)\n review = {}\n item['top_comments'] = collection \n\n\n lst = []\n for node in response.xpath('//div[@class = \"details-section whatsnew\"]//div[@class = \"recent-change\"]'):\n lst.append(node.xpath('text()').extract_first())\n item['recent_changes'] = lst\n\n\n additional = {}\n for node in response.xpath('//div[@class = \"details-section-contents\"]//div[@class = \"meta-info\"]'):\n title = node.xpath('div[@class = \"title\"]/text()').extract_first()\n print \"title\"\n print title\n content = node.xpath('div[@class = \"content\"]/text()').extract_first()\n print content\n additional[title] = content\n item['additional_information'] = additional\n\n \n yield item\n\n " }, { "alpha_fraction": 
0.7033567428588867, "alphanum_fraction": 0.7119438052177429, "avg_line_length": 26.782608032226562, "blob_id": "f025055641350580de2fc8e539203824a3122951", "content_id": "b6f4a8e531123489aa9ee669b0794a185bedd267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 97, "num_lines": 46, "path": "/scraper/scraper/scraper.py", "repo_name": "jensiepoo/Multithreaded-Python-Scraper", "src_encoding": "UTF-8", "text": "from threading import Thread\nfrom bs4 import BeautifulSoup\nfrom urllib2 import urlopen\nfrom selenium import webdriver\n# from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains as AC\n\nBASE_URL = \"https://play.google.com\"\n\ndef get_links(section_url):\n\tbrowser = webdriver.Chrome(\"/Users/jensenkuo/Downloads/chromedriver\")\n\tbrowser.get(section_url)\n\t\n\tactions = AC(browser)\n\tslow = 0 \n\tfast = len(browser.page_source)\n\n\twhile True:\t\t\t\t\t\t\t\t\t\t\t\n\t\ttry: \n\t\t\tpass\n\n\t\texcept:\n\t\t\tslow = fast\n\t\t\tbrowser.execute_script(\"window.scrollBy(0, -10);\")\n\t\t\tbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\tbrowser.implicitly_wait(4)\n\t\t\tfast = len(browser.page_source)\n\t\telse:\n\t\t\tbreak\n\t\tfinally:\n\t\t\tif slow == fast:\n\t\t\t\ttry: \n\t\t\t\t\tshowbutton = browser.find_element_by_id(\"footer-content\").find_element_by_tag_name(\"button\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tactions.click(showbutton).perform()\n\n\thtml_source = browser.page_source\n\tsoup = BeautifulSoup(html_source, \"lxml\")\n\tcardlist = soup.find(\"div\", {\"class\": \"card-list two-cards\"})\n\tlinks = [BASE_URL + h2.a[\"href\"] for h2 in cardlist.findAll(\"h2\")]\n\tbrowser.quit()\n\tprint links\n\treturn links\n\t\n\n" } ]
4
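The row above pairs Scrapy with a Selenium pass in `get_links`, but its ReadMe.txt asks for an "ideally multi-threaded" scraper. As a sketch of what that threading can look like with only the standard library, the snippet below fans per-app downloads out over a thread pool. It is a pattern sketch, not the repository's pipeline: `fetch` is a placeholder that does no real HTTP, and the URLs are hypothetical.

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(url):
    # Placeholder worker: a real scraper would issue the HTTP request here
    # (urllib2/requests) and return the parsed app page instead of a length.
    return url, len(url)

# Hypothetical detail-page URLs of the kind get_links() collects.
urls = [f"https://play.google.com/store/apps/details?id=app{i}" for i in range(8)]

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = {pool.submit(fetch, u): u for u in urls}
    for future in as_completed(futures):
        url, size = future.result()
        print(f"{url} -> {size}")
```

`as_completed` yields results as each thread finishes, so slow pages don't stall the whole batch the way a sequential loop would.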
Oktoreno/pemrograman-covid-19
https://github.com/Oktoreno/pemrograman-covid-19
c253bb368759f6ff8f8eb851e58c4fb314e0c8d6
5c942105753a081b89c1049bc5dd5cedab93f86b
4c7d2b3014cf14cc30e581c655d4f40f13e932d8
refs/heads/master
2023-06-04T01:19:49.921362
2021-06-15T03:07:43
2021-06-15T03:07:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7947598099708557, "alphanum_fraction": 0.8296943306922913, "avg_line_length": 21.899999618530273, "blob_id": "76d2c789a3f8726a77fad25b64c227523ec3cee8", "content_id": "42cbfdd2c1307354dbdea103accd39a619aca48a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 229, "license_type": "no_license", "max_line_length": 31, "num_lines": 10, "path": "/scripts/run.sh", "repo_name": "Oktoreno/pemrograman-covid-19", "src_encoding": "UTF-8", "text": "#! /bin/sh\n\npython3 generate-aktif.py\npython3 generate-case-harian.py\npython3 generate-confirmed.py\npython3 generate-doubling.py\npython3 generate-meninggal.py\npython3 generate-rate.py\npython3 generate-sembuh.py\npython3 gabung.py\n" }, { "alpha_fraction": 0.6267730593681335, "alphanum_fraction": 0.667553186416626, "avg_line_length": 20.283018112182617, "blob_id": "2733c8192ea7b08d984aa5987e07412cb4bb21bb", "content_id": "eaacbfb10b082659c89da38f83698db6e161cad8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1128, "license_type": "no_license", "max_line_length": 74, "num_lines": 53, "path": "/scripts/generate-case-harian.py", "repo_name": "Oktoreno/pemrograman-covid-19", "src_encoding": "UTF-8", "text": "# Rekap CASE HARIAN\n# Budi Rahardjo (@rahard)\n# 2020\n\noutputdir = \"../output/\"\n\n# list directories and find the latest date\nfrom glob import glob\n# nama file hanya yang berbentuk angka \"200515\" => 2020/05/15\nf = glob(\"../[0-9][0-9][0-9][0-9][0-9][0-9]\")\ndir = sorted(f, reverse=True)[0]\n# print(dir)\n# karena format dir adalah \"../200515\" - sesuaikan dengan lokasi direktori\n# maka ambil tanggal dari nama direktori tersebut\ntanggal = dir[7:9] + \"/\" + dir[5:7] + \"/20\" + dir[3:5]\n#print(tanggal)\n\n# ambil nama file\nfrekap = glob(dir + \"/\" + \"csv_tabel_cases*\")[0]\nprint(frekap)\n\n# plot\nimport pandas as pd\nimport sys\n\ntry:\n df = pd.read_csv(frekap)\n # print(df.head())\nexcept:\n print(\"Cannot open \" + frekap)\n\n# convert the column to datetime\ndf['Datetime'] = pd.to_datetime(df['Tanggal'])\n#print(df.Datetime)\n\nimport matplotlib.pyplot as plt\n\nax = df.plot(x='Datetime', y='WestJava')\n\nplt.grid(True)\n\njudul = \"Kasus Harian di Jawa Barat sd. 
\" + tanggal\nplt.title(judul)\nplt.xlabel('Tanggal')\nplt.ylabel(\"Jumlah\")\n\n\n# plt.show()\noutput = outputdir+\"/kasus.png\"\ntry:\n plt.savefig(output)\nexcept:\n print(\"Cannot save figure\")\n" }, { "alpha_fraction": 0.6210700273513794, "alphanum_fraction": 0.6492002010345459, "avg_line_length": 24.53521156311035, "blob_id": "cdd44bc9719dffa4746a8d2424e0ae18d3edf5cd", "content_id": "84ba33a701d9293d2be9b361e2d7ba47e9ae6a46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1813, "license_type": "no_license", "max_line_length": 75, "num_lines": 71, "path": "/scripts/gabung.py", "repo_name": "Oktoreno/pemrograman-covid-19", "src_encoding": "UTF-8", "text": "# Plot AKTIF\n# Budi Rahardjo (@rahard)\n# Mei 2020\n\noutputdir = \"../output/\"\n\n# list directories and find the latest date\nfrom glob import glob\n# nama file hanya yang berbentuk angka \"200515\" => 2020/05/15\n# \"..\" itu karena direktori di atas ini\nf = glob(\"../[0-9][0-9][0-9][0-9][0-9][0-9]\")\ndir = sorted(f, reverse=True)[0]\n#print(dir)\n# karena format dir adalah \"../200515\" - sesuaikan dengan lokasi direktori\n# maka ambil tanggal dari nama direktori tersebut\ntanggal = dir[7:9] + \"/\" + dir[5:7] + \"/20\" + dir[3:5]\n#print(tanggal)\n\n# ambil nama file\nfrekap = glob(dir + \"/\" + \"csv_tabel_aktif_*\")[0]\ngrekap = glob(dir + \"/\" + \"csv_tabel_confirmed_cases_*\")[0]\nhrekap = glob(dir + \"/\" + \"csv_tabel_meninggal_*\")[0]\nirekap = glob(dir + \"/\" + \"csv_tabel_sembuh_*\")[0]\n\n\n# plot\nimport pandas as pd\n\nimport sys\n\ntry:\n df = pd.read_csv(frekap)\n dg = pd.read_csv(grekap)\n dh = pd.read_csv(hrekap)\n di = pd.read_csv(irekap)\n # print(df.head())\nexcept:\n print(\"Cannot open \" + frekap)\n\n# convert the column to datetime\ndf['Datetime'] = pd.to_datetime(df['Tanggal'])\ndg['Datetime'] = pd.to_datetime(dg['Tanggal'])\ndh['Datetime'] = pd.to_datetime(dh['Tanggal'])\ndi['Datetime'] = pd.to_datetime(di['Tanggal'])\n#print(df.Datetime)\n# add dg,dh,di to df\ndf['Confirmed'] = dg['WestJava']\ndf['Meninggal'] = dh['WestJava']\ndf['Sembuh'] = di['WestJava']\n#print(df[['Datetime', 'WestJava', 'Confirmed', 'Meninggal', 'Sembuh']])\n\ndf = df.rename(columns={'WestJava':'Active'})\n\nimport matplotlib.pyplot as plt\n\nax = df.plot(x='Datetime', y=['Confirmed','Active', 'Sembuh', 'Meninggal'])\n\nplt.grid(True)\n\njudul = \"COVID-19 di Jawa Barat sd. 
\" + tanggal\nplt.title(judul)\nplt.xlabel('Tanggal')\nplt.ylabel(\"Jumlah\")\n\n#plt.show()\n\noutput = outputdir+\"/gabungan.png\"\ntry:\n plt.savefig(output)\nexcept:\n print(\"Cannot save figure\")\n" }, { "alpha_fraction": 0.7737287878990173, "alphanum_fraction": 0.7974576354026794, "avg_line_length": 39.68965530395508, "blob_id": "4e8fabd9f41cd789f8bb47aab792207c8f2e77f5", "content_id": "144427d02714154eb346afb4be468d9dbe39b87e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 95, "num_lines": 29, "path": "/README.md", "repo_name": "Oktoreno/pemrograman-covid-19", "src_encoding": "UTF-8", "text": "# Pemrograman Untuk COVID-19\nUsing Python and Matplotlib to plot COVID-19 cases in Indonesia.\nThe description will be in Bahasa Indonesia (Indonesian), although you can see the code easily.\n\nIni adalah contoh pemrograman dengan menggunakan bahasa Python dan Matplotlib.\nKasus yang digunakan adalah data COVID-19.\n\nSemisal Anda mendapatkan data COVID-19 dalam bentuk CSV (comma separated value).\nData tersebut ingin kita tampilkan secara grafik (visual).\nNah, ini contoh kodenya.\n\nData harian disimpan dalam folder yang namanya menggunakan tanggal.\nJadi \"200515\" berisi data (CSV) dari tanggal \"15/5/2020\".\nContoh data ini saya peroleh dari Budi Sulistyo (Sharing Vision). Terima kasih.\n\nProgram (skrip) disimpan di direktori \"scripts\" dan kesemuanya dijalankan\ndengan shell script (run.sh). Hasilnya (grafik) akan tersedia di direktori \"output\".\n\nKodenya baru saya buat tadi malam (sambil menunggu sahur).\nJadi kode ini tidak dioptimasi, namun lebih ke arah kejelasan (clean)\nagar dapat digunakan untuk belajar.\n\nAda juga video yang menjelaskan secara lengkap mengenai ini di:\nhttps://www.youtube.com/watch?v=ryzdIuBoPV8\n\nSemoga bermanfaat.\n\nBudi Rahardjo (@rahard)\nBandung, 16 Mei 2020\n" } ]
4
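Every generate-* script in the row above repeats the same prologue: glob for six-digit `YYMMDD` directory names, sort descending, take the first, then slice the name back into a `dd/mm/20yy` label by hand. The sketch below isolates that shared logic behind `datetime.strptime`, which both validates the name and removes the manual slicing; the parent path is hypothetical, and this is a refactoring sketch rather than code from the repository.

```python
from datetime import datetime
from glob import glob
import os

def latest_data_dir(parent=".."):
    # Data directories are named with six digits, YYMMDD ("200515" -> 15/05/2020),
    # matching the pattern the generate-* scripts glob for.
    candidates = glob(os.path.join(parent, "[0-9]" * 6))
    if not candidates:
        raise FileNotFoundError("no dated data directories found")
    newest = max(candidates, key=os.path.basename)  # lexicographic == chronological here
    day = datetime.strptime(os.path.basename(newest), "%y%m%d")
    return newest, day.strftime("%d/%m/%Y")

# Example: latest_data_dir("..") might return ("../200516", "16/05/2020").
```

Factoring this into one shared module would let run.sh's seven scripts drop their duplicated glob/sort/slice blocks.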
jbadger3/stat679work
https://github.com/jbadger3/stat679work
be6ca79f9243d4c75f742abe59ba6906c1a6f018
ae7acf70ce2720cf97cf8a6f991f3833fb23fe3b
bdd13a527ed47f62a0d637e73de2de0207512c97
refs/heads/master
2021-05-04T08:21:23.909552
2016-11-13T17:07:09
2016-11-13T17:07:09
68,550,774
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7281879186630249, "alphanum_fraction": 0.7516778707504272, "avg_line_length": 26.090909957885742, "blob_id": "4a97f3b40bee2e4c892be65a49ad8a9609350f1b", "content_id": "68ac08516fa68d67013de791c5d54c2f984bbcf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 596, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/hw1/scripts/normalizedFileNames.sh", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "#change all file names\n#`timetesty_snaq.log` to `timetest0y_snaq.log` where \"y\" is a digit between 1 and 9.\n#Similarly, change `timetesty_snaq.out` to `timetest0y_snaq.out`.\n\ndeclare -i counter=1\nfor filename in hw1-snaqTimeTests/log/timetest?_snaq.log\ndo\n #echo $filename\n #echo $counter\n mv $filename 'hw1-snaqTimeTests/log/timetest0'$counter'_snaq.log'\n let counter=counter+1\ndone\n\nlet counter=1\n\nfor filename in hw1-snaqTimeTests/out/timetest?_snaq.out\ndo\n #echo $filename\n #echo $counter\n mv $filename 'hw1-snaqTimeTests/out/timetest0'$counter'_snaq.out'\n let counter=counter+1\ndone\n" }, { "alpha_fraction": 0.6574965715408325, "alphanum_fraction": 0.7324621677398682, "avg_line_length": 37.26315689086914, "blob_id": "db2ec421b44dc8814b33fbd27138ecff2c1a9717", "content_id": "3676f887abbfc3bd22d5e6fc882d6eac3f60edac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 125, "num_lines": 38, "path": "/hw2/readme.md", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "## Homework 2 Readme\n### Outline:\nThe python script merge_temp_and_energy_files.py merges data from solar heated water system and solar panel system csv files.\nBasic operation of the script takes two command line arguments preceded by -files in order: temperature_file and energy_file.\nEnergy generation values in Wh are converted to KWh and collated by date with\nthe daily water temperature data. All temperature data and collated energy values\nare appended to a single file energy_and_temperature.csv.\n### Assumptions:\n* monthly files are .csv files with lines formatted as follows\n\n###### temperature_file format:\n\n\"Plot Title: 10679014 jackson July29\" \n\"#\",\"Date Time, GMT-05:00\",\"K-Type, °F (LGR S/N: 10679014, SEN S/N: 10679014, LBL: water pipe)\" \n1,07/29/16 10:26:34 AM,72.86 \n2,07/29/16 11:26:34 AM,73.92 \n... \n\n###### energy_file format\nDate/Time,Energy Produced (Wh) \n2016-07-29 00:00:00 -0500,2956 \n2016-07-30 00:00:00 -0500,9468 \n... \nTotal, the_total \n\n### script(s)\n`merge_temp_endergy_data.py` \n*usage* \n- run under shell as follows: \n`./path/to/merge_temp_and_energy_files.py -files temp_file energy_file`\n- use -files to specify files\n- use --overwrite to overwrite existing output file. 
Dangerous!\n- use -h for help\n- use --test to run module tests\n\n*result*\n- energy_and_temperature.csv file at current directory\n- if the --overwrite option is specified and energy_and_temperature.csv exists, its contents are overwritten\n" }, { "alpha_fraction": 0.7769347429275513, "alphanum_fraction": 0.8057662844657898, "avg_line_length": 93.14286041259766, "blob_id": "a69356adbf7514172e98ac7d827257d29067f50f", "content_id": "abfd07985585ff412abbb50c148bcac03d269ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 659, "license_type": "no_license", "max_line_length": 142, "num_lines": 7, "path": "/README.md", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "# stat679work\n1. This git repository will contain all homework assignments for STATS 679.\n2. A local clone was created in /Users/jonathanbadger/Documents/classes/STATS_679/course_repo/coursedata\n`git clone [email protected]:jbadger3/stat679work.git`\n3. Each homework assignment will be given its own subdirectory labeled 'hw' with the assignment number e.g. 'hw1' for the first assignment. \n4. All homeworks contain copies of the raw data from the course repository located at [email protected]:UWMadison-computingtools/coursedata.git\n5. Each homework directory contains a detailed readme.md containing the date, sh scripts, and unix commands used to complete the assignment.\n" }, { "alpha_fraction": 0.721587061882019, "alphanum_fraction": 0.7652992606163025, "avg_line_length": 60.95833206176758, "blob_id": "eefeff8cf1b987f80f5223d378d775df18d9b194", "content_id": "cbc0056ed5901c4fbc6ae1ac152ff48d27276a72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 331, "num_lines": 24, "path": "/hw1/readme.md", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "# Homework 1\n\nThis directory will contain all the original data for HW1. Below is the sequence of how the homework was completed.\n* Note: all scripts should be run from /hw1\n\n- *9/18/2016* The files from the git repo were copied to a working directory\n\n`sudo cp -r ../course_repo/coursedata/hw1-snaqTimeTests/ hw1` \n\n- *9/18/2016* A shell script was created `normalizedFileNames.sh` to change all log and out file names from format\n `timetesty_snaq.xxx` to `timetest0y_snaq.xxx` where \"y\" is a digit between 1 and 9.\n\n`sh scripts/normalizedFileNames.sh` edited: *10/6/2016*\n- *9/25/2016* A shell script was created `summarizeSNaQres.sh` to summarize the results of the snaq runs. The file summary.csv contains three fields: analysis, hmax, and CPUtime for each run.\n\n`sh scripts/summarizeSNaQres.sh` edited: *10/6/2016*\n\n- *10/6/2016* The file structure of the assignment was updated. A new folder scripts was created and both previous scripts `normalizedFileNames.sh` and `summarizeSNaQres.sh` were moved to that location. A new folder results was also created and 'summary.csv' was moved to the new location. All new results will also be stored here\n\n- *10/7/2016* A new script `detailed_summary.sh` was created to gather a more detailed summary of the snaqTimeTests\n\n`sh scripts/detailed_summary.sh`\n\n- *10/11/2016* Replaced `summarizeSNaQres.sh` with `detailed_summary.sh` and renamed back to `summarizeSNaQres.sh`. 
The original file should have been edited in the first place.\n" }, { "alpha_fraction": 0.6421540379524231, "alphanum_fraction": 0.6778228282928467, "avg_line_length": 47.2402229309082, "blob_id": "b90a3718a5b30f16c79b499180837b623c57d701", "content_id": "e7e1acb82d94391cf1e8d2b2ab81855250022acb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8636, "license_type": "no_license", "max_line_length": 214, "num_lines": 179, "path": "/hw2/merge_temp_energy_data.py", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport datetime, re, sys, os, argparse\n\"\"\"This script merges data from a solar heated water system and solar panel system.\nThe script takes two command line arguments (files) in order: temperature_file and energy_file.\nEnergy generation values in Wh are converted to KWh and collated by date with\nthe daily water temperature data. All temperature data and collated energy values\nare appended to a single file energy_and_temperature.csv.\nAssumptions:\n* monthly files are csv files with lines formatted as follows:\n\ntemperature_file format:\n\"Plot Title: 10679014 jackson July29\"\n\"#\",\"Date Time, GMT-05:00\",\"K-Type, °F (LGR S/N: 10679014, SEN S/N: 10679014, LBL: water pipe)\"\n1,07/29/16 10:26:34 AM,72.86\n2,07/29/16 11:26:34 AM,73.92\n...\n\nenergy_file format:\nDate/Time,Energy Produced (Wh)\n2016-07-29 00:00:00 -0500,2956\n2016-07-30 00:00:00 -0500,9468\n...\n\"\"\"\n\n# use an Argument Parser object to handle script arguments\nparser = argparse.ArgumentParser(description=\"Combine temperature and solar energy data from files.\")\nparser.add_argument('-files', metavar=('temp_file','energy_file'),nargs='*', help='Paths to temp and energy files. Must be two files [temp_file energy_file] in this order.')\nparser.add_argument(\"--overwrite\", action=\"store_true\", help=\"Overwrite any existing file in output. Use with caution!\")\nparser.add_argument(\"--test\", action=\"store_true\", help=\"Tests the module and quits.\")\nargs = parser.parse_args()\n# test argument problems early:\nif args.test and (args.files or args.overwrite):\n    print(\"ignoring file or overwrite arguments\")\nif args.files:\n    if len(args.files) != 2:\n        raise Exception(\"Files for temperature and energy must be supplied\")\n    else:\n        assert 'Temperature' in args.files[0], 'First argument must be the temp file. File given: %s.'% args.files[0]\n        assert 'energy' in args.files[1], 'Second argument must be the energy file. File given: %s.'% args.files[1]\nif not args.files and not args.test:\n    raise Exception(\"Arguments to run script not provided. 
Use -h for help.\")\n\n\n\n\ndef remove_all_new_line_characters(file_lines):\n r\"\"\"Remove all new line characters from passed file_lines and\n return a cleaned list of file lines.\n Assume:\n * file_lines is a list of lines\n * newline characters are not embeded within lines ex.\n ['this,\\n,is,imbeded\\n','this,is,not\\n']\n Examples:\n\n >>> remove_all_new_line_characters(['line1\\n'])\n ['line1']\n >>> remove_all_new_line_characters(['\\nline1'])\n ['line1']\n >>> remove_all_new_line_characters(['\\n','\\nline1\\n','line2'])\n ['line1', 'line2']\n \"\"\"\n\n clean_lines = [] #store cleaned lines in a list\n\n for line in file_lines:\n clean_line = line.strip()\n if clean_line != '': #only add non-blank lines to the cleaned list\n clean_lines.append(clean_line)\n return clean_lines\n\ndef parse_energy_lines(energy_lines):\n r\"\"\"\n Take all lines as a list from the energy file. For each line separate the fields\n into a list. Convert each times stamp into a datetime and add it to a list of eneryg_days.\n Take each energy value in watt hours, convert it to killowatt hours, and store it in a list\n of killowatt_hours. Return both lists for further processing.\n Assumptions:\n * all newline characters have been previously stripped.\n * The first line is the list of fields for the file and should be skipped.\n * The last line contains the total and should be skipped\n * The time stamp is the first field an of the format '%Y-%m-%d %H:%M:%S %z'\n * The energy is the second field and given in watt hours\n Examples:\n >>> parse_energy_lines(['','2016-11-13 10:30:00 -0500,1000',''])\n ([datetime.datetime(2016, 11, 13, 10, 30, tzinfo=datetime.timezone(datetime.timedelta(-1, 68400)))], [1.0])\n >>> parse_energy_lines(['','2016-11-13 10:30:00 -0500,100',''])\n ([datetime.datetime(2016, 11, 13, 10, 30, tzinfo=datetime.timezone(datetime.timedelta(-1, 68400)))], [0.1])\n >>> parse_energy_lines(['','2016-11-13 10:30:00 -0500,100','2016-11-16 10:30:00 -0500,1000',''])\n ([datetime.datetime(2016, 11, 13, 10, 30, tzinfo=datetime.timezone(datetime.timedelta(-1, 68400))), datetime.datetime(2016, 11, 16, 10, 30, tzinfo=datetime.timezone(datetime.timedelta(-1, 68400)))], [0.1, 1.0])\n \"\"\"\n energy_days = []\n killowatt_hours = []\n for e_line_num in range(1 ,len(energy_lines)-1): #loop from second line to the end\n e_line = energy_lines[e_line_num]\n e_fields = e_line.split(',') #split the line on commas\n\n energy_datetime = datetime.datetime.strptime(e_fields[0], '%Y-%m-%d %H:%M:%S %z') # convert timestamp to datetime\n energy_days.append(energy_datetime)\n\n energy_kw = float(e_fields[1])/1000 #convert energy from wh to kwh\n killowatt_hours.append(energy_kw)\n return energy_days, killowatt_hours\n\ndef merge_temp_and_energy_files(args):\n files = args.files #files passed in execution as list\n\n #open the temp file, read the lines into a list, and strip out all new line and white space\n temperature_file = files[0] #the temp file. 
verified in args check above\n temperature_file = open(temperature_file, 'r')\n temperature_lines = temperature_file.readlines()\n temperature_lines = remove_all_new_line_characters(temperature_lines)\n\n #open the energy file, read the lines into a list, and strip out all new line and white space\n energy_file = files[1]\n energy_file = open(energy_file, 'r')\n energy_lines = energy_file.readlines()\n energy_lines = remove_all_new_line_characters(energy_lines)\n\n #parse the lines from the energy file and return two aligned lists containing\n #ernergy days, and KWh for each day\n energy_days, kilowatt_hours = parse_energy_lines(energy_lines)\n\n\n days_counter = 0 #counter to track the index to use from energy_days and kilowatt_hours\n current_energy_day = energy_days[days_counter] # get the first energy date\n first_temp_line = temperature_lines[2] # get the first temp line\n firt_temp_date = datetime.datetime.strptime(first_temp_line.split(',')[1],'%m/%d/%y %I:%M:%S %p') # convert to date\n\n #align starting date to use from energy_days with the temp file\n while current_energy_day.date() <= firt_temp_date.date():\n days_counter += 1\n assert days_counter - 1 <= len(energy_days),'Failed to align dates of files!'\n current_energy_day = energy_days[days_counter]\n\n #make fields for new output file. Contains all of temp + energy in KWh\n fields = re.findall(r'\"([^\"]*)\"',temperature_lines[1],re.DOTALL)\n fields.append(',Energy Produced (KWh)')\n\n #check areg options if overwrite was specified.\n if args.overwrite:\n #overwrite new file and write fields list to file\n out_file = open('energy_and_temperature.csv', 'w')\n out_file.write(','.join(fields))\n else:\n need_first_line = not os.path.exists('energy_and_temperature.csv')\n out_file = open('energy_and_temperature.csv', 'a')\n if need_first_line: out_file.write(','.join(fields)) # write fields if file didn't previously exist\n\n #go through each temp line and add to file. Add energy data to end of last line if starting new day\n for t_line_num in range(2, len(temperature_lines)): #line 2 is the first line of data\n t_line = temperature_lines[t_line_num] # get current line\n t_fields = t_line.split(',') # split temp fields\n temperature_day = datetime.datetime.strptime(t_fields[1], '%m/%d/%y %I:%M:%S %p')\n\n #the temp date should ALWAYS be before or equal to the energy date. if this is not true stop execution\n assert temperature_day.date() <= current_energy_day.date(), \"Date matching failed! 
Check that the dates for both files match up and are in order.\"\n\n if temperature_day.date() == current_energy_day.date(): #add energy data to end of previous line\n\n if days_counter < len(energy_days) -1: #prevent range out of bounds error\n out_file.write(str(kilowatt_hours[days_counter])) #write energy to end of last line\n #move to next energy day\n days_counter += 1\n current_energy_day = energy_days[days_counter]\n out_file.write('\\n') #start the next line\n out_file.write(t_line + ',') #write the temp data to the new line\n out_file.close()\n\ndef run_tests():\n print('Running tests:')\n import doctest\n doctest.testmod(verbose=True)\n print('Testing finished.')\n\nif __name__ == '__main__':\n if args.test:\n run_tests()\n else:\n merge_temp_and_energy_files(args)\n" }, { "alpha_fraction": 0.6257967948913574, "alphanum_fraction": 0.6584177017211914, "avg_line_length": 33.6363639831543, "blob_id": "a50633913442e42b515df370cf0094291c65266e", "content_id": "091e376d20af467f65fd5bba1e328d36e1e40c73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2667, "license_type": "no_license", "max_line_length": 148, "num_lines": 77, "path": "/hw1/scripts/summarizeSNaQres.sh", "repo_name": "jbadger3/stat679work", "src_encoding": "UTF-8", "text": "#This script will further summarize the snaq data and store the results in the results folder\n#as a file named 'detailed_summary.csv'\n\n\necho 'analysis,h,CPUtime,Nruns,Nfail,fabs,frel,xabs,xrel,seed,under3460,under3450,under3440' > results/detailed_summary.csv\n\n#loop through each log file. get the name and hybridizations from the log file.\n#then get the CPUtime for the analysis and write all values to the summary.csv file\nfor filename in hw1-snaqTimeTests/log/*\ndo\n\n #get the name of the file XXX.log\n analysis=$(basename $filename | cut -d. -f1)\n\n #get the value of hmax\n hmax=$(grep \"hmax = \\d\\+\" \"$filename\" | grep -o '\\d\\+')\n\n #trim the filename to handle both .out and .log files\n trimmed_name=$(basename $filename | sed -E 's/(^.*\\.)(.*)/\\1\\out/')\n\n #find the Elapsed time line in the file then get the time in seconds\n cpuTime=$(grep -i 'Elapsed time' \"hw1-snaqTimeTests/out/$trimmed_name\" | grep -Eo '\\d+\\.\\d+')\n\n #get the successful # of runs '\n #after confering with classmate this sed implementation works better than grep\n n_runs=$(sed -nE 's/.*seconds in ([0-9]+) successful runs/\\1/p' hw1-snaqTimeTests/out/$trimmed_name)\n\n #get max # of failed proposals\n n_fail=$(grep \"max number of failed proposals\" $filename | sed -E 's/.* proposals = ([0-9]+).*/\\1/')\n\n\n #get \"ftolAbs\" in the log file (tolerateddifference in the absolute value of the score function, to stop the search)\n f_abs=$(grep \"ftolAbs\" $filename | cut -d, -f2 | cut -d= -f2)\n\n #get \"ftolRel\" in log file\n f_rel=$(grep \"ftolAbs\" $filename | cut -d, -f2 | cut -d= -f2)\n\n #get \"xtolAbs\" in log file\n x_abs=$(grep \"xtolAbs\" $filename | cut -d= -f2 | cut -d, -f1)\n\n #get \"xtolRel\" in log file\n x_rel=$(grep \"xtolRel\" $filename | cut -d= -f3 | sed -E 's/(.*).$/\\1/')\n\n #get \"main seed\" for first runs\n seed=$(grep \"main seed\" $filename | sed -E 's/.* ([0-9]+)/\\1/')\n\n #get -loglik of best network\n #since the loglik is in the last field desired, rev the string first, then cut, and rev back\n logliks=$(grep -E \"\\-loglik of best\" $filename | rev | cut -d ' ' -f 1 | rev | cut -d. 
-f1)\n\n under3440=0\n under3450=0\n under3460=0\n\n#creat a loop and keep track of all logliks that fall below specified thresholds\n for number in $logliks\n do\n if [ $number -lt 3440 ]\n then\n ((under3440++))\n fi\n\n if [ $number -lt 3450 ]\n then\n ((under3450++))\n fi\n\n if [ $number -lt 3460 ]\n then\n ((under3460++))\n fi\n done\n\n #append the summary line of the analysis to the summary file\n echo \"$analysis,$hmax,$cpuTime,$n_runs,$n_fail,$f_abs,$f_rel,$x_abs,$x_rel,$seed,$under3460,$under3450,$under3440\" >> results/detailed_summary.csv\n\ndone\n" } ]
6
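The merge_temp_energy_data.py blob in the row above leans on doctests for its unit checks (its `--test` flag runs `doctest.testmod`). For readers unfamiliar with that style, here is a minimal self-contained version of the same pattern, where examples embedded in the docstring are executed and compared against the shown output. The function is a toy chosen to echo the script's Wh-to-KWh conversion, not code from the repository.

```python
import doctest

def to_kwh(watt_hours):
    """Convert watt-hours to kilowatt-hours.

    >>> to_kwh(2956)
    2.956
    >>> to_kwh(0)
    0.0
    """
    return watt_hours / 1000

if __name__ == "__main__":
    doctest.testmod(verbose=False)  # silent when every docstring example passes
```

Keeping the examples next to the code they document is why the script's `remove_all_new_line_characters` and `parse_energy_lines` read as their own specification.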
arielwsc/AI
https://github.com/arielwsc/AI
a5ada340df4ee823c5bd66aa1bf492dc5c08777a
96825254857fc662062656bea1fee1adbfd97ddb
08cd62432e1584ecc4b3a1e2f5977510871c41e5
refs/heads/master
2023-03-24T02:06:30.601920
2021-07-09T05:01:46
2021-07-09T05:01:46
348,232,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.473270446062088, "alphanum_fraction": 0.4862421452999115, "avg_line_length": 29.432098388671875, "blob_id": "9a65a83b5184f042c51924f615aa02d0e29fc94d", "content_id": "c8d18787770b3ff9ed2e32f635b01762b9285f98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2544, "license_type": "no_license", "max_line_length": 81, "num_lines": 81, "path": "/MDPLearning.py", "repo_name": "arielwsc/AI", "src_encoding": "UTF-8", "text": "import sys\r\nimport re\r\n\r\nnumState = (int)(sys.argv[1])\r\nnumAction = sys.argv[2]\r\ninFile = open(sys.argv[3])\r\ndiscFactor = sys.argv[4]\r\n\r\nMDPstates = []\r\n\r\nfor line in inFile:\r\n singleState = []\r\n counter = 0\r\n word = line.split()\r\n while (counter < len(word)):\r\n if counter <= 1:\r\n singleState.append(word[counter])\r\n counter += 1\r\n else:\r\n action = []\r\n action.append(re.sub(r'[()]', '', word[counter]))\r\n action.append(re.sub(r'[()]', '', word[counter+1]))\r\n action.append(re.sub(r'[()]', '', word[counter+2]))\r\n singleState.append(action)\r\n counter += 3\r\n \r\n MDPstates.append(singleState)\r\n\r\n\r\ndef bellmanEq(reward, discFactor, SumDiscReward):\r\n r = (float)(reward)\r\n g = (float)(discFactor) #gamma\r\n J = SumDiscReward #Probability already included\r\n result = r + g * J\r\n return result\r\n\r\ndef computeJtable(MDPstates, discFactor):\r\n states = MDPstates\r\n jTable = []\r\n discFactor = discFactor\r\n \r\n for t in range(21):\r\n if t == 0:\r\n jValuesAtT = []\r\n for i in range(len(states)):\r\n jValuesAtT.append(states[i][0])\r\n jTable.append(jValuesAtT)\r\n elif t == 1:\r\n jValuesAtT = []\r\n for i in range(len(states)):\r\n jValuesAtT.append(states[i][1])\r\n jTable.append(jValuesAtT)\r\n else:\r\n jValuesAtT = []\r\n for i in range(len(states)):\r\n x = 2\r\n bestAction = \"\"\r\n bestActionValue = -100000\r\n while (x < len(states[i])):\r\n action = states[i][x][0]\r\n state = states[i][x][1]\r\n prob = states[i][x][2]\r\n column = jTable[0].index(state)\r\n jValue = jTable[t-1][column][0]\r\n actionValue = (float)(prob) * (float)(jValue)\r\n if actionValue > bestActionValue:\r\n bestActionValue = actionValue\r\n bestAction = action\r\n x += 1\r\n JstarValue = bellmanEq(states[i][1], discFactor, bestActionValue)\r\n bestPolicy = [JstarValue, bestAction]\r\n jValuesAtT.append(bestPolicy)\r\n \r\n jTable.append(jValuesAtT) \r\n\r\n for i in range(20):\r\n print(\"After iteration \" + (str)(i) + \": \" + (str)(jTable[i]))\r\n\r\n\r\n\r\nprint(computeJtable(MDPstates, discFactor))" }, { "alpha_fraction": 0.49469053745269775, "alphanum_fraction": 0.5230582356452942, "avg_line_length": 31.96500015258789, "blob_id": "66113cbbba80ddf460149e5992c68c7e1f601bf7", "content_id": "ff7b13ff437a8fd33c897ded52be06c4cf0a2847", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6592, "license_type": "no_license", "max_line_length": 126, "num_lines": 200, "path": "/BayesianLearning.py", "repo_name": "arielwsc/AI", "src_encoding": "UTF-8", "text": "import sys\n\ntrainFile = open(sys.argv[1])\ntestFile = open(sys.argv[2])\nclassifier1 = []\nclassifier2 = []\n\n#For train file:\ntrainAttr = []\nline = trainFile.readline()\n\nfor word in line.split():\n trainAttr.append(word)\n \ntrainInstances = []\nsingleInstance = []\n\nfor line in trainFile:\n for word in line.split():\n singleInstance.append(word)\n trainInstances.append(singleInstance)\n singleInstance = []\n\n# For test file:\ntestAttr = 
[]\nline = testFile.readline()\n\nfor word in line.split():\n testAttr.append(word)\n \ntestInstances = []\nsingleInstance = []\n\nfor line in testFile:\n for word in line.split():\n singleInstance.append(word)\n testInstances.append(singleInstance)\n singleInstance = []\n\ndef naiveBayes(cond, attributes, instances):\n firstClassValue, firstAttrValue = (0, 0)\n secondClassValue, secondAttrValue = (1, 1)\n\n def train():\n class1 = []\n class2 = []\n data1 = []\n data2 = []\n singleClassifier = []\n\n ### Separate data based on class value ###\n lastColumn = len(instances[0])-1\n firstClassValue = instances[0][lastColumn]\n\n for x in range(len(instances)):\n if firstClassValue == instances[x][lastColumn]:\n class1.append(instances[x])\n else:\n secondClassValue = instances[x][lastColumn]\n class2.append(instances[x])\n \n \n ### Compute for first class ###\n firstAttrValue = class1[0][0]\n\n for i in range(len(class1[0])):\n counter1, counter2 = 0, 0\n for j in range(len(class1)):\n if firstAttrValue == class1[j][i]:\n counter1 = counter1 + 1\n \n else:\n counter2 = counter2 + 1\n secondAttrValue = class1[j][i]\n \n data1.append(counter1)\n data2.append(counter2)\n\n ### Compute first classifier ###\n for x in range(len(class1[0])-1):\n prob = data1[x]/(data1[x]+data2[x])\n prob = \"{:.2f}\".format(prob)\n singleClassifier.append(attributes[x])\n singleClassifier.append(firstAttrValue)\n singleClassifier.append(firstClassValue)\n singleClassifier.append(prob)\n classifier1.append(singleClassifier)\n singleClassifier = []\n\n prob = data2[x]/(data1[x]+data2[x])\n prob = \"{:.2f}\".format(prob)\n singleClassifier.append(attributes[x])\n singleClassifier.append(secondAttrValue)\n singleClassifier.append(firstClassValue)\n singleClassifier.append(prob)\n classifier1.append(singleClassifier)\n singleClassifier = []\n\n ### Compute for second class ###\n data1 = []\n data2 = []\n firstAttrValue = class2[0][0]\n\n for i in range(len(class2[0])):\n counter1, counter2 = 0, 0\n for j in range(len(class2)):\n if firstAttrValue == class2[j][i]:\n counter1 = counter1 + 1\n \n else:\n secondAttrValue = class2[j][i]\n counter2 = counter2 + 1\n \n data1.append(counter1)\n data2.append(counter2)\n\n ### Compute second classifier ###\n\n for x in range(len(class2[0])-1):\n prob = data1[x]/(data1[x]+data2[x])\n prob = \"{:.2f}\".format(prob)\n singleClassifier.append(attributes[x])\n singleClassifier.append(firstAttrValue)\n singleClassifier.append(secondClassValue)\n singleClassifier.append(prob)\n classifier2.append(singleClassifier)\n singleClassifier = []\n\n prob = data2[x]/(data1[x]+data2[x])\n prob = \"{:.2f}\".format(prob)\n singleClassifier.append(attributes[x])\n singleClassifier.append(secondAttrValue)\n singleClassifier.append(secondClassValue)\n singleClassifier.append(prob)\n classifier2.append(singleClassifier)\n singleClassifier = []\n\n #print(\"First classifier: \")\n print(\"P(class=\" + str(firstClassValue) + \")=\" + str(round(len(class1) / (len(class2) + len(class1)), 2)), end = \" \")\n for x in classifier1:\n print(\"P(\" + str(x[0]) + \"=\" + str(x[1]) + \"|\" + str(x[2]) + \")=\" + str(x[3]), end = \" \")\n print()\n #print(\"Second classifier: \")\n print(\"P(class=\" + str(secondClassValue) + \")=\" + str(round(len(class2) / (len(class2) + len(class1)), 2)), end = \" \")\n for x in classifier2:\n print(\"P(\" + str(x[0]) + \"=\" + str(x[1]) + \"|\" + str(x[2]) + \")=\" + str(x[3]), end = \" \")\n\n prob = len(class1) / (len(class2) + len(class1))\n classifier1.append(prob)\n prob 
= len(class2) / (len(class2) + len(class1))\n classifier2.append(prob)\n\n def test():\n accuracy = 0\n\n for i in range(len(instances)):\n acc1, acc2 = (1, 1)\n x = 0\n for j in range(len(instances[0])-1):\n value = instances[i][j]\n\n if classifier1[x][1] == value:\n acc1 = acc1 * float(classifier1[x][3])\n if classifier1[x+1][1] == value:\n acc1 = acc1 * float(classifier1[x+1][3])\n if classifier2[x][1] == value:\n acc2 = acc2 * float(classifier2[x][3])\n if classifier2[x+1][1] == value:\n acc2 = acc2 * float(classifier2[x+1][3])\n\n x = x+2\n\n acc1 = acc1 * classifier1[len(classifier1)-1]\n acc2 = acc2 * classifier2[len(classifier2)-1]\n\n if acc1 > acc2:\n if instances[i][len(instances[0])-1] == classifier1[0][1]:\n accuracy = accuracy + 1\n elif acc2 > acc1:\n if instances[i][len(instances[0])-1] == classifier1[0][1]:\n accuracy = accuracy + 1\n else:\n accuracy = accuracy + 1\n \n accuracy = (round(accuracy / len(instances), 1)*100)\n\n if cond == 0:\n print(\"\\n\")\n print(\"Accuracy on training set (\" + str(len(instances)) + \" instances): \" + str(accuracy) + \"%\")\n else:\n print(\"Accuracy on test set (\" + str(len(instances)) + \" instances): \" + str(accuracy) + \"%\")\n\n if cond == 0:\n train()\n test()\n else:\n test()\n\nnaiveBayes(0, trainAttr, trainInstances)\nnaiveBayes(1, testAttr, testInstances)" }, { "alpha_fraction": 0.8058510422706604, "alphanum_fraction": 0.8058510422706604, "avg_line_length": 36.599998474121094, "blob_id": "8b4f274f6acad4c63e427301e15af4b7d368fea3", "content_id": "60b88ae140331a6f22b7f9d6e0d9f7e6bc5da67c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 376, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/readME.txt", "repo_name": "arielwsc/AI", "src_encoding": "UTF-8", "text": "The objective of Machine Learning is to try to predict \na test instance based on the most similar single or \ngroup of training instances\n\nMachine Learning creates a very compacted representation of a\nregression or classification class to save as much memory as\npossible without enumerating every single state\n\n** To run: **\npython BayesianLearning.py <train-file> <test-file>\n" } ]
3
mxaxaxbx/piedra-papel-tijeras
https://github.com/mxaxaxbx/piedra-papel-tijeras
11f5a3e3083266bcfc2b7fcc1f87c09e52c77fa1
d30afdfa1391a8445e75b581ceeb1fdcf15877de
ea3de11dd6640627dae0c493221567c5882fd6f9
refs/heads/master
2023-02-26T18:45:41.479087
2021-02-10T00:34:01
2021-02-10T00:34:01
337,573,446
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5607808232307434, "alphanum_fraction": 0.5962733030319214, "avg_line_length": 22.39583396911621, "blob_id": "d3ec5e4f87fc2fbd71a4f975b462d11fa59b2222", "content_id": "a02e5a41c275eccfb501c94ee5a8c781d9800f24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1127, "license_type": "no_license", "max_line_length": 90, "num_lines": 48, "path": "/main.py", "repo_name": "mxaxaxbx/piedra-papel-tijeras", "src_encoding": "UTF-8", "text": "from random import randint\nfrom time import sleep\n\nchoices = ['Paper', 'Scessor', 'Rock']\n\np1win = False\np2win = False\n\nwhile p1win != True and p2win != True:\n # Choices\n print(\"Choose \\t\\tRock(1)\\t\\tPaper(2)\\t\\tScissor(3)\")\n p1choice = int( input(\":\") )\n p2choice = randint( 1, 3 )\n\n p1choice_name = choices[p1choice - 1]\n p2choice_name = choices[p2choice - 1]\n\n print(f\"\\n\\nYou Chose {p1choice_name}\")\n sleep(1)\n print(f\"\\n\\nOpponent Chose {p2choice_name}\\n\\n\")\n sleep(1)\n\n # Conditions\n if abs(p1choice - p2choice) == 1:\n if p1choice > p2choice:\n p1win = True\n\n else:\n p2win = True\n\n elif p1choice == p2choice:\n print(\"\\nSince You both chose the Same, its a Draw! Lets try tht again.\\n\\n\")\n\n else:\n if p1choice < p2choice:\n p1win = True\n\n else:\n p2win = True\n\n# Final Result\nprint(\"\\n\"*5)\n\nif p1win == True:\n print(f\"Since You chose {p1choice_name} and Opponent chose {p2choice_name}, You Win!\")\n\nelse:\n print(f\"Since You chose {p2choice_name} and Opponent chose {p1choice_name}, You Win!\")\n " }, { "alpha_fraction": 0.5942857265472412, "alphanum_fraction": 0.5942857265472412, "avg_line_length": 9.29411792755127, "blob_id": "7fef220e3e52e9717304eba247e58a9725030685", "content_id": "530ae71ff15542fd46c5c7908e7e63556f48264f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 176, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/readme.md", "repo_name": "mxaxaxbx/piedra-papel-tijeras", "src_encoding": "UTF-8", "text": "# Instalación\n\n## clonar \n\n```\n git clone https://github.com/mxaxaxbx/piedra-papel-tijeras.git\n```\n\n```\n cd piedra-papel-tijeras\n```\n\n# usar\n\n```\n python main.py\n```\n" } ]
2
First-ov/academiaA
https://github.com/First-ov/academiaA
cfcc2edb73ac1489db4c4b0a4eb87f43f3d1ff0b
e0a21a31874b350676b54d34a07b64ed3dcdccb2
01550cc6dbb856f66987efb8a77be0cac9d7ed98
refs/heads/master
2023-04-04T16:03:39.639281
2021-04-16T00:30:55
2021-04-16T00:30:55
358,036,357
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7002456784248352, "alphanum_fraction": 0.7149876952171326, "avg_line_length": 39.599998474121094, "blob_id": "1b67dd3c94aade7b01a805589824fce9edac5316", "content_id": "c0bb5f7d1919e97d832c98082fced80890849a67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/product/models.py", "repo_name": "First-ov/academiaA", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Product(models.Model):\n #поле Id создается автоматически\n title = models.CharField(max_length=100) # Наименование # String\n amount = models.FloatField() # Количество\n unit = models.CharField(max_length=100) # Единица измерения # String\n price = models.FloatField() # Цена за у.е. # Real\n date = models.DateField() # Дата последнего поступления # Date\n\n" }, { "alpha_fraction": 0.6242878437042236, "alphanum_fraction": 0.6362818479537964, "avg_line_length": 45.33333206176758, "blob_id": "88f841fd1164c7b414c9b83bca1afb367df8cc3f", "content_id": "f4df285f7956213dfc548755c92c2cfa68e755bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3647, "license_type": "no_license", "max_line_length": 95, "num_lines": 72, "path": "/product/views.py", "repo_name": "First-ov/academiaA", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom rest_framework import status,serializers\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom product.models import Product\nfrom django.http import HttpResponse\n\ndef index(request):\n return HttpResponse('haproxy check')\n\nparams=['id','amount','price','date', 'title', 'unit']\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Product\n fields = params\n\n@api_view(['GET', 'POST', 'UPDATE', 'DELETE'])\ndef ProductView(request):\n if request.method == 'GET':\n products = Product.objects.all()#выбор всех объектов\n serializer = ProductSerializer(products, many=True)#Serializer для вывода всех объектов\n return Response({\"resourses\" : serializer.data,\n \"total_count\": Product.objects.all().count()},#вывод количества\n status=status.HTTP_200_OK)\n\n elif request.method == 'POST':\n serializer = ProductSerializer(data=request.data)#Serializer для создания объекта\n if serializer.is_valid():#проверка JSON объекта\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'UPDATE':\n data={}#создание словаря обновляемых полей\n for each in request.POST:#добавление данных полей из параметра запроса\n if each in params:\n data[each] = request.POST.get(each)\n else:\n return Response('', status=status.HTTP_400_BAD_REQUEST)#поле не существует\n if not('id' in data):\n return Response('', status=status.HTTP_400_BAD_REQUEST)#отсутсвует id\n if Product.objects.filter(id=request.POST.get('id')).count():\n product=Product.objects.filter(id=data['id'])[0]#выбор объекта по id\n serializer = ProductSerializer(product,\n data=request.data,\n partial=True)#Serializer для обновления объекта\n if serializer.is_valid():\n serializer.save()\n return Response('', status=status.HTTP_202_ACCEPTED)\n return Response('', status=status.HTTP_400_BAD_REQUEST)#id не найден\n\n elif request.method == 'DELETE':\n params0=['id']\n for each in request.POST:\n if 
each in params0:\n pass\n else:\n return Response('', status=status.HTTP_400_BAD_REQUEST)#параметр отличный от id\n if request.POST.get('id')==None:\n return Response('', status=status.HTTP_400_BAD_REQUEST)#отсутсвует id\n if Product.objects.filter(id=request.POST.get('id')).count():\n Product.objects.filter(id=request.data['id']).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response('', status=status.HTTP_400_BAD_REQUEST)#id не найден\n\n@api_view(['GET'])\ndef Cost(request):\n if request.method == 'GET':\n total_cost=0#объявление общей стоимости\n for each in Product.objects.all():\n total_cost+=each.price#добавление цены каждого объекта\n return Response({\"total_cost\": total_cost}, status=status.HTTP_200_OK)" }, { "alpha_fraction": 0.7670251131057739, "alphanum_fraction": 0.7885304689407349, "avg_line_length": 38.85714340209961, "blob_id": "b8104979aab6475161b134de7266bf2f49c87a84", "content_id": "3de0aa7a8be9accf7654ae5107ffa1ea54d340b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 279, "license_type": "no_license", "max_line_length": 72, "num_lines": 7, "path": "/academiaa.ini", "repo_name": "First-ov/academiaA", "src_encoding": "UTF-8", "text": "[uwsgi]\nchdir = /home/paov/AA/academiaA\nvirtualenv = /home/paov/AA/academiaA/linvenv\npythonpath = /home/paov/AA/academiaA/linvenv/lib/python3.9/site-packages\nsocket = /home/paov/AA/academiaA/django.sock\nwsgi-file = /home/paov/AA/academiaA/djangoProject1/wsgi.py\nchmod-socket=666\n" } ]
3
RadwaaGalal/Task2
https://github.com/RadwaaGalal/Task2
5abcdc720b35177099dbea40e8a4f151a8bff200
0757592c03c944ab23ebe2deb4752a2bcbdfe12d
3f035d0602148e5f78de1998968598fdbb2d8f3d
refs/heads/main
2023-07-09T09:39:53.463810
2021-08-11T19:14:27
2021-08-11T19:14:27
395,006,432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.35444745421409607, "alphanum_fraction": 0.3814016282558441, "avg_line_length": 26.14634132385254, "blob_id": "d73344742f820600e35a70d75a17ae1145f84810", "content_id": "275f6db0d94ac9d0f10c4106476dda4e3bfee49c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2226, "license_type": "no_license", "max_line_length": 120, "num_lines": 82, "path": "/main.py", "repo_name": "RadwaaGalal/Task2", "src_encoding": "UTF-8", "text": "def print_R():\n for row in range(7):\n for col in range(5):\n print(end=\" \")\n if col == 0 or (col == 4 and (row != 0 and row != 3)) or ((row == 0 or row == 3) and (col > 0 and col < 4)):\n print(\"R\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\ndef print_A():\n for row in range(7):\n for col in range(5):\n print(end=\" \")\n if ((col == 0 or col == 4) and row != 0) or ((row == 0 or row == 3) and (col > 0 and col < 4)):\n print(\"A\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\n\ndef print_D():\n for row in range(7):\n for col in range(5):\n print(end=\" \")\n if col == 0 or (col == 4 and (row != 0 and row != 6)) or ((row == 0 or row == 6) and (col > 0 and col < 4)):\n print(\"D\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\n\ndef print_W():\n for row in range(4):\n for col in range(9):\n print(end=\" \")\n if (col == 0 or col == 8) or (col == 2 and row == 2) or (col == 4 and row == 1) or (col == 6 and row == 2):\n print(\"W\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\n\ndef print_G():\n for row in range(7):\n for col in range(5):\n print(end=\" \")\n if (col == 0 and (row != 0 and row != 6)) or ((row == 0 or row == 6) and (col > 0 and col < 4)) or (\n col == 4 and row == 4) or (col == 4 and row == 3) or (col == 3 and row == 3) or (\n col == 4 and row == 5):\n print(\"G\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\ndef print_L():\n for row in range(6):\n for col in range(6):\n print(end=\" \")\n if col == 0 or row == 5:\n print(\"L\", end=\"\")\n else:\n print(end=\" \")\n print(\" \")\n\n\n\n\nprint_R()\nprint(\"\\n\")\nprint_A()\nprint(\"\\n\")\nprint_D()\nprint(\"\\n\")\nprint_W()\nprint(\"\\n\")\nprint_A()\nprint(\"-----------------------------------------------------\")\nprint_G()\nprint(\"\\n\")\nprint_A()\nprint(\"\\n\")\nprint_L()\nprint(\"\\n\")\nprint_A()\nprint(\"\\n\")\nprint_L()\n" } ]
1
calebfrome/eBirdLiferAlert
https://github.com/calebfrome/eBirdLiferAlert
495aa41979129e644c67110239431345f9cee5a6
1757380b83de3e369e42150f9f8e9eee2b838d4a
3901ecf42cfd59dbc2f3ef4b8744c3df72b574d5
refs/heads/master
2020-04-20T19:08:22.473971
2019-03-05T23:45:59
2019-03-05T23:45:59
169,041,160
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7802534103393555, "alphanum_fraction": 0.7820008993148804, "avg_line_length": 87.03845977783203, "blob_id": "6a473084f91c0d3c3c6700bcc19a598d00e869e3", "content_id": "3337f8ef322080bab713155bacbb066ce1cb0378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2289, "license_type": "no_license", "max_line_length": 598, "num_lines": 26, "path": "/README.txt", "repo_name": "calebfrome/eBirdLiferAlert", "src_encoding": "UTF-8", "text": "======== eBird Lifer Alert ========\n\t\t\tCaleb Frome\n\neBird Lifer Alert is a personal project I developed to generate a customizable list of nearby birds that I haven't seen, based on eBird reports. The goal is to build upon the existing eBird alerts feature, adding customization options to make a more useful tool, though perhaps sacrificing some simplicity and ease of use.\n\nTo get started using this program, you'll need Python 3 installed on your computer, plus a few other libraries (listed in the import statements at the beginning of createAlert.py). This process can be streamlined using an IDE such as PyCharm - this is particularly useful if you plan to make any changes to the code. The venv directory should be located in the top-level eBirdLiferAlert directory that contains all of the other files. I didn't include my venv directory in this repository because of its size and dynamic nature, but if you have trouble setting up the environment, I can provide it.\nOnce that's working, you'll want to edit config.json to personalize the program. You'll have to provide your eBird username and password (don't worry, no one can access any of your local files) and the list of states you want alerts from. There are a few other settings here as well, including:\n\ncombine\n- none:\t\tdon't combine any alerts (default)\n- county:\tcombine alerts of the same species within the same county (and state)\n- state:\tcombine alerts of the same species within the same state\n- all:\t\tcombine all alerts of the same species\n\naba_rare\n- 3/4/5:\tinclude reports of birds anywhere in the ABA area whose ABA rarity code is at least this number\n- off:\t\tdon't include reports of any rare birds outside the chosen regions\n\noutput\n- none:\t\tdon't display any results (the output.html file is still created)\n- browser:\topen the about.html results file in a browser\n- desktop:\tdisplay the results as desktop notifications\n\t\nAfter the environment has been set up, any Python IDE can easily run the program, or you can run createAlert.bat.\n\nThe entire project open-source and publicly available. If you're interested in my future plans for the project, check out the Projects tab. 
If you need any help getting started, have any additional suggestions, find a bug, or you're interested in collaborating, let me know!\n" }, { "alpha_fraction": 0.5590702295303345, "alphanum_fraction": 0.592218279838562, "avg_line_length": 45.238319396972656, "blob_id": "7cd073011e7b4a4c4170130b05a8abb6da307cea", "content_id": "04997bb0d3a2d8602cc5723c606f0363078d71e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9895, "license_type": "no_license", "max_line_length": 135, "num_lines": 214, "path": "/createAlert.py", "repo_name": "calebfrome/eBirdLiferAlert", "src_encoding": "UTF-8", "text": "import subprocess\nfrom bs4 import BeautifulSoup\nimport mechanize\nimport http.cookiejar as cj\nimport json\nimport datetime\nimport calendar\nfrom win10toast import ToastNotifier\nimport time\n\nalert_url_prefix = 'https://ebird.org/ebird/alert/summary?sid='\nalert_sids = {'AL': 'SN10344', 'AK': 'SN10345', 'AR': 'SN10346', 'AZ': 'SN10347', 'CA': 'SN10348', 'CO': 'SN10349',\n 'CT': 'SN10350', 'DC': 'SN10351', 'DE': 'SN10352', 'FL': 'SN10353', 'GA': 'SN10354', 'HI': 'SN10355',\n 'IA': 'SN10356', 'ID': 'SN10357', 'IL': 'SN10358', 'IN': 'SN10359', 'KS': 'SN10360', 'KY': 'SN10361',\n 'LA': 'SN10362', 'MA': 'SN10363', 'MD': 'SN10364', 'ME': 'SN10365', 'MI': 'SN10366', 'MN': 'SN10367',\n 'MO': 'SN10368', 'MS': 'SN10369', 'MT': 'SN10370', 'NC': 'SN10371', 'ND': 'SN10372', 'NE': 'SN10373',\n 'NH': 'SN10374', 'NJ': 'SN10375', 'NM': 'SN10376', 'NV': 'SN10377', 'NY': 'SN10378', 'OH': 'SN10379',\n 'OK': 'SN10380', 'OR': 'SN10381', 'PA': 'SN10382', 'RI': 'SN10383', 'SC': 'SN10384', 'SD': 'SN10385',\n 'TN': 'SN10386', 'TX': 'SN10387', 'UT': 'SN10388', 'VT': 'SN10389', 'VA': 'SN10390', 'WA': 'SN10391',\n 'WI': 'SN10392', 'WV': 'SN10393', 'WY': 'SN10394', 'ABA': 'SN10489'}\n\nlife_list_url = 'https://ebird.org/MyEBird?cmd=lifeList&listType=world&listCategory=default&time=life'\naba_list_url = 'https://ebird.org/MyEBird?cmd=lifeList&listType=aba&listCategory=default&time=life'\nlogin_url = 'https://secure.birds.cornell.edu/cassso/login?service=https%3A%2F%2Febird.org%2Flogin%2Fcas%3Fportal%3Debird&locale=en_US'\n\nmonth_dict = {v: k for k, v in enumerate(calendar.month_abbr)}\n\n\nclass Observation:\n def __init__(self, species, count, date, checklist_link, location, map_link, county, state, aba_rare):\n self.species = species\n self.count = count\n self.date = date\n self.checklist_link = checklist_link\n self.location = location\n self.map_link = map_link\n self.county = county\n self.state = state\n self.aba_rare = aba_rare\n\n def __eq__(self, other):\n return self.species == other.species and self.date.strftime('%y%b%d%I%M') == other.date.strftime('%y%b%d%I%M') \\\n and self.location == other.location\n\n def output(self):\n return [self.species, self.count, self.date.strftime('%b %d %I:%M %p'), self.location,\n '<a href=' + self.map_link + '>Map</a>', self.state, self.county,\n '<a href=' + self.checklist_link + '>Checklist</a>']\n\n\ndef main():\n # Read ABA checklist\n aba_list = {}\n aba_list_file = open('aba_list.txt')\n for line in aba_list_file.readlines():\n species_elements = line.strip().split(',')\n aba_list[species_elements[0]] = int(species_elements[1])\n\n # Read exceptions\n exceptions_list = []\n exceptions_file = open('exceptions.txt')\n for species in exceptions_file.readlines():\n exceptions_list.append(species.strip())\n\n # Read config file\n config_data = json.load(open('config.json'))\n\n # Create web browser\n br = 
mechanize.Browser()\n bcj = cj.LWPCookieJar()\n br.set_cookiejar(bcj)\n\n # Browser options\n br.set_handle_equiv(True)\n br.set_handle_gzip(True)\n br.set_handle_redirect(True)\n br.set_handle_referer(True)\n br.set_handle_robots(False)\n br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n br.addheaders = [('User-agent', 'Chrome')]\n\n # Open the eBird login page\n br.open(login_url)\n # Select the login form\n br.select_form(nr=0)\n # Set credentials\n br.form['username'] = config_data['credentials']['username']\n br.form['password'] = config_data['credentials']['password']\n # Submit the login form\n br.submit()\n\n # Scrape life list\n print('scraping life list')\n life_list = []\n life_list_html = BeautifulSoup(br.open(life_list_url).read(), 'html.parser')\n for a in life_list_html.find_all(attrs={'data-species-code': True}):\n life_list.append(str.strip(a.text))\n\n # Scrape eBird alerts\n print('scraping eBird alerts:', end=' ')\n observation_list = []\n alert_regions = config_data['regions']\n alert_regions.append('ABA' if int(config_data['aba_rare']) in [3, 4, 5] else None)\n for alert_region in alert_regions:\n print(alert_region, end=' ')\n alert_html = BeautifulSoup(br.open(alert_url_prefix + alert_sids[alert_region]).read(), 'html.parser')\n for tr in alert_html.find_all('tr', class_='has-details'):\n species_name = str.strip(tr.findChild(class_='species-name').findChild('a').text)\n # Filter out species from ABA RBA whose code is below the specified level\n if alert_region == 'ABA':\n if species_name not in aba_list.keys() or aba_list[species_name] < int(config_data['aba_rare']):\n continue\n species_count = str.strip(tr.findChild(class_='count').text)\n date_str = str.strip(tr.findChild(class_='date').text)[:-10] # truncate 'Checklist'\n date_month_str = date_str[0:3]\n date_month = month_dict[date_month_str]\n date_day = int(date_str[4:6])\n date_year = int(date_str[8:12])\n date_hour = 0 if len(date_str) < 13 else int(date_str[13:15]) # some reports don't have a time\n date_minute = 0 if len(date_str) < 13 else int(date_str[16:18]) # some reports don't have a time\n species_date = datetime.datetime(date_year, date_month, date_day, date_hour, date_minute)\n species_checklist_link = 'https://ebird.org' + str.strip(tr.findChild(class_='date').findChild('a')['href'])\n species_location = str.strip(tr.findChild(class_='location').text)[:-4] # truncate 'Map'\n species_map_link = str.strip(tr.findChild(class_='location').findChild('a')['href'])\n species_county = str.strip(tr.findChild(class_='county').text)\n species_state = str.strip(tr.findChild(class_='state').text).split(',')[0] # truncate ', United States'\n species_aba_rare = (alert_region == 'ABA')\n observation_list.append(Observation(species_name, species_count, species_date, species_checklist_link,\n species_location, species_map_link, species_county, species_state,\n species_aba_rare))\n\n # determine which species would be lifers\n print('\\ncreating custom alert')\n observation_list.sort(key=lambda x: x.state, reverse=True)\n observation_list.sort(key=lambda x: x.species, reverse=False)\n lifer_needs = []\n for o in observation_list:\n if o.species not in life_list and o.species not in exceptions_list and (\n o.species in aba_list.keys() or o.aba_rare):\n lifer_needs.append(o)\n\n # Combine reports of the same species based on the config setting\n combined_needs_dict = {}\n if config_data['combine'] in ['county', 'state', 'all']:\n for obs in lifer_needs:\n key_location = 'all'\n if 
config_data['combine'] == 'county':\n key_location = obs.county\n elif config_data['combine'] == 'state':\n key_location = obs.state\n key = (obs.species, key_location)\n if key not in combined_needs_dict or obs.date > combined_needs_dict[key].date:\n combined_needs_dict[key] = obs\n\n lifer_needs = combined_needs_dict.values()\n\n # build output html file\n output = open('alert.html', 'w')\n output.write('<html><body><h1>eBird Lifer Alert</h1>')\n if len(lifer_needs) == 0:\n output.write('<p>No lifers reported.</p>')\n else:\n output.write('<table border=1 style=border-collapse:collapse cellpadding=3><tr><th>Species</th><th>Count</th>'\n '<th>Date</th><th>Location</th><th>Map Link</th><th>State</th><th>County</th>'\n '<th>Checklist Link</th></tr>')\n for l in lifer_needs:\n output.write('<tr>')\n for td in range(len(l.output())):\n output.write('<td>%s</td>' % l.output()[td])\n output.write('</tr>')\n output.write('</table>')\n output.write('</body></html>')\n output.close()\n\n # display the results in the browser\n if config_data['output'] == 'browser':\n subprocess.Popen('alert.html', shell=True)\n\n # load the previous alert for comparison\n previous_alert = []\n previous_alert_file = open('alert.txt')\n for raw_obs in previous_alert_file.readlines():\n obs = raw_obs.strip().split(',')\n species = obs[0]\n date_str = obs[2]\n date_month = month_dict[date_str[0:3]]\n date_year = datetime.datetime.today().year\n if datetime.datetime.today().month == 1 and date_month == 12:\n date_year -= 1\n date = datetime.datetime(date_year, date_month, int(date_str[4:6]), int(date_str[7:9]), int(date_str[10:12]))\n location = obs[3]\n previous_alert.append(Observation(species, 0, date, '', location, '', '', '', False))\n\n # display the results as desktop notifications\n if config_data['output'] == 'desktop':\n toaster = ToastNotifier()\n for obs in lifer_needs:\n if obs in previous_alert:\n continue\n toaster.show_toast(obs.species, obs.county + ', ' + obs.state + ' | ' + obs.date.strftime('%b %d %I:%M %p'),\n icon_path='ebird_logo.ico', duration=5)\n while toaster.notification_active():\n time.sleep(0.1)\n\n # write the alert to a file for future reference\n save_data = open('alert.txt', 'w')\n for obs in lifer_needs:\n for item in obs.output():\n save_data.write(str(item) + ',')\n save_data.write('\\n')\n\n\nif __name__ == '__main__':\n main()\n" } ]
2
adrianpaniagualeon/Duck-DNS-Updater
https://github.com/adrianpaniagualeon/Duck-DNS-Updater
dc66b1bf98d59cb56228f8d4365c2877ba1f270d
100f8c7433726203a33f6f46e994f307e71eeb72
ee15fbfdf89c6e02dc4b8f4511c872e34acebb96
refs/heads/main
2023-03-19T14:53:40.247562
2021-03-08T22:48:32
2021-03-08T22:48:32
345,815,455
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.7804255485534668, "alphanum_fraction": 0.7812765836715698, "avg_line_length": 52.45454406738281, "blob_id": "e6a9b07ae55b3633e7a2d53b258290525aafe046", "content_id": "e40d81c7e27e47de2dcf1c7b4081169c6a86f032", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1194, "license_type": "permissive", "max_line_length": 234, "num_lines": 22, "path": "/README.md", "repo_name": "adrianpaniagualeon/Duck-DNS-Updater", "src_encoding": "UTF-8", "text": "## DUCK DNS UPDATER\n\n## ¿Qué hace este script?\nEste script permite la actualización de la IP pública de la maquina que lo ejecuta en los servidores DNS de Duck DNS. Para evitar sobrecargar la API de Duck DNS, se ejecuta cada 5 minutos hasta que el usuario decida parar el servicio.\n\n## ¿Cómo puedo colaborar?\nExisten dos formas de colaborar:\n- Añadiendo nuevas funcionalidades al bot mediante _pull-request_. \n- Aportación económica: Puedes aportar tu granito de arena por [Paypal](https://paypal.me/panleoad)\n- Invitarme a un café: Me puedes invitar a un café a través de [Ko-Fi](https://ko-fi.com/adrianpaniagualeon)\n\n## ¿Necesita alguna librería externa?\nSi, este script necesita la librería [Requests](https://requests.readthedocs.io/en/master/). Puedes instalarla con el siguiente comando \"pip install requests\".\n\n## ¿Qué datos necesito para autenticarme?\n|DATO|INFORMACIÓN|\n|-|-|\n|EMAIL|CORREO ELECTRÓNICO UTILIZADO PARA REGISTRARTE EN Duck DNS|\n|PASSWORD|CONTRASEÑA DE TU CUENTA Duck DNS|\n|HOST|DOMINIO QUE QUEREMOS ACTUALIZAR|\n\nTienes que modificar el archivo ddns.py con tus datos para que el script funcione. Es muy importante no introducir espacios para que todo funcione correctamente." }, { "alpha_fraction": 0.438618928194046, "alphanum_fraction": 0.4437340199947357, "avg_line_length": 20.72222137451172, "blob_id": "01316e448723ad5427516aab4ca40de143d12d4e", "content_id": "18aff1a70b633a61e425845001258f29d20b58a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "permissive", "max_line_length": 82, "num_lines": 36, "path": "/ddns.py", "repo_name": "adrianpaniagualeon/Duck-DNS-Updater", "src_encoding": "UTF-8", "text": "import requests\nimport time\n\n#####################################\n# VARIABLE #\n#####################################\nTOKEN = \"\"\nHOST = \"\"\n#####################################\n\n\n\ndef get_ip():\n\tglobal my_ip\n\n\tmy_ip = requests.get('http://icanhazip.com/').text\n\tmy_ip = my_ip.strip()\n\n\n\ndef change_dns():\n\tglobal status\n\turl = \"https://www.duckdns.org/update?domains=\"+HOST+\"&token=\"+TOKEN+\"&ip=\"+my_ip\n\tr = requests.get(url)\n\tstatus = r.text\n\nwhile True:\n\tprint (\"-------------------------------------------\")\n\tprint (\"WEB [\"+HOST+\"]\")\n\tget_ip()\n\tprint (\"CURRENT IP [\"+my_ip+\"]\")\n\tchange_dns()\n\tprint (\"STATUS [\"+status+\"]\")\n\tprint (\"WAITING 5 MINUTES BEFORE THE NEXT EXECUTION\")\n\tprint (\"-------------------------------------------\\n\\n\")\n\ttime.sleep(300)\n" } ]
2
BY-jk/sqlalchemy_exasol
https://github.com/BY-jk/sqlalchemy_exasol
72348006318f2fb1b1bdf6e688866a9e633271eb
854468fd8036da74106b4ca691a0093b6b8206c0
0b803b3cde72fe40dcbf4379dc1146331147a9ce
refs/heads/master
2021-01-21T08:06:16.336354
2020-05-20T08:00:34
2020-05-20T08:00:34
45,261,152
0
0
null
2015-10-30T15:57:53
2015-10-13T21:43:07
2015-10-21T22:12:23
null
[ { "alpha_fraction": 0.36486485600471497, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 23.33333396911621, "blob_id": "47868a45aa6c28843f093711068cc13200a75f1f", "content_id": "455f99628f48f79770bfb10479d112d44dfa8e84", "detected_licenses": [ "LicenseRef-scancode-public-domain", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 74, "license_type": "permissive", "max_line_length": 45, "num_lines": 3, "path": "/requirements.txt", "repo_name": "BY-jk/sqlalchemy_exasol", "src_encoding": "UTF-8", "text": "SQLAlchemy==1.0.9 # rq.filter: >=1.0.0,<1.1.0\npyodbc==3.0.10\nsix==1.10.0 \n" }, { "alpha_fraction": 0.5972001552581787, "alphanum_fraction": 0.6563792824745178, "avg_line_length": 48.873016357421875, "blob_id": "d92fc63070b516066be36f1b800ebe8dbbc58713", "content_id": "4f93fd9effe720883af53f4b4184522e7b088e11", "detected_licenses": [ "LicenseRef-scancode-public-domain", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3143, "license_type": "permissive", "max_line_length": 173, "num_lines": 63, "path": "/test/test_exadialect_pyodbc.py", "repo_name": "BY-jk/sqlalchemy_exasol", "src_encoding": "UTF-8", "text": "from sqlalchemy.engine import url as sa_url\n\nfrom sqlalchemy.testing import fixtures\nfrom sqlalchemy.testing import eq_\n\nfrom sqlalchemy_exasol.pyodbc import EXADialect_pyodbc\n\n\nclass EXADialect_pyodbcTest(fixtures.TestBase):\n \n def setup(self):\n self.dialect = EXADialect_pyodbc()\n\n def assert_parsed(self, dsn, expected_connector, expected_args):\n url = sa_url.make_url(dsn)\n connector, args = self.dialect.create_connect_args(url)\n eq_(connector, expected_connector)\n eq_(args, expected_args)\n\n\n def test_create_connect_args(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema\",\n ['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {})\n \n def test_create_connect_args_with_driver(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema?driver=FOOBAR\",\n ['DRIVER={FOOBAR};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {})\n\n def test_create_connect_args_dsn(self):\n self.assert_parsed(\"exa+pyodbc://scott:tiger@exa_test\",\n ['DSN=exa_test;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {})\n\n def test_create_connect_args_trusted(self):\n self.assert_parsed(\"exa+pyodbc://192.168.1.2..8:1234/my_schema\",\n ['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;Trusted_Connection=Yes;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {})\n \n\n def test_create_connect_args_autotranslate(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema?odbc_autotranslate=Yes\",\n ['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;AutoTranslate=Yes;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {})\n\n \n def test_create_connect_args_with_param(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema?autocommit=true\",\n ['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {'AUTOCOMMIT': True})\n \n \n def test_create_connect_args_with_param_multiple(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema?autocommit=true&ansi=false&unicode_results=false\",\n 
['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y'],\n {'AUTOCOMMIT': True, 'ANSI': False, 'UNICODE_RESULTS': False})\n\n\n def test_create_connect_args_with_unknown_params(self):\n self.assert_parsed(\"exa+pyodbc://scott:[email protected]:1234/my_schema?clientname=test&querytimeout=10\",\n ['DRIVER={EXAODBC};EXAHOST=192.168.1.2..8:1234;EXASCHEMA=my_schema;UID=scott;PWD=tiger;INTTYPESINRESULTSIFPOSSIBLE=y;clientname=test;querytimeout=10'],\n {})\n\n" }, { "alpha_fraction": 0.4318181872367859, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 13.666666984558105, "blob_id": "7458c847102975a6c20493eb1dd4bb3ef36a4f17", "content_id": "38b4557170eab6d13aed3124c728695eb2548ea4", "detected_licenses": [ "LicenseRef-scancode-public-domain", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "permissive", "max_line_length": 17, "num_lines": 3, "path": "/requirements_test.txt", "repo_name": "BY-jk/sqlalchemy_exasol", "src_encoding": "UTF-8", "text": "pytest==2.7.1\npytest-cov==1.8.1\nmock==1.0.1\n" } ]
3
aguricci/Veterinaria_Django_3
https://github.com/aguricci/Veterinaria_Django_3
b08e815580906946804c3aeab043aa7a3385db31
f07a0a7554f9fde87838e95d565bedfed9231bc3
b3c14651d509ebca089ce5a5b24a78e4e0584260
refs/heads/main
2023-06-24T02:23:04.458557
2021-07-21T19:15:34
2021-07-21T19:15:34
384,553,358
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6873315572738647, "alphanum_fraction": 0.6873315572738647, "avg_line_length": 25.571428298950195, "blob_id": "c9266439ab7038717be5847d99d125c8051370c5", "content_id": "ef7addb988d347c6b5ff892f49a1d96c2649cc69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 371, "license_type": "no_license", "max_line_length": 70, "num_lines": 14, "path": "/vetRicci/sitioVet/forms.py", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "from django.db.models import fields\nfrom django.forms import ModelForm\nfrom .models import Mascota, Animal\n\nclass Agregamascota(ModelForm):\n class Meta:\n model = Mascota\n fields = ['idMascota', 'nombre', 'telefonoDuenio', 'idAnimal']\n\n\nclass Agregacategoria(ModelForm):\n class Meta:\n model = Animal\n fields = ['idAnimal', 'nombreAnimal']" }, { "alpha_fraction": 0.515320360660553, "alphanum_fraction": 0.5682451128959656, "avg_line_length": 18.94444465637207, "blob_id": "eabd516468c448485ebde9bcaa4aacfd5587a329", "content_id": "929476e3f17313e76ce33a687967ae26b5fd5f4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/vetRicci/sitioVet/migrations/0002_rename_nombre_animal_nombreanimal.py", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.4 on 2021-07-12 21:28\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sitioVet', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='animal',\n old_name='nombre',\n new_name='nombreAnimal',\n ),\n ]\n" }, { "alpha_fraction": 0.524124264717102, "alphanum_fraction": 0.5822868347167969, "avg_line_length": 31.17021369934082, "blob_id": "9c2a6622746d6b0060143cda7d037a295e23d34e", "content_id": "7086af3964cdf08fa7d1131bf177a52de84fd84b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3035, "license_type": "no_license", "max_line_length": 107, "num_lines": 94, "path": "/vetRicci/sitioVet/static/sitioVet/js/index.js", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "\n// codigos del clima que entrega la API\nvar soleado = 1000\nvar nublado = [1003, 1006, 1009, 1030, 1135, 1147, 1087]\nvar lluvia = [1063, 1069, 1072, 1150, 1153, 1168, 1171, 1180, 1183, 1186, 1189, 1192, 1195, 1198,\n 1201, 1207, 1240, 1243, 1246, 1249, 1252, 1273, 1276]\n\n// función que maneja el clima en general\nfunction clima() {\n navigator.geolocation.getCurrentPosition(function (p) {\n var lat;\n var lon;\n lat = p.coords.latitude;\n lon = p.coords.longitude;\n llamado(lat, lon);\n })\n}\n\n//llamado a la API del clima y callback a la funcion que despliega la información en la página\nfunction llamado(lat, lon) {\n $.get({\n url: 'https://api.weatherapi.com/v1/current.json',\n data: {\n key: '672af457a1974972960214033210606',\n q: lat + \", \" + lon,\n lang: 'es',\n units: 'metrics'\n },\n datatype: 'json',\n success: function(response){\n console.log(response);\n manejo_respuesta(response.location, response.current)\n }\n })\n}\n\n//Función que analiza la respuesta de la API del clima, selecciona los iconos correspondientes y\n//lo agrega a la página\nfunction manejo_respuesta(locacion, clima){\n $(\"#w-cont\").addClass(\"weather-cont\");\n $(\".weather\").prepend(\"<h3>Clima Actual y 
Ubicación</h3>\");\n $(\"#ubicacion\").append(\"<h5>Tiempo en: <i class='fas fa-map-marker-alt fa-2x location'></i></h5> <h6>\" \n + locacion.name + \", \" + locacion.region + \"</h6> \");\n\n var desc = clima.condition.text;\n var icon;\n\n if (clima.condition.code === soleado){\n if (clima.is_day === \"yes\"){\n icon = '<h5>' + desc + ' <i class=\"fas fa-sun fa-2x sun\"></i></h5>';\n }\n else{\n icon = '<h5>' + desc + ' <i class=\"fas fa-moon fa-2x moon\"></i></h5>';\n }\n \n }\n else if(nublado.includes(clima.condition.code)){\n icon = '<h5>' + desc + ' <i class=\"fas fa-cloud fa-2x cloud\"></i></h5>';\n }\n else if(lluvia.includes(clima.condition.code)){\n icon = '<h5>' + desc + ' <i class=\"fas fa-cloud-showers-heavy fa-2x\"></i></h5>';\n }\n\n $(\"#tiempo\").append(icon + '<h6>Temperatura: ' + clima.temp_c + '˚C</h6>');\n}\n\nclima();\n\n//Funcion para la ubicacion en google maps\nfunction myMap() {\n //se solicita la ubicación\n navigator.geolocation.getCurrentPosition(function (p, ) {\n var lat;\n var lon;\n lat = p.coords.latitude;\n lon = p.coords.longitude;\n mapa(lat, lon);\n })\n\n}\n\n//Funcion que crea el mapa con las coordenadas de la API de HTML llamada en la función,\n//la agrega al div correspondiente y le agrega el marcador de la ubicacion mas precisa \nfunction mapa(lat, lon) {\n loc = new google.maps.LatLng(lat, lon);\n var mapProp = {\n center: loc,\n zoom: 15,\n };\n var map = new google.maps.Map(document.getElementById(\"map\"), mapProp);\n const marker = new google.maps.Marker({\n position: loc,\n map: map,\n }); \n}\n\n" }, { "alpha_fraction": 0.5741758346557617, "alphanum_fraction": 0.593406617641449, "avg_line_length": 34.225807189941406, "blob_id": "b889bf421b0fc1fde215867f22bdb325d4447f44", "content_id": "d6a56d56cd1f98e372f96e7eaea93b209dd26195", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1093, "license_type": "no_license", "max_line_length": 116, "num_lines": 31, "path": "/vetRicci/sitioVet/migrations/0001_initial.py", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.4 on 2021-07-12 20:55\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Animal',\n fields=[\n ('idAnimal', models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de Animal')),\n ('nombre', models.CharField(max_length=50, verbose_name='Nombre de Animal')),\n ],\n ),\n migrations.CreateModel(\n name='Mascota',\n fields=[\n ('idMascota', models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de Mascota')),\n ('nombre', models.CharField(max_length=30, verbose_name='Nombre de Mascota')),\n ('telefonoDuenio', models.CharField(max_length=15, verbose_name='Telefono del Dueño')),\n ('idAnimal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sitioVet.animal')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6658795475959778, "alphanum_fraction": 0.6658795475959778, "avg_line_length": 41.20000076293945, "blob_id": "77720669953305e6c382c2dad7db4543e9afed27", "content_id": "3ff77b8dd93dc6dc68354fe43f6adb522f0cf11d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/vetRicci/sitioVet/urls.py", "repo_name": "aguricci/Veterinaria_Django_3", 
"src_encoding": "UTF-8", "text": "from django.urls import path\nfrom django.urls.resolvers import URLPattern\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index ,name='index'), \n path('clientes/', views.clientes, name='clientes'),\n path('nosotros/', views.nosotros, name='nosotros'),\n path('consultas/', views.consultas, name='consultas'),\n path('medicos/', views.medicos, name='medicos'),\n path('peluqueria/', views.pelu, name='pelu'),\n path('contacto/', views.contacto, name='contacto'),\n path('vacunacion/', views.vacunacion, name='vacunacion'),\n path('mascotas/', views.mascotas, name='mascotas'),\n path('crearmascota/', views.crear, name='crear'),\n path('eliminar/<id>', views.eliminar, name='eliminar'),\n path('modificar/<id>', views.modificar, name='modificar'),\n path('categoria/', views.categoria, name='categoria'),\n ] \n" }, { "alpha_fraction": 0.760869562625885, "alphanum_fraction": 0.804347813129425, "avg_line_length": 22, "blob_id": "45d94ff35cea1e5dd2a58f84bac769c522230fc9", "content_id": "3ec3683bcb9a34f5a5d9075ee1521ba15ec4bd5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "# Veterinaria_Django_3\n3era parte del trabajo\n" }, { "alpha_fraction": 0.733485221862793, "alphanum_fraction": 0.7403188943862915, "avg_line_length": 38.8636360168457, "blob_id": "a0b7d62a65649e3adf92befe7e0026a917704f9b", "content_id": "3a1d12d4e53f00abb23b29d4d9ddc705f421c59e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "no_license", "max_line_length": 92, "num_lines": 22, "path": "/vetRicci/sitioVet/models.py", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db.models.base import Model\nfrom django.db.models.fields.related import ForeignKey\nfrom django.db.models import CharField, IntegerField\n\n# Create your models here.\n\nclass Animal (models.Model):\n idAnimal = models.IntegerField(primary_key=True, verbose_name='Id de Animal')\n nombreAnimal = models.CharField(max_length=50, verbose_name='Categoría')\n\n def __str__(self):\n return self.nombreAnimal\n\nclass Mascota (models.Model):\n idMascota = models.IntegerField(primary_key=True, verbose_name='Id de Mascota')\n nombre = models.CharField(max_length=30, verbose_name='Nombre de Mascota')\n telefonoDuenio = models.CharField(max_length=15, verbose_name='Telefono del Dueño')\n idAnimal = models.ForeignKey(Animal, on_delete=models.CASCADE, verbose_name='Categoría')\n\n def __str__(self):\n return self.nombre\n\n" }, { "alpha_fraction": 0.5201465487480164, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 28.03191566467285, "blob_id": "4d1cdcd6d6247b843cd461a1f7d31dce0a9f122c", "content_id": "0c0fa13ce65b7b18bd99605f4cfd960fa33ab4b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2747, "license_type": "no_license", "max_line_length": 70, "num_lines": 94, "path": "/vetRicci/sitioVet/static/sitioVet/js/validacion.js", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "\n\n$(\"#error\").hide();\n\n$(\"#tipo\").change(function(e){\n var opcion = $(\"select option\").filter(\":selected\").val();\n if (opcion === \"1\"){\n 
$(\"#rut-label\").html(\"Rut\");\n }\n else if (opcion === \"2\"){\n $(\"#rut-label\").html(\"Pasaporte\");\n }\n} \n);\n\n$(\"#formulario\").submit(function(e){\n var mensaje = \"\";\n var opcion = $(\"select option\").filter(\":selected\").val();\n var rut = $(\"#rut\").val().trim();\n \n\n if (opcion === \"Tipo de Identificación\"){\n mensaje =\"No se seleccionó el tipo de identificación\";\n }\n else if (rut.length === 0){\n mensaje = \"Debes ingresar la identificación\";\n }\n else if (opcion === \"1\" && !checkRut(rut)){\n mensaje = \"Rut Inválido\";\n }\n else if($(\"#nombre\").val().trim().length === 0){\n mensaje = \"Debes ingresar el nombre\";\n }\n else if($(\"#apellido\").val().trim().length === 0){\n mensaje = \"Debes ingresar el apellido\";\n }\n else if($(\"#mail\").val().trim().length === 0){\n mensaje = \"Debes ingresar un correo electrónico\";\n }\n else if($(\"#ciudad\").val().trim().length === 0){\n mensaje = \"Debes ingresar tu ciudad de residencia\";\n }\n else if($(\"#comentarios\").val().trim().length > 50){\n mensaje = \"El comentario no puede tener mas de 50 caracteres\";\n }\n \n \n if (mensaje != \"\"){\n e.preventDefault();\n $(\"#error\").html(mensaje);\n $(\"#error\").show();\n }\n \n \n})\n\nfunction checkRut(rut) {\n // Despejar Puntos\n var valor = rut.replace('.', '');\n // Despejar Guión\n valor = valor.replace('-', '');\n // Aislar Cuerpo y Dígito Verificador\n cuerpo = valor.slice(0, -1);\n dv = valor.slice(-1).toUpperCase();\n // Si no cumple con el mínimo ej. (n.nnn.nnn)\n if (cuerpo.length < 7) {\n return false;\n }\n // Calcular Dígito Verificador\n suma = 0;\n multiplo = 2;\n // Para cada dígito del Cuerpo\n for (i = 1; i <= cuerpo.length; i++) {\n // Obtener su Producto con el Múltiplo Correspondiente\n index = multiplo * valor.charAt(cuerpo.length - i);\n // Sumar al Contador General\n suma = suma + index;\n // Consolidar Múltiplo dentro del rango [2,7]\n if (multiplo < 7) {\n multiplo = multiplo + 1;\n } else {\n multiplo = 2;\n }\n }\n // Calcular Dígito Verificador en base al Módulo 11\n dvEsperado = 11 - (suma % 11);\n // Casos Especiales (0 y K)\n dv = (dv == 'K') ? 10 : dv;\n dv = (dv == 0) ? 
11 : dv;\n // Validar que el Cuerpo coincide con su Dígito Verificador\n if (dvEsperado != dv) {\n return false;\n }\n // Si todo sale bien, eliminar errores (decretar que es válido)\n return true;\n}" }, { "alpha_fraction": 0.6612576246261597, "alphanum_fraction": 0.6612576246261597, "avg_line_length": 30.5256404876709, "blob_id": "fb97555e048384e1f4e330701d8567bb381dd024", "content_id": "e6fb489b220fc97c8ec40bfe30fec21e27244354", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2466, "license_type": "no_license", "max_line_length": 71, "num_lines": 78, "path": "/vetRicci/sitioVet/views.py", "repo_name": "aguricci/Veterinaria_Django_3", "src_encoding": "UTF-8", "text": "from typing import ContextManager\nfrom django.shortcuts import render, redirect\nfrom .models import Mascota\nfrom .forms import Agregacategoria, Agregamascota\n\n\n# Create your views here.\n\ndef index (request):\n return render(request, 'sitioVet/index.html', {})\n\ndef clientes (request):\n return render(request, 'sitioVet/clientes.html', {})\n\ndef consultas (request):\n return render(request, 'sitioVet/consultas.html', {})\n\ndef contacto (request):\n return render(request, 'sitioVet/contacto.html', {})\n\n\ndef medicos (request):\n return render(request, 'sitioVet/medicos.html', {})\n\n\ndef pelu (request):\n return render(request, 'sitioVet/pelu.html', {})\n\ndef vacunacion(request):\n return render(request, 'sitioVet/vacunacion.html', {})\n\ndef nosotros(request):\n return render(request, 'sitioVet/nosotros.html', {})\n\ndef mascotas(request):\n info = Mascota.objects.all()\n contexto = {'mascotas' : info}\n return render (request, 'sitioVet/mascotas.html', context=contexto)\n\ndef crear(request):\n datos = {'form' : Agregamascota()} \n if request.method == 'POST' : \n formulario = Agregamascota(request.POST)\n if formulario.is_valid():\n formulario.save()\n datos['mensaje'] = 'Datos guardados correctamente'\n else :\n datos['mensaje'] = 'Id ya registrado'\n return render(request, 'sitioVet/crear.html', context=datos)\n\ndef eliminar(request, id):\n mascota = Mascota.objects.get(idMascota=id)\n mascota.delete()\n return redirect(to=\"mascotas\")\n\ndef modificar(request, id):\n mascota = Mascota.objects.get(idMascota=id)\n datos = {'form' : Agregamascota(instance=mascota)}\n if request.method == 'POST':\n formulario = Agregamascota(data=request.POST, instance=mascota)\n if formulario.is_valid():\n formulario.save()\n datos['mensaje'] = 'Mascota modificada'\n else:\n datos['mensaje'] = 'Error al modificar los datos'\n return render(request, 'sitioVet/modificar.html', context=datos)\n\n\ndef categoria(request):\n datos = {'form' : Agregacategoria()}\n if request.method == 'POST':\n formulario = Agregacategoria(request.POST)\n if formulario.is_valid():\n formulario.save()\n datos ['mensaje'] = 'Categoría agregada correctamente'\n else:\n datos ['mensaje'] = 'Id ya asociado a una categoria'\n return render(request, 'sitioVet/categoria.html', context=datos)\n\n \n" } ]
9
kevin1061517/LineBot-CCU
https://github.com/kevin1061517/LineBot-CCU
924b1bc8e1112b57a90afdb2dd28d360234a31fc
9c106be534c88c9fe77c302d8af7bd6cd5816573
3d0a460c89192b21bba67d461d687e4109d3a758
refs/heads/master
2020-03-27T11:19:48.511644
2019-12-22T16:44:09
2019-12-22T16:44:09
146,479,479
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5477768778800964, "alphanum_fraction": 0.5614779591560364, "avg_line_length": 36.336631774902344, "blob_id": "bee4ca537a98c98ef1405860a54119b791b6d6be", "content_id": "d8988c3017e8c1465ed6c19a0e914cb93805c4af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23364, "license_type": "permissive", "max_line_length": 278, "num_lines": 606, "path": "/temp.py", "repo_name": "kevin1061517/LineBot-CCU", "src_encoding": "UTF-8", "text": "#fang_test herokuapp\nfrom flask import Flask, request, abort\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n LineBotApiError, InvalidSignatureError\n)\nfrom firebase import firebase\nfrom linebot.models import (\n SourceUser,SourceGroup,SourceRoom,LeaveEvent,JoinEvent,\n TemplateSendMessage,PostbackEvent,AudioMessage,LocationMessage,\n ButtonsTemplate,LocationSendMessage,AudioSendMessage,ButtonsTemplate,\n ImageMessage,URITemplateAction,MessageTemplateAction,ConfirmTemplate,\n PostbackTemplateAction,ImageSendMessage,MessageEvent, TextMessage, \n TextSendMessage,StickerMessage, StickerSendMessage,DatetimePickerTemplateAction,\n CarouselColumn,CarouselTemplate\n)\nfrom imgurpython import ImgurClient\nfrom config import *\nimport re\nfrom bs4 import BeautifulSoup as bf\nimport requests\nimport random\nimport os,tempfile\n\napp = Flask(__name__)\n#imgur上傳照片\nclient_id = os.getenv('client_id',None)\nclient_secret = os.getenv('client_secret',None)\nalbum_id = os.getenv('album_id',None)\naccess_token = os.getenv('access_token',None)\nrefresh_token = os.getenv('refresh_token',None)\nclient = ImgurClient(client_id, client_secret, access_token, refresh_token)\nurl = os.getenv('firebase_bot',None)\nfb = firebase.FirebaseApplication(url,None)\nline_bot_api = LineBotApi(os.getenv('LINE_CHANNEL_ACCESS_TOKEN',None))\nhandler = WebhookHandler(os.getenv('LINE_CHANNEL_SECRET', None))\n\[email protected](\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n # handle webhook body\n try:\n handler.handle(body,signature)\n except LineBotApiError as e:\n print(\"Catch exception from LINE Messaging API: %s\\n\" % e.message)\n for m in e.error.details:\n print(\"ERROR is %s: %s\" % (m.property, m.message))\n print(\"\\n\")\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\n\nimport json\nfrom selenium import webdriver\ndef get_shop_rank(shop_name):\n temp = shop_name\n print(temp)\n url = 'https://www.google.com.tw/search?q='+temp+'&rlz=1C1EJFA_enTW773TW779&oq='+temp+'&aqs=chrome..69i57j0j69i60l2j69i59l2.894j0j7&sourceid=chrome&ie=UTF-8'\n res = requests.get(url)\n soup = bf(res.text,'html.parser')\n n = soup.find_all('div',{'class':\"IvtMPc\"})\n name = []\n rank = []\n for t in n:\n name.append(t.find_all('span')[0].text)\n rank.append(t.find_all('span')[1].text)\n return name,rank\n\ndef img_describe(text,img_id):#紀錄describe 把firebase裡面describe修改\n t = fb.get('/pic',None)\n tex = text[1:]\n patterns = str(img_id)+'.*'\n if re.search(patterns,text.lower()):\n count = 1\n for key,value in t.items():\n if count == len(t):#取得最後一個dict項目\n data2 = {'describe': str(tex), 'id': value['id'], 'user': value['user']}\n fb.put(url+'/pic/',data=data2,name=key)\n count+=1\n return 'Image紀錄程功'\ndef get_image(text):\n if len(text) <4:\n return None\n else:\n tex = 
text[3:]\n t = fb.get('/pic',None)\n for key,value in t.items():\n if value['describe'] == tex:\n \n client = ImgurClient(client_id, client_secret)\n images = client.get_album_images(album_id)\n img_id = int(value['id'])-1 #從firebase取出來是字串\n url = images[img_id].link\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n return image_message\ndef job_seek():\n target_url = 'https://www.104.com.tw/jobbank/custjob/index.php?r=cust&j=503a4224565c3e2430683b1d1d1d1d5f2443a363189j48&jobsource=joblist_b_relevance#info06'\n print('Start parsing appleNews....')\n rs = requests.session()\n res = rs.get(target_url)\n res.encoding = 'utf-8'\n soup = bf(res.text, 'html.parser')\n content = \"\"\n temp = []\n reback = []\n for date in soup.select('.joblist_cont .date'):\n if date.text == '':\n temp.append('緊急!!重點職務')\n else:\n temp.append(date.text)\n for v,data in enumerate(soup.select('.joblist_cont .jobname a'),0):\n link = data['href']\n title = data['title']\n content += '發布時間->{}\\n工作名稱->{}\\n連結網址->{}\\n'.format(temp[v],title,'https://www.104.com.tw'+link)\n if v%5==0 :\n if v == 0:\n continue\n reback.append(TextSendMessage(text=content))\n content = ''\n return reback\ndef movie_template():\n buttons_template = TemplateSendMessage(\n alt_text='電影 template',\n template=ButtonsTemplate(\n title='服務類型',\n text='請選擇',\n thumbnail_image_url='https://i.imgur.com/zzv2aSR.jpg',\n actions=[\n MessageTemplateAction(\n label='近期上映電影',\n text='近期上映電影'\n ),\n MessageTemplateAction(\n label='依莉下載電影',\n text='eyny'\n ),\n MessageTemplateAction(\n label='觸電網-youtube',\n text='觸電網-youtube'\n ),\n MessageTemplateAction(\n label='Marco體驗師-youtube',\n text='Marco體驗師'\n )\n ]\n )\n )\n return buttons_template\ndef apple_news():\n target_url = 'https://tw.appledaily.com/new/realtime'\n print('Start parsing appleNews....')\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n soup = bf(res.text, 'html.parser')\n content = \"\"\n\n for index, data in enumerate(soup.select('.rtddt a'), 0):\n if index == 5:\n return content\n title = data.select('font')[0].text\n link = data['href']\n content += '{}\\n{}\\n'.format(title,link)\n return content\n\ndef get_image_link(search_query):\n img_urls = []\n chrome_options = webdriver.ChromeOptions()\n chrome_options.binary_location = os.getenv('GOOGLE_CHROME_BIN',None)\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(chrome_options=chrome_options,executable_path=os.getenv('CHROMEDRIVER_PATH',None))\n# driver = webdriver.Chrome(executable_path='/app/.chromedriver/bin/chromedriver')\n if search_query[-4:] == 'menu':\n t = search_query[:-4]+'餐點價格'\n url = 'https://www.google.com.tw/search?q='+t+'&rlz=1C1EJFA_enTW773TW779&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjX47mP-IjfAhWC7GEKHcZCD4YQ_AUIDigB&biw=1920&bih=969'\n elif search_query[-3:] == 'pic':\n t = search_query[:-3]\n url = 'https://www.google.com.tw/search?rlz=1C1EJFA_enTW773TW779&biw=1920&bih=920&tbs=isz%3Alt%2Cislt%3Asvga&tbm=isch&sa=1&ei=1UwFXLa8FsT48QWsvpOQDQ&q='+t+'&oq='+t+'&gs_l=img.3..0l10.10955.19019..20688...0.0..0.65.395.10......3....1..gws-wiz-img.....0..0i24.sGlMLu_Pdf0'\n driver.get(url)\n imges = driver.find_elements_by_xpath('//div[contains(@class,\"rg_meta notranslate\")]')\n count = 0\n for img in imges:\n img_url = json.loads(img.get_attribute('innerHTML'))[\"ou\"]\n print(str(count)+'--->'+str(img_url))\n if img_url.startswith('https') == False or (img_url in img_urls) == True or 
img_url.endswith('jpg') == False:\n continue\n else:\n img_urls.append(img_url)\n count = count + 1\n if count > 3:\n break\n driver.quit()\n return img_urls\n\n#更改\ndef drink_menu(text):\n pattern = r'.*menu$'\n web = []\n if re.search(pattern,text.lower()):\n \n temp = get_image_link(text)\n print('fun'+str(temp))\n for t in temp:\n web.append(ImageSendMessage(original_content_url=t,preview_image_url=t))\n return web\n \ndef google_picture(text):\n pattern = r'.*pic$'\n web = []\n if re.search(pattern,text.lower()):\n temp = get_image_link(text)\n for t in temp:\n web.append(ImageSendMessage(original_content_url=t,preview_image_url=t))\n return web\ndef sister_picture(text):\n pattern = r'.*sister$'\n web = []\n r = random.randint(0,122)\n url = 'https://forum.gamer.com.tw/Co.php?bsn=60076&sn=26514065'\n res = requests.get(url)\n soup = bf(res.text,'html.parser')\n if re.search(pattern,text.lower()):\n temp = []\n temp.append('https://img.pornpics.com/2014-07-14/281181_13.jpg')\n for item in soup.select('.photoswipe-image'):\n temp.append(item.get('href'))\n for t in temp[r:r+5]:\n web.append(ImageSendMessage(original_content_url=t,preview_image_url=t))\n return web\ndef movie():\n target_url = 'http://www.atmovies.com.tw/movie/next/0/'\n print('Start parsing movie ...')\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n res.encoding = 'utf-8'\n soup = bf(res.text, 'html.parser')\n content = \"\"\n for index, data in enumerate(soup.select('ul.filmNextListAll a')):\n if index == 20:\n return content\n title = data.text.replace('\\t', '').replace('\\r', '')\n link = \"http://www.atmovies.com.tw\" + data['href']\n content += '{}\\n{}\\n'.format(title, link)\n return content\ndef pattern_mega(text):\n patterns = [\n 'mega', 'mg', 'mu', 'MEGA', 'ME', 'MU',\n 'me', 'mu', 'mega', 'GD', 'MG', 'google',\n ]\n for pattern in patterns:\n if re.search(pattern, text, re.IGNORECASE):\n return True\ndef eyny_movie():\n target_url = 'http://www.eyny.com/forum-205-1.html'\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n soup = bf(res.text, 'html.parser')\n content = ''\n for titleURL in soup.select('.bm_c tbody .xst'):\n if pattern_mega(titleURL.text):\n title = titleURL.text\n if '11379780-1-3' in titleURL['href']:\n continue\n link = 'http://www.eyny.com/' + titleURL['href']\n data = '{}\\n{}\\n\\n'.format(title, link)\n content += data\n return content\n\ndef panx():\n target_url = 'https://panx.asia/'\n print('Start parsing ptt hot....')\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n soup = bf(res.text, 'html.parser')\n content = \"\"\n for data in soup.select('div.container div.row div.desc_wrap h2 a'):\n title = data.text\n link = data['href']\n content += '{}\\n{}\\n\\n'.format(title, link)\n return content\ndef magazine():\n target_url = 'https://www.cw.com.tw/'\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n res.encoding = 'utf-8'\n soup = bf(res.text, 'html.parser')\n temp = \"\"\n for v ,date in enumerate(soup.select('.caption h3 a'),0):\n url = date['href']\n title = date.text.strip()\n temp += '{}\\n{}\\n'.format(title,url)\n if(v>4):\n break\n return temp\ndef check_pic(img_id):\n Confirm_template = TemplateSendMessage(\n alt_text='要給你照片標籤描述嗎?',\n template=ConfirmTemplate(\n title='注意',\n text= '要給你照片標籤描述嗎?\\n要就選Yes,並且回覆\\n-->id+描述訊息(這張照片id是'+ str(img_id) +')',\n actions=[ \n PostbackTemplateAction(\n label='Yes',\n text='I choose YES',\n data='action=buy&itemid=1'\n ),\n MessageTemplateAction(\n label='No',\n 
text='I choose NO'\n )\n ]\n )\n )\n return Confirm_template\n\ndef button_template(name,shop_name,title,text,image_url):\n message = TemplateSendMessage(\n alt_text = 'Button Template',\n template = ButtonsTemplate(\n title = title,\n text = name+text,\n thumbnail_image_url = image_url,\n actions = [\n URITemplateAction(\n label = '搜尋一下附近其他美食',\n uri = 'line://nv/location'\n ),\n PostbackTemplateAction(\n label = shop_name+'的google評價',\n data = 'rank&'+shop_name,\n ),\n MessageTemplateAction(\n label = '納入口袋名單',\n text = '納入口袋名單'\n )\n ]\n )\n \n )\n return message\ndef mrt_stop(text):\n url = 'http://tcgmetro.blob.core.windows.net/stationnames/stations.json'\n res = requests.get(url)\n doc=json.loads(res.text)\n t = doc['resource']\n print('doc'+str(doc))\n temp = ''\n for i in t:\n if text == i['Destination']:\n temp += '現在捷運在->'+i['Station']+'\\n'\n print('mrt_fun'+temp)\n return temp\n \[email protected](PostbackEvent)\ndef handle_postback(event):\n temp = event.postback.data\n s = ''\n if event.postback.data[0:1] == 'T':\n temp = event.postback.data[1:]\n print('postback'+temp)\n s = mrt_stop(temp)\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text=s))\n elif temp[:4] == 'rank':\n name,rank = get_shop_rank(temp[5:])\n print(name)\n print(rank)\n for i in range(len(name)):\n s = s + '{}的評價是{}顆星-僅為參考\\n'.format(name[i],rank[i])\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text=s))\[email protected](JoinEvent)\ndef handle_join(event):\n newcoming_text = \"謝謝邀請我這個ccu linebot來至此群組!!我會當做個位小幫手~\"\n# 謝謝邀請我這個ccu linebot來至此群組!!我會當做個位小幫手~<class 'linebot.models.events.JoinEvent'>\n line_bot_api.reply_message(\n event.reply_token,\n TextMessage(text=newcoming_text + str(JoinEvent))\n )\n# 處理圖片\[email protected](MessageEvent,message=ImageMessage)\ndef handle_msg_img(event):\n profile = line_bot_api.get_profile(event.source.user_id)\n tem_name = str(profile.display_name)\n img_id = 1\n t = fb.get('/pic',None)\n if t!=None:\n count = 1\n for key,value in t.items():\n if count == len(t):#取得最後一個dict項目\n img_id = int(value['id'])+1\n count+=1\n try:\n message_content = line_bot_api.get_message_content(event.message.id)\n with tempfile.NamedTemporaryFile(prefix='jpg-', delete=False) as tf:\n for chunk in message_content.iter_content():\n tf.write(chunk)\n fb.post('/pic',{'id':str(img_id),'user':tem_name,'describe':''})\n tempfile_path = tf.name\n path = tempfile_path\n client = ImgurClient(client_id, client_secret, access_token, refresh_token)\n config = {\n 'album': album_id,\n 'name' : img_id,\n 'title': img_id,\n 'description': 'Cute kitten being cute on'\n }\n client.upload_from_path(path, config=config, anon=False)\n os.remove(path)\n image_reply = check_pic(img_id)\n line_bot_api.reply_message(event.reply_token,[TextSendMessage(text='上傳成功'),image_reply])\n except Exception as e:\n t = '上傳失敗'+str(e.args)\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text=t))\n\[email protected](MessageEvent, message=LocationMessage)\ndef handle_location(event):\n title = event.message.title\n latitude = event.message.latitude\n longitude = event.message.longitude\n temp = 'hey guy~\\ntitle={} latitude={} longitude={}'.format(title,latitude,longitude)\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text=temp))\n# 處理訊息:\[email protected](MessageEvent, message=TextMessage)\ndef handle_msg_text(event):\n content = event.message.text \n profile = line_bot_api.get_profile(event.source.user_id)\n user_name = profile.display_name\n picture_url = 
profile.picture_url\n if event.message.text == 'where':\n message = LocationSendMessage(\n title='My CCU Lab',\n address='中正大學',\n latitude=23.563381,\n longitude=120.4706944\n )\n line_bot_api.reply_message(event.reply_token, message)\n return\n\n elif event.message.text.lower() == \"eyny\":\n content = eyny_movie()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=content))\n return 0\n elif google_picture(event.message.text) != None:\n image = google_picture(event.message.text)\n line_bot_api.reply_message(event.reply_token,image)\n return\n elif sister_picture(event.message.text) != None:\n image = sister_picture(event.message.text)\n line_bot_api.reply_message(event.reply_token,image)\n return\n elif event.message.text == \"PanX泛科技\":\n content = panx()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=content))\n return 0\n elif drink_menu(event.message.text) != None:\n image = drink_menu(event.message.text)\n image.append(button_template(user_name,event.message.text[:-4],'請問一下~','有想要進一步的資訊嗎?',picture_url))\n line_bot_api.reply_message(event.reply_token,image)\n return\n elif event.message.text == \"近期上映電影\":\n content = movie()\n template = movie_template()\n line_bot_api.reply_message(\n event.reply_token,[\n TextSendMessage(text=content),\n template\n ]\n )\n return 0\n elif event.message.text.lower() == \"tool\":\n Carousel_template = TemplateSendMessage(\n alt_text='Carousel template',\n template=CarouselTemplate(\n columns=[\n CarouselColumn(\n thumbnail_image_url='https://i.imgur.com/Upw0mY5.jpg',\n title = '功能目錄',\n text = user_name+'我可以幫你做到下列這些喔',\n actions=[\n MessageTemplateAction(\n label='國泰工作查詢',\n text= '國泰工作'\n ),\n MessageTemplateAction(\n label='電影資訊',\n text= 'movie'\n ),\n MessageTemplateAction(\n label='新聞資訊',\n text= 'news'\n )\n ]\n )\n ]\n )\n )\n line_bot_api.reply_message(event.reply_token,Carousel_template)\n return 0\n\n elif event.message.text == \"Marco體驗師\":\n target_url = 'https://www.youtube.com/channel/UCQTIdBx41To9Gg42aGEO0gQ/videos'\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n soup = bf(res.text, 'html.parser')\n template = movie_template()\n seqs = ['https://www.youtube.com{}'.format(data.find('a')['href']) for data in soup.select('.yt-lockup-title')]\n line_bot_api.reply_message(\n event.reply_token, [\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n template\n ])\n return 0\n elif event.message.text == \"觸電網-youtube\":\n target_url = 'https://www.youtube.com/user/truemovie1/videos'\n rs = requests.session()\n res = rs.get(target_url, verify=False)\n soup = bf(res.text, 'html.parser')\n seqs = ['https://www.youtube.com{}'.format(data.find('a')['href']) for data in soup.select('.yt-lockup-title')]\n template = movie_template()\n line_bot_api.reply_message(\n event.reply_token, [\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),\n template\n ])\n return 0\n elif event.message.text.lower() == \"ramdom picture\":\n client = ImgurClient(client_id, client_secret)\n images = client.get_album_images(album_id)\n index = random.randint(0, len(images) - 1)\n url = images[index].link\n image_message = ImageSendMessage(\n 
original_content_url=url,\n preview_image_url=url\n )\n line_bot_api.reply_message(event.reply_token,image_message)\n return\n\n elif event.message.text.lower() == \"movie\":\n buttons_template = movie_template()\n line_bot_api.reply_message(event.reply_token, buttons_template)\n return 0\n elif event.message.text == \"蘋果即時新聞\":\n content = apple_news()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=content))\n return 0\n elif event.message.text.lower() == \"news\":\n buttons_template = TemplateSendMessage(\n alt_text='news template',\n template=ButtonsTemplate(\n title='新聞類型',\n text='請選擇',\n thumbnail_image_url='https://i.imgur.com/GoAYFqv.jpg',\n actions=[\n MessageTemplateAction(\n label='蘋果即時新聞',\n text='蘋果即時新聞'\n ),\n MessageTemplateAction(\n label='天下雜誌',\n text='天下雜誌'\n ),\n MessageTemplateAction(\n label='PanX泛科技',\n text='PanX泛科技'\n )\n ]\n )\n )\n line_bot_api.reply_message(event.reply_token, buttons_template)\n return 0\n elif event.message.text == \"天下雜誌\":\n content = magazine()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=content))\n return 0\n elif event.message.text == \"test\":\n static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')\n content = static_tmp_path\n\n message = TextSendMessage(text=content)\n line_bot_api.reply_message(event.reply_token,message)\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n" }, { "alpha_fraction": 0.7913574576377869, "alphanum_fraction": 0.8179833889007568, "avg_line_length": 42.20754623413086, "blob_id": "808fa34c6306d1c5ab0b09cb8cc045465e29fd86", "content_id": "081e5c0b418983666717aeb07579735fae08366f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3791, "license_type": "permissive", "max_line_length": 376, "num_lines": 53, "path": "/README.md", "repo_name": "kevin1061517/LineBot-CCU", "src_encoding": "UTF-8", "text": "Linebot audio handling and related scraping\n====\nProblem:\n------- \nWhen a user sends an audio file (.aac), Google's Python package speech_recognition can parse the audio content and convert it to text, but the pydub package is needed along the way, because the format LINE delivers (.aac) is not one speech_recognition supports. The file must first be converted to .wav before it can be parsed. The big problem I ran into is that pydub is very fiddly; it took me several weeks to work out that pydub is apparently not self-contained and needs something called ffmpeg to help with decoding. So first ffmpeg has to be added to Heroku's buildpacks, and then you have to find where ffmpeg lives on Heroku; the location I found was '/app/vendor/ffmpeg/ffmpeg', so some setup is required. The steps below walk through it one by one, as a reminder for anyone who hits the same problem!\nThe screenshot below (not reproduced here) shows hundreds of runs that kept raising exceptions, partly the ffmpeg problem described above and partly because I had not set the path correctly.\n\nSteps: \n------- \n1. First add https://github.com/alevfalse/heroku-buildpack-ffmpeg to Heroku's buildpacks. \n2. After pushing the code to Heroku (git push heroku master -f), check whether ffmpeg was installed with the command heroku run \"ffmpeg -version\". Then open a shell to find where ffmpeg is on Heroku, because AudioSegment.converter needs to be pointed at the ffmpeg location: heroku run bash drops you into a shell, and typing which ffmpeg will normally answer with ffmpeg's location, which in my case was (/app/vendor/ffmpeg/ffmpeg). \n3. Once the location is known, we can wire it up in Python.\n\nReferences:\n------- \nhttps://stackoverflow.com/questions/26477786/reading-in-pydub-audiosegment-from-url-bytesio-returning-oserror-errno-2-no\n\nhttps://github.com/integricho/heroku-buildpack-python-ffmpeg.git\n\nhttps://hk.saowen.com/a/4e1f6599b0c03d19d8945f9cc23a7bc313b638d9d134d8bd335db9B \n\nScraping data with webdriver on Heroku\n====\n\nProblem:\n------- \n\nScraping with the selenium module's webdriver is not supported on Heroku out of the box; a buildpack has to handle it. The main problem, according to what I found online, is that the xvfb-google-chrome buildpack is not compatible with the heroku-16 stack.\n\nSolution\n------- \n\nThere are two ways to solve this: switch the current stack to the heroku-14 stack, or find a different buildpack. I went with switching to the heroku-14 stack, because the linebot I have been writing lately needs more advanced scraping that fetches dynamically rendered page source, so I had no choice but to use the selenium module's webdriver. What I actually wanted was, when scraping Google image search, to grab the images' real locations, with URLs ending in .jpg, to help me get a few things done. A fragment of my code is below. This one also took me over a week to solve, but I am getting more and more familiar with it.\n\nReferences:\n------- \n\nHeroku webdriver notes ----------> https://devcenter.heroku.com/articles/heroku-ci#known-issues \nHeroku stack migration notes -----> https://devcenter.heroku.com/articles/cedar-14-stack \n\nBuildpacks and variables to set up on Heroku\n====\nThe two buildpacks to add are:\n------- \n\n1. https://github.com/heroku/heroku-buildpack-chromedriver \n2. https://github.com/heroku/heroku-buildpack-xvfb-google-chrome \n\nThe two environment variables to add are:\n------- \n1. CHROMEDRIVER_PATH ----> /app/.chromedriver/bin/chromedriver \n2. GOOGLE_CHROME_BIN ---> /app/.apt/usr/bin/google-chrome \nFinally, selenium==3.8.0 must also be added to requirements.txt. This tripped me up for a long time: without a pinned version it was very unstable and crashed constantly, and posts I found insisted on pinning selenium==3.8.0, because that version of selenium seems to be the most stable.\n\n" } ]
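A minimal sketch of how the buildpack and environment-variable setup just described can be consumed from Python, assuming the pinned selenium==3.8.0 API (`chrome_options`/`executable_path` keywords); the headless flags and the search URL are illustrative additions, not from the repo:

```python
import os
from selenium import webdriver

# Both env vars are set per the README; the fallback paths mirror its values.
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get(
    "GOOGLE_CHROME_BIN", "/app/.apt/usr/bin/google-chrome"
)
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")

driver = webdriver.Chrome(
    executable_path=os.environ.get(
        "CHROMEDRIVER_PATH", "/app/.chromedriver/bin/chromedriver"
    ),
    chrome_options=chrome_options,
)
driver.get("https://www.google.com/search?tbm=isch&q=latte")  # illustrative query
html = driver.page_source  # dynamically rendered source, ready for parsing
driver.quit()
```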
2
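The audio-handling section of the LineBot-CCU README above boils down to pointing pydub at Heroku's ffmpeg binary, converting LINE's .aac audio to .wav, and only then handing it to speech_recognition. A minimal sketch of that flow, assuming the buildpack path from the README; the `FFMPEG_PATH` variable, the function name, and the `zh-TW` language code are illustrative assumptions, not from the repo:

```python
import os
import speech_recognition as sr
from pydub import AudioSegment

# Heroku's ffmpeg buildpack installs the binary here (per `which ffmpeg` above);
# pydub shells out to this converter when decoding non-WAV formats.
AudioSegment.converter = os.environ.get("FFMPEG_PATH", "/app/vendor/ffmpeg/ffmpeg")

def transcribe_line_audio(aac_path: str) -> str:
    # LINE delivers .aac, which speech_recognition cannot read directly,
    # so decode it to WAV first via pydub/ffmpeg.
    wav_path = aac_path + ".wav"
    AudioSegment.from_file(aac_path).export(wav_path, format="wav")
    recognizer = sr.Recognizer()
    with sr.AudioFile(wav_path) as source:
        audio = recognizer.record(source)
    return recognizer.recognize_google(audio, language="zh-TW")
```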
zuheir-zaidon/phd-utils
https://github.com/zuheir-zaidon/phd-utils
aede87004fcdb372e4f289c6c7f88e8412a2b478
cb34d7a83a96b82c40054c5549dc6b201a3b6db4
c9dc99599a65dc3f92922b3487a1f228765e91d2
refs/heads/main
2023-05-19T13:19:00.368383
2021-06-08T20:55:13
2021-06-08T20:55:13
363,141,001
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6423690319061279, "alphanum_fraction": 0.6902050375938416, "avg_line_length": 16.440000534057617, "blob_id": "7313bfcddc725a0fddbaa42f09526b02cc609594", "content_id": "e68b41051706c0d19b6018fb511e1e2a2e1a8df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 481, "license_type": "no_license", "max_line_length": 74, "num_lines": 25, "path": "/README.md", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "# phd-utils\nTools to cut down on manual intervention in my PhD. \nInstallation of this package gives you the following command-line tools: \n\n## `tiff-stacker`\nStack many tiff files. \nFrom this:\n```\nexperiment1/\n├── image0000.tif\n├── image0001.tif\n├── ...\n├── image0999.tif\n└── image1000.tif\n```\n\nTo this\n```\nexperiment1/\n├── stack0.tif\n└── stack1.tif\n```\n\nby running `tiff-stacker experiment1`. \nYou must have ImageMagick installed. \n\n" }, { "alpha_fraction": 0.6391516923904419, "alphanum_fraction": 0.6405383348464966, "avg_line_length": 33.34173583984375, "blob_id": "528d13705c891fccdd044d937ddcc4ec8b32f863", "content_id": "eabf7fc9df7577c73ed93e8620dad657045b2867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12260, "license_type": "no_license", "max_line_length": 126, "num_lines": 357, "path": "/phd_utils/csv_analyser.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import logging\nimport argparse\nfrom pathlib import Path\nfrom typing import Optional\nfrom numpy import cos\nimport pandas as pd\nimport string\nimport datetime\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_displacement_csv(path: Path):\n \"\"\"Reads in a CSV, returning a dataframe with:\n - (Index)\n - Frame\n - X_Position\n - Y_Position\n\n Args:\n path (Path): Where the file is\n\n Returns:\n pd.DataFrame: A DataFrame (tabular data)\n \"\"\"\n df: pd.DataFrame = pd.read_csv(\n path,\n index_col=False,\n names=[s for s in string.ascii_uppercase], # A..Z\n )\n\n df.dropna(\n axis=\"index\", # Drop empty rows\n how=\"all\", # If all of their cells are empty\n inplace=True,\n )\n\n df.dropna(\n axis=\"columns\",\n how=\"all\",\n inplace=True,\n )\n\n df.rename(\n columns={\n \"H\": \"Frame\",\n \"E\": \"X_Position\",\n \"F\": \"Y_Position\",\n },\n inplace=True,\n )\n\n # Pandas interpreted this as a float. 
Fix that now\n df[\"Frame\"] = df[\"Frame\"].astype(int)\n\n df.drop(\n # We've pulled out all of the columns we want, so drop the ones that we don't need (which have single-character names)\n labels=[col for col in df.columns if len(col) == 1],\n axis=\"columns\",\n inplace=True,\n )\n\n logger.info(f\"Loaded csv from {path.as_posix()} ({len(df)} rows)\")\n\n return df\n\n\ndef merge_and_displace_frames(\n substrate: pd.DataFrame,\n reference: pd.DataFrame,\n pipette: pd.DataFrame,\n experiment_duration: pd.Timedelta,\n duration_of_resampled_row: pd.Timedelta,\n):\n reference = reference.add_prefix(\"Reference_\")\n substrate = substrate.add_prefix(\"Substrate_\")\n pipette = pipette.add_prefix(\"Pipette_\")\n\n # Convert from frame numbers to the actual time through the experiment\n for df, name in [\n (reference, \"Reference\"),\n (substrate, \"Substrate\"),\n (pipette, \"Pipette\"),\n ]:\n df: pd.DataFrame\n name: str\n number_of_frames = df[f\"{name}_Frame\"].max()\n instant = df[f\"{name}_Frame\"] / number_of_frames * experiment_duration\n # pd.Timedelta -> seconds\n df[\"Instant\"] = instant\n df.set_index(\"Instant\", inplace=True)\n\n combined = pd.concat(\n (reference, substrate, pipette),\n axis=\"columns\", # We want to join two tables so that the columns are the joining point (i.e left and right)\n )\n\n # In order to compare results between experiments, we must now resample them (so that each row has a common `Instant`)\n # This is a lossy operation. We choose to take the mean\n combined: pd.DataFrame = combined.resample(rule=duration_of_resampled_row).mean()\n # Frame numbers are no longer valid\n combined.drop(\n columns=[col for col in combined.columns if col.endswith(\"Frame\")],\n inplace=True,\n )\n logger.info(\n f\"Resampled to buckets of {duration_of_resampled_row} ({len(combined)} rows)\"\n )\n\n combined[\"X_Delta\"] = (\n combined[\"Substrate_X_Position\"] - combined[\"Reference_X_Position\"]\n )\n combined[\"Y_Delta\"] = (\n combined[\"Substrate_Y_Position\"] - combined[\"Reference_Y_Position\"]\n )\n\n # Make our delta lines start at 0\n x_start = combined[\"X_Delta\"].iloc[0]\n y_start = combined[\"Y_Delta\"].iloc[0]\n\n combined[\"X_Delta\"] = combined[\"X_Delta\"] - x_start\n combined[\"Y_Delta\"] = combined[\"Y_Delta\"] - y_start\n\n return combined\n\n\ndef generate_normal_force_and_correct_for_load_positioning(\n df: pd.DataFrame,\n initial_x_displacement: float,\n initial_substrate_tip_position: float,\n length_of_substrate: float,\n stiffness_constant_of_substrate: float,\n stiffness_constant_of_pipette: float,\n reverse_sliding_direction: bool,\n angle_alpha: float,\n angle_beta: float,\n substrate_tip_velocity: float, # micrometres per second\n flexural_rigidity: float,\n duration_subtrate_tip_is_stationary_for: pd.Timedelta = pd.Timedelta(5, \"seconds\")\n # pipette_position_at_rest: Optional[float] = None,\n):\n # Pretend we're at a constant velocity for the whole thing (in micrometers per second)\n df[\"Substrate_Tip_Position\"] = df.index.total_seconds() * substrate_tip_velocity\n shifted = pd.DataFrame(\n df[\"Substrate_Tip_Position\"].shift(freq=duration_subtrate_tip_is_stationary_for)\n )\n # Now shift our line to the right, because we're stationary for the first n seconds\n df = pd.merge_asof(\n df.reset_index().drop(columns=\"Substrate_Tip_Position\"),\n shifted.reset_index(),\n on=\"Instant\",\n )\n df.set_index(\"Instant\", inplace=True)\n # Fill in the missing values (add a flat section to the line)\n 
df[\"Substrate_Tip_Position\"].fillna(0, inplace=True)\n # Translate up the y axis\n df[\"Substrate_Tip_Position\"] = (\n df[\"Substrate_Tip_Position\"] + initial_substrate_tip_position\n )\n\n displaced_x_delta = df[\"X_Delta\"] / cos(angle_beta) + initial_x_displacement\n bead_to_tip_displacement = df[\"Pipette_Y_Position\"] - df[\"Substrate_Tip_Position\"]\n\n length_from_the_tip = length_of_substrate - bead_to_tip_displacement\n df[\"Corrected_Deflection\"] = (\n displaced_x_delta\n * stiffness_constant_of_substrate\n * length_from_the_tip\n * length_from_the_tip\n * (3 * length_of_substrate - length_from_the_tip)\n / (6 * flexural_rigidity * 10 ** 16)\n )\n df[\"Normal_Force\"] = df[\"Corrected_Deflection\"] * stiffness_constant_of_substrate\n\n if angle_alpha > 0:\n holder = cos(angle_alpha)\n else:\n holder = 1\n\n if reverse_sliding_direction is True: # user said -d\n df[\"Pipette_Deflection\"] = (\n df[\"Pipette_Y_Position\"] - df[\"Pipette_Y_Position\"].iloc[0]\n ) / holder\n else: # user didn't say -d\n df[\"Pipette_Deflection\"] = (\n df[\"Pipette_Y_Position\"].iloc[0] - df[\"Pipette_Y_Position\"]\n ) / holder\n\n df[\"Friction_Force\"] = (\n df[\"Pipette_Deflection\"] / cos(angle_beta) * stiffness_constant_of_pipette\n )\n\n df[\"Friction_Coefficient\"] = df[\"Friction_Force\"] / df[\"Normal_Force\"]\n\n return df\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n Given a string, this program will\n - Read substrate*.csv, reference*.csv and pipette*.csv, where each filename contains that string\n - Convert them to timeseries, and concatenate\n - Resample\n - Do some basic analysis\n \"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n \"-c\",\n \"--filename-contains\",\n type=str,\n help=\"\"\"Look for the substrate, reference and pipette csvs which have their filenames containing this number\"\"\",\n )\n parser.add_argument(\n \"-f\",\n \"--folder\",\n type=Path,\n default=Path.cwd(),\n help=\"The folder to look in. Defaults to the current working directory\",\n )\n parser.add_argument(\n \"-e\",\n \"--experiment-duration\",\n type=float,\n help=\"Duration of this experiment, in seconds\",\n required=True,\n )\n parser.add_argument(\n \"-r\",\n \"--resample-to\",\n type=float,\n help=\"How many seconds each row should last for after resampling\",\n required=True,\n )\n\n parser.add_argument(\"-x\", \"--initial-x-displacement\", type=float, required=True)\n parser.add_argument(\"-t\", \"--substrate-tip-position\", type=float, required=True)\n parser.add_argument(\"-L\", \"--substrate-length\", type=float, required=True)\n parser.add_argument(\"-k\", \"--substrate-stiffness\", type=float, required=True)\n parser.add_argument(\"-j\", \"--pipette-stiffness\", type=float, required=True)\n # parser.add_argument(\"-R\", \"--pipette-position-at-rest\", type=float, default=None, required=False)\n parser.add_argument(\"-a\", \"--angle-alpha\", type=float, default=None, required=False)\n parser.add_argument(\"-b\", \"--angle-beta\", type=float, default=None, required=False)\n parser.add_argument(\"-s\", \"--speed\", type=float, default=None, required=False)\n parser.add_argument(\n \"-fr\", \"--flexural-rigidity\", type=float, default=None, required=False\n )\n parser.add_argument(\n \"-d\", \"--reverse-sliding-direction\", default=False, action=\"store_true\"\n )\n parser.add_argument(\n \"-O\",\n \"--overwrite\",\n default=False,\n action=\"store_true\",\n help=\"If the output filename already exists, ovewrite it. 
Else, the program will raise an error\",\n )\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n type=lambda x: getattr(logging, x.upper()),\n default=logging.INFO,\n help=\"How verbose to be\",\n )\n args = parser.parse_args()\n\n logging.basicConfig(level=args.log_level)\n\n logger.debug(f\"Arguments: {args}\")\n\n analyse_csv(\n filename=args.filename_contains,\n folder=args.folder,\n experiment_duration=args.experiment_duration,\n resample_to=args.resample_to,\n initial_x_displacement=args.initial_x_displacement,\n substrate_tip_position=args.substrate_tip_position,\n length_of_substrate=args.substrate_length,\n stiffness_constant_of_substrate=args.substrate_stiffness,\n stiffness_constant_of_pipette=args.pipette_stiffness,\n reverse_sliding_direction=args.reverse_sliding_direction,\n angle_alpha=args.angle_alpha,\n angle_beta=args.angle_beta,\n speed=args.speed,\n flexural_rigidity=args.flexural_rigidity,\n # pipette_position_at_rest=args.pipette_position_at_rest,\n overwrite=args.overwrite,\n )\n\n\ndef glob_once(folder: Path, pattern: str):\n candidates = list(folder.glob(pattern))\n assert len(candidates) == 1, f\"Found more than file for {pattern}: {candidates}\"\n destination_path = candidates.pop()\n assert destination_path.is_file()\n logging.info(f\"Using {destination_path.as_posix()}\")\n return destination_path\n\n\ndef analyse_csv(\n filename: str,\n folder: Path, # yes\n experiment_duration: float, # yes\n resample_to: float,\n initial_x_displacement: float, # yes\n substrate_tip_position: float, # yes\n length_of_substrate: float,\n stiffness_constant_of_substrate: float,\n stiffness_constant_of_pipette: float,\n reverse_sliding_direction: bool, # yes\n angle_alpha: float,\n angle_beta: float,\n speed: float,\n flexural_rigidity: float,\n # pipette_position_at_rest: Optional[float],\n overwrite: bool,\n):\n \"\"\"This function does the entire analysis for one experiment\"\"\"\n\n substrate_path = glob_once(folder, f\"substrate_{filename}.csv\")\n reference_path = glob_once(folder, f\"reference_{filename}.csv\")\n pipette_path = glob_once(folder, f\"pipette_{filename}.csv\")\n\n merged_and_displaced = merge_and_displace_frames(\n substrate=read_displacement_csv(substrate_path),\n reference=read_displacement_csv(reference_path),\n pipette=read_displacement_csv(pipette_path),\n experiment_duration=pd.Timedelta(value=experiment_duration, unit=\"seconds\"),\n duration_of_resampled_row=pd.Timedelta(value=resample_to, unit=\"seconds\"),\n )\n\n result = generate_normal_force_and_correct_for_load_positioning(\n df=merged_and_displaced,\n initial_x_displacement=initial_x_displacement,\n initial_substrate_tip_position=substrate_tip_position,\n length_of_substrate=length_of_substrate,\n stiffness_constant_of_substrate=stiffness_constant_of_substrate,\n stiffness_constant_of_pipette=stiffness_constant_of_pipette,\n reverse_sliding_direction=reverse_sliding_direction,\n angle_alpha=angle_alpha,\n angle_beta=angle_beta,\n substrate_tip_velocity=speed,\n flexural_rigidity=flexural_rigidity,\n # pipette_position_at_rest=pipette_position_at_rest,\n )\n\n output_file = folder.joinpath(f\"processed_{filename}.csv\")\n if output_file.exists():\n assert output_file.is_file()\n assert (\n overwrite is True\n ), f\"About to write over existing file {output_file}, but `--overwrite` not specified\"\n logger.warn(f\"Overwriting file {output_file.as_posix()}\")\n\n result.to_csv(output_file)\n" }, { "alpha_fraction": 0.6433495283126831, "alphanum_fraction": 0.6459024548530579, "avg_line_length": 
32.194915771484375, "blob_id": "4513ca2b0777e36a6d32084e90ea8e2d9e7211f7", "content_id": "9cf510774fc27ba48bc324a94930bbbf559144f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3917, "license_type": "no_license", "max_line_length": 159, "num_lines": 118, "path": "/phd_utils/tiff_stacker.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import argparse\nimport logging\nimport subprocess\nimport sys\n\nfrom collections import deque\nfrom pathlib import Path\nfrom typing import Iterable, List, TextIO\n\nfrom .utils import grouper, pformat\n\nlogger = logging.getLogger(__name__)\n\n\ndef stack_in_folders(\n folders: Iterable[Path], files_per_stack: int, imagemagick_stderr: TextIO\n):\n \"\"\"For each folder, discover all TIF files, and combine them into stacks comprising of the contents of N of those files\n\n Args:\n folders (Iterable[Path]): The folders to search for TIFs in. The final folder will contain the stacks, with the originals removed\n frames_per_stack (int): How many files to combine into each stack\n \"\"\"\n folders: List[Path] = list(filter(Path.is_dir, folders)) # type: ignore\n logger.debug(f\"Creating stacks from contents of each folder in {folders}\")\n\n for folder in folders:\n tifs = list(filter(Path.is_file, folder.glob(\"**/*.tif\")))\n tifs.sort()\n\n logger.info(f\"Found {len(tifs)} TIFs in {folder}\")\n\n logger.debug(f\"TIFs:\\n{pformat(tifs)}\")\n\n for group_number, group in enumerate( # `enumerate` gives us the group number\n grouper(iterable=tifs, group_size=files_per_stack)\n ):\n group: List[Path] = list(group) # type:ignore\n stack = folder / f\"stack{group_number}.tif\"\n logger.info(\n f\"Making a stacking from {group[0]} to {group[-1]} into {stack}\"\n )\n\n stack_tifs(\n sources=group, destination=stack, imagemagick_stderr=imagemagick_stderr\n )\n\n # Now delete all the files\n deque(map(Path.unlink, group))\n\n\ndef stack_tifs(sources: Iterable[Path], destination: Path, imagemagick_stderr: TextIO):\n \"\"\"Call out to imagemagick to do the stacking\n\n Args:\n sources (Iterable[Path]): Images to stack from\n destination (Path): Image to stack to\n \"\"\"\n command = [\"convert\"]\n sources = map(Path.as_posix, map(Path.absolute, sources)) # type: ignore\n command.extend(map(str, sources))\n command.append(destination.absolute().as_posix())\n\n logger.debug(f\"Issuing command {pformat(command)}\")\n\n subprocess.run(\n command, check=True, stderr=imagemagick_stderr\n ) # TODO there is a more pythonic way of doing this\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n Given a list of directories this program will\n - Gather all files that end in .tif\n - Sort them lexicographically\n - Merge FILES_PER_STACK of them into a single \"stackN.tif\" file into that directory (where N is the current batch), for all files\n - Delete the original files\n\n It calls `convert`, with the assumption that it is ImageMagick. Ensure that ImageMagick is installed\n \"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\"folder\", type=Path, nargs=\"+\")\n parser.add_argument(\n \"-f\",\n \"--files-per-stack\",\n type=int,\n default=2000,\n help=\"Number of files to combine into each stack. 
Defaults to 2000\",\n )\n parser.add_argument(\n \"-i\",\n \"--imagemagick-stderr\",\n type=argparse.FileType(mode=\"w\"),\n default=sys.stdout,\n help=\"ImageMagick sometimes emits some benign errors to stderr (like unknown TIFF metadata fields). Specify a logfile to avoid stdout being cluttered\",\n )\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n type=lambda x: getattr(logging, x.upper()),\n default=logging.INFO,\n help=\"How verbose to be\",\n )\n args = parser.parse_args()\n\n logging.basicConfig(level=args.log_level)\n\n logger.debug(f\"Arguments: {args}\")\n\n stack_in_folders(\n folders=args.folder,\n files_per_stack=args.files_per_stack,\n imagemagick_stderr=args.imagemagick_stderr,\n )\n\n logger.info(\"All done!\")\n" }, { "alpha_fraction": 0.5896103978157043, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 19.263158798217773, "blob_id": "4e5a086f7df324f888735f75faf85e952aebf458", "content_id": "ab9f011d0ada081412c3b2d33a26f21f77c326e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/tests/test_utils.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "from phd_utils import utils as subject\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_grouper():\n i = [1, 2, 3]\n\n grouped = subject.grouper(iterable=i, group_size=2)\n grouped = list(map(list, grouped))\n\n assert grouped == [[1, 2], [3]]\n\n\ndef test_pformat():\n l = [str(i) * 50 for i in range(1000)]\n\n assert len(subject.pformat(l).splitlines()) == 10\n" }, { "alpha_fraction": 0.5364779829978943, "alphanum_fraction": 0.5893082022666931, "avg_line_length": 23.84375, "blob_id": "0b6652b9a42f878220bbfdad4e74d5656f689d94", "content_id": "91750ccd9e91df2fedb8fe87ff5a6eaa06a01141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1590, "license_type": "no_license", "max_line_length": 84, "num_lines": 64, "path": "/tests/test_csv_analyser.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import phd_utils.csv_analyser as subject\nimport pytest\nfrom pathlib import Path\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef reference_csv(assets: Path):\n return assets / \"reference.csv\"\n\n\[email protected]\ndef substrate_csv(assets: Path):\n return assets / \"substrate.csv\"\n\n\[email protected]\ndef pipette_csv(assets: Path):\n return assets / \"pipette.csv\"\n\n\ndef test_read_csv(reference_csv: Path):\n df = subject.read_displacement_csv(reference_csv)\n logger.debug(df)\n logger.debug(df.columns)\n\n\ndef test_calculate_displacment():\n reference = pd.DataFrame.from_dict(\n {\n \"Frame\": [1, 2, 3],\n \"X_Displacement\": [100, 200, 300],\n \"Y_Displacement\": [1000, 2000, 3000],\n }\n ).set_index(\"Frame\")\n\n substrate = pd.DataFrame.from_dict(\n {\n \"Frame\": [2, 3, 4],\n \"X_Displacement\": [202, 303, 404],\n \"Y_Displacement\": [2020, 3030, 4040],\n }\n ).set_index(\"Frame\")\n\n expected_df = pd.DataFrame.from_dict(\n {\n \"Frame\": [2, 3], # Only frames common to the two\n \"Reference_X_Displacement\": [200, 300],\n \"Reference_Y_Displacement\": [2000, 3000],\n \"Substrate_X_Displacement\": [202, 303],\n \"Substrate_Y_Displacement\": [2020, 3030],\n \"X_Delta\": [2, 3],\n \"Y_Delta\": [20, 30],\n }\n ).set_index(\"Frame\")\n\n df = 
subject.merge_and_displace_frames(reference=reference, substrate=substrate)\n\n logger.debug(df)\n\n assert df.equals(expected_df)\n" }, { "alpha_fraction": 0.6860986351966858, "alphanum_fraction": 0.6860986351966858, "avg_line_length": 17.58333396911621, "blob_id": "ef2231748a6716b4636bfdbc5a66e4121c2d2b7b", "content_id": "042108114b36a18b48882292e4e0efded5d90af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/tests/conftest.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import pytest\nfrom pathlib import Path\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef assets() -> Path:\n folder = Path(__file__).parent / \"assets\"\n assert folder.is_dir()\n return folder\n" }, { "alpha_fraction": 0.5733333230018616, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.5, "blob_id": "9b10b0c68118530348442abf1715d280f40b641f", "content_id": "25c154ee6a8f441d27dea21d497e26f48718d04c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 900, "license_type": "no_license", "max_line_length": 78, "num_lines": 40, "path": "/phd_utils/utils.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import itertools\nimport pprint\nfrom typing import Iterable\n\n\ndef grouper(iterable: Iterable, group_size: int):\n \"\"\"\n Split an iterable into group_size groups\n https://stackoverflow.com/a/8998040/9838189\n \"\"\"\n\n it = iter(iterable)\n while True:\n chunk_it = itertools.islice(it, group_size)\n try:\n first_el = next(chunk_it)\n except StopIteration:\n return\n yield itertools.chain((first_el,), chunk_it)\n\n\ndef pformat(\n object,\n indent=1,\n width=80,\n depth=None,\n *,\n compact=False,\n sort_dicts=True,\n maxlen: int = 10\n):\n if isinstance(object, list) and len(object) > maxlen:\n tail = object[-1]\n object = object[: maxlen - 2]\n object.append(\"...\")\n object.append(tail)\n\n return pprint.pformat(\n object, indent=1, width=80, depth=None, compact=False, sort_dicts=True\n )\n" }, { "alpha_fraction": 0.5880370736122131, "alphanum_fraction": 0.6242628693580627, "avg_line_length": 25.977272033691406, "blob_id": "032d668fb5cdd5a2b66aa1afcac2a68812f6ed92", "content_id": "01f4be196ed10c8a8be0fff4e13135b512a50bc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1187, "license_type": "no_license", "max_line_length": 120, "num_lines": 44, "path": "/tests/test_resample.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import phd_utils.resample as subject\n\nimport pytest\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_resample():\n fps500 = (\n pd.timedelta_range(start=\"0s\", end=\"0.1s\", periods=50, name=\"instant\")\n .to_frame(index=False)\n .set_index(\"instant\")\n )\n\n fps600 = (\n pd.timedelta_range(start=\"0s\", end=\"0.1s\", periods=60, name=\"instant\")\n .to_frame(index=False)\n .set_index(\"instant\")\n )\n\n fps500[\"Reference\"] = range(1, 51)\n fps600[\"Substrate\"] = range(1, 61)\n\n logger.debug(fps500)\n logger.debug(fps600)\n\n ##########\n # METHOD 1\n ##########\n # Merge\n df = pd.concat([fps500, fps600]).sort_index()\n\n # Optionally interpolate at this step\n df = df.interpolate(\n method=\"linear\"\n ) # Could also be \"nearest\" etc. 
See https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html\n\n # Optionally resample to a fixed interval.\n # May be worth avoiding if future steps are time-zone aware\n interval = pd.Timedelta(value=10, unit=\"milliseconds\")\n df = df.resample(interval).mean() # Could also be min, max, sum etc\n logger.debug(df)\n" }, { "alpha_fraction": 0.7032359838485718, "alphanum_fraction": 0.7048144936561584, "avg_line_length": 23.365385055541992, "blob_id": "da7c3a35a0e0b6ce993fa63ec80a57f1b254be1d", "content_id": "9db28c77ad4bcb93deff3a5d86a73bff5ac16d30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 85, "num_lines": 52, "path": "/tests/test_tiff_stacker.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import filecmp\nimport logging\nimport shutil\nimport sys\n\nfrom pathlib import Path\nfrom typing import Iterable\n\nimport phd_utils.tiff_stacker as subject\nimport pytest\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef single_images(assets: Path) -> Iterable[Path]:\n files = [\n path\n for path in assets.iterdir()\n if path.name.startswith(\"single\") and path.name.endswith(\"tif\")\n ]\n files.sort()\n return files\n\n\[email protected]\ndef correctly_stacked(assets: Path):\n return assets / \"stacked.tif\"\n\n\[email protected]\ndef experiment_folder(single_images: Iterable[Path], tmp_path: Path):\n for image in single_images:\n shutil.copy(image, tmp_path)\n return tmp_path\n\n\ndef test_stack_tifs(\n single_images: Iterable[Path], correctly_stacked: Path, tmp_path: Path\n):\n destination = tmp_path / \"stacked.tif\"\n subject.stack_tifs(\n sources=single_images, destination=destination, imagemagick_stderr=sys.stdout\n )\n assert filecmp.cmp(correctly_stacked, destination, shallow=False)\n\n\ndef test_stack_in_folders(experiment_folder: Path):\n subject.stack_in_folders(\n [experiment_folder], files_per_stack=2, imagemagick_stderr=sys.stdout\n )\n assert len(list(experiment_folder.iterdir())) == 3\n" }, { "alpha_fraction": 0.7067669034004211, "alphanum_fraction": 0.7067669034004211, "avg_line_length": 13.777777671813965, "blob_id": "f77643c53463d438c11de54411d327089e986a56", "content_id": "f82a00fc221f218b455ca50e61a25793a5720603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/phd_utils/resample.py", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef resample(a: pd.DataFrame, b: pd.DataFrame):\n pass\n" }, { "alpha_fraction": 0.574638843536377, "alphanum_fraction": 0.6356340050697327, "avg_line_length": 17.878787994384766, "blob_id": "e4b2e03472bebbd2ccd73d6a4440d11eea5a3f75", "content_id": "9a9b48feccfa441315ce171a8b9c316cf41adede", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 623, "license_type": "no_license", "max_line_length": 44, "num_lines": 33, "path": "/pyproject.toml", "repo_name": "zuheir-zaidon/phd-utils", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"phd_utils\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\n \"Aatif Syed <[email protected]>\",\n \"Zuheir Zaidon\"\n]\n\n[tool.poetry.scripts]\ntiff-stacker = \"phd_utils:tiff_stacker.main\"\ncsv-analyser = 
\"phd_utils:csv_analyser.main\"\n\n[tool.poetry.dependencies]\npython = \">=3.8,<3.10\"\npandas = \"^1.2.4\"\n\n[tool.poetry.dev-dependencies]\nblack = \"^21.4b2\"\npytest = \"^6.2.3\"\nmypy = \"^0.812\"\nipython = \"^7.22.0\"\nipykernel = \"^5.5.3\"\nmatplotlib = \"^3.4.1\"\nbokeh = \"^2.3.2\"\n\n[tool.pytest.ini_options]\nlog_cli = true\nlog_cli_level = \"DEBUG\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n" } ]
11
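The `Corrected_Deflection` computed in csv_analyser.py above appears to be the textbook end-deflection of a cantilever (length L, flexural rigidity EI) under a point force F applied at distance a from the clamp. Spelled out under my reading of the code, with x_0 the `initial_x_displacement`; the extra 10^16 factor in the source looks like a micrometre/metre unit conversion on EI and is treated as an assumption, so it is omitted here:

```latex
\delta_{\mathrm{end}} = \frac{F\,a^{2}\,(3L - a)}{6\,EI},
\qquad
F = k_{\mathrm{substrate}}\left(\frac{\Delta x}{\cos\beta} + x_{0}\right),
\qquad
a = L - \bigl(y_{\mathrm{pipette}} - y_{\mathrm{substrate\ tip}}\bigr)
```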
destrotechs/codesharinghub
https://github.com/destrotechs/codesharinghub
404d096cabb522195e4b31048daceb798b7e6500
cd31a5443d0040c3b9c5bfa8810a1b09ef69aba3
c1a6f676c5cd87a3638537bda62e95e0ea6254d3
refs/heads/main
2023-06-09T13:09:05.507924
2021-06-26T13:02:26
2021-06-26T13:02:26
380,502,208
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5601660013198853, "alphanum_fraction": 0.5678263902664185, "avg_line_length": 35.01149368286133, "blob_id": "9a09df9269e0954869ae8d3b8f14f9a5a68bc776", "content_id": "d11406912b19c9148885365ee55f0eb09ef37dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3133, "license_type": "no_license", "max_line_length": 115, "num_lines": 87, "path": "/app.py", "repo_name": "destrotechs/codesharinghub", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,redirect,url_for,request,flash\nimport flask\nimport os\nfrom random import randint\napp = flask.Flask(__name__)\nUPLOAD_FOLDER = '/snippets'\n\napp.secret_key = \"ABC\"\n\n\[email protected]('/',methods=['GET','POST'])\ndef index():\n if request.method == 'GET':\n\n # os.system('cmd /k \"python app.py\"')\n return render_template('index.html')\n elif request.method == 'POST':\n language = request.form.get('language')\n\n code = request.form.get('code')\n\n description = request.form.get('description')\n\n \n if language == 'python':\n filename = randint(1,1000000)\n file = open(\"snippets/python/\"+str(filename)+\".txt\",\"w\")\n file.write(str(code)+\"\\n\")\n file.write(\"desk\"+str(description))\n file.close()\n flash(\"success\")\n return redirect(url_for('index'))\n if language == 'php':\n filename = randint(1,1000000)\n file = open(\"snippets/php/\"+str(filename)+\".txt\",\"w\")\n file.write(str(code)+\"\\n\")\n file.write(\"desk\"+str(description))\n file.close()\n flash(\"success\")\n return redirect(url_for('index'))\n if language == 'java':\n filename = randint(1,1000000)\n file = open(\"snippets/java/\"+str(filename)+\".txt\",\"w\")\n file.write(str(code)+\"\\n\")\n file.write(\"desk\"+str(description))\n file.close()\n flash(\"success\")\n return redirect(url_for('index')) \n \n\n\[email protected]('/snippets/view/<language>',methods=['GET'])\ndef snippet_view(language):\n if language == 'python':\n arr_txt = [x for x in os.listdir('snippets/python/') if x.endswith(\".txt\")]\n content = list()\n for snippet in arr_txt:\n file = open(\"snippets/python/\"+snippet,'r')\n filecontent = file.read()\n code = filecontent.split(\"desk\")\n content.append(code)\n length = len(arr_txt)\n return render_template(\"languages.html\",snippets = arr_txt,content=content,length=length,language=language)\n if language == 'php':\n arr_txt = [x for x in os.listdir('snippets/php/') if x.endswith(\".txt\")]\n content = list()\n for snippet in arr_txt:\n file = open(\"snippets/php/\"+snippet,'r')\n filecontent = file.read()\n code = filecontent.split(\"desk\")\n content.append(code)\n length = len(arr_txt)\n return render_template(\"languages.html\",snippets = arr_txt,content=content,length=length,language=language)\n if language == 'java':\n arr_txt = [x for x in os.listdir('snippets/java/') if x.endswith(\".txt\")]\n content = list()\n for snippet in arr_txt:\n file = open(\"snippets/java/\"+snippet,'r')\n filecontent = file.read()\n code = filecontent.split(\"desk\")\n content.append(code)\n length = len(arr_txt)\n return render_template(\"languages.html\",snippets = arr_txt,content=content,length=length,language=language)\n\n\n\napp.run(debug=False)\n" }, { "alpha_fraction": 0.8387096524238586, "alphanum_fraction": 0.8387096524238586, "avg_line_length": 45.5, "blob_id": "cc83367a2624a9b68170c6dfb70019095d6fb480", "content_id": "1329d04d07d9fcd1840bf32aa2781535f5d872f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 75, "num_lines": 2, "path": "/README.md", "repo_name": "destrotechs/codesharinghub", "src_encoding": "UTF-8", "text": "# codesharinghub\na web platform fro easily sharing code snippets with friends and colleagues\n" } ]
2
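app.py above stores each snippet as a plain text file, with the literal string "desk" separating the code from its description, and recovers both later with split("desk"). A minimal read-back sketch under that same convention; `load_snippet` and the example path are illustrative, not from the repo:

```python
from pathlib import Path

def load_snippet(path: str):
    # Mirror app.py's convention: everything before the first "desk" is code,
    # everything after it is the description.
    text = Path(path).read_text()
    code, _, description = text.partition("desk")
    return code.rstrip("\n"), description

# e.g. code, description = load_snippet("snippets/python/123456.txt")
```

Note the fragility of the separator as a design choice: any snippet whose code contains the substring "desk" would be split early, which is why a structured format (one JSON object per snippet, say) would be the safer evolution of this scheme.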
twang126/gh6_streaming
https://github.com/twang126/gh6_streaming
bd7445c03f6e8307a3c42e9db7837419e137ee57
06c095b824685b34409ad821d93492c2ac7cf67f
63a2925ec2206a54b0a0de42d5a1166d198ed099
refs/heads/main
2023-01-19T02:44:10.245442
2020-11-28T20:10:44
2020-11-28T20:10:44
316,654,467
0
0
null
2020-11-28T04:14:16
2020-11-28T04:54:32
2020-11-28T20:10:44
Python
[ { "alpha_fraction": 0.5903614163398743, "alphanum_fraction": 0.6385542154312134, "avg_line_length": 15.600000381469727, "blob_id": "1af1e0102fbce0948c321ae65ee803129026d2e4", "content_id": "68c337fe3f015f4be591c571e11460e373a1aff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/common/time_utils.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "import time\n\n\ndef get_current_time_ms() -> int:\n return int(time.time() * 1000)\n" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 30.799999237060547, "blob_id": "c0301873613e7659c4818b7ea8680068d6b18a3a", "content_id": "093d52ec788d4d3f6fae1b5f9135dfe8332ec445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 44, "num_lines": 5, "path": "/schema/attributes.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "EVENT_NAME_FIELD: str = \"event_name\"\nOCCURRED_AT_MS_FIELD: str = \"occurred_at_ms\"\nLAT_FIELD: str = \"lat\"\nLNG_FIELD: str = \"lng\"\nGEOHASH_FIELD: str = \"geohash\"\n" }, { "alpha_fraction": 0.6906474828720093, "alphanum_fraction": 0.6978417038917542, "avg_line_length": 26.799999237060547, "blob_id": "af3967b090f2e25a58f60d450dc0100b20b0ca49", "content_id": "d8ee9cb65abd22217e89b2198037a806f4f8a27c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 74, "num_lines": 5, "path": "/common/geo_utils.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "import geohash\n\n\ndef lat_lng_to_geohash(lat: float, lng: float, precision: int = 5) -> str:\n return geohash.encode(lat, lng, precision)\n" }, { "alpha_fraction": 0.6514360308647156, "alphanum_fraction": 0.6592689156532288, "avg_line_length": 29.039215087890625, "blob_id": "dab2c94fc79d2878c6afa299e70878a4f10d545b", "content_id": "4cd3f59be037bc14833ecf4cb01d2bc1c3778561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 86, "num_lines": 51, "path": "/transforms/default.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import Dict\n\nimport apache_beam as beam\n\nfrom common import geo_utils\nfrom common import time_utils\nfrom schema import attributes\nfrom schema import event_schema\n\n\nclass _DefaultDoFn(beam.DoFn):\n def __init__(self, lateness_threshold_seconds: int = 60):\n self.lateness_threshold_seconds = 60\n\n def is_event_late(self, event: Dict[str, Any]) -> bool:\n lateness = abs(\n event[attributes.OCCURRED_AT_MS_FIELD] - time_utils.get_current_time_ms(),\n )\n\n return lateness > self.lateness_threshold_seconds * 1000\n\n def should_pass_event(self, event: Dict[str, Any]) -> bool:\n if not event_schema.is_valid_event(event,):\n return False\n\n if self.is_event_late(event):\n return False\n\n return True\n\n def annotate_fields_with_gh5(self, event: Dict[str, Any]) -> Dict[str, Any]:\n event[attributes.GEOHASH_FIELD] = geo_utils.lat_lng_to_geohash(\n event[attributes.LAT_FIELD], event[attributes.LNG_FIELD],\n )\n\n return event\n\n def process(self, event):\n if self.should_pass_event(event):\n yield 
self.annotate_fields_with_gh5(event)\n\n\nclass DefaultTransform(beam.PTransform):\n def __init__(self, lateness_threshold_seconds: int = 60):\n self.lateness_threshold_seconds = lateness_threshold_seconds\n\n def expand(self, pcol): # type: ignore\n default_dofn = _DefaultDoFn(self.lateness_threshold_seconds)\n\n return pcol | beam.ParDo(default_dofn)\n" }, { "alpha_fraction": 0.6700611114501953, "alphanum_fraction": 0.6700611114501953, "avg_line_length": 22.380952835083008, "blob_id": "8f6461a132d5234b8797caaba1a5ba95ef8c5e80", "content_id": "76aba82c610e1b53447e6f4802eb1dfa8333a3e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/common/testing_utils.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import List\nimport apache_beam as beam\n\n\nclass _WriteToMemorySink(beam.DoFn):\n results: List[Any] = []\n\n def process(self, element): # type: ignore\n _WriteToMemorySink.results.append(element)\n\n\nclass MemorySink(beam.PTransform):\n def __init__(self):\n self.write_do_fn = _WriteToMemorySink()\n\n def get_results(self):\n return self.write_do_fn.results\n\n def expand(self, pcol):\n pcol | beam.ParDo(self.write_do_fn)\n" }, { "alpha_fraction": 0.7586206793785095, "alphanum_fraction": 0.7586206793785095, "avg_line_length": 28, "blob_id": "41d75c9a9359851902854ed95cb64f2fc42bef7f", "content_id": "74685678e0116758818e0a152d824d90d548ad3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 66, "num_lines": 5, "path": "/schema/aggregation.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n\nStatefulAggregationResult = namedtuple(\n \"StatefulAggregationResult\", [\"key\", \"feature_name\", \"value\"],\n)\n" }, { "alpha_fraction": 0.539432168006897, "alphanum_fraction": 0.5662460327148438, "avg_line_length": 27.81818199157715, "blob_id": "413cb16b00f964b1286b1cdc8d40af1a38415b00", "content_id": "7912aa1609a3ed8e80e333e6e84e022ab1828b36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1902, "license_type": "no_license", "max_line_length": 85, "num_lines": 66, "path": "/test/transforms/test_default_transform.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "import time\n\nimport apache_beam as beam\nimport pytest\n\nfrom common import testing_utils\nfrom schema import attributes\nfrom transforms import default\n\n\[email protected]\ndef sample_events():\n return [\n # gh5 is: v0000\n {\n attributes.EVENT_NAME_FIELD: \"event_name\",\n attributes.LAT_FIELD: 45,\n attributes.LNG_FIELD: 45,\n attributes.OCCURRED_AT_MS_FIELD: int(time.time() * 1000),\n \"event_id\": 1,\n },\n # missing event name field\n {\n attributes.LAT_FIELD: 45,\n attributes.LNG_FIELD: 45,\n attributes.OCCURRED_AT_MS_FIELD: int(time.time() * 1000),\n },\n # missing lat field\n {\n attributes.EVENT_NAME_FIELD: \"event_name\",\n attributes.LNG_FIELD: 45,\n attributes.OCCURRED_AT_MS_FIELD: int(time.time() * 1000),\n },\n # missing lng field\n {\n attributes.EVENT_NAME_FIELD: \"event_name\",\n attributes.LAT_FIELD: 45,\n attributes.OCCURRED_AT_MS_FIELD: int(time.time() * 1000),\n },\n # missing occured_at_ms field\n {\n attributes.EVENT_NAME_FIELD: \"event_name\",\n 
attributes.LAT_FIELD: 45,\n attributes.LNG_FIELD: 45,\n },\n # too out of date\n {\n attributes.EVENT_NAME_FIELD: \"event_name\",\n attributes.LAT_FIELD: 45,\n attributes.LNG_FIELD: 45,\n attributes.OCCURRED_AT_MS_FIELD: 0,\n },\n ]\n\n\ndef test_default_transform(sample_events):\n pipeline = beam.Pipeline()\n memory_sink = testing_utils.MemorySink()\n pipeline | beam.Create(sample_events,) | default.DefaultTransform() | memory_sink\n\n pipeline.run()\n results = memory_sink.get_results()\n\n assert len(results) == 1\n assert results[0][\"event_id\"] == 1\n assert results[0][\"geohash\"] == \"v0000\"\n" }, { "alpha_fraction": 0.672386884689331, "alphanum_fraction": 0.672386884689331, "avg_line_length": 25.70833396911621, "blob_id": "ea7cdf5f9f861f95f6c594ea733275fc297505c3", "content_id": "6d59e745b9c9ec0db2592194212015c2f7ca126a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 80, "num_lines": 24, "path": "/schema/event_schema.py", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "from typing import Any\nfrom typing import Dict\nfrom typing import Set\n\nfrom schema import attributes\n\n\nREQUIRED_EVENT_ATTRS_TO_TYPES: Dict[str, Set[Any]] = {\n attributes.EVENT_NAME_FIELD: {str},\n attributes.LAT_FIELD: {int, float},\n attributes.LNG_FIELD: {int, float},\n attributes.OCCURRED_AT_MS_FIELD: {int, float, str},\n}\n\n\ndef is_valid_event(event: Dict[Any, Any]) -> bool:\n for event_attr_name, expected_type in REQUIRED_EVENT_ATTRS_TO_TYPES.items():\n if event_attr_name not in event:\n return False\n\n if type(event[event_attr_name]) not in expected_type:\n return False\n\n return True\n" }, { "alpha_fraction": 0.45945945382118225, "alphanum_fraction": 0.683783769607544, "avg_line_length": 15.086956977844238, "blob_id": "d4a58a4d22646a6d9295e084b1cfd217583ea99a", "content_id": "d02c1e2bf3b80f3453085fd784e33f4786eb2b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 740, "license_type": "no_license", "max_line_length": 26, "num_lines": 46, "path": "/requirements.txt", "repo_name": "twang126/gh6_streaming", "src_encoding": "UTF-8", "text": "apache-beam==2.25.0\nattrs==20.3.0\navro-python3==1.9.2.1\ncertifi==2020.11.8\nchardet==3.0.4\ncrcmod==1.7\ndill==0.3.1.1\ndocopt==0.6.2\ndocutils==0.16\nfastavro==1.2.0\nfuture==0.18.2\ngrpcio==1.33.2\nhdfs==2.5.8\nhttplib2==0.17.4\nhypothesis==5.41.4\nidna==2.10\nimportlib-metadata==3.1.0\niniconfig==1.1.1\ninstall==1.3.4\nmock==2.0.0\nnumpy==1.19.4\noauth2client==4.1.3\npackaging==20.5\npbr==5.5.1\npluggy==0.13.1\nprotobuf==3.14.0\npy==1.9.0\npyarrow==0.17.1\npyasn1==0.4.8\npyasn1-modules==0.2.8\npydot==1.4.1\npygeohash==1.2.0\npymongo==3.11.1\npyparsing==2.4.7\npytest==6.1.2\npython-dateutil==2.8.1\npython-geohash==0.8.5\npytz==2020.4\nrequests==2.25.0\nrsa==4.6\nsix==1.15.0\nsortedcontainers==2.3.0\ntoml==0.10.2\ntyping-extensions==3.7.4.3\nurllib3==1.26.2\nzipp==3.4.0\n" } ]
9
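The gh6_streaming transform above validates events, drops ones outside the lateness threshold, and annotates survivors with a precision-5 geohash. A minimal sketch of wiring it into a batch pipeline, following the pattern in test_default_transform; the event values here are invented for illustration:

```python
import time
import apache_beam as beam
from transforms import default

# Field names come from schema/attributes.py; values are made up.
events = [{
    "event_name": "ride_start",
    "lat": 40.7128,
    "lng": -74.0060,
    "occurred_at_ms": int(time.time() * 1000),
}]

with beam.Pipeline() as pipeline:
    (
        pipeline
        | beam.Create(events)
        | default.DefaultTransform(lateness_threshold_seconds=60)
        | beam.Map(print)  # each surviving event now carries a "geohash" key
    )
```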
JamesDelfini/Python-SandBox-Starter
https://github.com/JamesDelfini/Python-SandBox-Starter
730017c93ab922013b1fac8598277bf2f39eab74
b47cabd8f4e9b54845e1b6a66247a40d78e9535b
0618131dce7ef4dcc7437149498483e70261bc90
refs/heads/master
2020-06-02T00:44:41.738484
2019-06-10T16:31:39
2019-06-10T16:31:39
190,983,171
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5873388051986694, "alphanum_fraction": 0.6060961484909058, "avg_line_length": 26.54838752746582, "blob_id": "ec36933e4256814ae3bf8ac7f04e3c4bf85809f5", "content_id": "60ab0be4a00970b857549a91a2c9162fb9e11606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 162, "num_lines": 31, "path": "/functions.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A function is a block of code which only runs when it is called. In Python, we do not use parentheses and curly brackets, we use indentation with tabs or spaces\n\n###################################\n## Description: Create Function\n###################################\ndef sayHello(name='Delfini'):\n print(f'Hello {name}')\n\n\nsayHello('James')\nsayHello()\n\n# Return a Value in Function \ndef getSum(num1, num2):\n total = num1 + num2\n return total\n\nnum = getSum(3, 4)\nprint(num)\nnum += 3\nprint(num)\n\n# A lambda function is a small anonymous function.\n# A lambda function can take any number of arguments, but can only have one expression. Very similar to JS arrow functions\n\n###################################\n## Description: Create Lambda Function\n###################################\ngetSum2 = lambda num1, num2: num1 + num2\n\nprint(getSum2(10, 3))" }, { "alpha_fraction": 0.6300246119499207, "alphanum_fraction": 0.649712860584259, "avg_line_length": 19.33333396911621, "blob_id": "c739d5757560ac74416e0895eee6ecebb026169b", "content_id": "280783e367f9787c0c449925697356ffc9274a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "no_license", "max_line_length": 96, "num_lines": 60, "path": "/dictionaries.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A Dictionary is a collection which is unordered, changeable and indexed. 
No duplicate members.\n\n###################################\n## Description: Create Dictionary\n###################################\n# Create key value pairs; like JSON or Objects in JavaScript\nperson = {\n 'first_name': 'Sauer',\n 'last_name': 'Voussoir',\n 'age': 30\n}\n\n# Use constructor\nperson2 = dict(first_name='James', last_name='Delfini')\n\nprint(person, type(person))\nprint(person2, type(person2))\n\n# Get a value by key\nprint(person['first_name'])\nprint(person.get('last_name'))\n\n# Add key/value\nperson['phone'] = '555-5555-555'\nprint(person)\n\n# Get dictionary keys Keys()\nprint(person.keys())\n\n# Get dictionary items Items()\nprint(person.items())\n\n# Copy dictionary Copy(); like the Spread Operator in JavaScript\nperson3 = person.copy()\nperson3['city'] = 'Boston'\n\nprint(person3)\n\n# Remove an Item Del() & Pop()\ndel(person['age'])\nperson.pop('phone')\nprint(person)\n\n# Clear dictionary Clear()\nperson.clear()\nprint(person)\n\n# Get Length\nprint(len(person3))\n\n# List of Dictionary\npeople = [\n {'name': 'Martha', 'age': 30},\n {'name': 'Kevin', 'age': 25}\n]\n\nprint(people)\n\n# Get a value from a list of dictionaries\nprint(people[1]['name'])" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 12, "blob_id": "00c2246fd318ff22a4c6c1247643d3adbb83e1a9", "content_id": "95d59d34be236775ad20609575d867068bd6d5ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/README.md", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# Python-SandBox-Starter\n\nPython 3.7.3\n" }, { "alpha_fraction": 0.6145404577255249, "alphanum_fraction": 0.6186556816101074, "avg_line_length": 24.365, "blob_id": "bcbfaab9bce826ebcd8e5cb3914794eb3f5c050a", "content_id": "7ae280b644725a397c6cdbfc8bfae0135bec5282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/files.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# Python has functions for creating, reading, updating, and deleting files.\n\n###################################\n## Description: Create File\n# File created is \"myfile.txt\"\n###################################\n\n# Open a File\nmyFile = open('myfile.txt', 'w') # w - for write\n\n# Get some info\nprint('Name: ', myFile.name)\nprint('Is Closed: ', myFile.closed)\nprint('Opening Mode: ', myFile.mode)\n\n# Write to File\nmyFile.write('Python rocks!')\nmyFile.write(' and JavaScript')\nmyFile.close()\n\n# Append to file\nmyFile = open('myfile.txt', 'a') # a - append to the file\nmyFile.write('. PHP also rocks! YEEEAH!')\nmyFile.close()\n\n# Read from file\nmyFile = open('myfile.txt', 'r+') # r+ - reading the file\ntext = myFile.read(100)\nprint(text)" }, { "alpha_fraction": 0.5916359424591064, "alphanum_fraction": 0.599016010761261, "avg_line_length": 19.350000381469727, "blob_id": "b4c37302023db0158874caeee3b262ba278fd7c2", "content_id": "4316091cea52b36daadc4ff9a9f8a5c8a334549f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 77, "num_lines": 40, "path": "/variables.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A variable is a container for a value, which can be of various types\n\n'''\nThis is a \nmultiline comment\nor docstring (used to define a function's purpose)\ncan be single or double quotes\n'''\n\n\"\"\"\nVARIABLE RULES:\n - Variable names are case sensitive (name and NAME are different variables)\n - Must start with a letter or an underscore\n - Can have numbers but can not start with one\n\"\"\"\n###################################\n# Description : Variables\n################################### \n# Single Assignment\nx = 1 # int\ny = 2.5 # float\nname = 'James' # str\nisValidate = True # boolean\n\n# Multiple Assignment\nx, y, name, isValidate = (1, 2.5, 'James', True)\n\nprint(x, y, name, isValidate)\n\n# Basic Math\na = x + y\n\nprint (a)\n\n# Casting\nx = str(x)\ny = int(y) \nz = float(y)\n\nprint(type(x), y, z)" }, { "alpha_fraction": 0.5319488644599915, "alphanum_fraction": 0.5415335297584534, "avg_line_length": 28.85714340209961, "blob_id": "c78866fb798d19cbab07137283be77a867565727", "content_id": "2b3d8d3ab5bfce72dd29d9872058198a41281474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 91, "num_lines": 21, "path": "/py_json.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# JSON is commonly used with data APIs. Here's how we can parse JSON into a Python dictionary\n\nimport json\n\n###################################\n## Description: Deserialize JSON\n###################################\nuserJSON = '{\"first_name\": \"Hazel\", \"last_name\": \"Nornea\", \"age\": 21}'\n\n# Parse to Dictionary. It is like JSON.parse(<JSON_DATA>) in JavaScript\nuser = json.loads(userJSON)\nprint(user)\n\nprint(user['first_name'])\n\n###################################\n## Description: Serialize to JSON\n###################################\ncar = {'make': 'Ford', 'model': 'Mustang', 'year': 1970}\ncarJSON = json.dumps(car)\nprint(carJSON)" }, { "alpha_fraction": 0.6093922853469849, "alphanum_fraction": 0.648618757724762, "avg_line_length": 25.871429443359375, "blob_id": "780a7ce74d67aa7e9367b7276dcdbb26b90bbac4", "content_id": "9365aa1ab6f83c6f928491177c5cc91b16200d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1810, "license_type": "no_license", "max_line_length": 73, "num_lines": 70, "path": "/stretch.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "## 10 Python Tips and Tricks For Writing Better Code\n# https://www.youtube.com/watch?v=C-gEQdGVXbk\n\n# 1. Ternary Conditionals aka inline if statement\nisValidated = True\nmyVar = 777 if isValidated else 666\n\nprint(myVar)\n\n# 2. Underscore Placeholders\n# To make the code readable\nnum1 = 10_000_000_000\nnum2 = 100_000_000\n\ntotal = num1 + num2\n\nprint(f'{total:,}')\n# 10,100,000,000\n\n# 3. Context Managers\nwith open('myfile.txt', 'r') as f:\n file_contents = f.read()\n\nwords = file_contents.split(' ')\nword_count = len(words)\nprint(word_count)\n\n# 4. Enumerate using enumerate(<list>, start=n)\nnames = ['Corey', 'Chris', 'Dave', 'Travis']\nfor index, name in enumerate(names, start=1):\n print(index, name)\n# 1 Corey\n# 2 Chris\n# 3 Dave\n# 4 Travis\n\n# 5. zip(<list>, <list>, ...)\nnames = ['Peter Parker', 'Clark Kent', 'Wade Wilson', 'Bruce Wayne']\nheroes = ['Spiderman', 'Superman', 'Deadpool', 'Batman']\nuniverses = ['Marvel', 'DC', 'Marvel', 'DC']\nfor name, hero, universe in zip(names, heroes, universes): # Unpacking\n print(f'{name} is actually {hero} from {universe}') # Prints a formatted string\n# Peter Parker is actually Spiderman from Marvel\n# Clark Kent is actually Superman from DC\n# Wade Wilson is actually Deadpool from Marvel\n# Bruce Wayne is actually Batman from DC\n\n# Use the zip_longest function from the itertools module for unbalanced lists\nfor value in zip(names, heroes, universes): # Packing\n print(value) # Outputs a Tuple\n# ('Peter Parker', 'Spiderman', 'Marvel')\n# ('Clark Kent', 'Superman', 'DC')\n# ('Wade Wilson', 'Deadpool', 'Marvel')\n# ('Bruce Wayne', 'Batman', 'DC')\n\n# 6. Unpacking\n# Normal\nitems = (1, 2)\nprint(items)\n\n# Unpacking\na, _ = (1, 2) # Use \"_\" for a variable that will not be used \na, b, *c = (1, 2, 3, 4, 5) # Use \"*\" to collect the remaining values into one variable\na, b, *c, d = (1, 2, 3, 4, 5, 6, 7)\nprint(a)\nprint(b)\nprint(c)\nprint(d)\n\n# 7. Setattr/Getattr" }, { "alpha_fraction": 0.6182065010070801, "alphanum_fraction": 0.63722825050354, "avg_line_length": 22.5, "blob_id": "1742480e16708d729d775bac1caa512995dcd4fb", "content_id": "c8fb15bf6de16fe67591c32e43c1f5da25130a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "no_license", "max_line_length": 152, "num_lines": 64, "path": "/conditionals.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# If/Else conditions are used to decide whether to do something based on something being true or false\n\nx = 3\ny = 50\n\n# Comparison Operators (==, !=, >, <, >=, <=) - Used to compare values\n\n# IF/ELSE Statement\nif x > y:\n print(f'{x} is greater than {y}')\nelse:\n print(f'{y} is greater than {x}')\n\n\n# IF/ELIF/ELSE Statement\nif x > y:\n print(f'{x} is greater than {y}')\nelif x == y:\n print(f'{x} is equal to {y}')\nelse:\n print(f'{y} is greater than {x}')\n\n# NESTED IF STATEMENT\nif x > 2:\n if x<=10:\n print(f'{x} is greater than 2 and less than or equal to 10')\n\n\n# Logical operators (and, or, not) - Used to combine conditional statements\n\n# AND STATEMENT\nif x > 2 and x<=20:\n print(f'{x} is greater than 2 and less than or equal to 20')\n\n# OR STATEMENT\nif x > 2 or y > 2:\n print(f'{x} is greater than 2 or {y} is greater than 2')\n\n# NOT STATEMENT\nif not(x==y):\n print(f'{x} is not equal to {y}')\n\n# Membership Operators (in, not in) - Membership operators are used to test if a sequence is present in an object\nnumbers = [1, 2, 3, 4, 5]\n\n# IN STATEMENT\nif x in numbers:\n print(f'{x} in numbers')\n\n# NOT IN STATEMENT\nif y not in numbers:\n print(f'{y} not in numbers')\n\n# Identity Operators (is, is not) - Compare the objects, not if they are equal, but if they are actually the same 
object, with the same memory location:\n\n# IS STATEMENT\nx = 50\nif x is y:\n print(x is y)\n\n# IS NOT STATEMENT\nx = 20\nif x is not y:\n print(x is not y)" }, { "alpha_fraction": 0.6232365369796753, "alphanum_fraction": 0.6232365369796753, "avg_line_length": 27.046510696411133, "blob_id": "916f644a80b7ced264d1f01946e43b1a1617c3bd", "content_id": "4bd8e94287598b128fc355e4a272ba7bfdb59d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 93, "num_lines": 43, "path": "/modules.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A module is basically a file containing a set of functions to include in your application. \n# There are core python modules, modules you can install using the pip package manager \n# (including Django) as well as custom modules\n\n###################################\n## Description: Core Modules\n###################################\nimport datetime\nfrom datetime import date\nimport time\nfrom time import time\n\n# today = datetime.date.today()\ntoday = date.today()\n# timestamp = time.time()\ntimestamp = time()\n\nprint(today, timestamp)\n\n\n###################################\n## Description: Importing Modules third party\n# Use pip package manager to install\n# pip install camelcase; this will install globally\n# pip freeze; will show all the globally installed packages\n###################################\n# import camelcase\n# c = camelcase.CamelCase()\nfrom camelcase import CamelCase\nc = CamelCase()\nprint(c.hump('hello there world'))\n\n###################################\n## Description: Import Custom Modules\n###################################\nimport validator\nfrom validator import validate_email\n\nemail = '[email protected]'\nif (validate_email(email)):\n print('Email is valid')\nelse:\n print('Email is invalid')" }, { "alpha_fraction": 0.6290130615234375, "alphanum_fraction": 0.6492270827293396, "avg_line_length": 18.136363983154297, "blob_id": "f80394af289589547cb55ea327bd04d4f60deb03", "content_id": "fd64b928778b89daf3908f5e696395a4e3fba6e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "no_license", "max_line_length": 83, "num_lines": 44, "path": "/lists.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A List is a collection which is ordered and changeable. 
Allows duplicate members.\n\n###################################\n## Description: Create list\n###################################\nnumbers = [1, 2, 3, 4, 5]\nfruits = ['Apples', 'Oranges', 'Grapes', 'Pears']\n\n# Use a Constructor\nnumbers2 = list((1, 2, 3, 4, 5))\n\nprint(numbers, numbers2, fruits)\nprint(fruits[1], len(fruits[1]))\n\n# Append to list\nfruits.append('Mango')\nprint(fruits)\n\n# Remove from list\nfruits.remove('Grapes')\nprint(fruits)\n\n# Insert into position\nfruits.insert(2, 'Strawberries')\nprint(fruits)\n\n# Change Value\nfruits[0] = 'Blueberries'\n\n# Remove with Pop()\nfruits.pop(2)\nprint(fruits)\n\n# Reverse list Reverse()\nfruits.reverse()\nprint(fruits)\n\n# Sort List Sort()\nfruits.sort()\nprint(fruits)\n\n# Reverse Sort Sort(reserve=<Boolean>)\nfruits.sort(reverse=True)\nprint(fruits)" }, { "alpha_fraction": 0.555031418800354, "alphanum_fraction": 0.5621069073677063, "avg_line_length": 27.288888931274414, "blob_id": "06178450c314e2db39aaed3574e6e4da7a2d4aba", "content_id": "3f6e4270f92debf631bda38e3922d532483d8a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 93, "num_lines": 45, "path": "/classes.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A class is like a blueprint for creating objects. \n# An object has properties and methods(functions) associated with it. \n# Almost everything in Python is an object\n\n###################################\n## Description: Create Class\n###################################\nclass User:\n # Constructor\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n def greeting(self):\n return f'My name is {self.name} and I am {self.age}'\n def has_birthday(self):\n self.age += 1\n\n# Init User Object\njames = User('James Delfini', '[email protected]', 22)\n\nprint(type(james))\nprint(james.age)\njames.has_birthday()\nprint(james.age)\nprint(james.greeting())\n\n###################################\n## Description: Extend Class\n###################################\nclass Customer(User):\n # Constructor\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n def set_balance(self, balance):\n self.balance = balance\n def greeting(self):\n return f'My name is {self.name} and I am {self.age} and my balance is {self.balance}'\n\nhazel = Customer('Hazel Nornea', '[email protected]', '21')\nhazel.set_balance(500)\nprint(hazel.greeting())" }, { "alpha_fraction": 0.525519847869873, "alphanum_fraction": 0.539697527885437, "avg_line_length": 24.829267501831055, "blob_id": "3cbc2c8f2884603f5b8e1e80521e0660c516450e", "content_id": "876a9dfa3ea03cfbeaec69e4a885f6871426c935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 118, "num_lines": 41, "path": "/loops.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A for loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string).\n\n###################################\n## Description: For Loop\n###################################\npeople = ['Sarah', 'Susan', 'Hazel', 'John', 'Paul']\n\n# for person in people:\n# print(f'Current person: {person}')\n\n# Break Loop Statement\n# for person in people:\n# if person == 'Hazel':\n# break\n# 
print(f'Current person: {person}')\n\n# Continue Loop Statement\n# for person in people:\n# if person == 'Hazel':\n# continue\n# print(f'Current person: {person}')\n\n# Range\nfor i in range(len(people)):\n print(people[i])\n\nfor i in range(0, 11):\n print(f'Number: {i}')\n# Number: 1 ... Number: 2 .. Number: 10(i)\n\n\n# While loops execute a set of statements as long as a condition is true.\n\n###################################\n## Description: Create White Loop\n###################################\ncount = 0 \nwhile count<=10:\n print(f'Count: {count}')\n count +=1\n# Count: 1 ... Count: 2 ... Count: 10(count)" }, { "alpha_fraction": 0.6371994614601135, "alphanum_fraction": 0.6478076577186584, "avg_line_length": 21.79032325744629, "blob_id": "5a8c91459b2a24d661d127bb3cd450e5d7a9a9e4", "content_id": "5d33c04db2f6f89e8d9f3d1d1fe75f800b495e24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 86, "num_lines": 62, "path": "/tuples_sets.py", "repo_name": "JamesDelfini/Python-SandBox-Starter", "src_encoding": "UTF-8", "text": "# A Tuple is a collection which is ordered and unchangeable. Allows duplicate members.\n\n###################################\n## Description: Create tuples\n###################################\nfruits = ('Apples', 'Oranges', 'Grapes')\nfruits2 = tuple(('Apples', 'Oranges', 'Grapes')) # Constructor\nprint(fruits, fruits2)\nprint(type(fruits2))\n\n# A string without trailing comma\nfruits2 = ('Apples')\nprint(fruits2)\nprint(type(fruits2))\n\n# A tuple requires a trailing comma to be considered as a collection.\nfruits2 = ('Apples',)\nprint(fruits2)\nprint(type(fruits2))\n\n# Get value\nprint(fruits[1])\n\n# Can't change a value\n# fruits2[0] = 'Pears' # TypeError: 'tuple' object does not support item assignment\n\n# Delete tuple\ndel fruits2\n# print(fruits2) #NameError: name 'fruits2' is not defined\n\n# Get length Len()\nprint(len(fruits))\n\n# A Set is a collection which is unordered and unindexed. No duplicate members.\n\n###################################\n## Description: Create tuples sets\n###################################\nfruits = {'Apples', 'Oranges', 'Mango'}\nprint(fruits)\n\n# Check if In the Set\nprint('Apples' in fruits)\n\n# Add to set Add()\nfruits.add('Grape')\nprint(fruits)\n\n# Remove from set Remove()\nfruits.remove('Grape')\nprint(fruits)\n\n# Add duplicate\nfruits.add('Apples')\n\n# Clear Set Clear()\nfruits.clear()\nprint(fruits)\n\n# Deleting a set Del()\ndel fruits\n# print(fruits) NameError: name 'fruits' is not defined\n\n" } ]
13
NicolaivdSmagt/nova-docker
https://github.com/NicolaivdSmagt/nova-docker
cd66c859c19fa1ce87875e76d225f60b820573f6
2b91fe00d5a84889369e716ae4dc8e1136722545
307f1c101e970be7551d359ca8410dfc189cf5ae
refs/heads/master
2021-01-21T06:14:01.339876
2014-12-15T20:51:04
2014-12-15T20:51:04
27,880,944
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6297218799591064, "alphanum_fraction": 0.64300537109375, "avg_line_length": 34.42647171020508, "blob_id": "7916c2ad4658827414e0ee4ca9344c0da4ab7c92", "content_id": "6aef0ef82b4399ddf7c6e63400bd206226197d5a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2409, "license_type": "permissive", "max_line_length": 78, "num_lines": 68, "path": "/novadocker/virt/docker/opencontrail_api.py", "repo_name": "NicolaivdSmagt/nova-docker", "src_encoding": "UTF-8", "text": "# Copyright (C) 2014 Juniper Networks, Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom nova_contrail_vif.gen_py.instance_service import InstanceService\n\nimport uuid\nimport thrift\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.transport import TTransport\n\n\nclass OpenContrailComputeApi(object):\n def __init__(self):\n self._client = None\n\n def _rpc_client_instance(self):\n \"\"\" Return an RPC client connection \"\"\"\n import thrift.transport.TSocket as TSocket\n socket = TSocket.TSocket('127.0.0.1', 9090)\n try:\n transport = TTransport.TFramedTransport(socket)\n transport.open()\n except thrift.transport.TTransport.TTransportException:\n logging.error('Connection failure')\n return None\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n return InstanceService.Client(protocol)\n\n def _uuid_from_string(self, idstr):\n \"\"\" Convert an uuid into an array of integers \"\"\"\n if not idstr:\n return None\n hexstr = uuid.UUID(idstr).hex\n return [int(hexstr[i:i+2], 16) for i in range(32) if i % 2 == 0]\n\n def add_port(self, vm_uuid, vif_uuid, interface_name, mac_address,\n project_id=None):\n if self._client is None:\n self._client = self._rpc_client_instance()\n\n from nova_contrail_vif.gen_py.instance_service import ttypes\n data = ttypes.Port(\n self._uuid_from_string(vif_uuid),\n self._uuid_from_string(vm_uuid),\n interface_name,\n '0.0.0.0',\n [0] * 16,\n mac_address)\n\n self._client.AddPort([data])\n\n def delete_port(self, vif_uuid):\n if self._client is None:\n self._client = self._rpc_client_instance()\n\n self._client.DeletePort(self._uuid_from_string(vif_uuid))\n" }, { "alpha_fraction": 0.5804480910301208, "alphanum_fraction": 0.585539698600769, "avg_line_length": 39.356163024902344, "blob_id": "082b1478566ac85a74fbe5c373fc70a4a2094f78", "content_id": "ad3e8bb47ee763dee6ac128d133276c8af276f7e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2946, "license_type": "permissive", "max_line_length": 78, "num_lines": 73, "path": "/novadocker/virt/docker/opencontrail.py", "repo_name": "NicolaivdSmagt/nova-docker", "src_encoding": "UTF-8", "text": "# Copyright (C) 2014 Juniper Networks, Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom nova import utils\nfrom nova.openstack.common import log as logging\nfrom novadocker.virt.docker import network\n\nfrom opencontrail_api import OpenContrailComputeApi\n\nLOG = logging.getLogger(__name__)\n\n\nclass OpenContrailVIFDriver(object):\n def __init__(self):\n self._api = OpenContrailComputeApi()\n\n def plug(self, instance, vif):\n pass\n\n def attach(self, instance, vif, container_id):\n if_local_name = 'veth%s' % vif['id'][:8]\n if_remote_name = 'ns%s' % vif['id'][:8]\n\n undo_mgr = utils.UndoManager()\n\n try:\n utils.execute('ip', 'link', 'add', if_local_name, 'type', 'veth',\n 'peer', 'name', if_remote_name, run_as_root=True)\n undo_mgr.undo_with(lambda: utils.execute(\n 'ip', 'link', 'delete', if_local_name, run_as_root=True))\n\n utils.execute('ip', 'link', 'set', if_remote_name, 'address',\n vif['address'], run_as_root=True)\n utils.execute('ip', 'link', 'set', if_remote_name, 'mtu',\n '1450', run_as_root=True)\n utils.execute('ip', 'link', 'set', if_remote_name, 'netns',\n container_id, run_as_root=True)\n\n self._api.add_port(instance['uuid'], vif['id'], if_local_name,\n vif['address'],\n project_id=instance['project_id'])\n utils.execute('ip', 'link', 'set', if_local_name, 'up',\n run_as_root=True)\n except:\n LOG.exception(\"Failed to configure network\")\n msg = _('Failed to setup the network, rolling back')\n undo_mgr.rollback_and_reraise(msg=msg, instance=instance)\n\n # TODO: attempt DHCP client; fallback to manual config if the\n # container doesn't have an working dhcpclient\n utils.execute('ip', 'netns', 'exec', container_id, 'dhclient',\n if_remote_name, run_as_root=True)\n\n def unplug(self, instance, vif):\n try:\n self._api.delete_port(vif['id'])\n except Exception:\n LOG.exception(_(\"Delete port failed\"), instance=instance)\n\n if_local_name = 'veth%s' % vif['id'][:8]\n utils.execute('ip', 'link', 'delete', if_local_name, run_as_root=True)\n" } ]
2
j0nnnnn0/opencv
https://github.com/j0nnnnn0/opencv
daaf5d7a5badbab58869c62ab91ad5fe29f3f1c7
3914ffa08db6ea7ff105566bd9768feff335d212
00f73d2b1caf68d59ab2db6319b1e7d9dfc67844
refs/heads/master
2023-08-29T02:56:29.666058
2021-10-24T18:12:09
2021-10-24T18:12:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7291666865348816, "avg_line_length": 18.25, "blob_id": "47de06221a54350e73c0407538adc06d3015d48d", "content_id": "22b3b71ffd84c9608f883ee321b8ed3bf41ae531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/chapter3.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n\n#import photo\nimg = cv2.imread(\"resources/lambo.png\")\nprint(img.shape)\n\n# Resize an image\nimgResize = cv2.resize(img, (300,200)) #width, height\nprint(imgResize.shape)\n\ncv2.imshow(\"Image\", img)\ncv2.imshow(\"Image Resize\", imgResize)\n\n# Crop an image\nimgCropped = img[0:200, 200:500] # height, width\ncv2.imshow(\"Image Cropped\", imgCropped)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6557842493057251, "alphanum_fraction": 0.6976579427719116, "avg_line_length": 28.29166603088379, "blob_id": "84b2abe5075a45426fe28c5a13073b0c1857c9c0", "content_id": "69f2ca2aeddb2ac417d8080a327e052229f6de5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 82, "num_lines": 48, "path": "/chapter1.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n#import an imagine and display with infinite wait time\n#img = cv2.imread(\"resources/lena.png\")\n# cv2.imshow(\"Output\", img)\n# cv2.waitKey(0)\n\n#import a video\n#cap = cv2.VideoCapture(\"resources/test_video.mp4\")\n\n# # import webcam\n# cap = cv2.VideoCapture(0) # id of webcam\n# cap.set(3, 640) # set width\n# cap.set(4, 480) # set height\n# cap.set(10, 100) # set the brightness\n\n# # as a video is a set of photos, we use a loop to display each photos in a video\n# # we set q to quit\n# while True:\n# success, img = cap.read()\n# cv2.imshow(\"Video\", img)\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# break\n\n#import photo\nimg = cv2.imread(\"resources/lena.png\")\n\n# set kernel for image dialation\nkernel = np.ones((5,5), np.uint8) # values can be from 0 to 255 (grey scale)\n\n#set to grey scale\nimgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Blur function for grey image\nimgBlur = cv2.GaussianBlur(imgGray, (7, 7),0) # set blurness, has to be odd number\n# edge detector (canny - a reversed image)\nimgCanny = cv2.Canny(img, 100, 100)\n# dialate\nimgDialation = cv2.dilate(imgCanny, kernel, iterations=1)\n# Erode\nimgErode = cv2.erode(imgDialation, kernel, iterations=1)\n\ncv2.imshow(\"Gray Image\", imgGray)\ncv2.imshow(\"Blur Image\", imgBlur)\ncv2.imshow(\"Canny Image\", imgCanny)\ncv2.imshow(\"Dialation Image\", imgDialation)\ncv2.imshow(\"Erosion Image\", imgErode)\ncv2.waitKey(0)\n\n\n\n" }, { "alpha_fraction": 0.7104762196540833, "alphanum_fraction": 0.7314285635948181, "avg_line_length": 20.040000915527344, "blob_id": "d0c83906a2dc8fa06e11e871be1ed4ee8c70b58d", "content_id": "b4c085dee09389b9bc9ad9d85abd640ec19215d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 68, "num_lines": 25, "path": "/chapter6.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nfrom chapter6_stackImages import stackImages\n\n# Join images together \n\nimg = cv2.imread('resources/lena.png')\n# cv2.imshow(\"image\", 
img)\n\n# #Stacking img horizontally\n# imgHor = np.hstack((img,img))\n\n# # Stacking img vertically\n# imgVer = np.vstack((img,img))\n\n# Using the function stackImages to handle images of different types\nimgStack = stackImages(0.5,([img,img,img]))\n\n# cv2.imshow(\"Horizontal\", imgHor)\n# cv2.imshow(\"Vertical\", imgVer)\ncv2.imshow(\"ImageStack\", imgStack)\n\n\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6556671261787415, "alphanum_fraction": 0.7274031639099121, "avg_line_length": 28.08333396911621, "blob_id": "e7bc4b5ff3ec272335148b85dc4868fafa9c1758", "content_id": "6c30c2a2fb7da0f9a4afb242ba8d437e1e26cd8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/chapter5.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n# Warp perspective (to get birds eye view)\n\nimg = cv2.imread(\"resources/cards.jpg\")\n\n# Define the width and height of the image\nwidth, height = 250,350\n\n#Declare points of the part of the image we want to capture\n# this is the points of the king of spades\npts1 = np.float32([[111,219], [287,188],[154,482], [352,440]])\n# Define the points origin\npts2 = np.float32([[0,0], [width,0], [0,height], [width,height]])\n# Set the matrix of the transform\nmatrix = cv2.getPerspectiveTransform(pts1,pts2)\n# Define the output of the warped image\nimgOutput = cv2.warpPerspective(img,matrix,(width,height))\n\ncv2.imshow(\"image\", img)\ncv2.imshow(\"Output\", imgOutput)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5396825671195984, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26.035715103149414, "blob_id": "5dc4ad62fdc2a1da6020c3fa55e04ca4558cd8a2", "content_id": "fb81847b4799deb59b418d8ba23f1551c91e98fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 118, "num_lines": 28, "path": "/chapter4.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n# Draw shapes and text on images\n\n#Set color of image\nimg = np.zeros((512,512,3),np.uint8) # 0 to 255 (RGB)\n#print(img.shape)\n# img[:] = 255,0,0 #Set whole image in blue for width:height\n\n#Create a line\n#cv2.line(img,(0,0), (300,300), (0,255,0), 3) # start, end, colour, thickness\ncv2.line(img,(0,0), (img.shape[1],img.shape[0]), (0,255,0), 3) # using the img.shape for start, end, colour, thickness\n\n# Create a rectangle and fill it\ncv2.rectangle(img,(0,0), (250,350), (0,0,255), cv2.FILLED)\n\n# Create a circle and fill it\ncv2.circle(img, (200,400), 105, (160,160,0), cv2.FILLED)\n\n# Add text to image\ncv2.putText(img, \"OPENCV \", (300,100),cv2.FONT_HERSHEY_COMPLEX,1.2,(0,150,0),3)\n\n\ncv2.imshow(\"Image\", img)\n\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.48570042848587036, "alphanum_fraction": 0.5676199793815613, "avg_line_length": 42, "blob_id": "6929c914bab9249a2b000963d14c93b42182223a", "content_id": "67e141d0e7febdc5f1918676bcc81ecdcc474d05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 175, "num_lines": 48, "path": "/chapter6_stackImages.py", "repo_name": "j0nnnnn0/opencv", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n \n \ndef stackImages(scale,imgArray):\n rows = len(imgArray)\n cols = len(imgArray&#91;0])\n 
rowsAvailable = isinstance(imgArray&#91;0], list)\n width = imgArray&#91;0]&#91;0].shape&#91;1]\n height = imgArray&#91;0]&#91;0].shape&#91;0]\n if rowsAvailable:\n for x in range ( 0, rows):\n for y in range(0, cols):\n if imgArray&#91;x]&#91;y].shape&#91;:2] == imgArray&#91;0]&#91;0].shape &#91;:2]:\n imgArray&#91;x]&#91;y] = cv2.resize(imgArray&#91;x]&#91;y], (0, 0), None, scale, scale)\n else:\n imgArray&#91;x]&#91;y] = cv2.resize(imgArray&#91;x]&#91;y], (imgArray&#91;0]&#91;0].shape&#91;1], imgArray&#91;0]&#91;0].shape&#91;0]), None, scale, scale)\n if len(imgArray&#91;x]&#91;y].shape) == 2: imgArray&#91;x]&#91;y]= cv2.cvtColor( imgArray&#91;x]&#91;y], cv2.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = &#91;imageBlank]*rows\n hor_con = &#91;imageBlank]*rows\n for x in range(0, rows):\n hor&#91;x] = np.hstack(imgArray&#91;x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray&#91;x].shape&#91;:2] == imgArray&#91;0].shape&#91;:2]:\n imgArray&#91;x] = cv2.resize(imgArray&#91;x], (0, 0), None, scale, scale)\n else:\n imgArray&#91;x] = cv2.resize(imgArray&#91;x], (imgArray&#91;0].shape&#91;1], imgArray&#91;0].shape&#91;0]), None,scale, scale)\n if len(imgArray&#91;x].shape) == 2: imgArray&#91;x] = cv2.cvtColor(imgArray&#91;x], cv2.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n ver = hor\n return ver\n \nimg = cv2.imread('Resources/lena.png')\nimgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n \nimgStack = stackImages(0.5,(&#91;img,imgGray,img],&#91;img,img,img]))\n \n# imgHor = np.hstack((img,img))\n# imgVer = np.vstack((img,img))\n#\n# cv2.imshow(\"Horizontal\",imgHor)\n# cv2.imshow(\"Vertical\",imgVer)\ncv2.imshow(\"ImageStack\",imgStack)\n \ncv2.waitKey(0)" } ]
6
Raw-Nozzite/NozzleBot
https://github.com/Raw-Nozzite/NozzleBot
6b9767d66c76c05db02e1e52467d3abf59221038
ba57b34e35ef6da4338f30a3047697eb0512508d
60af770f39db2bea1dbc02cf6d9f6ff043cbc238
refs/heads/master
2020-04-05T21:37:54.045120
2018-11-19T10:48:09
2018-11-19T10:48:09
157,227,376
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6277034282684326, "alphanum_fraction": 0.6519052386283875, "avg_line_length": 28.89230728149414, "blob_id": "fe2bb49fd6855c277a43d2571027ac8762c13bb2", "content_id": "adf704192cff33803707858d266b10edc5d23a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1942, "license_type": "no_license", "max_line_length": 107, "num_lines": 65, "path": "/code.py", "repo_name": "Raw-Nozzite/NozzleBot", "src_encoding": "UTF-8", "text": "import discord.ext.commands as discord\nimport discord.utils as discordutils\nimport random #Literally only used once\nimport asyncio\nblisted = []\n\nwith open(\"discord-token.txt\",mode=\"r\") as f:\n token = f.readlines()[0]\nbot = discord.Bot(\"..\")\n\[email protected]() \nasync def ping(ctx):\n \"\"\"Test command for bot responsiveness\"\"\"\n botSpeech = random.randint(1,2)\n if botSpeech == 1:\n await ctx.channel.send(\"Pong!\")\n else:\n await ctx.channel.send(\"I don't play table tennis. I bet you watched the anime though.\")\n\[email protected]()\nasync def del_channel(ctx, *args): \n \"\"\"Instantly deletes a specified channel\"\"\"\n if for ctx2 in args:\n await bot.get_channel(int(ctx2[2:-1])).delete()\n else:\n await ctx.channel.send(\"Please specify a channel!\")\n\[email protected]()\nasync def bal(ctx):\n \"\"\"Will eventually print the user's balance\"\"\"\n await ctx.channel.send(\"0\")\n \[email protected]()\nasync def devs(ctx):\n \"\"\"Print the devs of the bot (Hard coded fight me Bax)\"\"\"\n await ctx.channel.send(\"\"\"```- Raw-Nozzite for Design and Programming, and being an excellent person :)\n- ZomBMage for BETTER programming```\"\"\")\n\[email protected]()\nasync def invite(ctx):\n \"\"\"Return a link to invite the bot, in case of emergency ;)\"\"\"\n await ctx.channel.send(discordutils.oauth_url(\"511255939806920755\"))\n\[email protected]()\nasync def kys(ctx):\n \"\"\"Makes bot leave server\"\"\"\n await ctx.channel.send(\"Guess I'll die then\")\n await bot.get_guild(ctx.guild.id).leave()\n\[email protected]()\nasync def roulette(ctx):\n \"\"\"Get a surprise!\"\"\"\n with open(\"randomLine.txt\") as f:\n allLines = f.readlines()\n await ctx.channel.send(random.choice(allLines))\n\[email protected]\nasync def on_ready():\n print(\"Logged in as:\")\n print(bot.user.name)\n print(bot.user.id)\n print(\"~\"*50)\n print(\"Invite with:\",discordutils.oauth_url(\"511255939806920755\"))\n\nbot.run(token)" } ]
1
Hugoyu-ops/django-wep-app
https://github.com/Hugoyu-ops/django-wep-app
0b1f933fdcf8bf1644f26d5bd7efd6c4dd02c55a
9c4c3c1a9460d74b1489ed277b89dad55f8e4d9b
92070d363f130b20bd816b22dde1b81ea4bd4b0a
refs/heads/master
2021-06-29T04:19:34.912856
2020-01-25T09:08:49
2020-01-25T09:08:49
237,641,807
1
0
null
2020-02-01T16:19:53
2020-02-04T15:19:14
2021-06-10T22:32:41
Python
[ { "alpha_fraction": 0.6856368780136108, "alphanum_fraction": 0.6937669515609741, "avg_line_length": 34.14285659790039, "blob_id": "d6105c705a0df3fff9be536a47f54f50cf4a8121", "content_id": "fe365a7b916c2f6a144293d03bf9b774e7b56077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 91, "num_lines": 21, "path": "/movies/views.py", "repo_name": "Hugoyu-ops/django-wep-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom airtable import Airtable\nimport os\n\nAIRTABLE_MOVIESTABLE_BASE_ID='appnmJlwnGQvu1iLY'\nAIRTABLE_API_KEY='key2fEpOGmPi9WJki'\n\n\n# Create your views here.\nAT = Airtable(os.environ.get('AIRTABLE_MOVIESTABLE_BASE_ID', AIRTABLE_MOVIESTABLE_BASE_ID),\n 'Table%201',\n api_key=os.environ.get('AIRTABLE_API_KEY', AIRTABLE_API_KEY),\n )\n\n\ndef home_page(request):\n user_query = str(request.GET.get('query',''))\n search_result = AT.get_all(formula=\"FIND('\" + user_query.lower() + \"', LOWER({Name}))\")\n start_for_frontend ={'search_result':search_result}\n return render(request, 'movies/movies_stuff.html', start_for_frontend)\n" } ]
1
shixiaobo8/flask_blog
https://github.com/shixiaobo8/flask_blog
a1f98fcbd7b5fa904ef4cfd84ff2912e07451d7b
8be93e76c659648b32036df3d55d6a66d48e8a89
60e332e7004bbe19ceade64734a6386e30de0050
refs/heads/master
2020-04-05T18:09:37.507108
2018-12-01T07:12:45
2018-12-01T07:12:45
85,276,958
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6779999732971191, "alphanum_fraction": 0.6919999718666077, "avg_line_length": 21.988506317138672, "blob_id": "eadb127f50f467e15f1c1b58da4361ba3d8ebffa", "content_id": "080a7edb64935226c2e4937bd4e7d85e8aedbb6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2262, "license_type": "no_license", "max_line_length": 81, "num_lines": 87, "path": "/manage.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\nfrom flask import jsonify,make_response,request,g\nfrom devops_blog import create_app\nfrom flask_script import Manager,Server,Shell\nfrom devops_blog.commands import Hello\nfrom flask_migrate import MigrateCommand\n# 导入自定义错误类\nfrom devops_blog.utils import CustomFlaskErr\n# 导入所有migrate需要操作的model 类\nfrom devops_blog.blog.models import *\nimport os\nimport werkzeug\n\ndef _make_context(app):\n return dict(app=app)\n\n\n# 创建一个flask app 应用 并初始化\nblog_app = create_app(flask_env=os.getenv('FLASK_ENV') or 'default')\n\n\n#初始化shell\ndef _make_context():\n return dict(app=blog_app)\n\n\nmanager = Manager(blog_app)\n# 添加shell 命令交互器\nmanager.add_command(\"shell\", Shell(make_context=_make_context),user_ipython=True)\n# 添加hello 打印命令测试\nmanager.add_command(\"hello\", Hello())\n# 添加执行server命令\nmanager.add_command(\"runserver\",Server())\n# 添加数据 migrate 工具\nmanager.add_command('db',MigrateCommand)\n\n\n# 自定义错误\n@blog_app.errorhandler(CustomFlaskErr)\ndef handle_flask_error(error):\n # response 的 json 内容为自定义错误代码和错误信息\n response = jsonify(error.to_dict())\n # response 返回 error 发生时定义的标准错误代码\n response.status_code = error.status_code\n return response\n\n\n# 404 错误\n@blog_app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n# 500 错误\n@blog_app.errorhandler(500)\ndef inter_server_error(e):\n blog_app.logger.error('error 500: %s', e)\n return make_response(jsonify({'error': 'server internet error'}), 500)\n\n\n# 应用情景\n# 第一次请求的时候做初始化导航栏nav数据\n@blog_app.before_first_request\ndef print_request_info():\n blog_app.logger.info(\"启动项目中,正在初始化导航栏nav....\")\n navs = Nav.query.all()\n g.navs= navs\n print(\"=====\")\n print(navs)\n\n\n#def get_db():\n# if 'db' not in g:\n# g.db = connect_to_database()\n# return g.db\n\n#@blog_app.teardown_appcontext\n#def teardown_db():\n# db = g.pop('db', None)\n# if db is not None:\n# db.close()\n\n\nif __name__ == \"__main__\":\n manager.run()\n# blog_app.run(debug=True,host='0.0.0.0',port=89)\n" }, { "alpha_fraction": 0.6954545378684998, "alphanum_fraction": 0.7018181681632996, "avg_line_length": 24, "blob_id": "6dff0092d5757bbe42ef4dd3a39d308716e5a88f", "content_id": "e3080537fd5b5ce3334ca9c15889d19075bb8798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1290, "license_type": "no_license", "max_line_length": 63, "num_lines": 44, "path": "/blog_start.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n\tflask 启动参数\n\"\"\"\n# 导入Falsk 应用\nfrom flask import Flask\n# 导入模板应用\nfrom flask import render_template\n# 导入falsk bootstrap 应用\nfrom flask_bootstrap import Bootstrap\n# 导入 sqlAlchemy python orm 模块\nfrom flask_sqlalchemy import SQLAlchemy\n# 导入 sqlAlchemy mongo orm 模块\nfrom flask_mongoalchemy import MongoAlchemy\n# 导入配置文件全局环境变量\nfrom config import mysql_db_url,mongo_db_url\n\n# 定义一个创建app应用并且初始化的方法\ndef create_app():\n # 创建flask应用\n app = 
Flask(__name__)\n # 初始化bootstrap\n Bootstrap(app)\n # 初始化mysql数据库连接\n app.config['SQLALCHEMY_DATABASE_URI'] = mysql_db_url\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n mysql_db = SQLAlchemy(app)\n # 初始化mongo数据库连接\n app.config['MONGOALCHEMY_CONNECTION_STRING'] = mongo_db_url\n app.config['MONGOALCHEMY_DATABASE'] = 'flask_blog'\n mongo_db = MongoAlchemy(app)\n return app\n\n# 创建一个flask app 应用\nblog_app = create_app()\n\n# 创建一个视图应用,博客首页\n@blog_app.route('/')\ndef index():\n return render_template('base/base.html')\n\nif __name__ == \"__main__\":\n blog_app.run(debug=True,host='0.0.0.0',port=89)\n" }, { "alpha_fraction": 0.7463617324829102, "alphanum_fraction": 0.7546777725219727, "avg_line_length": 24.3157901763916, "blob_id": "3e08cfce3e51a5d26e92d569124e4d1b26e031dd", "content_id": "c9e80e40749d829cb28b1d3c9740b45956b841de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/devops_blog/backend/__init__.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\nfrom flask import Blueprint\n\nbackend = Blueprint('backend',__name__)\n\nfrom . import views,api_resources\nfrom flask import make_response\nfrom flask_restful import Resource,Api\nfrom .api_resources import ApiTest,NavApi,NavListApi\nimport json\n\n# 注册蓝图api\napi = Api(backend,catch_all_404s=True)\n\n# 添加api 可插拔式路由\napi.add_resource(ApiTest,'/apitest')\napi.add_resource(NavApi,'/Nav','NavApi')\napi.add_resource(NavListApi,'/NavList','NavListApi')\n" }, { "alpha_fraction": 0.48275861144065857, "alphanum_fraction": 0.568965494632721, "avg_line_length": 25.363636016845703, "blob_id": "2e51a342e9103c1066f708acef689020753766c4", "content_id": "e8bd3c092e11a663c5f0635ae1d2439583ba2859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 87, "num_lines": 11, "path": "/devops_blog/scripts/settings.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n flask 自动转换\n\"\"\"\n\nSQLALCHEMY_DATABASE_URI = 'flask_blog': 'pymysql://root:[email protected]/flask_blog', \nSQLALCHEMY_BINDS = { \n 'flask_blog': 'pymysql://root:[email protected]/flask_blog', \n } \nTABLE_PREFIX = 'fb_'\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 12, "blob_id": "3a45255c7359d0654089db512d3a2a6ee4dff19b", "content_id": "6b9808628f9620a17372424ea1e45078b424fe9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/README.md", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "# flask_blog\n基于flask的个人博客\n\n" }, { "alpha_fraction": 0.7230428457260132, "alphanum_fraction": 0.723781406879425, "avg_line_length": 26.079999923706055, "blob_id": "5bc4362a18b952e2c1fe8ca6e6c5bbf3f72fe79e", "content_id": "af0fe3274f86fc97e869c6bc740a1a0118529ae2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "no_license", "max_line_length": 107, "num_lines": 50, "path": "/devops_blog/__init__.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 
-*-\n\"\"\"\n\tflask 启动参数\n\"\"\"\n# 导入Falsk 应用\nfrom flask import Flask, jsonify\n# 导入模板应用\nfrom flask import render_template\n# 导入falsk bootstrap 应用\nfrom flask_bootstrap import Bootstrap\n# 导入 sqlAlchemy python orm 模块\nfrom flask_sqlalchemy import SQLAlchemy\n# 导入 sqlAlchemy mongo orm 模块\nfrom flask_mongoalchemy import MongoAlchemy\n# 导入 falsk_migrate \nfrom flask_migrate import Migrate\n# 导入flask restfull api\nfrom flask_restful import Api\n# 导入配置文件全局环境变量\nfrom devops_blog.config import flask_env_config\nimport os\n\n# 初始化mysql数据库连接\nmysql_db = SQLAlchemy()\n# 初始化mongo数据库连接\nmongo_db = MongoAlchemy()\nmigrate = Migrate()\n\n# 定义一个创建app应用并且初始化的方法\ndef create_app(flask_env='default'):\n # 创建flask应用\n app = Flask(__name__)\n app.config.from_object(flask_env_config[flask_env])\n # 初始化bootstrap\n Bootstrap(app)\n # 初始化mysql数据库连接\n mysql_db.init_app(app)\n # 初始化mongo数据库连接\n mongo_db.init_app(app)\n # 初始化mysql据库migrate \n migrate.init_app(app,mysql_db,directory=os.path.abspath(os.path.dirname(__file__))+os.sep+\"migrations\")\n # 导入自定义蓝图模块\n from .blog import blog as blog_bp\n # 注册蓝图 \n app.register_blueprint(blog_bp,url_prefix='/blog')\n app.add_url_rule('/',endpoint='blog.index')\n from .backend import backend as backend_bp\n app.register_blueprint(backend_bp,url_prefix='/houtai')\n return app\n" }, { "alpha_fraction": 0.5963281989097595, "alphanum_fraction": 0.5985947251319885, "avg_line_length": 31.681482315063477, "blob_id": "268a166a90ee368fa9089f341f1659bc9695d36c", "content_id": "d07d5db9dd7000bc137bbda6ccf49c48b77e8f27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4864, "license_type": "no_license", "max_line_length": 130, "num_lines": 135, "path": "/devops_blog/backend/api_resources.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n\trestful api route 类和类注册文件\n\"\"\"\nfrom flask_restful import abort,Resource,reqparse,fields,marshal_with,marshal\nfrom ..blog.models import Nav,subNav\nfrom .. 
import mysql_db\nfrom ..utils import generate_response,CustomFlaskErr\nfrom flask import request,current_app\nimport json\n\n# 测试api 类\nclass ApiTest(Resource):\n # 参数检查\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('title', type = str, required = True,\n help = 'No task title provided', location = 'args')\n self.reqparse.add_argument('description', type = str, default = \"\", location = 'json')\n super(ApiTest, self).__init__()\n \n def get(self):\n args = self.reqparse.parse_args(strict=True)\n return {\"data\":\"this is apitest\",'title':args['title']}\n \n def post(self):\n return {\"data\":\"this is apitest\"}\n\n\n# 导航栏 单个处理类\nclass NavApi(Resource):\n # 请求参数处理\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.args = self.reqparse.parse_args()\n # 获取request json 参数\n self.json_args = request.json\n super(NavApi,self).__init__()\n\n # 查询单个\n def get(self):\n return self.args\n \n \n # 修改一个\n def post(self):\n pass\n\n # 新增一个\n def put(self):\n # 获取request json 参数\n json_args = self.json_args\n # 获取二级导航栏参数\n sNavs = json_args['sNavs']\n # 查询是否含有相同的菜单名称和url\n exists_fName = Nav.query.filter_by(navTitle=json_args['fNavName']).first()\n exists_fUrl = ''\n if json_args['fNavUrl'] != '':\n exists_fUrl = Nav.query.filter_by(navUrl=json_args['fNavUrl']).first()\n if exists_fUrl or exists_fName:\n current_app.logger.error(\"查询出错: 已存在相同的一级导航栏信息\")\n raise CustomFlaskErr(\"vAlreadyExistsError1\")\n else:\n # 先添加一级菜单,然后关联二级菜单\n firstNav = Nav(json_args['fNavName'],json_args['fNavUrl'],int(json_args['type']),int(json_args['navPris'][0]))\n for secondNav in sNavs:\n # 检查是否含有存在的二级菜单\n exists_sNav = subNav.query.filter_by(title=secondNav['sNavName'],nav_url=secondNav['sNavUrl']).first()\n if exists_sNav:\n current_app.logger.error(\"查询出错: 已存在相同的二级导航栏信息\")\n raise CustomFlaskErr(\"NavAlreadyExistsError2\")\n else:\n sub_nav = subNav(secondNav['sNavName'],secondNav['sNavUrl'])\n mysql_db.session.add(sub_nav)\n # 关联二级菜单\n firstNav.subnavs.append(sub_nav)\n mysql_db.session.add(firstNav)\n mysql_db.session.commit()\n return generate_response()\n\n\n # 删除一个\n def delete(self):\n pass\n\n\n# 输出字段\nsubNav_fields = {\n 'subtitle':fields.String(attribute='title'),\n 'suburl': fields.String(attribute='nav_url'),\n}\n\nNav_fields = {\n 'id':fields.Integer,\n 'title':fields.String(attribute='navTitle'),\n 'type':fields.Integer(attribute='navType'),\n 'url': fields.String(attribute='navUrl'),\n 'subnavs': fields.List(fields.Nested(subNav_fields,allow_null=True,default=''),default='')\n}\n\n# 导航栏nav 处理列表类\nclass NavListApi(Resource):\n \n # 请求参数处理\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('page', type = int, default=1, location='args',help='第几个分页')\n self.reqparse.add_argument('limit', type = int, default=10 , location='args',help='每页显示多少')\n super(NavListApi,self).__init__()\n\n # 获取(查询)nav 列表\n #@marshal_with(Nav_fields,envelope='Nav')\n def get(self):\n args = self.reqparse.parse_args()\n page_index = args['page']\n page_size = args['limit']\n # 先获取所有满足条件的nav isouter=True 表示left join all 方法得到一个列表,这里不使用paginate,会报错\n navall = mysql_db.session.query(Nav).filter(Nav.is_del==0).join(subNav,Nav.id==subNav.nav_Id,isouter=True).all()\n # 分页\n page_navs = navall[(page_index-1)*page_size:page_size*page_index]\n return {'code':0,'count':len(navall),'cureent_page':page_index,\"page_size\":page_size,'data':marshal(page_navs,Nav_fields)}\n\n # 修改多个nav列表\n def post(self):\n return 'post test'\n\n # 
增加多个列表\n def put(self):\n args = self.reqparse.parse_args()\n return {\"data\":args}\n \n # 删除多个列表\n def delete(self):\n pass\n" }, { "alpha_fraction": 0.3387138247489929, "alphanum_fraction": 0.36869075894355774, "avg_line_length": 38.42424392700195, "blob_id": "89b1f6304498b18c31cfcb9ba5a61b81e57ccc5b", "content_id": "a1b61d569b0bf29deaecc02fb15559aa1d0fdb59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8608, "license_type": "no_license", "max_line_length": 299, "num_lines": 198, "path": "/devops_blog/backend/views.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\nfrom flask import render_template,request, make_response, url_for,current_app,jsonify,g\nfrom . import backend\nimport os,datetime,random\nfrom .models import Post,Category\n\n\njson_test={\n \"code\": 0\n ,\"msg\": \"\"\n ,\"count\": 3000000\n ,\"data\": [{\n \"id\": \"10001\"\n ,\"username\": \"杜甫\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"点击此处,显示更多。当内容超出时,点击单元格会自动显示更多内容。\"\n ,\"experience\": \"116\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"108\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10002\"\n ,\"username\": \"李白\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"君不见,黄河之水天上来,奔流到海不复回。 君不见,高堂明镜悲白发,朝如青丝暮成雪。 人生得意须尽欢,莫使金樽空对月。 天生我材必有用,千金散尽还复来。 烹羊宰牛且为乐,会须一饮三百杯。 岑夫子,丹丘生,将进酒,杯莫停。 与君歌一曲,请君为我倾耳听。(倾耳听 一作:侧耳听) 钟鼓馔玉不足贵,但愿长醉不复醒。(不足贵 一作:何足贵;不复醒 一作:不愿醒/不用醒) 古来圣贤皆寂寞,惟有饮者留其名。(古来 一作:自古;惟 通:唯) 陈王昔时宴平乐,斗酒十千恣欢谑。 主人何为言少钱,径须沽取对君酌。 五花马,千金裘,呼儿将出换美酒,与尔同销万古愁。\"\n ,\"experience\": \"12\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n ,\"LAY_CHECKED\": True\n }, {\n \"id\": \"10003\"\n ,\"username\": \"王勃\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"65\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10004\"\n ,\"username\": \"李清照\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"女\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"666\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10005\"\n ,\"username\": \"冰心\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"女\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"86\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10006\"\n ,\"username\": \"贤心\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"12\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10007\"\n ,\"username\": \"贤心\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"16\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }, {\n \"id\": \"10008\"\n ,\"username\": \"贤心\"\n ,\"email\": \"[email protected]\"\n ,\"sex\": \"男\"\n ,\"city\": \"浙江杭州\"\n ,\"sign\": \"人生恰似一场修行\"\n ,\"experience\": \"106\"\n ,\"ip\": \"192.168.0.8\"\n ,\"logins\": \"106\"\n ,\"joinTime\": \"2016-10-14\"\n }]\n } \n\n\[email protected]('/list_comment',methods=('GET','POST'))\ndef get_list_comments():\n return jsonify(json_test)\n\n\[email protected]('/addNavModal.html')\ndef test():\n 
return render_template('houtai/addNavModal.html')\n\n\[email protected]('/addFNav',methods=('POST',))\ndef addFNavs1():\n res = dict()\n res['code'] = 200\n res['data'] = 'ok'\n return jsonify(res)\n\n\[email protected]('/list_navs',methods=('GET','POST'))\ndef listNavs():\n print(g)\n return jsonify(g.get('navs'))\n\n\[email protected]('/system_settings')\ndef sys_settings():\n return render_template('houtai/system.html')\n\n\[email protected]('/')\ndef index():\n return render_template(\"houtai/index.html\")\n\n\[email protected]('/comment')\ndef comment_list():\n return render_template(\"blog/comment_list.html\")\n\n\ndef gen_rnd_filename():\n filename_prefix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n return '%s%s' % (filename_prefix, str(random.randrange(1000, 10000)))\n\n\[email protected]('/edit',methods=('GET','POST'))\ndef edit():\n error = ''\n url = ''\n filename = ''\n \"\"\"CKEditor file upload\"\"\"\n callback = request.args.get(\"CKEditorFuncNum\")\n if request.method == 'GET': \n \treturn render_template('blog/edit.html')\n elif request.method == 'POST' and 'upload' not in request.files:\n title = request.form.get('subject')\n data = request.form.get('content')\n post = Post(title=title,body=data)\n category = Category()\n post.save()\n response = make_response(data)\n response.headers[\"Content-Type\"] = \"text/html\"\n return response\n elif request.method == 'POST' and 'upload' in request.files:\n fileobj = request.files['upload']\n fname, fext = os.path.splitext(fileobj.filename)\n rnd_name = '%s%s' % (gen_rnd_filename(), fext)\n filepath = os.path.join(current_app.static_folder, 'upload', rnd_name)\n # 检查路径是否存在,不存在则创建\n dirname = os.path.dirname(filepath)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except:\n error = 'ERROR_CREATE_DIR'\n elif not os.access(dirname, os.W_OK):\n error = 'ERROR_DIR_NOT_WRITEABLE'\n if not error:\n fileobj.save(filepath)\n filename = '%s/%s' % ('upload', rnd_name)\n url = url_for('static', filename=filename)\n else:\n error = 'post error'\n res = \"\"\"\n\n<script type=\"text/javascript\">\n window.parent.CKEDITOR.tools.callFunction(%s, '%s', '%s');\n</script>\n\n\"\"\" % (callback, url, error)\n response = make_response(res)\n response.headers[\"Content-Type\"] = \"text/html\"\n return jsonify(uploaded=1, url=url, filename=filename)\n #return response\n" }, { "alpha_fraction": 0.49219202995300293, "alphanum_fraction": 0.5135916471481323, "avg_line_length": 30.059879302978516, "blob_id": "f60a0b8279079cbdd0f6162352de85c800d3a10b", "content_id": "a56199f6477e545e9fed6605302780740882704d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5759, "license_type": "no_license", "max_line_length": 164, "num_lines": 167, "path": "/devops_blog/config.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n\t配置文件(官方文档中提倡的有效配置文件写法)\n\t 参考:https://dormousehole.readthedocs.io/en/latest/config.html\n\t 使用config.py 对象的规则:\n\t\t1. app加载的时候使用 app.config.from_object('configmodule.ProductionConfig')\n\t \t2. 
Config 对象中必须的变量名称必须大写开头\n\"\"\"\nfrom logging.config import dictConfig\nimport os\nimport time\nimport logging\n \nbasedir = os.path.abspath(os.path.dirname(__file__))\nlog_path = basedir+os.sep+'logs'+ os.sep\nif not os.path.exists(log_path):\n os.makedirs(log_path)\n\n# 日志格式配置\ndictConfig({\n 'version': 1,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s [%(threadName)s:%(thread)d in %(module)s:] [task_id:%(name)s] [%(name)s:%(lineno)d] [%(levelname)s]- %(message)s'\n },\n 'file_access': {\n 'format': '%(asctime)s [%(threadName)s:%(thread)d in %(module)s:] [task_id:%(name)s] [%(name)s:%(lineno)d] [%(levelname)s]- %(message)s'\n },\n 'file_error': {\n 'format': '%(asctime)s [%(threadName)s:%(thread)d in %(module)s:] [task_id:%(name)s] [%(name)s:%(lineno)d] [%(levelname)s]- %(message)s'\n },\n },\n 'handlers': {\n 'wsgi': {\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://flask.logging.wsgi_errors_stream',\n 'formatter': 'default'\n },\n 'file_access_handler':{\n 'class': 'logging.handlers.RotatingFileHandler', # 自动切割\n 'maxBytes': 1024 * 1024 * 20, # 20m 一个日志文件\n 'filename': os.path.join(log_path, time.strftime('%Y_%m_%d',time.localtime())+\"_access.log\"), # 日志文件\n 'backupCount': 50, # 最多备份几个\n 'level': 'DEBUG',\n 'formatter': 'file_access', \n 'encoding': 'utf8'\n },\n 'file_error_handler':{\n 'class': 'logging.handlers.RotatingFileHandler', # 自动切割\n 'maxBytes': 1024 * 1024 * 20, # 20m 一个日志文件\n 'filename': os.path.join(log_path, time.strftime('%Y_%m_%d',time.localtime())+\"_error.log\"), # 日志文件\n 'backupCount': 50, # 最多备份几个\n 'level': 'ERROR',\n 'formatter': 'file_error', \n 'encoding': 'utf8'\n }\n },\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['wsgi','file_access_handler','file_error_handler']\n }\n})\n\nclass Config:\n SQLALCHEMY_RECORD_QUERIES = True\n SQLALCHEMY_NATIVE_UNICODE = 'utf8' \n\n\n# 生产环境配置\nclass ProductionConfig(Config):\n ENV = 'production'\n # 调试模式\n DEBUG = False\n # 测试模式\n TESTING = False\n # mongodb 连接\n MONGOALCHEMY_CONNECTION_STRING='mongodb://127.0.0.1:27017/flask_blog'\n MONGOALCHEMY_DATABASE = 'flask_blog'\n # mysql 连接\n SQLALCHEMY_DATABASE_URI='mysql+pymysql://root:123456@localhost:3306/flask_blog'\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n # 域名配置\n SERVER_NAME = 'demo.devops89.cn'\n # 方便apache 部署\n USE_X_SENDFILE = False\n # cookie 会话的安全签名\n SECRET_KEY = b'ab#_2L\"3dQ8zc-\\xcc]/'\n # 邮件配置\n MAIL_SERVER = 'smtp.yikaobang.com.cn'\n MAIL_PORT = 465 \n MAIL_USE_SSL = True\n MAIL_USERNAME = '[email protected]'\n MAIL_PASSWORD = 'Ykbmail89!@#'\n AA = '2323'\n SECRET_KEY = b'233#_2L\"3dQ8zc-\\xcc]/'\n\n\n# 开发环境配置\nclass DevelopmentConfig(Config):\n ENV = 'development'\n # 调试模式\n DEBUG = True\n # 测试模式\n TESTING = False\n # mongodb 连接\n MONGOALCHEMY_CONNECTION_STRING='mongodb://127.0.0.1:27017/flask_blog'\n MONGOALCHEMY_DATABASE = 'flask_blog'\n # mysql 连接\n SQLALCHEMY_DATABASE_URI=\"mysql+pymysql://root:[email protected]:3306/flask_blog\"\n SQLALCHEMY_ECHO = True\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n # 域名配置 正式环境必须配置这个参数\n #SERVER_NAME = 'blog.devops89.cn'\n # 方便apache 部署\n USE_X_SENDFILE = False\n # cookie 会话的安全签名\n SECRET_KEY = b'ab#_2L167dQ8zc-\\xcc2c'\n # 邮件配置\n MAIL_SERVER = 'smtp.yikaobang.com.cn'\n MAIL_PORT = 465 \n MAIL_USE_SSL = True\n MAIL_USERNAME = '[email protected]'\n MAIL_PASSWORD = 'Ykbmail89!@#'\n SECRET_KEY = b'ab#_2L\"3dQ8zc-\\xcc]/'\n\n\n# 测试配置\nclass TestingConfig(Config):\n TESTING = True\n\n\n# 在这里可以配置部署的是生产环境还是开发环境\nflask_env_config = {\n 'development': DevelopmentConfig,\n 'production' : ProductionConfig,\n# 开发环境打开这条注释\n 
'default': DevelopmentConfig\n# 生产环境打开这条注释\n# 'default': ProductionConfig,\n}\n\n\n# api 通用错误返回码\n\nDEFINE_ERRORS = {\n 'UserAlreadyExists': { \n 'code': 401,\n 'info' : '用户已经存在',\n 'extra': \"请检查参数\"\n },\n 'vAlreadyExistsError1': {\n 'info': \"导航栏信息已存在\",\n 'code': 402,\n 'extra': \"请检查参数\"\n }, \n 'NavAlreadyExistsError2': {\n 'info': \"二级导航栏信息已存在\",\n 'code': 403,\n 'extra': \"请检查参数\"\n }, \n 'ServerError': {\n 'info':\"服务器存在bug!\",\n 'code': 501,\n 'extra':'请联系管理员!'\n }\n}\n" }, { "alpha_fraction": 0.6423248648643494, "alphanum_fraction": 0.6438152194023132, "avg_line_length": 22.13793182373047, "blob_id": "80321336dbe3f6b88c005163af3e5b07c64eb51d", "content_id": "7b4fcbd014c9dad82b16bb09041707d682187aeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 671, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/devops_blog/backend/models.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n\tmongo_mo\n\"\"\"\nfrom .. import mongo_db\nfrom devops_blog.utils import timeTools\nfrom datetime import datetime\n\ntime_tools = timeTools()\n\nclass Post(mongo_db.Document):\n #id = mongo_db.IntField()\n title = mongo_db.StringField()\n body = mongo_db.StringField()\n ctime = mongo_db.DateTimeField(default=datetime.now())\n category_id = mongo_db.IntField(default=None)\n\n #def __init__(self,title,body):\n # title = self.title\n # body = self.body\n\n\nclass Category(mongo_db.Document):\n #id = mongo_db.IntField()\n name = mongo_db.StringField()\n\n #def __init__(self,name):\n # name = self.name\n" }, { "alpha_fraction": 0.6423357725143433, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 16.125, "blob_id": "a1081c3fb7e1f41b6ec12cdb6731ba8935db66f5", "content_id": "406e335eea04819487636ce3f87c7e5305967953", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 33, "num_lines": 8, "path": "/devops_blog/blog/__init__.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\nfrom flask import Blueprint\n\n# 创建蓝图 \nblog = Blueprint('blog',__name__)\n\nfrom . 
import views\n" }, { "alpha_fraction": 0.6263157725334167, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 14.75, "blob_id": "b479a976af7d3e59df9604cc9765329e9f594c28", "content_id": "333efa8802c2077a6c5ec428e8543a552bf09b26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/devops_blog/commands.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n\tflask-scripts 定制脚本文件\n\t\n\"\"\"\nfrom flask_script import Command\n\nclass Hello(Command):\n\n def run(self):\n print(\"hello,flask_scripts\")\n\n" }, { "alpha_fraction": 0.6246246099472046, "alphanum_fraction": 0.6366366147994995, "avg_line_length": 26.75, "blob_id": "d2bd89ce8190255b901403b72b7c0e21cc0c4444", "content_id": "a82d28deb39cada14127ecea85f2863e51de0e93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 339, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/devops_blog/templates/blog/test.html", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "{% extends \"bootstrap/base.html\" %}\n{% block title %}This is an example page{% endblock %}\n{% block navbar %}\n<div class=\"navbar navbar-fixed-top\">\n<link rel=\"stylesheet\" href=\"{{ url_for('static', filename='test.css') }}\">\n</div>\n{% endblock %}\n\n{% block content %}\n <h1>Hello, Bootstrap</h1>\n\t<h2>这里是base.html</h2>\n{% endblock %}\n" }, { "alpha_fraction": 0.44372129440307617, "alphanum_fraction": 0.4467840790748596, "avg_line_length": 36.62318801879883, "blob_id": "02ac1b3c368464422a4d938f2e2e3a842673fbdb", "content_id": "e07ce52c6fa981bbc242b8f0b46c0104648e8d71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2810, "license_type": "permissive", "max_line_length": 90, "num_lines": 69, "path": "/devops_blog/static/js/houtai/addNavModal.js", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": " // 动态减少二级导航栏 动态添加触发的不能直接用click\n parent.$(function(){\n parent.$(\"#secondNavs\").delegate('#minusSecondNav','click',function(){\n console.log(parent.$(this).index());\n parent.$(this).parent().parent().remove();\n });\n });\n\n // 动态添加和减少二级菜单\n parent.$(function(){\n\t\t\tconsole.log(2232);\n // 如果有一级url那就让添加二级菜单\n parent.$(\"#fNavUrl\").blur(function(){\n if((parent.$(\"#fNavUrl\").val()) == ''){\n parent.$(\"#addSecondNav\").show();\n parent.$(\"#secondNavs\").show();\n }else if((parent.$(\"#fNavUrl\").val()) != ''){\n parent.$(\"#addSecondNav\").hide();\n parent.$(\"#secondNavs\").hide();\n }\n });\n // 动态添加二级导航栏\n parent.$(\"#addSecondNav\").click(function(){\n var subNavForm = parent.$(\"#preAddSecondNavIndex\").html();\n // 动态添加二级导航栏dom 到 #secondNavs 下\n parent.$(\"#secondNavs\").append(subNavForm);\n });\n });\n\n\t\t// ajax 提交addmodal 的 form 表单\n\t\tparent.$(function(){\n\t\t\tparent.$(\"#addFirstNav\").click(function(){\n\t\t\t\tvar form_data = {};\n\t\t\t\tvar t = parent.$(\"form[id='addFirstNavForm']\").serializeArray();\n\t\t\t\tvar navPris = new Array();\n\t\t\t\tvar sNavs = new Array();\n\t\t\t\tvar sNav = {}; \n\t\t\t\t$.each(t,function(){\n\t\t\t\t\tif(this.name == 'navPri'){\n\t\t\t\t\t\tnavPris.push(this.value);\n\t\t\t\t\t}else if(this.name == 'sNavName' && Object.keys(sNav).length == 0){\n\t\t\t\t\t\tsNav['sNavName'] = 
this.value;\n\t\t\t\t\t}else if(this.name == 'sNavUrl' && sNav['sNavName'] != ''){\n\t\t\t\t\t\tsNav['sNavUrl'] = this.value;\n\t\t\t\t\t\tsNavs.push(sNav);\n\t\t\t\t\t\tsNav = {};\n\t\t\t\t\t}else{\n\t\t\t\t\t\tform_data[this.name] = this.value;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tform_data['navPris'] = navPris;\n\t\t\t\tform_data['sNavs'] = sNavs;\n\t\t\t\tdata = JSON.stringify(form_data);\n\t\t\t\tconsole.log(data);\n\t\t\t\t// 提交到服务器\n\t\t\t\tparent.$.post(\"/houtai/addFNav\",{\"data\":data},function(data,status){\n\t\t\t\t\tconsole.log(data);\n\t\t\t\t\t// 提交结果\n\t\t\t\t\tif(data['code'] == 200){\n\t\t\t\t\t\tparent.layer.msg('<span>添加成功!</span>');\n\t\t\t\t\t\t// 关闭模态框\n\t\t\t\t\t\t$(window.top.document.body).find(\"#myModal\").modal('hide');\n\t\t\t\t\t\twindow.location.reload();\n\t\t\t\t\t}else{\n\t\t\t\t\t\tparent.layer.msg('<span style=\"color:red;\">添加失败,请检查参数</span>');\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\t\t});\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6803519129753113, "avg_line_length": 20.3125, "blob_id": "54e3158b78e54321465b8b7fed01dec085b6d7b0", "content_id": "128018fc7d7bfc73a758e7c14b7039efd06f1f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/devops_blog/blog/views.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\nfrom flask import render_template,current_app\nfrom . import blog\nfrom .. import mysql_db,mongo_db\n\n\n# 创建一个视图应用,博客首页\[email protected]('/',methods=('GET','POST'))\ndef index():\n return render_template('base/base.html')\n\n\[email protected]('/test')\ndef test():\n return render_template(\"houtai/index.html\")\n" }, { "alpha_fraction": 0.6570014357566833, "alphanum_fraction": 0.6626591086387634, "avg_line_length": 35.56034469604492, "blob_id": "ce84de6ab58d8e601e0babb50d02135c098a0a84", "content_id": "c4d8275c2a0c5980b18dec7b6c48b512a730b91e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4756, "license_type": "no_license", "max_line_length": 120, "num_lines": 116, "path": "/devops_blog/blog/models.py", "repo_name": "shixiaobo8/flask_blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\"\"\"\n mysql model 类\n\"\"\"\nfrom .. 
import mysql_db\nfrom datetime import datetime\n\n\n# 多对多配置 用户<--->用户组\nusers = mysql_db.Table('users_groups',\n mysql_db.Column('user_id', mysql_db.Integer, mysql_db.ForeignKey('fb_users.id')),\n mysql_db.Column('userGroup_id', mysql_db.Integer, mysql_db.ForeignKey('fb_userGroups.id'))\n )\n\n\n# 用户组\nclass userGroup(mysql_db.Model):\n __tablename__ = \"fb_userGroups\"\n id = mysql_db.Column(mysql_db.Integer, primary_key=True,autoincrement=True)\n # 多对多关联用户组\n users = mysql_db.relationship('User', secondary=users,backref=mysql_db.backref('userGroup', lazy='dynamic'))\n # 角色关联(一对多)\n roles = mysql_db.relationship('Role', backref='userGroups',lazy='dynamic')\n def __init__(self,userGroupName):\n self.userGroupName = userGroupName\n\n def __repr__(self):\n return \"<userGroup %r>\" %self.userGroupName\n\n# 用户\nclass User(mysql_db.Model):\n __tablename__ = \"fb_users\"\n id = mysql_db.Column(mysql_db.Integer, primary_key=True,autoincrement=True)\n # 用户名\n username = mysql_db.Column(mysql_db.String(80), unique=True,comment='用户名')\n # 邮箱\n email = mysql_db.Column(mysql_db.String(120), unique=True,comment='邮箱')\n # 微信昵称\n weixin_name = mysql_db.Column(mysql_db.String(120), default='',comment='微信昵称')\n # 逻辑删除\n is_del = mysql_db.Column(mysql_db.Boolean,default=False,comment='逻辑删除')\n # 创建时间\n join_date = mysql_db.Column(mysql_db.DateTime,default=datetime.now())\n # 用户组外键\n userGroup_Id = mysql_db.Column(mysql_db.Integer,mysql_db.ForeignKey('fb_userGroups.id'),comment='关联一级用户组,多对一')\n\n def __init__(self, username, email):\n self.username = username\n self.email = email\n\n def __repr__(self):\n return '<User %r>' % self.username\n\n\nclass Nav(mysql_db.Model):\n __tablename__ = \"fb_navs\"\n id = mysql_db.Column(mysql_db.Integer,primary_key=True,autoincrement=True)\n # 一级导航栏标题\n navTitle = mysql_db.Column(mysql_db.String(120),unique=True,nullable=False,comment='一级导航栏标题')\n # 一级导航栏url,可以为空,默认为空\n navUrl = mysql_db.Column(mysql_db.String(120),nullable=True,default='',index=True,comment='一级导航栏url')\n # 前后台导航栏分类\n navType = mysql_db.Column(mysql_db.Boolean(),default=False,comment='导航栏分类,默认为后台导航栏')\n # 逻辑删除\n is_del = mysql_db.Column(mysql_db.Boolean(),default=False,comment='逻辑删除')\n # 关联用户组权限\n subnavs = mysql_db.relationship('subNav', backref='nav',lazy='dynamic')\n # 角色外键 \n role_id = mysql_db.Column(mysql_db.Integer,mysql_db.ForeignKey('fb_roles.id'),comment='关联角色,多对一')\n def __init__(self,navTitle,navUrl,navType,role_id):\n self.navTitle = navTitle\n self.navType = navType\n self.navUrl = navUrl\n self.role_id = role_id\n def __repr__(self):\n return \"<Nav %r>\" %self.navTitle\n\n\n# 二级导航栏\nclass subNav(mysql_db.Model):\n __tablename__ = \"fb_subnavs\"\n id = mysql_db.Column(mysql_db.Integer,primary_key=True,autoincrement=True)\n # 二级导航栏标题\n title = mysql_db.Column(mysql_db.String(120),index=True,unique=True,nullable=False,default='#',comment='二级导航栏标题')\n # 二级导航栏url\n nav_url = mysql_db.Column(mysql_db.String(200),index=True,unique=True,nullable=False,default='#',comment='二级导航栏url')\n # 一级导航栏外键\n nav_Id = mysql_db.Column(mysql_db.Integer,mysql_db.ForeignKey('fb_navs.id'),comment='关联一级导航栏,多对一')\n # 逻辑删除\n is_del = mysql_db.Column(mysql_db.Boolean,default=False,comment='逻辑删除')\n\n def __init__(self,title,nav_url):\n self.title = title\n self.nav_url = nav_url\n\n def __repr__(self):\n return \"<subNav %r>\" %self.title\n\n\n# 用户角色权限表\nclass Role(mysql_db.Model):\n __tablename__ = \"fb_roles\"\n id = mysql_db.Column(mysql_db.Integer,primary_key=True,autoincrement=True)\n # 角色名称\n roleName = 
mysql_db.Column(mysql_db.String(120),unique=True,index=True,nullable=False,default='普通用户',comment='角色名称')\n # 一级导航栏外键 一个角色有多个导航栏\n navs = mysql_db.relationship('Nav', backref='role',lazy='dynamic')\n # 用户组外键\n fb_userGroup_Id = mysql_db.Column(mysql_db.Integer,mysql_db.ForeignKey('fb_userGroups.id'),comment='用户组权限外键')\n\n def __init__(self,roleName):\n self.roleName = roleName\n\n def __repr__(self):\n return \"<Role %r>\" %self.roleName\n\n" } ]
16
balintzs/moto
https://github.com/balintzs/moto
dcb0af5021804d3436c18e403d079fcf54944574
19a2741a4aa1bc643b194a6c9d5fc07d526846d4
964578f0c2417020cd63b2368f2a42764e79d04b
refs/heads/master
2021-01-16T21:39:53.002440
2017-03-20T14:15:04
2017-03-20T14:15:04
59,747,230
0
0
null
2016-05-26T12:08:16
2016-05-25T18:17:24
2016-05-18T01:02:13
null
[ { "alpha_fraction": 0.573623538017273, "alphanum_fraction": 0.573623538017273, "avg_line_length": 27.573171615600586, "blob_id": "3241a9c1a178ba0e6d82c235c081c5d4709a39d2", "content_id": "1a3b9505bd0670dfdbcdaf4c87e1f9868c066739", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2343, "license_type": "permissive", "max_line_length": 62, "num_lines": 82, "path": "/moto/ecr/responses.py", "repo_name": "balintzs/moto", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nimport json\n\nfrom moto.core.responses import BaseResponse\nfrom .models import ecr_backends\n\n\nclass ECSContainerRegistryResponse(BaseResponse):\n @property\n def ecr_backend(self):\n return ecr_backends[self.region]\n\n @property\n def request_params(self):\n try:\n return json.loads(self.body.decode())\n except ValueError:\n return {}\n\n def _get_param(self, param):\n return self.request_params.get(param, None)\n\n def create_repository(self):\n repository = self.ecr_backend.create_repository(\n self._get_param('repositoryName')\n )\n return json.dumps({\n 'repository': repository.response_object\n })\n\n def describe_repositories(self):\n repositories = self.ecr_backend.describe_repositories(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryNames\")\n )\n return json.dumps({\n 'repositories': repositories\n })\n\n def delete_repository(self):\n repository = self.ecr_backend.delete_repository(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryName\")\n )\n return json.dumps({\n 'repository': repository\n })\n\n def put_image(self):\n image = self.ecr_backend.put_image(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryName\"),\n self._get_param(\"imageManifest\")\n )\n return json.dumps({\n 'image': image\n })\n\n def list_images(self):\n images = self.ecr_backend.list_images(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryName\")\n )\n return json.dumps({\n 'imageIds': images\n })\n\n def batch_get_image(self):\n results = self.ecr_backend.batch_get_image(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryName\"),\n self._get_param(\"imageIds\")\n )\n return json.dumps(results)\n\n def batch_delete_image(self):\n results = self.ecr_backend.batch_delete_image(\n self._get_param(\"registryId\"),\n self._get_param(\"repositoryName\"),\n self._get_param(\"imageIds\")\n )\n return json.dumps(results)\n" }, { "alpha_fraction": 0.6912442445755005, "alphanum_fraction": 0.695852518081665, "avg_line_length": 20.700000762939453, "blob_id": "e66a7d5fb0c4dac011653366433c711ab7882d1f", "content_id": "9e00bc922804d36ccfd29bf0502477b6a8d8f0c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "permissive", "max_line_length": 51, "num_lines": 10, "path": "/moto/ecr/urls.py", "repo_name": "balintzs/moto", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom .responses import ECSContainerRegistryResponse\n\nurl_bases = [\n \"https?://ecr.(.+).amazonaws.com\",\n]\n\nurl_paths = {\n '{0}/$': ECSContainerRegistryResponse.dispatch,\n}\n" }, { "alpha_fraction": 0.6906474828720093, "alphanum_fraction": 0.6942446231842041, "avg_line_length": 22.16666603088379, "blob_id": "14f42014f2fd425dea7838fcd57028d27632cdb8", "content_id": "4fa0cb55f5fecc8b802934a975b78a06d1072c4b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 278, "license_type": "permissive", "max_line_length": 42, "num_lines": 12, "path": "/moto/ecr/__init__.py", "repo_name": "balintzs/moto", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom .models import ecr_backends\nfrom ..core.models import MockAWS\n\necr_backend = ecr_backends['us-east-1']\n\n\ndef mock_ecr(func=None):\n if func:\n return MockAWS(ecr_backends)(func)\n else:\n return MockAWS(ecr_backends)\n" }, { "alpha_fraction": 0.5727129578590393, "alphanum_fraction": 0.5776578783988953, "avg_line_length": 30.77857208251953, "blob_id": "a20119fbc7f0a853dcf97baae0886e175537687d", "content_id": "e5db5b127dfbf5bf12917b0c103cf2eff3eb2520", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4449, "license_type": "permissive", "max_line_length": 106, "num_lines": 140, "path": "/moto/ecr/models.py", "repo_name": "balintzs/moto", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nimport json\nimport hashlib\n\nfrom moto.core import BaseBackend\nfrom moto.ec2 import ec2_backends\n\n\nclass BaseObject(object):\n def camelCase(self, key):\n words = []\n for i, word in enumerate(key.split('_')):\n if i > 0:\n words.append(word.title())\n else:\n words.append(word)\n return ''.join(words)\n\n def gen_response_object(self):\n response_object = self.__dict__.copy()\n for key, value in response_object.items():\n if '_' in key:\n response_object[self.camelCase(key)] = value\n del response_object[key]\n return response_object\n\n @property\n def response_object(self):\n return self.gen_response_object()\n\n\nclass Repository(BaseObject):\n def __init__(self, repo_name):\n self.registry_id = \"012345678910\"\n self.repository_arn = 'arn:aws:ecr:us-east-1:{}:repository/{}'.format(self.registry_id, repo_name)\n self.repository_name = repo_name\n self.repository_uri = \"{}.dkr.ecr.us-east-1.amazonaws.com/{}\".format(self.registry_id, repo_name)\n self.images = {}\n\n @property\n def response_object(self):\n obj = self.gen_response_object()\n obj.pop(\"images\")\n return obj\n\n\nclass Image(BaseObject):\n def __init__(self, registry_id, repository_name, manifest):\n self.registry_id = registry_id\n self.repository_name = repository_name\n self.image_manifest = manifest\n self.image_id = dict(\n imageDigest=hashlib.sha256(manifest.encode()).hexdigest(),\n imageTag=json.loads(manifest)[\"tag\"]\n )\n\n\nclass ECSContainerRegistryBackend(BaseBackend):\n def __init__(self):\n self.repositories = {}\n\n def create_repository(self, name):\n repo = Repository(name)\n self.repositories[name] = repo\n return repo\n\n def describe_repositories(self, registry_id, repository_names):\n \"\"\"\n maxResults and pagination not implemented\n \"\"\"\n return [\n self.repositories[repo].response_object for repo in repository_names\n ]\n\n def delete_repository(self, registry_id, repository_name):\n return self.repositories.pop(repository_name)\n\n def put_image(self, registry_id, repository_name, image_manifest):\n repo = self.repositories[repository_name]\n image = Image(repo.registry_id, repository_name, image_manifest)\n repo.images[image.image_id[\"imageTag\"]] = image\n return image.response_object\n\n def list_images(self, registry_id, repository_name):\n \"\"\"\n maxResults and pagination not implemented\n \"\"\"\n repo = self.repositories[repository_name]\n return [\n repo.images[tag].image_id\n for tag in repo.images\n ]\n\n def batch_get_image(self, 
registry_id, repository_name, image_ids):\n \"\"\"\n maxResults and pagination not implemented\n \"\"\"\n repo = self.repositories[repository_name]\n return dict(\n images=[\n repo.images[image_id[\"imageTag\"]].response_object\n for image_id in image_ids\n if image_id[\"imageTag\"] in repo.images\n ],\n failures=[\n dict(\n imageId=image_id,\n failureCode=\"InvalidImageTag\",\n failureReason=\"Tag not found\"\n )\n for image_id in image_ids\n if image_id[\"imageTag\"] not in repo.images\n ]\n )\n\n def batch_delete_image(self, registry_id, repository_name, image_ids):\n \"\"\"\n maxResults and pagination not implemented\n \"\"\"\n repo = self.repositories[repository_name]\n deleted = []\n failed = []\n for image_id in image_ids:\n if image_id[\"imageTag\"] in repo.images:\n deleted.append(repo.images.pop(image_id[\"imageTag\"]).response_object)\n else:\n failed.append(dict(\n imageId=image_id,\n failureCode=\"InvalidImageTag\",\n failureReason=\"Tag not found\"\n ))\n return dict(\n imageIds=deleted,\n failures=failed\n )\n\n\necr_backends = {}\nfor region, ec2_backend in ec2_backends.items():\n ecr_backends[region] = ECSContainerRegistryBackend()\n" }, { "alpha_fraction": 0.6340222954750061, "alphanum_fraction": 0.6366928815841675, "avg_line_length": 42.736000061035156, "blob_id": "3a944b1926cfc5ecc5a1e97f1f4b2047208c51bf", "content_id": "24b244d47c640211391363696286c59d882d2922", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27335, "license_type": "permissive", "max_line_length": 136, "num_lines": 625, "path": "/moto/autoscaling/models.py", "repo_name": "balintzs/moto", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping\nfrom moto.core import BaseBackend\nfrom moto.ec2 import ec2_backends\nfrom moto.elb import elb_backends\nfrom moto.elb.exceptions import LoadBalancerNotFoundError\n\n# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown\nDEFAULT_COOLDOWN = 300\n\n\nclass InstanceState(object):\n def __init__(self, instance, lifecycle_state=\"InService\", health_status=\"Healthy\"):\n self.instance = instance\n self.lifecycle_state = lifecycle_state\n self.health_status = health_status\n\n\nclass FakeScalingPolicy(object):\n def __init__(self, name, policy_type, adjustment_type, as_name, scaling_adjustment,\n cooldown, autoscaling_backend):\n self.name = name\n self.policy_type = policy_type\n self.adjustment_type = adjustment_type\n self.as_name = as_name\n self.scaling_adjustment = scaling_adjustment\n if cooldown is not None:\n self.cooldown = cooldown\n else:\n self.cooldown = DEFAULT_COOLDOWN\n self.autoscaling_backend = autoscaling_backend\n\n def execute(self):\n if self.adjustment_type == 'ExactCapacity':\n self.autoscaling_backend.set_desired_capacity(self.as_name, self.scaling_adjustment)\n elif self.adjustment_type == 'ChangeInCapacity':\n self.autoscaling_backend.change_capacity(self.as_name, self.scaling_adjustment)\n elif self.adjustment_type == 'PercentChangeInCapacity':\n self.autoscaling_backend.change_capacity_percent(self.as_name, self.scaling_adjustment)\n\n\nclass FakeLaunchConfiguration(object):\n def __init__(self, name, image_id, key_name, ramdisk_id, kernel_id, security_groups, user_data,\n instance_type, instance_monitoring, instance_profile_name,\n spot_price, ebs_optimized, associate_public_ip_address, block_device_mapping_dict):\n self.name = name\n 
self.image_id = image_id\n self.key_name = key_name\n self.ramdisk_id = ramdisk_id\n self.kernel_id = kernel_id\n self.security_groups = security_groups if security_groups else []\n self.user_data = user_data\n self.instance_type = instance_type\n self.instance_monitoring = instance_monitoring\n self.instance_profile_name = instance_profile_name\n self.spot_price = spot_price\n self.ebs_optimized = ebs_optimized\n self.associate_public_ip_address = associate_public_ip_address\n self.block_device_mapping_dict = block_device_mapping_dict\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n instance_profile_name = properties.get(\"IamInstanceProfile\")\n\n backend = autoscaling_backends[region_name]\n config = backend.create_launch_configuration(\n name=resource_name,\n image_id=properties.get(\"ImageId\"),\n kernel_id=properties.get(\"KernelId\"),\n ramdisk_id=properties.get(\"RamdiskId\"),\n key_name=properties.get(\"KeyName\"),\n security_groups=properties.get(\"SecurityGroups\"),\n user_data=properties.get(\"UserData\"),\n instance_type=properties.get(\"InstanceType\"),\n instance_monitoring=properties.get(\"InstanceMonitoring\"),\n instance_profile_name=instance_profile_name,\n spot_price=properties.get(\"SpotPrice\"),\n ebs_optimized=properties.get(\"EbsOptimized\"),\n associate_public_ip_address=properties.get(\"AssociatePublicIpAddress\"),\n block_device_mappings=properties.get(\"BlockDeviceMapping.member\")\n )\n return config\n\n @classmethod\n def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):\n cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)\n return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)\n\n @classmethod\n def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n backend = autoscaling_backends[region_name]\n try:\n backend.delete_launch_configuration(resource_name)\n except KeyError:\n pass\n\n def delete(self, region_name):\n backend = autoscaling_backends[region_name]\n backend.delete_launch_configuration(self.name)\n\n @property\n def physical_resource_id(self):\n return self.name\n\n @property\n def block_device_mappings(self):\n if not self.block_device_mapping_dict:\n return None\n else:\n return self._parse_block_device_mappings()\n\n @property\n def instance_monitoring_enabled(self):\n if self.instance_monitoring:\n return 'true'\n return 'false'\n\n def _parse_block_device_mappings(self):\n block_device_map = BlockDeviceMapping()\n for mapping in self.block_device_mapping_dict:\n block_type = BlockDeviceType()\n mount_point = mapping.get('device_name')\n if 'ephemeral' in mapping.get('virtual_name', ''):\n block_type.ephemeral_name = mapping.get('virtual_name')\n else:\n block_type.volume_type = mapping.get('ebs._volume_type')\n block_type.snapshot_id = mapping.get('ebs._snapshot_id')\n block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')\n block_type.size = mapping.get('ebs._volume_size')\n block_type.iops = mapping.get('ebs._iops')\n block_device_map[mount_point] = block_type\n return block_device_map\n\n\nclass FakeAutoScalingGroup(object):\n def __init__(self, name, availability_zones, desired_capacity, max_size,\n min_size, launch_config_name, vpc_zone_identifier,\n default_cooldown, health_check_period, health_check_type,\n load_balancers, 
placement_group, termination_policies,\n autoscaling_backend, tags):\n self.autoscaling_backend = autoscaling_backend\n self.name = name\n self.availability_zones = availability_zones\n self.max_size = max_size\n self.min_size = min_size\n\n self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]\n self.launch_config_name = launch_config_name\n self.vpc_zone_identifier = vpc_zone_identifier\n\n self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN\n self.health_check_period = health_check_period\n self.health_check_type = health_check_type if health_check_type else \"EC2\"\n self.load_balancers = load_balancers\n self.placement_group = placement_group\n self.termination_policies = termination_policies\n\n self.instance_states = []\n self.set_desired_capacity(desired_capacity)\n self.tags = tags if tags else []\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n launch_config_name = properties.get(\"LaunchConfigurationName\")\n load_balancer_names = properties.get(\"LoadBalancerNames\", [])\n\n backend = autoscaling_backends[region_name]\n group = backend.create_autoscaling_group(\n name=resource_name,\n availability_zones=properties.get(\"AvailabilityZones\", []),\n desired_capacity=properties.get(\"DesiredCapacity\"),\n max_size=properties.get(\"MaxSize\"),\n min_size=properties.get(\"MinSize\"),\n launch_config_name=launch_config_name,\n vpc_zone_identifier=(','.join(properties.get(\"VPCZoneIdentifier\", [])) or None),\n default_cooldown=properties.get(\"Cooldown\"),\n health_check_period=properties.get(\"HealthCheckGracePeriod\"),\n health_check_type=properties.get(\"HealthCheckType\"),\n load_balancers=load_balancer_names,\n placement_group=None,\n termination_policies=properties.get(\"TerminationPolicies\", []),\n tags=properties.get(\"Tags\", []),\n )\n return group\n\n @classmethod\n def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):\n cls.delete_from_cloudformation_json(original_resource.name, cloudformation_json, region_name)\n return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)\n\n @classmethod\n def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n backend = autoscaling_backends[region_name]\n try:\n backend.delete_autoscaling_group(resource_name)\n except KeyError:\n pass\n\n def delete(self, region_name):\n backend = autoscaling_backends[region_name]\n backend.delete_autoscaling_group(self.name)\n\n @property\n def physical_resource_id(self):\n return self.name\n\n def update(self, availability_zones, desired_capacity, max_size, min_size,\n launch_config_name, vpc_zone_identifier, default_cooldown,\n health_check_period, health_check_type, load_balancers,\n placement_group, termination_policies):\n if availability_zones:\n self.availability_zones = availability_zones\n if max_size is not None:\n self.max_size = max_size\n if min_size is not None:\n self.min_size = min_size\n\n if launch_config_name:\n self.launch_config = self.autoscaling_backend.launch_configurations[launch_config_name]\n self.launch_config_name = launch_config_name\n if vpc_zone_identifier is not None:\n self.vpc_zone_identifier = vpc_zone_identifier\n if health_check_period is not None:\n self.health_check_period = health_check_period\n if health_check_type is not None:\n self.health_check_type 
= health_check_type\n\n if desired_capacity is not None:\n self.set_desired_capacity(desired_capacity)\n\n def set_desired_capacity(self, new_capacity):\n if new_capacity is None:\n self.desired_capacity = self.min_size\n else:\n self.desired_capacity = new_capacity\n\n curr_instance_count = len(self.instance_states)\n\n if self.desired_capacity == curr_instance_count:\n return\n\n if self.desired_capacity > curr_instance_count:\n # Need more instances\n count_needed = int(self.desired_capacity) - int(curr_instance_count)\n reservation = self.autoscaling_backend.ec2_backend.add_instances(\n self.launch_config.image_id,\n count_needed,\n self.launch_config.user_data,\n self.launch_config.security_groups,\n instance_type=self.launch_config.instance_type,\n )\n for instance in reservation.instances:\n instance.autoscaling_group = self\n self.instance_states.append(InstanceState(instance))\n else:\n # Need to remove some instances\n count_to_remove = curr_instance_count - self.desired_capacity\n instances_to_remove = self.instance_states[:count_to_remove]\n instance_ids_to_remove = [instance.instance.id for instance in instances_to_remove]\n self.autoscaling_backend.ec2_backend.terminate_instances(instance_ids_to_remove)\n self.instance_states = self.instance_states[count_to_remove:]\n\n\nclass FakeScalableTarget(object):\n\n def __init__(self, ServiceNamespace, ResourceId, ScalableDimension, MinCapacity, MaxCapacity, RoleARN):\n self.update(ServiceNamespace, ResourceId, ScalableDimension, MinCapacity, MaxCapacity, RoleARN)\n\n def update(self, ServiceNamespace, ResourceId, ScalableDimension, MinCapacity=None, MaxCapacity=None, RoleARN=None):\n self.ServiceNamespace = ServiceNamespace\n self.ResourceId = ResourceId\n self.ScalableDimension = ScalableDimension\n if MinCapacity is not None:\n self.MinCapacity = MinCapacity\n if MaxCapacity is not None:\n self.MaxCapacity = MaxCapacity\n self.RoleARN = RoleARN or self.RoleARN\n\n\nclass FakeApplicationScalingPolicy(object):\n\n def __init__(self, PolicyName, ServiceNamespace, ResourceId, ScalableDimension, PolicyType, StepScalingPolicyConfiguration):\n self.update(PolicyName, ServiceNamespace, ResourceId, ScalableDimension, PolicyType, StepScalingPolicyConfiguration)\n self.Alarms = []\n\n def update(self, PolicyName, ServiceNamespace, ResourceId, ScalableDimension, PolicyType=None, StepScalingPolicyConfiguration=None):\n self.PolicyARN = (\n \"arn:aws:autoscaling:us-east-1:012345678910:scalingPolicy:\"\n \"6d8972f3-efc8-437c-92d1-6270fEXAMPLE:resource/{ServiceNamespace}/\"\n \"{ResourceId}:policyName/{PolicyName}\"\n ).format(\n ServiceNamespace=ServiceNamespace,\n ResourceId=ResourceId,\n PolicyName=PolicyName\n )\n self.PolicyName = PolicyName\n self.ServiceNamespace = ServiceNamespace\n self.ResourceId = ResourceId\n self.ScalableDimension = ScalableDimension\n self.PolicyType = PolicyType or self.PolicyType\n self.StepScalingPolicyConfiguration = StepScalingPolicyConfiguration or self.StepScalingPolicyConfiguration\n\n\nclass ApplicationAutoScalingBackend(BaseBackend):\n\n def __init__(self):\n self.scalable_targets = {}\n self.policies = {}\n\n def _paginate(self, objects, sort_key, kwargs):\n objects = sorted(\n objects,\n key=sort_key\n )\n index = int(kwargs.get(\"NextToken\", 0))\n count = int(kwargs.get(\"MaxResults\", 50))\n next_token = index + count\n return objects[index:next_token], str(next_token)\n\n def _get_policy_id(self, **kwargs):\n return 
\"{ServiceNamespace}_{ResourceId}_{ScalableDimension}_{PolicyName}\".format(**kwargs)\n\n def _get_target_id(self, **kwargs):\n return \"{ServiceNamespace}_{ResourceId}_{ScalableDimension}\".format(**kwargs)\n\n def put_scaling_policy(self, **kwargs):\n policy_id = self._get_policy_id(**kwargs)\n if policy_id not in self.policies:\n self.policies[policy_id] = FakeApplicationScalingPolicy(**kwargs)\n else:\n self.policies[policy_id].update(**kwargs)\n return dict(\n PolicyARN=self.policies[policy_id].PolicyARN\n )\n\n def delete_scaling_policy(self, **kwargs):\n self.policies.pop(self._get_policy_id(**kwargs), None)\n return {}\n\n def describe_scaling_policies(self, **kwargs):\n objects, token = self._paginate(\n filter(\n lambda x: (\n x.ServiceNamespace == kwargs[\"ServiceNamespace\"] and\n (not kwargs.get(\"PolicyNames\") or x.PolicyName in kwargs[\"PolicyNames\"]) and\n (not kwargs.get(\"ResourceId\") or x.ResourceId in kwargs[\"ResourceId\"]) and\n (not kwargs.get(\"ScalableDimension\") or x.ScalableDimension in kwargs[\"ScalableDimension\"])\n ),\n self.policies.values()\n ),\n lambda sp: sp.PolicyName,\n kwargs\n )\n return dict(\n ScalingPolicies=[sp.__dict__ for sp in objects],\n NextToken=token\n )\n\n def register_scalable_target(self, **kwargs):\n target_id = self._get_target_id(**kwargs)\n if target_id not in self.scalable_targets:\n self.scalable_targets[target_id] = FakeScalableTarget(**kwargs)\n else:\n self.scalable_targets[target_id].update(**kwargs)\n return {}\n\n def deregister_scalable_target(self, **kwargs):\n self.scalable_targets.pop(self._get_target_id(**kwargs), None)\n return {}\n\n def describe_scalable_targets(self, **kwargs):\n objects, token = self._paginate(\n filter(\n lambda x: (\n x.ServiceNamespace == kwargs[\"ServiceNamespace\"] and\n (not kwargs.get(\"ResourceIds\") or x.ResourceId in kwargs[\"ResourceIds\"]) and\n (not kwargs.get(\"ScalableDimension\") or x.ScalableDimension in kwargs[\"ScalableDimension\"])\n ),\n self.scalable_targets.values()\n ),\n lambda st: st.ResourceId,\n kwargs\n )\n return dict(\n ScalableTargets=[st.__dict__ for st in objects],\n NextToken=token\n )\n\n\nclass AutoScalingBackend(BaseBackend):\n\n def __init__(self, ec2_backend, elb_backend, aas_backend):\n self.autoscaling_groups = {}\n self.launch_configurations = {}\n self.policies = {}\n self.ec2_backend = ec2_backend\n self.elb_backend = elb_backend\n self.aas_backend = aas_backend\n\n def reset(self):\n ec2_backend = self.ec2_backend\n elb_backend = self.elb_backend\n aas_backend = self.aas_backend\n aas_backend.reset()\n self.__dict__ = {}\n self.__init__(ec2_backend, elb_backend, aas_backend)\n\n def create_launch_configuration(self, name, image_id, key_name, kernel_id, ramdisk_id,\n security_groups, user_data, instance_type,\n instance_monitoring, instance_profile_name,\n spot_price, ebs_optimized, associate_public_ip_address, block_device_mappings):\n launch_configuration = FakeLaunchConfiguration(\n name=name,\n image_id=image_id,\n key_name=key_name,\n kernel_id=kernel_id,\n ramdisk_id=ramdisk_id,\n security_groups=security_groups,\n user_data=user_data,\n instance_type=instance_type,\n instance_monitoring=instance_monitoring,\n instance_profile_name=instance_profile_name,\n spot_price=spot_price,\n ebs_optimized=ebs_optimized,\n associate_public_ip_address=associate_public_ip_address,\n block_device_mapping_dict=block_device_mappings,\n )\n self.launch_configurations[name] = launch_configuration\n return launch_configuration\n\n def 
describe_launch_configurations(self, names):\n configurations = self.launch_configurations.values()\n if names:\n return [configuration for configuration in configurations if configuration.name in names]\n else:\n return list(configurations)\n\n def delete_launch_configuration(self, launch_configuration_name):\n self.launch_configurations.pop(launch_configuration_name, None)\n\n def enter_standby(self, instance_ids, auto_scaling_group_name, should_decrement_desired_capacity):\n group = self.autoscaling_groups[auto_scaling_group_name]\n instance_states = []\n for instance_state in group.instance_states:\n if instance_state.instance.id in instance_ids:\n instance_state.lifecycle_state = 'Standby'\n if should_decrement_desired_capacity:\n group.desired_capacity -= 1\n instance_states.append(instance_state)\n self.update_attached_elbs(auto_scaling_group_name)\n return instance_states\n\n def exit_standby(self, instance_ids, auto_scaling_group_name):\n group = self.autoscaling_groups[auto_scaling_group_name]\n instance_states = []\n for instance_state in group.instance_states:\n if instance_state.instance.id in instance_ids:\n instance_state.lifecycle_state = 'InService'\n group.desired_capacity += 1\n instance_states.append(instance_state)\n self.update_attached_elbs(auto_scaling_group_name)\n return instance_states\n\n def create_autoscaling_group(self, name, availability_zones,\n desired_capacity, max_size, min_size,\n launch_config_name, vpc_zone_identifier,\n default_cooldown, health_check_period,\n health_check_type, load_balancers,\n placement_group, termination_policies, tags):\n\n def make_int(value):\n return int(value) if value is not None else value\n\n max_size = make_int(max_size)\n min_size = make_int(min_size)\n default_cooldown = make_int(default_cooldown)\n if health_check_period is None:\n health_check_period = 300\n else:\n health_check_period = make_int(health_check_period)\n\n group = FakeAutoScalingGroup(\n name=name,\n availability_zones=availability_zones,\n desired_capacity=desired_capacity,\n max_size=max_size,\n min_size=min_size,\n launch_config_name=launch_config_name,\n vpc_zone_identifier=vpc_zone_identifier,\n default_cooldown=default_cooldown,\n health_check_period=health_check_period,\n health_check_type=health_check_type,\n load_balancers=load_balancers,\n placement_group=placement_group,\n termination_policies=termination_policies,\n autoscaling_backend=self,\n tags=tags,\n )\n\n self.autoscaling_groups[name] = group\n self.update_attached_elbs(group.name)\n return group\n\n def update_autoscaling_group(self, name, availability_zones,\n desired_capacity, max_size, min_size,\n launch_config_name, vpc_zone_identifier,\n default_cooldown, health_check_period,\n health_check_type, load_balancers,\n placement_group, termination_policies):\n group = self.autoscaling_groups[name]\n group.update(availability_zones, desired_capacity, max_size,\n min_size, launch_config_name, vpc_zone_identifier,\n default_cooldown, health_check_period, health_check_type,\n load_balancers, placement_group, termination_policies)\n return group\n\n def describe_autoscaling_groups(self, names):\n groups = self.autoscaling_groups.values()\n if names:\n return [group for group in groups if group.name in names]\n else:\n return list(groups)\n\n def delete_autoscaling_group(self, group_name):\n self.set_desired_capacity(group_name, 0)\n self.autoscaling_groups.pop(group_name, None)\n\n def describe_autoscaling_instances(self):\n instance_states = []\n for group in 
self.autoscaling_groups.values():\n instance_states.extend(group.instance_states)\n return instance_states\n\n def set_desired_capacity(self, group_name, desired_capacity):\n group = self.autoscaling_groups[group_name]\n group.set_desired_capacity(desired_capacity)\n self.update_attached_elbs(group_name)\n\n def change_capacity(self, group_name, scaling_adjustment):\n group = self.autoscaling_groups[group_name]\n desired_capacity = group.desired_capacity + scaling_adjustment\n self.set_desired_capacity(group_name, desired_capacity)\n\n def change_capacity_percent(self, group_name, scaling_adjustment):\n \"\"\" http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html\n If PercentChangeInCapacity returns a value between 0 and 1,\n Auto Scaling will round it off to 1. If the PercentChangeInCapacity\n returns a value greater than 1, Auto Scaling will round it off to the\n lower value. For example, if PercentChangeInCapacity returns 12.5,\n then Auto Scaling will round it off to 12.\"\"\"\n group = self.autoscaling_groups[group_name]\n percent_change = 1 + (scaling_adjustment / 100.0)\n desired_capacity = group.desired_capacity * percent_change\n if group.desired_capacity < desired_capacity < group.desired_capacity + 1:\n desired_capacity = group.desired_capacity + 1\n else:\n desired_capacity = int(desired_capacity)\n self.set_desired_capacity(group_name, desired_capacity)\n\n def create_autoscaling_policy(self, name, policy_type, adjustment_type, as_name,\n scaling_adjustment, cooldown):\n policy = FakeScalingPolicy(name, policy_type, adjustment_type, as_name,\n scaling_adjustment, cooldown, self)\n\n self.policies[name] = policy\n return policy\n\n def describe_policies(self, autoscaling_group_name=None, policy_names=None, policy_types=None):\n return [policy for policy in self.policies.values()\n if (not autoscaling_group_name or policy.as_name == autoscaling_group_name) and\n (not policy_names or policy.name in policy_names) and\n (not policy_types or policy.policy_type in policy_types)]\n\n def delete_policy(self, group_name):\n self.policies.pop(group_name, None)\n\n def execute_policy(self, group_name):\n policy = self.policies[group_name]\n policy.execute()\n\n def update_attached_elbs(self, group_name):\n group = self.autoscaling_groups[group_name]\n group_instance_ids = set(state.instance.id for state in group.instance_states)\n group_paused_instance_ids = set(state.instance.id for state in group.instance_states if \"Standby\" in state.lifecycle_state)\n\n try:\n elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)\n except LoadBalancerNotFoundError:\n # ELBs can be deleted before their autoscaling group\n return\n\n for elb in elbs:\n elb_instace_ids = set(elb.instance_ids)\n self.elb_backend.register_instances(elb.name, group_instance_ids - elb_instace_ids)\n self.elb_backend.deregister_instances(elb.name, elb_instace_ids - group_instance_ids)\n self.elb_backend.deregister_instances(elb.name, group_paused_instance_ids)\n\n def create_or_update_tags(self, tags):\n\n for tag in tags:\n group_name = tag[\"resource_id\"]\n group = self.autoscaling_groups[group_name]\n old_tags = group.tags\n\n new_tags = []\n #if key was in old_tags, update old tag\n for old_tag in old_tags:\n if old_tag[\"key\"] == tag[\"key\"]:\n new_tags.append(tag)\n else:\n new_tags.append(old_tag)\n\n #if key was never in old_tag's add it (create tag)\n if not any(new_tag['key'] == tag['key'] for new_tag in new_tags):\n new_tags.append(tag)\n\n group.tags = 
new_tags\n\nautoscaling_backends = {}\nfor region, ec2_backend in ec2_backends.items():\n autoscaling_backends[region] = AutoScalingBackend(ec2_backend, elb_backends[region], ApplicationAutoScalingBackend())\n" } ]
5
jakekirsch/checkio_challenges
https://github.com/jakekirsch/checkio_challenges
5a91216878dfea15311c95467e452789cae409f9
dd3a6ba6b56e5d08f1f70d249cd46dc075b749e9
5464267848580088570d57da6e2807e056d25c6b
refs/heads/master
2021-05-09T14:39:46.218724
2018-01-26T16:34:37
2018-01-26T16:34:37
119,070,505
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6610087156295776, "alphanum_fraction": 0.6755577325820923, "avg_line_length": 25.113924026489258, "blob_id": "20eb7d063591b871890403fb3d089c16d32aba70", "content_id": "0e2c47ec7fd846f4f216a5ff2dd88f704b7ce3ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2062, "license_type": "no_license", "max_line_length": 72, "num_lines": 79, "path": "/checkio_first_challenge/first_challenge.py", "repo_name": "jakekirsch/checkio_challenges", "src_encoding": "UTF-8", "text": "# Define import modules\nimport sys\nimport re, string\n\n# Defines a repeat function that prints out the argument 3 times\n# and uses a conditional to add a smiley face\ndef checkio(text):\n\t# clean up source text\n\tcleantext = re.sub('[^a-zA-Z]+', '', text)\n\tcleantext = cleantext.lower()\n\n\t# now create a dictionary of the values that are present\n\tresultdict = dict(zip(set(cleantext), \n\t\t[0] * len(set(cleantext))))\n\n\t# find counts of keys\n\tfor key in cleantext:\n\t\tresultdict[key] = resultdict[key] + 1\n\n\t# now find the letter with the greatest value, tie goes to \n\t# lower\n\tmax_char = ''\n\tmax_value = 0\n\n\tfor key, value in resultdict.items():\n\t\tif resultdict[key] > max_value:\n\t\t\tmax_char = key\n\t\t\tmax_value = resultdict[key]\n\t\telif resultdict[key] == max_value:\n\t\t\tif key < max_char:\n\t\t\t\tmax_char = key\n\t\t\t\tmax_value = resultdict[key]\n\t\n\treturn max_char\n\n# one of the better answers\n\ndef checkio_improved(text):\n \"\"\"\n We iterate through latyn alphabet and count each letter in the text.\n Then 'max' selects the most frequent letter.\n For the case when we have several equal letter,\n 'max' selects the first from they.\n \"\"\"\n text = text.lower() # I understand this - does a similar thing\n # string.ascii_lowercase - this is a constant?\n # max - returns largest value in an iterable, however key\n # is a one argument ordering function\n\n # which brings us to text.count, which seems that the default\n # value is to take substrings of 1?\n # print(lambda text.count)\n # to what I used\n return max(string.ascii_lowercase, \n \t# key = text.count\n \tkey = lambda x: text.count(x)\n \t)\n\n # string.ascii_lowercase ??\n\n\n \n# define main argument of this module, which calls the repeat \n# function\ndef main():\n\t# text = sys.argv[1]\n\t# smiley = args[2]\n\n\tprint(checkio_improved(\"\"\"asdfa;LKJL;KJL;KJLKJ\n\t\t;LKJ;LKJ;LJK\n\t\tsdf234230498 )98098)(*)(* 134240098a;lkjsd;lfkajd;flkj\n\t\tthisis ;ijasdf string\"\"\"))\n\n\t# print(checkio(string.printable))\n\n\n# Standard boilerplate to call main function\nif __name__ == '__main__':\n\tmain()" } ]
1
davidptracy/BridingWorldsTimeTest
https://github.com/davidptracy/BridingWorldsTimeTest
cdccbe32653f09341956d71f48cd0a84caee73d4
21311de9dad325422af3e6ebb3bbf5f5bd73616d
52034af2bf3cf503bcb09ec1897bd6cf7406b7fe
refs/heads/master
2016-09-05T18:44:47.730791
2014-03-07T16:51:08
2014-03-07T16:51:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6319444179534912, "alphanum_fraction": 0.6712962985038757, "avg_line_length": 27.799999237060547, "blob_id": "cd7932f9029374be3bb44c083bc5710dd64b9eb9", "content_id": "0bf11cb5ab8de0caa98106133930cc26997cd381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 122, "num_lines": 15, "path": "/test.py", "repo_name": "davidptracy/BridingWorldsTimeTest", "src_encoding": "UTF-8", "text": "\"\"\"letting my environment know the path profile for python\"\"\"\n#!/usr/bin/sh\n\nimport time\nimport datetime\n\n\ntime.timezone = -5.00\ncurrent_time = int(time.mktime(time.strptime(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), '%Y-%m-%d %H:%M:%S')))\noutput_time = int(time.mktime(time.strptime('2014-03-04 11:00:00', '%Y-%m-%d %H:%M:%S')))\nevent_Time = output_time - time.timezone\n\ndelta = event_Time - current_time\n\nprint(delta)\n" } ]
1
juanravaz/LuzPy
https://github.com/juanravaz/LuzPy
b9ae0e8ec699f14c6b489d8688c34051a459bb33
ef1bd35322b19548a67fcf3cb258a0ef4a361ae7
465b8d86b73cf150934c4c0f7f4b3b695bd7817c
refs/heads/master
2023-04-11T06:38:48.793501
2021-04-06T07:18:44
2021-04-06T07:18:44
355,086,833
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 22.66666603088379, "blob_id": "beb0b720c2df647625ba4df922ad18c205253ebf", "content_id": "17f4ea509f65ad0c12eee561ed640bdb9efec42b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/README.md", "repo_name": "juanravaz/LuzPy", "src_encoding": "UTF-8", "text": "<h1>LuzPy</h1>\n\n<p>Esta aplicación te mostrará el precio de la luz</p>" }, { "alpha_fraction": 0.6767676472663879, "alphanum_fraction": 0.7070707082748413, "avg_line_length": 48.5, "blob_id": "d14bd568d3c8f245113d9bafe5ee4b740f8e41f2", "content_id": "71cb49439da9e831c494d5eb482e5549f24d8b1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 72, "num_lines": 2, "path": "/settings.py", "repo_name": "juanravaz/LuzPy", "src_encoding": "UTF-8", "text": "URL_JSON = \"https://api.esios.ree.es/archives/70/download_json?date={0}\"\nDATA_FOLDER = 'data/json'\n" }, { "alpha_fraction": 0.5345622301101685, "alphanum_fraction": 0.5467113256454468, "avg_line_length": 28.837499618530273, "blob_id": "f2a068a2894d4025b64e2811c3156395d1d62351", "content_id": "b330b3206fb960271ff29e32d120241847a040b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2388, "license_type": "no_license", "max_line_length": 113, "num_lines": 80, "path": "/main.py", "repo_name": "juanravaz/LuzPy", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport argparse\nfrom datetime import datetime, timedelta\nfrom urllib import request\nimport ssl\n\nfrom settings import *\n\n\ndef download(days: int = 0):\n aux = {}\n noc = []\n gen = []\n\n now = datetime.now()\n to_date = now\n from_date = to_date - timedelta(days)\n # delta_time = now - timedelta(days, 0)\n\n while from_date <= to_date:\n date_formatted = from_date.strftime(\"%Y-%m-%d\")\n ssl._create_default_https_context = ssl._create_unverified_context\n response = request.urlopen(URL_JSON.format(date_formatted))\n data = json.loads(response.read())\n status_code = response.getcode()\n\n if status_code == 200 and \"PVPC\" in data:\n\n pvpc = []\n\n for x in data['PVPC']:\n pvpc.append(\n {\"day\": x['Dia'], \"hours\": x['Hora'], \"night_plan\": float(x['NOC'].replace(\",\", \".\")) / 1000,\n \"General_plan\": float(x['GEN'].replace(\",\", \".\")) / 1000})\n\n noc.append(float(x['NOC'].replace(\",\", \".\")) / 1000)\n gen.append(float(x['GEN'].replace(\",\", \".\")) / 1000)\n\n aux[\"PVPC\"] = pvpc\n\n aux[\"Stats\"] = {\"min_night\": min(noc), \"max_night\": max(noc), \"avg_night\": sum(noc) / len(noc),\n \"min_general\": min(gen), \"max_general\": max(gen),\n \"avg_general\": sum(gen) / len(gen)}\n\n write_json(aux, f'{DATA_FOLDER}/{date_formatted}.json')\n elif status_code != 200:\n print('El recurso presenta errores')\n return\n else:\n print(f'Ha sido imposible sacar información de la fecha {date_formatted}')\n return\n\n\n from_date += timedelta(days=1)\n\n\ndef write_json(content: dict, path: str):\n \"\"\"\n Writes a JSON file\n :param content: JSON content\n :param path: Output path\n \"\"\"\n with open(path, 'w', encoding='utf-8') as file:\n json.dump(content, file)\n\n\ndef main():\n if not os.path.exists(DATA_FOLDER):\n os.makedirs(DATA_FOLDER)\n\n parser = argparse.ArgumentParser(description='CRON 
PROCESS')\n parser.add_argument('-d', '--days', default=0, type=int, help='Downloads data from [today-days, today]')\n args = parser.parse_args()\n days = args.days if args.days >= 0 else 0\n download(days)\n\n\nif __name__ == '__main__':\n main()\n" } ]
3
Chinry/flypaper
https://github.com/Chinry/flypaper
ed8fd4981a5f741ba89054f0ebfa4c82d6e1d53a
a2542222899f04d64077d9759cbd02b00971df7d
c081a5f2134925d4dbfbe7cfeb5f51e547775d32
refs/heads/master
2020-04-21T16:10:31.100014
2019-02-07T21:08:07
2019-02-07T21:08:07
169,692,014
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7252252101898193, "alphanum_fraction": 0.7342342138290405, "avg_line_length": 30.714284896850586, "blob_id": "7dd08f5e4467d1d84de896026190285c3bba01a1", "content_id": "80fbbd96db8932e719cedd786c95c55ec7c3624f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/board/captcha.py", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "import os, random\nfrom PIL import Image, ImageFont\nchoice = random.choice(os.listdir(\"static/pepe\"))\nprint(choice);\nim = Image.open(\"static/pepe/\" + choice)\nim.show()\nfont = ImageFont.truetype(\"static/font/times.ttf\", 12)\n" }, { "alpha_fraction": 0.6074429750442505, "alphanum_fraction": 0.6398559212684631, "avg_line_length": 21.513513565063477, "blob_id": "e127bd8cc0cd9811c4e48b0b6c9f9f2dfdc27c99", "content_id": "0abbed34b832cabb03fb72a67a8e2c90895a1825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 833, "license_type": "no_license", "max_line_length": 59, "num_lines": 37, "path": "/captcha/captcha.c", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "#include <string.h>\n#include <GLFW/glfw3.h>\n#include <ft2build.h>\n#include FT_FREETYPE_H\n#include FT_TYPES_H\n//#include <freetype/freetype.h>\nint main(){\n FT_Library library;\n FT_Face face;\n\n int error = FT_Init_FreeType(&library);\n if (error){\n printf(\"error1\");\n }\n error = FT_New_Face(\n library, \n \"/home/nick/.fonts/Px437_Phoenix_BIOS.ttf\",\n 0, //face index\n &face);\n error = FT_Set_Char_Size(\n face,\n 0,\n 16*64,\n 300,\n 300);\n error = FT_Load_Char(face, 'X', FT_LOAD_RENDER);\n GLFWwindow* window;\n if (!glfwInit())\n exit(EXIT_FAILURE);\n window = glfwCreateWindow(640, 480, \"BENIS\", NULL, NULL);\n glfwMakeContextCurrent(window);\n int width, height;\n glfwGetFramebufferSize(window, &width, &height);\n glViewport(0, 0, width, height);\n glClear(GL_COLOR_BUFFER_BIT);\n return 0;\n}\n" }, { "alpha_fraction": 0.5827742218971252, "alphanum_fraction": 0.5961933732032776, "avg_line_length": 35.153465270996094, "blob_id": "5b7b5733d2a3de7fec97938d9334b782f620e5b5", "content_id": "3bb60c7e85111b5df500a81c91493ca806a71ea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7303, "license_type": "no_license", "max_line_length": 113, "num_lines": 202, "path": "/board/board.py", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template, redirect, url_for, jsonify, g\nfrom werkzeug.utils import secure_filename\nimport os, logging, sqlite3, datetime, random, re\n\n#create a database file if not created already\nopen(\"database.db\", 'a')\n#set globals\nUPLOAD_FOLDER = 'static/uploads'\nALLOWED_EXTENSIONS = [ 'png', 'jpg', 'jpeg', 'gif']\nTHREAD_LIMIT = 20\nPOST_LIMIT = 200\n\n#create app object\napp = Flask(__name__)\n\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024\nDATABASE = 'database.db'\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\n#initialize the database from .sql file\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('threadlist.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\ninit_db()\n\ndef allowed_file(filename):\n return 
'.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef create_id(ip, thread, is_op=False):\n exists = True\n while exists:\n poster_id = random.randint(10000000,100000000)\n query = \"select poster_id from users_{} where poster_id=?\".format(thread)\n found = query_db(query, args=(poster_id,), one=True)\n if found == None:\n exists = False\n cur = get_db().cursor()\n query = \"INSERT INTO users_{}(poster_id, ip) VALUES(?, ?)\".format(thread)\n cur.execute(query, (poster_id, ip,))\n get_db().commit()\n cur.close()\n return poster_id\n\ndef create_thread_id():\n exists = True\n while exists:\n thread_id = random.randint(10000000,100000000)\n query = \"SELECT thread_id FROM thread_list WHERE thread_id={}\".format(thread_id)\n found = query_db(query, one=True)\n if found == None:\n exists = False\n return thread_id\n\ndef init_thread_db(thread_id):\n with app.app_context():\n db = get_db()\n with app.open_resource('thread.sql', mode='r') as f:\n db.cursor().executescript(f.read().format(thread_id, thread_id, thread_id))\n db.commit()\n\ndef query_db(query, args=(), one=False):\n cur = get_db().execute(query, args)\n rv = cur.fetchall()\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\ndef threadlimitation():\n results = query_db(\"SELECT thread_count, thread_id from thread_list ORDER BY thread_count ASC\")\n if len(results) >= THREAD_LIMIT:\n #delete images and image folder\n os.system(\"rm -rf \" + app.config['UPLOAD_FOLDER'] + \"/\" + str(results[0][1]))\n #delete tables relating to the thread\n cur = get_db().cursor()\n query = \"DROP TABLE users_{}\".format(results[0][1])\n cur.execute(query)\n get_db().commit()\n query = \"DROP TABLE posts_{}\".format(results[0][1])\n cur.execute(query)\n get_db().commit()\n #delete thread from thread list\n query = \"DELETE FROM thread_list WHERE thread_count = ?\"\n cur.execute(query, (results[0][0],))\n get_db().commit()\n cur.close()\n if len(results) > 0:\n indexlast = len(results) - 1\n return results[indexlast][0]\n else:\n return 0\n\n\ndef textprocessingpost(text, posts):\n text = text.rstrip()\n subunwanted = [\n [\"fuck\", \"buzz\"],\n [\"bitch\", \"musca\"],\n [\"shit\", \"dung\"],\n [\"&\", \"&amp\"],\n [\"<\", \"&lt\"],\n [\">\", \"&gt\"],\n [\"\\n\", \"<br>\"],\n ]\n for pair in subunwanted:\n text = text.replace(pair[0], pair[1])\n pattern = re.compile(r'(&gt){2}\\d{8}(\\s|$|\\n)')\n matches = pattern.finditer(text)\n textpoints = []\n for match in matches:\n firstinsert = \"<div class=\\\"quote\\\">\"\n firstpoint = match.span()[0]\n text = text[0:firstpoint] + firstinsert + text[firstpoint:len(text)]\n secondpoint = match.span()[1] + len(firstinsert)\n text = text[0:secondpoint] + \"</div>\" + text[secondpoint:len(text)]\n return text\n\n\n\[email protected](\"/json\")\ndef json():\n threads = query_db(\"SELECT * FROM thread_list\")\n return jsonify(threads=threads)\n\[email protected](\"/\")\ndef front():\n return render_template(\"index.html\")\[email protected](\"/createthread\", methods=['GET', 'POST'])\ndef createthread():\n if request.method == 'POST':\n thread_id = create_thread_id()\n init_thread_db(thread_id)\n ip = str(request.remote_addr)\n poster_id = create_id(ip, thread_id)\n date = str(datetime.datetime.now().date())\n topic = request.form['topic']\n thread_count = threadlimitation() + 1\n cur = get_db().cursor()\n query = \"INSERT INTO thread_list(thread_id, poster_id, topic, date, thread_count) VALUES(?, ?, ?, ?, ?)\"\n cur.execute(query, (thread_id, poster_id, topic, 
date, thread_count))\n get_db().commit()\n cur.close()\n os.makedirs(app.config['UPLOAD_FOLDER'] + \"/\" + str(thread_id))\n return redirect(\"/threads/\" + str(thread_id))\n return render_template('createthread.html')\n\n\[email protected](\"/threads/<int:thread_id>\", methods=['GET', 'POST'])\ndef board(thread_id):\n limit = False\n posts = query_db(\"SELECT * FROM posts_{}\".format(thread_id))\n threadinfo = query_db(\"SELECT * FROM thread_list WHERE thread_id = ?\", args=(thread_id,), one=True)\n #find if thread is at post limit\n if len(posts) == POST_LIMIT:\n limit = True\n if request.method == 'POST' and (len(request.form['postaddition']) <= 400 and not limit):\n #check for existing user and create one if nonexistent\n ip = str(request.remote_addr)\n query = \"select poster_id from users_{} where ip=?\".format(thread_id)\n poster_id = query_db(query, args=(ip,), one=True)\n if poster_id == None:\n poster_id = create_id(ip, thread_id)\n else:\n poster_id = poster_id[0]\n #insert new post into database\n post_text = textprocessingpost(request.form['postaddition'], posts)\n date = str(datetime.datetime.now().date())\n post_id = len(posts) + 1\n cur = get_db().cursor()\n query = \"INSERT INTO posts_{}(post_id, poster_id, date, post_text) VALUES(?, ?, ?, ? )\".format(thread_id)\n cur.execute(query, (post_id, poster_id, date, post_text,))\n get_db().commit()\n cur.close()\n\n #file upload\n #create f outside of the if scope\n f = None\n if 'fileupload' in request.files:\n f = request.files['fileupload']\n #if f exists and is an allowed file, randomize name and upload server and add to database\n if f and allowed_file(f.filename):\n filenamebegin = str(random.randint(10000000,100000000))\n filenameend = f.filename.rsplit('.', 1)[1].lower()\n filename = filenamebegin + \".\" + filenameend\n f.save(os.path.join(app.config['UPLOAD_FOLDER'] + \"/\" + str(thread_id), filename))\n cur = get_db().cursor()\n query = \"UPDATE posts_{} SET image=? 
WHERE post_id=?\".format(thread_id)\n cur.execute(query, (filename, post_id,))\n get_db().commit()\n cur.close()\n posts = query_db(\"SELECT * FROM posts_{}\".format(thread_id))\n return render_template('board.html',posts=posts, threadinfo=threadinfo, limit=limit)\n" }, { "alpha_fraction": 0.7541899681091309, "alphanum_fraction": 0.7541899681091309, "avg_line_length": 24.571428298950195, "blob_id": "01acecf3fc3dc8c2c65420a185ab822edbb53ad0", "content_id": "39d6f9055c4795915bd4aa4ad063d473b418d8c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 179, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/board/threadlist.sql", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "CREATE TABLE IF NOT EXISTS thread_list (\n\tthread_id integer PRIMARY KEY,\n\tposter_id integer NOT NULL,\n\ttopic text NOT NULL,\n\tdate text NOT NULL,\n\tthread_count integer NOT NULL\n);\n" }, { "alpha_fraction": 0.7173144817352295, "alphanum_fraction": 0.7173144817352295, "avg_line_length": 20.769229888916016, "blob_id": "633f3c59d87e891c577a94882f3fb91c99a2a185", "content_id": "f3a9e21fc594c1b8f47101cb786b77018fca776e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 283, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/board/thread.sql", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "CREATE TABLE users_{} (\n\tposter_id integer PRIMARY KEY,\n\tip text NOT NULL\n);\n\nCREATE TABLE posts_{} (\n\tpost_id integer PRIMARY KEY,\n\tposter_id integer NOT NULL,\n\timage text NULL,\n\tdate text NOT NULL,\n\tpost_text text NOT NULL,\n\tFOREIGN KEY(poster_id) REFERENCES users{}(poster_id)\n);\n" }, { "alpha_fraction": 0.6334459185600281, "alphanum_fraction": 0.6537162065505981, "avg_line_length": 25.909090042114258, "blob_id": "b3ee45f0005d87e881878fd499dc40dccf2939a1", "content_id": "001be7dd7d32f0e1ab4793a3c37d348440f5d8bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 108, "num_lines": 22, "path": "/board/test.py", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template, g\nfrom werkzeug.utils import secure_filename\nimport sqlite3\nimport datetime, random\n\napp = Flask(__name__)\nDATABASE = 'database.db'\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\[email protected](\"/\")\ndef front():\n query = \"INSERT INTO posts(post_id, poster_id, date, post_text) VALUES(3, 4321432, 'june 12', 'hello' )\"\n cur = get_db().cursor()\n cur.execute(query)\n get_db().commit()\n cur.close()\n return render_template('index.html')\n" }, { "alpha_fraction": 0.7591241002082825, "alphanum_fraction": 0.7682482004165649, "avg_line_length": 48.818180084228516, "blob_id": "ef8d5226dd2eaa3b423a0dc1c90e0c91bfc2bd16", "content_id": "481bfdb486bc6f6a8889585d3b6e69815d8b6aa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 548, "license_type": "no_license", "max_line_length": 87, "num_lines": 11, "path": "/README.md", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "# board\nA sample image board written in python\n## Development Environment\nTo setup your environment, follow the following steps:\n 1. 
Clone this repository and enter the working directory.\n 2. Make sure you have virtualenv installed: `python -m pip install --user virtualenv`\n 3. Initialize your environment in the *venv* directory: `python -m virtualenv venv`\n 4. Enter the environment: `source ./venv/bin/activate`\n 5. Install dependencies: `pip install -r requirements.txt`\n\nTo exit the environment, simply run `deactivate` from your shell.\n" }, { "alpha_fraction": 0.6530519723892212, "alphanum_fraction": 0.6587247848510742, "avg_line_length": 29.1849308013916, "blob_id": "800e4b71ba6f70c2223052cb3c13be4511518995", "content_id": "51e73ed5326abaf2c7618a6a9d70380172976e39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4407, "license_type": "no_license", "max_line_length": 81, "num_lines": 146, "path": "/board/static/board.js", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "//globals\nlet textfield = document.getElementById(\"posttextfield\");\n\n\nlet postobjarray = [];\nclass Post{\n constructor(posteridstring, postobj, replytoobjlist, replyfromobjlist){\n this.posteridstring = posteridstring\n this.postobj = postobj;\n this.replytoobjlist = replytoobjlist;\n this.replyfromobjlist = replyfromobjlist;\n }\n}\n\nlet containerarray = document.getElementsByClassName(\"container\");\n\n//create array of posts\nlet postarray = [];\nfor (let x = 0; x < containerarray.length; x++){\n postarray.push(containerarray[x].parentNode);\n}\n\n//create the array of post objects\nfor(let y = 0; y < postarray.length; y++){\n let postobj = postarray[y];\n let posteridstring = postobj.className;\n let replytoobjlist = [];\n let quotesarray = postobj.getElementsByClassName(\"quote\");\n for (let a = 0; a < quotesarray.length; a++){\n for (let z = (y - 1); z > -1; z--){\n if (quotesarray[a].innerHTML.substring(8) == postarray[z].className){\n replytoobjlist.push([postarray[z], quotesarray[a]]);\n postobjarray[z].replyfromobjlist.push(postarray[y]);\n break;\n }\n }\n }\n postobjarray.push(new Post(posteridstring, postobj, replytoobjlist, []));\n}\n\n\n//use created post objects to create reply references\nflythumbnails = document.getElementsByClassName(\"flythumbimage\");\nfor (let b = 0; b < postobjarray.length; b++){\n if (postobjarray[b].replytoobjlist != []){\n for (let c = 0; c < postobjarray[b].replytoobjlist.length; c++){\n\n //create the popup box\n let quoted = postobjarray[b].replytoobjlist[c][0].cloneNode(true);\n let quotebox = document.createElement(\"div\");\n postobjarray[b].replytoobjlist[c][1].onmouseenter = (event) => {\n quotebox.setAttribute(\"id\", \"quotedbox\");\n quotebox.appendChild(quoted);\n quotebox.style.left = (window.innerWidth/2).toString() + \"px\";\n quotebox.style.top = (window.innerHeight/2).toString()+ \"px\";\n document.body.appendChild(quotebox);\n };\n postobjarray[b].replytoobjlist[c][1].onmouseleave = () => {\n document.body.removeChild(quotebox);\n };\n }\n }\n //create reply headers\n if (postobjarray[b].replyfromobjlist != []){\n for (let d = 0; d < postobjarray[b].replyfromobjlist.length; d++){\n //for some reason wasn't working so had to write it out\n postobj = postobjarray[b].postobj;\n headers = postobj.getElementsByClassName(\"postheader\");\n header = headers[0];\n //\n let quote = document.createElement(\"span\");\n quote.setAttribute(\"class\", \"quote\");\n let quotebox = document.createElement(\"div\");\n quoted = postobjarray[b].replyfromobjlist[d].cloneNode(true);\n quote.innerHTML = \"&gt&gt\" + 
postobjarray[b].replyfromobjlist[d].className;\n quote.onmouseenter = (event) => {\n quotebox.setAttribute(\"id\", \"quotedbox\");\n quotebox.appendChild(quoted);\n quotebox.style.left = (window.innerWidth/2).toString() + \"px\";\n quotebox.style.top = (window.innerHeight/2).toString()+ \"px\";\n console.log(quotebox);\n document.body.appendChild(quotebox);\n };\n quote.onmouseleave = () => {\n document.body.removeChild(quotebox);\n };\n header.insertBefore(quote, header.nextsibling);\n }\n }\n flythumbnail = flythumbnails[b];\n flythumbnail.onclick = () => {\n post = postobjarray[b];\n postobj = post.postobj;\n textfield.value = textfield.value + \">>\" + postobj.className;\n };\n}\n\n//upload button shenanigans\nfunction upload(){\n document.getElementById(\"uploadFile\").click();\n}\n\n\n\n//resizing image\nfunction resizeImg(obj){\n if (obj.dataset.size == \"small\"){\n obj.style.height = \"100%\";\n obj.dataset.size = \"large\";\n }\n else{\n obj.style.height = \"200px\";\n obj.dataset.size = \"small\"\n }\n}\n\n\n\n//explosion when mousing over the blastoff button\n\nimg = document.createElement(\"IMG\");\nimg.id = \"explosion\";\nimg.src = \"/static/images/explosion.gif\";\n\nfunction explosionOn(){\n document.body.appendChild(img);\n}\n\nfunction explosionOff(){\n document.body.removeChild(img);\n}\n\n\n//color change when typing\n\ncolorarray = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"violet\"];\nlet i = 0\nfunction count(){\n let wordcount = document.getElementById(\"wordcount\");\n wordcount.innerHTML = (textfield.value).length + \" characters\";\n textfield.style.borderColor = colorarray[i];\n i++\n if(i == colorarray.length){\n i = 0;\n }\n}\n" }, { "alpha_fraction": 0.5978659391403198, "alphanum_fraction": 0.6162053942680359, "avg_line_length": 29.282827377319336, "blob_id": "9257e4d688893cea0060a7ce49e1444809a6f029", "content_id": "df96a242965a44817783a9f784606b5ca54aea68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2999, "license_type": "no_license", "max_line_length": 88, "num_lines": 99, "path": "/board/static/indexscript.js", "repo_name": "Chinry/flypaper", "src_encoding": "UTF-8", "text": "\nlet address = \"http://127.0.0.1:5000/json\";\nlet threadFlyArray = [];\n\nfunction query(){\n let input = document.getElementById(\"search\");\n let table = document.getElementById(\"table\");\n table.innerHTML = \"\";\n let topics = [];\n for (let i = 0; i < threadFlyArray.length; i++){\n if (input.value.toLowerCase() == \"\"){}\n else if (threadFlyArray[i].topic.toLowerCase().includes(input.value.toLowerCase())){\n obj = document.createElement(\"tr\");\n obj.setAttribute(\"class\", \"clickablefly\");\n obj.setAttribute(\"id\", threadFlyArray[i].id.toString());\n obj.innerHTML = \"<a href=\\\"/threads/\"+ threadFlyArray[i].id.toString() + \"\\\" >\" +\n threadFlyArray[i].id.toString() + \" - \" + threadFlyArray[i].topic + \"</a>\";\n table.appendChild(obj);\n }\n }\n}\n\nclass ThreadFly{\n constructor(thread_id, topic, xcoord, ycoord, xvector, yvector){\n this.id = thread_id;\n this.topic = topic;\n this.coord = {x:xcoord, y:ycoord};\n this.vector = {x:xvector, y:yvector};\n this.obj = document.createElement('div');\n this.obj.setAttribute(\"id\", thread_id.toString());\n this.obj.setAttribute(\"class\", \"fly\");\n this.obj.onclick = () => {\n window.location.href = \"/threads/\" + thread_id.toString();\n }\n this.obj.onmouseenter = () => {\n this.obj.style.color = \"rgb(143, 196, 153)\"\n 
this.obj.innerHTML = thread_id.toString() + \"<br><p2>\" + this.topic + \"</p2>\"\n }\n this.obj.onmouseleave = () => {\n this.obj.style.color = \"white\"\n this.obj.innerHTML = thread_id.toString();\n }\n this.obj.innerHTML = thread_id.toString();\n this.obj.style.left = this.coord.x + \"px\";\n this.obj.style.top = this.coord.y + \"px\";\n document.body.appendChild(this.obj);\n }\n update() {\n this.coord.x = this.coord.x + this.vector.x;\n this.coord.y = this.coord.y + this.vector.y;\n if(this.coord.x > window.innerWidth - 195 || this.coord.x < 20){\n this.vector.x = this.vector.x * -1;\n }\n if(this.coord.y > window.innerHeight - 50 || this.coord.y < 20){\n this.vector.y = this.vector.y * -1;\n }\n this.obj.style.left = this.coord.x + \"px\";\n this.obj.style.top = this.coord.y + \"px\";\n }\n}\n\nfunction massUpdate(){\n threadFlyArray.forEach((fly) => fly.update())\n}\n\nfunction createFlys(threadsArray, flyArray){\n threadsArray.forEach((thread) => {\n flyArray.push(\n new ThreadFly(thread[0], thread[2],\n (window.innerWidth)/2,\n (window.innerHeight)/2,\n (Math.random() * 20) - 10,\n (Math.random() * 20) - 10\n )\n );\n });\n return flyArray;\n}\n\nfunction oreqListener() {\n var data = JSON.parse(this.responseText);\n threadFlyArray = createFlys(data.threads, threadFlyArray);\n}\n\nfunction oreqError(err) {\n console.log('Fetch Error ', err);\n}\n\nvar oReq = new XMLHttpRequest();\noReq.onload = oreqListener;\noReq.onerror = oreqError;\noReq.open('get', address, true);\noReq.send();\n\n//initial UPDATE\nfor(let i = 0; i < 1000000; i++){\n massUpdate();\n}\n\nsetInterval(massUpdate, 40);\n" } ]
9
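Editor's note on the flypaper record above: board.py keeps one SQLite connection per request on Flask's `g` object but never closes it explicitly. Below is a minimal sketch of the same pattern with a cleanup hook added; the `close_db` teardown handler is an assumed addition, not part of the snapshot.

```python
import sqlite3
from flask import Flask, g

DATABASE = "database.db"
app = Flask(__name__)

def get_db():
    # One connection per request context, as in board.py above.
    db = getattr(g, "_database", None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db

@app.teardown_appcontext
def close_db(exc):
    # Assumed addition: close the per-request connection when the context ends.
    db = getattr(g, "_database", None)
    if db is not None:
        db.close()
```

Note that board.py splices the per-thread table names (`users_<id>`, `posts_<id>`) into SQL with `str.format` because placeholders cannot bind identifiers; that stays safe only because the thread ids are server-generated integers, never raw user input.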
Aadamd123/flask_test
https://github.com/Aadamd123/flask_test
c80a8390553f1dd3e5ca5125e9dc8c4550a09ad0
b3266b79fa1e3116dc886d7200e0918c803cfd39
ddfa0b66b8a1a24140dcf91a31e8fcdd0336c378
refs/heads/main
2023-07-11T01:48:02.839179
2021-09-01T10:08:10
2021-09-01T10:08:10
402,015,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.639465868473053, "alphanum_fraction": 0.6750741600990295, "avg_line_length": 19.454545974731445, "blob_id": "3c622eecea02607a0d72d92f7093185857ef546c", "content_id": "796c443398ab7fbf7a0bbaec9ab994b9eacea0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 55, "num_lines": 33, "path": "/hello.py", "repo_name": "Aadamd123/flask_test", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\n\n\n# Create a Flask Instance\napp = Flask(__name__)\n\n# Create a route decorator\[email protected]('/')\n\ndef index():\n first_name = '<strong>Aadam</strong>'\n stuff = [\"red\", \"yellow\", \"green\", 31]\n return render_template(\"index.html\", \n first_name=first_name,\n stuff=stuff)\n\n# localhost:5000/user/name\[email protected]('/user/<name>')\n\ndef user(name):\n return render_template(\"user.html\", user_name=name)\n\n# Custom error\n\n# Invalid url\[email protected](404)\ndef page_not_found(e):\n return render_template(\"404.html\"), 404\n\n# Server error\[email protected](500)\ndef page_not_found(e):\n return render_template(\"500.html\"), 500" } ]
1
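The flask_test record above registers three routes and two error handlers, but no template files are included in the snapshot, so the `render_template` calls only succeed once `templates/index.html`, `user.html`, `404.html`, and `500.html` exist. A quick way to sanity-check the routing itself without rendering anything, assuming `hello.py` is importable from the working directory:

```python
from hello import app

# List every registered URL rule; no template files are touched.
for rule in app.url_map.iter_rules():
    print(rule.endpoint, "->", rule.rule)
```

Importing the module is enough for this check because Flask resolves templates lazily, at request time.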
Samadmemon991/net-scanner
https://github.com/Samadmemon991/net-scanner
8466b3c2f61c6ea9a44a02f4aed5d79b6a696205
3e162fd4ea6b3fc921b695b2c314c4db26b733df
2d5392f3d030c4fb09cce66e8350a8e6440ba731
refs/heads/master
2023-07-03T11:15:31.891417
2021-08-08T12:52:58
2021-08-08T12:52:58
393,959,688
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4470858871936798, "alphanum_fraction": 0.4900306761264801, "avg_line_length": 30.0238094329834, "blob_id": "973539798f6bd64e2db5f7396229bedf98d380d3", "content_id": "572f68ca3fcd35d905a99c6dfb7c0608d368376f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 84, "num_lines": 42, "path": "/net_scanner.py", "repo_name": "Samadmemon991/net-scanner", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n\nimport scapy.all as scapy\nimport subprocess\nimport re\n\ndef scan(ip):\n arp_request = scapy.ARP(pdst = ip)\n broadcast = scapy.Ether(dst = \"ff:ff:ff:ff:ff:ff\")\n arp_broadcast_request = broadcast/arp_request\n #scapy.ls(scapy.Ether())\n #print(arp_broadcast_request.show())\n ans = scapy.srp(arp_broadcast_request, timeout = 1, verbose= False)[0]\n print(\"IP\\t\\t\\tMAC Addr\")\n print(ip)\n print (\"\\n=============================================================\\n\")\n for element in ans:\n #print (\"\\n=============================================================\\n\")\n #print(element[1].show())\n print(element[1].psrc+\"\\t\\t\"+element[1].hwsrc)\n #print (\"\\n=============================================================\\n\")\n\ndef get_ip():\n ifconfig_result = subprocess.check_output([\"ifconfig\"]).decode()\n current_mac = re.search(r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\", ifconfig_result)\n #(r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\"\n if (not current_mac):\n print(\"[-] cannot read ip addr\")\n else:\n #print(current_mac.group(0))\n return(current_mac.group(0))\n\n\n\n\nscan(str(get_ip())+\"1/24\")\n\n#scan(\"192.168.0.1/24\")\n#scan(\"10.11.13.00/24\")\n\n#for x in range(100):\n # scan(\"192.168.43.\"+str(x))\n\n" } ]
1
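The net-scanner record above builds an Ether/ARP broadcast and collects answers with `srp`. Here is a condensed Python 3 sketch of the same sweep, with the subnet passed in explicitly instead of scraped from `ifconfig`; run it as root on a network you own.

```python
from scapy.all import ARP, Ether, srp

def arp_scan(cidr):
    # Broadcast a who-has request for every address in the range.
    packet = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=cidr)
    answered = srp(packet, timeout=1, verbose=False)[0]
    # Each answer carries the responder's IP and MAC.
    return [(recv.psrc, recv.hwsrc) for _, recv in answered]

if __name__ == "__main__":
    for ip, mac in arp_scan("192.168.0.0/24"):
        print(ip, "\t", mac)
```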
ktbeck/SlugRoulette
https://github.com/ktbeck/SlugRoulette
ae9af723083fb0ae27eec951c52e33c152348696
0a3b28263170b1aec3bff88317febc6342aacd77
0d820b40e3eefb348781955067e573ce8becba81
refs/heads/master
2020-03-15T23:10:20.988903
2018-06-13T09:07:34
2018-06-13T09:07:34
132,388,386
0
0
null
2018-05-07T00:52:35
2018-06-10T02:53:45
2018-06-10T03:00:44
Python
[ { "alpha_fraction": 0.5420272946357727, "alphanum_fraction": 0.5457024574279785, "avg_line_length": 21.236913681030273, "blob_id": "810363fe949207fbfae935507d26a086b55c0a67", "content_id": "2e9ab11e583ff37ec359eba165f2b323029c1606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8435, "license_type": "no_license", "max_line_length": 103, "num_lines": 363, "path": "/static/js/default_index.js", "repo_name": "ktbeck/SlugRoulette", "src_encoding": "UTF-8", "text": "// This is the js for the default/index.html view.\r\n\r\nvar app = function() {\r\n\r\n var self = {};\r\n Vue.config.silent = false; // show all warnings\r\n\r\n // Extends an array\r\n self.extend = function(a, b) {\r\n for (var i = 0; i < b.length; i++) {\r\n a.push(b[i]);\r\n }\r\n };\r\n\r\n //var enumerate = function(v) { var k=0; return v.map(function(e) {e._idx = k++;});}k0;\r\n\r\n\t\r\n////////////////////////////////////// functions for chat box///////////////////////////////////////\r\n\r\n\tself.makeNewChat = function(data) {\r\n\t\tconsole.log(\"new chat added\");\r\n\t\tconsole.log(data);\r\n\r\n\t\t$.post(new_box,\r\n\t\t\t{\r\n\t\t\t\tTitle: self.vue.newTitle,\r\n\t\t\t\tis_group_chat: data\r\n\r\n\t\t\t}, function(data){\r\n\t\t\t\t\r\n\t\t\t\tself.vue.newTitle = null;\r\n\t\t\t});\r\n\t};\r\n \r\n\tself.getTitle = function(){\r\n\t\tconsole.log(self.vue.logged_in);\r\n\t\t//console.log(\"getting chats from database\");\r\n\t\t//console.log(self.vue.currentChat);\r\n\r\n\t\t$.getJSON(get_Title, {}, function(data){\r\n\r\n\t\t\t\tself.vue.chats = data.chats;\r\n\r\n\t\t\t\tsetTimeout(function(){\r\n\t\t\t\t\tif(self.vue.isRandom == false)\r\n\t\t\t\t\t\tself.getTitle();\r\n\t\t\t\t}, self.vue.normalDelay);\r\n\t\t\t});\r\n\t};\r\n\r\n\tself.getChat = function(){\r\n\t\t//console.log(\"getting the chat box of chosen chat room\");\r\n\t\t//console.log(self.vue.currentChat);\r\n\t\tconsole.log('asdas');\r\n\r\n\t\ttemp = -1;\r\n\t\tif(self.vue.currenChat != null)\r\n\t\t\ttemp = self.vue.currentChat.chat.length;\r\n\r\n\t\tif(self.vue.isRandom){\r\n\t\t\t$.getJSON(get_box,{\r\n\t\t\t\tID: self.vue.rserverId,\r\n\t\t\t\tcurrent: temp\r\n\t\t\t}, function(data){\r\n\t\t\t\tself.vue.rcurrentChat = data;\r\n\t\t\t});\r\n\t\t}\r\n\t\telse{\r\n\t\t\t$.getJSON(get_box, {\r\n\t\t\r\n\t\t\t\tID: self.vue.serverId,\r\n\t\t\t\tcurrent: temp\r\n\r\n\t\t\t}, function(data){\r\n\t\t\t\r\n\t\t\t\tconsole.log(data);\r\n\t\t\t\tif(data != 0){\r\n\t\t\t\t\t\tself.vue.currentChat = data;\r\n\r\n\t\t\t\t}\t\r\n\r\n\t\t\t\tsetTimeout(function(){\r\n\t\t\t\t\tif(self.vue.isServer == true && self.vue.isRandom == false)\r\n\t\t\t\t\t\tself.getChat();\r\n\t\t\t\t}, self.vue.chatDelay);\r\n\r\n\t\t\t});\r\n\t\t}\r\n\t};\r\n\r\n\tself.editChat = function(chat_id){\r\n\t\tconsole.log(\"adding new chat to chat box\");\r\n\t\t//alert(chat_id);\r\n\t\t$.post(edit_box,{\r\n\r\n\t\t\t\tchat_id: chat_id,\r\n\t\t\t\tNEW: self.vue.newChatting\r\n\r\n\t\t\t}, function(){\r\n\t\t\t\t\r\n\t\t\t\tself.vue.newChatting = null;\r\n\t\t\t\r\n\t\t\t});\r\n\r\n\t};\r\n\r\n\tself.delChat = function(chat_id){\r\n\t\tconsole.log(\"deleting a chat\");\r\n\t\t$.post(del_box, {\r\n\r\n\t\t\t\tchat_id: chat_id\t\r\n\r\n\t\t\t}, function (){});\r\n\t};\r\n\r\n\tself.refresh = function(){\r\n\r\n\t};\r\n/////////////////////////////////////// functions for queue //////////////////////////////////\r\n\r\n\r\n\tself.insertQueue = function (){\r\n\t\t$.post(insert_queue, {}, 
function(data){\r\n\r\n\t\t\tself.listOfQueue();\r\n\r\n\t\t});\r\n\r\n\t}\r\n\r\n\t//Gets information from database about queue\r\n\tself.listOfQueue = function (){\r\n\t\t$.getJSON(get_list_of_queues, {\r\n\t\t\t\t\t\r\n\t\t\t\tisChatting: self.vue.isChatting\r\n\t\r\n\t\t\t}, function(data){\r\n\t\t\t/*The array passed in from the api function (data) will have all the\r\n\t\t\t queue Fields stored in the 0th index. After that, it will store\r\n\t\t\t every user in the queue ONLY IF they are waiting to find someone else\r\n\t\t\t to chat with*/\r\n\t\t\t\t\r\n\t\t\tconsole.log(data);\r\n\t\t\tif(data != \"n\"){\t\r\n\t\t\t\t//gives the amount of people that are current in queue\r\n\t\t\t\tself.vue.queueLength = data.length;\r\n\r\n\t\t\t\t//is current user in chat\r\n\t\t\t\tif(data[0] != null){\r\n\t\t\t\t\tself.vue.isChatting = data[0].is_chatting;\r\n\t\t\t\t}\r\n\r\n\r\n\r\n\t\t\t\t/*if user has not been matched, we will pick a person through a list of \r\n\t\t\t\t other people that also havent been matched*/\r\n\t\t\t\tif(self.vue.isChatting == false){\r\n\r\n\t\t\t\t\t//only searches IF there is anyone to be found\r\n\t\t\t\t\tif(data.length > 1){\r\n\r\n\t\t\t\t\t\t//pairs the users \r\n\t\t\t\t\t\tvar randomUser = Math.floor(Math.random()*(data.length - 1)) + 1;\r\n\t\t\t\t\t\t$.post(match_users,{\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tperson_id: data[randomUser].person_id\r\n\t\r\n\t\t\t\t\t\t\t}, function(data) {\r\n\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t//console.log(data);\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t});\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse{\r\n\t\t\t\t\t\tself.vue.searchingForChat += \".\";\r\n\t\t\t\t\t\tif(self.vue.searchingForChat.length > 5)\r\n\t\t\t\t\t\t\tself.vue.searchingForChat = \".\";\r\n\t\t\t\t\t}\r\n\t\r\n\t\t\t\t}\r\n\r\n\r\n\r\n\t\t\t\t//user is curently chatting\r\n\t\t\t\telse if(data[0] != null){\r\n\r\n\t\t\t\t\t\r\n\t\t\t\t\t//gets time remaining for chat\r\n\t\t\t\t\tself.vue.time = 60 - Math.floor(data[0].time_remain);\r\n\r\n\t\t\t\t\t//gets the length of queue\r\n\t\t\t\t\tlength = data[0].chats.length - 1;\r\n\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t//if user has started new chat, then we add new chat to listChat array\r\n\t\t\t\t\tif(self.vue.rserverId != data[0].chats[length]){\r\n\r\n\t\t\t\t\t\t//getting id of your chat box\r\n\t\t\t\t\t\tself.vue.rserverId = data[0].chats[length];\r\n\r\n\t\t\t\t\t\t//making space in array for random chat box\r\n\t\t\t\t\t\tself.vue.listChats.push(null);\r\n\t\r\n\t\t\t\t\t\t//getting id of other user's chat box\r\n\t\t\t\t\t\tself.vue.randomBox = data[0].chatting_with;\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//if chat is not new, then update latest that\r\n\t\t\t\t\telse if(self.vue.isRandom && data[0].chats[length] == self.vue.rserverId){\r\n\t\t\t\t\t\tself.getChat();\r\n\t\t\t\t\t\tself.vue.listChats[length] = self.vue.rcurrentChat;\r\n\t\t\t\t\t}\r\n\t\r\n\t\t\t\t\t//keeps track of whether a user has left and time limit\r\n\t\t\t\t\t$.post(countdown,{}, function(){});\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\t//periodically retrieve updates about textBox and queue\r\n\t\t\tif(self.vue.isRandom){\r\n\t\t\t\tsetTimeout(function(){\r\n\t\t\t\t\tself.listOfQueue();\r\n\t\t\t\t\t//console.log(data);\r\n\t\t\t\t}, self.vue.chatDelay);\r\n\t\t\t}\r\n\t\t\telse{\r\n\t\t\t\tself.vue.rcurrentChat = null;\r\n\t\t\t}\r\n\r\n\t\t});\r\n\r\n\r\n\t}\r\n\r\n\tself.removeQueue = function (){\r\n\t\t$.post(remove_queues, {}, function(data){});\r\n\t}\r\n\t\r\n\t//username functions\t\r\n\tself.set_username = 
function(){\r\n\t\t$.get(check_repeats, {\r\n\t\t\tusername: self.vue.username\r\n\t\t}, function(data) {\r\n\t\t\tif(self.vue.username == \"\"){\r\n\t\t\t\talert(\"Please enter a valid username\");\r\n\t\t\t}\r\n\t\t\telse{\r\n console.log(data);\r\n if (data == 0) {\r\n $.post(setUsername,\r\n {\r\n username: self.vue.username\r\n }, function (data) {\r\n self.get_username();\r\n });\r\n }\r\n else {\r\n alert(\"Username already taken\");\r\n }\r\n }\r\n })\r\n\t};\r\n\r\n\tself.get_username = function(){\r\n\t\t$.get(getUsername, {}, function(data){\r\n\t\t\t\tif(data == null){\r\n\t\t\t\t\tself.vue.needs_username = true;\r\n\t\t\t\t}\r\n\t\t\t\telse{\r\n\t\t\t\t\t// console.log(data);\r\n\t\t\t\t\tself.vue.needs_username = false;\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t};\r\n\r\n\t// Complete as needed.\r\n\tself.vue = new Vue({\r\n \tel: \"#vue-div\",\r\n \tdelimiters: ['${', '}'],\r\n \tunsafeDelimiters: ['!{', '}'],\r\n \t\tdata: {\r\n\t\t\r\n\t\t\t//These arrays are used to store retrived data from database\r\n\t\t\tchats: [],\r\n\t\t\tlistChats: [],\r\n\r\n\t\t\trcurrentChat: null,\r\n\t\t\tcurrentChat: null,\r\n\r\n\t\t\tnewTitle: null, //variable to temp store a new title\r\n\t\t\tnewChatting: null, //variable to temp store new text\r\n\r\n\t\t\t//variables to check if user has joined chat server\r\n\t\t\tisServer: false,\r\n\t\t\tserverId: null,\r\n\t\t\trserverId: null,\r\n\r\n\t\t\tsearching: \"\", //variable user to store search string\r\n\r\n\t\t\t//variable to check whether user is group chatting or seraching for random people\r\n\t\t\tisRandom: false,\r\n\r\n\t\t\tqueueLength: 0,\r\n\t\t\tisChatting: false,\r\n\r\n\t\t\t/*stores the id of your chat box and the chat box of the person you are chatting \r\n\t\t\t to in the random chat*/\r\n\t\t\trandomBox: null,\r\n\t\t\ttime: 0,\r\n\r\n\t\t\t//the intertval btwn pinging servers in milliseconds\r\n\t\t\tchatDelay: 100,\r\n\t\t\tnormalDelay: 500,\r\n\r\n\t\t\tsearchingForChat: \"\",\r\n\t\t\t\r\n\t\t\tusername: null,\r\n\t\t\tneeds_username: false\r\n\r\n \t},\r\n \tmethods: {\r\n\r\n \trefresh: self.refresh,\r\n\t\t\r\n\t\t\t//functions for chat\r\n\t\t\tmakeNewChat: self.makeNewChat,\r\n\t\t\t\r\n\t\t\tgetChat: self.getChat,\r\n\t\t\tgetTitle: self.getTitle,\r\n\r\n\t\t\teditChat: self.editChat,\r\n\t\t\tdelChat: self.delChat,\r\n\r\n\t\t\r\n\t\t\t//username functions\r\n\t\t\tset_username: self.set_username,\r\n\t\t\tget_username: self.get_username,\r\n\r\n\t\t\t//functions for queue\r\n\t\t\tinsertQueue: self.insertQueue,\r\n\t\t\tlistOfQueue: self.listOfQueue,\r\n\t\t\tremoveQueue: self.removeQueue,\r\n\r\n \t}\r\n\r\n\r\n \t});\r\n\r\n\t//-------- IF YOU WANT SOMETHING TO HAPPEN WHEN WEBPAGE IS LOADED, CALL FUNCTION BELOW ---------------\r\n\tif(self.vue.has_username !== null){\r\n\t\tself.get_username();\r\n\t}\r\n\tself.getTitle();\r\n \t$(\"#vue-div\").show();\r\n \treturn self;\r\n};\r\n\r\nvar APP = null;\r\n\r\n// This will make everything accessible from the js console;\r\n// for instance, self.x above would be accessible as APP.x\r\njQuery(function(){APP = app();});\r\n" }, { "alpha_fraction": 0.5147174596786499, "alphanum_fraction": 0.5206989049911499, "avg_line_length": 29.14215660095215, "blob_id": "415504bfa144b36a3dc8af90f1e775ee55b367b8", "content_id": "2ac34c448fb5b8d05e36d8e2f49489b1b66abb54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6353, "license_type": "no_license", "max_line_length": 109, "num_lines": 204, "path": "/controllers/api.py", 
"repo_name": "ktbeck/SlugRoulette", "src_encoding": "UTF-8", "text": "# Here go your api methods.\r\n\r\nimport time\r\n\r\ndef add_textBox():\r\n p = db.textBox.insert(\r\n Title = request.vars.Title,\r\n is_group_chat = request.vars.is_group_chat\r\n )\r\n return \"ok\"\r\n\r\n\r\ndef get_textBox():\r\n t = db(db.textBox.id == request.vars.ID).select().first()\r\n\r\n if t is not None and request.vars.current is -1:\r\n temp = dict(\r\n Title = t.Title,\r\n chat = t.chat,\r\n chatter = t.chatter,\r\n id = t.id,\r\n )\r\n \r\n return response.json(temp)\r\n\r\n if request.vars.current is not len(t.chat):\r\n temp = dict(\r\n Title = t.Title,\r\n chat = t.chat,\r\n chatter = t.chatter,\r\n id = t.id,\r\n text = len(t.chat),\r\n text2 = request.vars.current\r\n )\r\n return response.json(temp)\r\n return 0\r\n\r\ndef get_textTitle():\r\n chats = []\r\n for r in db().select(db.textBox.ALL):\r\n if r.is_group_chat == True:\r\n t = dict(\r\n Title = r.Title,\r\n id = r.id\r\n )\r\n chats.append(t)\r\n return response.json(dict(chats=chats))\r\n\r\n\r\ndef edit_textBox():\r\n chat = db(db.textBox.id == request.vars.chat_id).select().first()\r\n \r\n #updating the text in the box\r\n temp = chat.chat\r\n temp.append(request.vars.NEW)\r\n\r\n #updating the names that sent the text\r\n temp2 = chat.chatter\r\n if auth.user is None:\r\n temp2.append(\"Anonymous\") \r\n\r\n else:\r\n user = db(db.otherUserInfo.user_id == auth.user.id).select().first()\r\n temp2.append(user.username)\r\n\r\n #updating the time that the text was sent\r\n temp3 = chat.chat_time\r\n temp3.append(request.now)\r\n\r\n chat.update_record(chat = temp, chatter = temp2, chat_time = temp3)\r\n return \"ok\"\r\n\r\ndef del_textBox():\r\n db(db.textBox.id == request.vars.chat_id).delete()\r\n return \"ok\"\r\n\r\n###################################### FOR QUEUE ############################################\r\n\r\ndef insert_queue():\r\n #for r in db().select(db.queue.ALL):\r\n # if auth.user.id == r.person_id:\r\n # return \"not ok\"\r\n\r\n p = db.queue.insert(\r\n person_id = auth.user.id\r\n )\r\n return \"ok\"\r\n\r\n\r\ndef get_list_of_queuess():\r\n queue = []\r\n a = db(db.queue.person_id == auth.user.id).select().first()\r\n queue.append(a)\r\n a.update_record(respond = time.time())\r\n\r\n for r in db().select(db.queue.ALL):\r\n if (time.time() - r.respond) > 5:\r\n for rr in r.chats:\r\n db(db.textBox.id == rr.id).delete()\r\n db(db.queue.id == r.id).delete()\r\n \r\n\r\n #gets the data of every other user that isnt chatting with another random person\r\n if request.vars.isChatting == 'false':\r\n for r in db().select(db.queue.ALL):\r\n if r.person_id != auth.user.id:\r\n if r.is_chatting is False:\r\n temp = dict(\r\n\r\n person_id = r.person_id,\r\n is_chatting = r.is_chatting,\r\n\r\n ) \r\n queue.append(temp)\r\n\r\n return response.json(queue)\r\n\r\n\r\ndef match_users():\r\n\r\n user1 = db(db.queue.person_id == auth.user.id).select().first()\r\n user2 = db(db.queue.person_id == request.vars.person_id).select().first()\r\n user2_info = db(db.auth_user.id == request.vars.person_id).select().first()\r\n username1 = db(db.otherUserInfo.user_id == auth.user.id).select().first()\r\n username2 = db(db.otherUserInfo.user_id == request.vars.person_id).select().first()\r\n\r\n if user1 is not None and user2 is not None and user1.is_chatting == False and user2.is_chatting == False:\r\n \r\n #adding new textBox for first user\r\n temp = user1.chats\r\n p = db.textBox.insert(\r\n chat = ['You are now chatting with ' + 
username2.username]\r\n )\r\n temp.append(p)\r\n\r\n #adding new textBox for second user\r\n temp2 = user2.chats\r\n p2 = db.textBox.insert(\r\n chat = ['You are now chatting with ' + username1.username]\r\n )\r\n temp2.append(p2)\r\n\r\n tmp = time.time()\r\n user1.update_record(is_chatting = True, chats = temp, chatting_with = p2, time_limit = tmp)\r\n user2.update_record(is_chatting = True, chats = temp2, chatting_with = p , time_limit = tmp)\r\n\r\n return response.json(p)\r\n \r\n return \"n\"\r\n\r\ndef countdown():\r\n you = db(db.queue.person_id == auth.user.id).select().first()\r\n\r\n if you is not None:\r\n\r\n #check if other user is still in random chat. If not, leave this chat and find new one\r\n check =db(db.textBox.id == you.chatting_with).select().first()\r\n if check is None:\r\n you.update_record(is_chatting = False, chatting_with = 0)\r\n\r\n #if other user is still in chat, then keep track of time\r\n else:\r\n you.update_record(time_remain = time.time() - you.time_limit)\r\n\r\n #if time is up, split pair and make them avaliable\r\n if you.time_remain > 60:\r\n you.update_record(is_chatting = False, chatting_with = 0)\r\n\r\n return \"ok\"\r\n\r\n\r\ndef remove_queues():\r\n\r\n test = []\r\n #deletes all textBox in this array so that we dont have memory leaks\r\n user = db(db.queue.person_id == auth.user.id).select().first()\r\n if user is not None:\r\n for r in user.chats:\r\n db(db.textBox.id == r.id).delete()\r\n\r\n db(db.queue.person_id == auth.user.id).delete()\r\n return test\r\n\t\r\n\t\r\n#username functions\r\ndef check_username():\r\n check = db(db.otherUserInfo.username == request.vars.username).select().first()\r\n print(check)\r\n if check is None:\r\n # response.flash = T(\"Username already taken\")\r\n return 0\r\n return 1\r\n\r\ndef store_username():\r\n p = db.otherUserInfo.insert(username = request.vars.username)\r\n return \"ok\"\r\n\r\ndef grab_username():\r\n if auth.user != None:\r\n t = db(db.otherUserInfo.user_id == auth.user.id).select().first()\r\n return response.json(t)\r\n return \"ok\"\r\n\r\n#####################################################################################################\r\n" }, { "alpha_fraction": 0.5507246255874634, "alphanum_fraction": 0.5519323945045471, "avg_line_length": 36.511627197265625, "blob_id": "ea91b3747557725003f1f5cb367a3582b1da6907", "content_id": "ad4f5348d7ae3bae24ec7cdaa8618fd14a08fd76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 76, "num_lines": 43, "path": "/models/tables.py", "repo_name": "ktbeck/SlugRoulette", "src_encoding": "UTF-8", "text": "# Define your tables below (or better in another model file) for example\r\n#\r\n# >>> db.define_table('mytable', Field('myfield', 'string'))\r\n#\r\n# Fields can be 'string','text','password','integer','double','boolean'\r\n# 'date','time','datetime','blob','upload', 'reference TABLENAME'\r\n# There is an implicit 'id integer autoincrement' field\r\n# Consult manual for more options, validators, etc.\r\n\r\nimport datetime\r\nimport time\r\n\r\ndef get_user_email():\r\n return auth.user.email if auth.user is not None else None\r\n\r\ndb.define_table('textBox',\r\n Field('Title', default='template'),\r\n Field('is_group_chat', 'boolean'),\r\n Field('chat', 'list:string', default=[]),\r\n Field('chatter', 'list:string', default=[]),\r\n Field('chat_time', 'list:string', default=[]),\r\n Field('list_of_chatters', 'list:string', 
default=[]),\r\n Field('password', 'string'),\r\n )\r\n\r\ndb.define_table('queue',\r\n Field('person_id', 'integer'),\r\n Field('chats', 'list:reference textBox', default=[]),\r\n Field('is_chatting', 'boolean', default=False),\r\n\r\n Field('respond', 'float', default=time.time()),\r\n Field('time_limit', 'float', default=0),\r\n Field('time_remain', 'float', default=0),\r\n\r\n Field('chatting_with'),\r\n )\r\ndb.define_table('otherUserInfo',\r\n Field('user_id', 'integer', default=auth.user_id),\r\n Field('username', 'string'),\r\n Field('friendsList', 'list:string', default=[])\r\n )\r\n\r\ndb.textBox.id.readable = True\r\n" }, { "alpha_fraction": 0.5524023771286011, "alphanum_fraction": 0.564109742641449, "avg_line_length": 50.57635498046875, "blob_id": "cb4b6129ab36a795dd51d6ebedfb6bd018649b41", "content_id": "dbfcdf020e45f3224be457faf77873fcae7366b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 10677, "license_type": "no_license", "max_line_length": 206, "num_lines": 203, "path": "/views/default/index.html", "repo_name": "ktbeck/SlugRoulette", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n{{extend 'layout.html'}}\r\n\r\n{{block head}}\r\n<script src=\"{{=URL('static', 'js/vue.js')}}\"></script>\r\n<script>\r\n\t//api functions for chat box\r\n\tvar new_box = \"{{=URL('api', 'add_textBox', user_signature=True)}}\";\r\n\tvar edit_box = \"{{=URL('api', 'edit_textBox', user_signature=True)}}\";\r\n\tvar get_box = \"{{=URL('api', 'get_textBox', user_signature=True)}}\";\r\n\tvar get_Title = \"{{=URL('api', 'get_textTitle', user_signature=True)}}\";\r\n\tvar del_box = \"{{=URL('api', 'del_textBox', user_signature=True)}}\";\r\n\r\n \t//api functions for queue\r\n\tvar insert_queue = \"{{=URL('api', 'insert_queue', user_signature=True)}}\";\r\n\tvar get_list_of_queues= \"{{=URL('api', 'get_list_of_queuess', user_signature=True)}}\";\r\n\tvar remove_queues = \"{{=URL('api', 'remove_queues', user_signature=True)}}\";\r\n\r\n\tvar match_users = \"{{=URL('api', 'match_users', user_signature=True)}}\";\r\n\tvar countdown = \"{{=URL('api', 'countdown', user_signature=True)}}\";\r\n\tvar setUsername = \"{{=URL('api', 'store_username')}}\";\r\n\tvar getUsername = \"{{=URL('api', 'grab_username')}}\";\r\n\tvar check_repeats = \"{{=URL('api', 'check_username')}}\";\r\n\r\n</script>\r\n{{end}}\r\n\r\n<head>\r\n <meta charset=\"utf-8\">\r\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\r\n <meta name=\"description\" content=\"\">\r\n <meta name=\"author\" content=\"\">\r\n\r\n </head>\r\n\r\n<!--... 
makes sure that if user closes tab, they are removed from queue.\r\n\tDOES NOT WORK ON FIREFOX IF USER CLOSES WINDOW -->\r\n<body>\r\n\t<!-- Heading bar to log in/chat with people-->\r\n\t<header>\r\n <nav class=\"navbar navbar-expand-md navbar-dark fixed-top bg-dark\">\r\n <button class=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\".navbar-collapse\" aria-controls=\"navbarCollapse\" aria-expanded=\"false\" aria-label=\"Toggle navigation\">\r\n <span class=\"navbar-toggler-icon\"></span>\r\n </button>\r\n <div class=\"collapse navbar-collapse\" id=\"navbarCollapse\">\r\n <ul class=\"navbar-nav mr-auto\">\r\n <li class=\"nav-item active\">\r\n <a class=\"nav-link\" href=\"http://127.0.0.1:8000/SlugRoulette/default/index\">Home<span class=\"sr-only\">(current)</span></a>\r\n </li>\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"http://127.0.0.1:8000/SlugRoulette/default/chat\">Chat Anonymously</a>\r\n </li>\r\n\t {{if auth.user is None:}}\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"http://127.0.0.1:8000/SlugRoulette/default/user/register?_next=/SlugRoulette/default/index\">Sign up</a>\r\n </li>\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"http://127.0.0.1:8000/SlugRoulette/default/user/login?_next=/SlugRoulette/default/index\" >Sign in</a>\r\n </li>\r\n\t {{pass}}\r\n\t {{if auth.user:}}\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"http://127.0.0.1:8000/SlugRoulette/default/user/logout?_next=/SlugRoulette/default/index\">Log Out</a>\r\n </li>\r\n\t {{pass}}\r\n </ul>\r\n </div>\r\n </nav>\r\n </header>\r\n\r\n <!-- Carousel to showcase what we do -->\r\n\r\n <div id=\"myCarousel\" class=\"carousel slide\" data-ride=\"carousel\">\r\n <ol class=\"carousel-indicators\">\r\n <li data-target=\"#myCarousel\" data-slide-to=\"0\" class=\"active\"></li>\r\n <li data-target=\"#myCarousel\" data-slide-to=\"1\"></li>\r\n <li data-target=\"#myCarousel\" data-slide-to=\"2\"></li>\r\n </ol>\r\n <div class=\"carousel-inner\">\r\n <div class=\"carousel-item active\">\r\n <img class=\"first-slide\" src=\"../static/images/carousel1.jpg\" alt=\"First slide\">\r\n <div class=\"container\">\r\n <div class=\"carousel-caption text-left\" style=\" text-shadow:5px 5px #000000;\">\r\n <h1>Sign Up and Meet/Chat with Local Slugs!</h1>\r\n <p>As simple as signing up and clicking a button!</p>\r\n <p><a class=\"btn btn-lg btn-primary\" href=\"#\" role=\"button\">Sign up today</a></p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"carousel-item\">\r\n <img class=\"second-slide\" src=\"../static/images/background.jpg\" alt=\"Second slide\">\r\n <div class=\"container\">\r\n <div class=\"carousel-caption\">\r\n <h1>Alone? 
Randomly and Anonymously find people!</h1>\r\n <p>We will match you up with other people who want to meet someone new!</p>\r\n <p><a class=\"btn btn-lg btn-primary\" href=\"#\" role=\"button\" v-on:click=href=\"http://127.0.0.1:8000/SlugRoulette/default/user/register?_next=/SlugRoulette/default/index\">Find Someone!</a></p>\r\n </div>\r\n </div>\r\n </div>\r\n <!--<div class=\"carousel-item\">-->\r\n <!--<img class=\"third-slide\" src=\"../static/images/benbrode.jpg\" alt=\"Third slide\">-->\r\n <!--<div class=\"container\">-->\r\n <!--<div class=\"carousel-caption text-right\">-->\r\n <!--<h1>HAHAHAHAHAHAHAHAHAHAHAHAHAHA</h1>-->\r\n <!--<p>Get out of here Ben Brode.</p>-->\r\n <!--<p><a class=\"btn btn-lg btn-primary\" href=\"#\" role=\"button\">HAHAHAHAHAHAHAHAHAHAHAHAHAHA</a></p>-->\r\n <!--</div>-->\r\n <!--</div>-->\r\n </div>\r\n </div>\r\n <!--<a class=\"carousel-control-prev\" href=\"#myCarousel\" role=\"button\" data-slide=\"prev\">-->\r\n <!--<span class=\"carousel-control-prev-icon\" aria-hidden=\"true\"></span>-->\r\n <!--<span class=\"sr-only\">Previous</span>-->\r\n <!--</a>-->\r\n <!--<a class=\"carousel-control-next\" href=\"#myCarousel\" role=\"button\" data-slide=\"next\">-->\r\n <!--<span class=\"carousel-control-next-icon\" aria-hidden=\"true\"></span>-->\r\n <!--<span class=\"sr-only\">Next</span>-->\r\n <!--</a>-->\r\n </div>\r\n <!-- Buttons to go to the chat!!!!! -->\r\n <div class=\"chat-buttons\">\r\n </div>\r\n <!-- Market to visitors why they should use our services -->\r\n <div class=\"container marketing\">\r\n\r\n <!-- Three columns of text below the carousel -->\r\n <div class=\"row\">\r\n <div class=\"col-lg-4\">\r\n <img class=\"rounded-circle\" src=\"../static/images/feeling_lonely_large.jpg\" alt=\"Generic placeholder image\" width=\"140\" height=\"140\">\r\n <h2>Alone?</h2>\r\n <p>Don't feel down and blame yourself. Instead you should be reaching out and explore new opportunities! It's time for you to take the initiative and take your livelyhood back. \r\n Learn more about how our services can help you change how you feel.</p>\r\n <p><a class=\"btn btn-secondary\" href=\"#\" role=\"button\">View details &raquo;</a></p>\r\n </div><!-- /.col-lg-4 -->\r\n <div class=\"col-lg-4\">\r\n <img class=\"rounded-circle\" src=\"../static/images/bored.jpg\" alt=\"Generic placeholder image\" width=\"140\" height=\"140\">\r\n <h2>Bored?</h2>\r\n <p>What's more exciting than meeting new people? Discover new hobbies, cultures, and experiences with just one click! Bring out the adventurous spirit in you and discover \r\n something new! With the diversity this campus brings, theres no better chance to meet up!</p>\r\n <p><a class=\"btn btn-secondary\" href=\"#\" role=\"button\">View details &raquo;</a></p>\r\n </div><!-- /.col-lg-4 -->\r\n <div class=\"col-lg-4\">\r\n <img class=\"rounded-circle\" src=\"../static/images/wtf.jpg\" alt=\"Generic placeholder image\" width=\"140\" height=\"140\">\r\n <h2>How Did I Get Here??</h2>\r\n <p>Sometimes you just wander into the different and exotic parts of the internet. Why not take it further and meet up with your fellow slugs? 
You have nothing to lose!</p>\r\n <p><a class=\"btn btn-secondary\" href=\"#\" role=\"button\">View details &raquo;</a></p>\r\n </div><!-- /.col-lg-4 -->\r\n </div><!-- /.row -->\r\n\r\n <!-- Showcase a few of the features -->\r\n <hr class=\"featurette-divider\">\r\n\r\n <div class=\"row featurette\">\r\n <div class=\"col-md-7\">\r\n <h2 class=\"featurette-heading\">Anonymous Chatting<span class=\"text-muted\"> Don't be afraid!</span></h2>\r\n <p class=\"lead\">This isn't called roulette for no reason, while we pride ourselves for the multiple services we offer, we are most proud of our most defining \r\n service, the random anonymous chat. Simply join the queue and meet up with someone new! Let the internet be your guise and express yourself.</p>\r\n </div>\r\n <div class=\"col-md-5\">\r\n <br>\r\n <img class=\"featurette-image img-fluid mx-auto\" src=\"../static/images/anonymous.jpg\">\r\n </div>\r\n </div>\r\n\r\n <hr class=\"featurette-divider\">\r\n\r\n <div class=\"row featurette\">\r\n <div class=\"col-md-7 order-md-2\">\r\n <h2 class=\"featurette-heading\">Create a Profile!<span class=\"text-muted\"> Stay connected for longer!</span></h2>\r\n <p class=\"lead\">Sometimes you mignt run into the perfect someone. Don't lose touch! Create your own personal user and add users you personally meet! Don't be held\r\n back by the time limit. Talk for however long you want to!</p>\r\n </div>\r\n <div class=\"col-md-5 order-md-1\">\r\n <br><br>\r\n <img class=\"featurette-image img-fluid mx-auto\" src=\"../static/images/profile.jpg\">\r\n </div>\r\n </div>\r\n\r\n <hr class=\"featurette-divider\">\r\n\r\n <div class=\"row featurette\">\r\n <div class=\"col-md-7\">\r\n <h2 class=\"featurette-heading\">Join a random chatroom!<span class=\"text-muted\"> Meet multiple people!</span></h2>\r\n <p class=\"lead\">Sometimes talking with just 1 person is not enough! Users can create their own chatrooms! Join a room and find people you want\r\n to connect with! Maybe you can find a future friend or soulmate?</p>\r\n </div>\r\n <div class=\"col-md-5\">\r\n <br><br><br><br>\r\n <img class=\"featurette-image img-fluid mx-auto\" src=\"../static/images/random.jpg\">\r\n </div>\r\n </div>\r\n\r\n <hr class=\"featurette-divider\">\r\n\r\n <footer class=\"container\">\r\n <p class=\"float-right\"><a href=\"#\">Back to top</a></p>\r\n </footer>\r\n <!-- Bootstrap js -->\r\n <script src=\"{{=URL('static','js/jquery.js')}}\"></script>\r\n <script src=\"{{=URL('static','js/bootstrap.min.js')}}\"></script>\r\n</body>\r\n\r\n<script src=\"{{=URL('static', 'js/default_index.js')}}\"></script>\r\n\r\n\r\n" }, { "alpha_fraction": 0.7494875192642212, "alphanum_fraction": 0.7630176544189453, "avg_line_length": 50.9782600402832, "blob_id": "3d6359818218bae288a6894118a0bbfe503e1ea1", "content_id": "3f915fe1dc64faf395cb5adb14d0caec1aaedd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2439, "license_type": "no_license", "max_line_length": 424, "num_lines": 46, "path": "/README.md", "repo_name": "ktbeck/SlugRoulette", "src_encoding": "UTF-8", "text": "# SlugRoulette\r\nSlug Roulette is a chatting website built using web2py, vuejs, stupid.css, and bootstrap. It has random matching chat for users as well as chat rooms that anyone can join. 
The main purpose of the website was to better familiarize ourselves with web2py and vue, but also to show what we have learned since the beginning of the quarter.\r\n\r\n\r\n(Version 1.0)\r\nAdd button creates a new chat and anyone including people that are logged off can chat. The chat is a type in the tables.py called textBox and my current idea for storing the text is using an array. The 'chat' Field is an Array and any new line that someone sends into the chat will be stored as an new index in the array. The Chat as of now CANNOT update its text UNLESS the send button is clicked OR the page is refreshed.\r\n\r\n(version 1.1)\r\nDelete button added and delete function added to api.py and default_index.js. Chats can now be deleted.\r\n\r\n(version 1.2)\r\nNow the chat will list the user that typed the text.\r\n\r\n(version 1.3)\r\nNow the chat box will update realtime. If another user has texted, the texted will be displayed without any inputs to all other devices.\r\n\r\n(version 1.4) The chat list begins by showing a list of Titles and gives the user an option to join a chat box. If the user joins, they will be shown only the chat box where they can either participate in the chat or leave the chat. DELETE BUTTON DOES NOT WORK RIGHT NOW AND DOESNT NEED TO WORK.\r\n(version 1.41): No more loading screen\r\n\r\n(version 1.5)\r\nSearch function for chats. RANDOM CHAT AND CHAT SERVER BUTTON DONT WORK RIGHT NOW\r\n\r\n(version 1.6)\r\nNow added a new tab where user can enter a Queue. QUEUE DOES NOT CURRENTLY DO ANYTHING.\r\n\r\n(version 1.7) Password field added to the Join button.\r\n\r\n(version 2.0)\r\nRandom Chat now works. PLEASE IF YOU FIND BUG LIST IT UNDER PROBLEM.\r\n(UPDATE): fixed an issue where send button would not disappear in random chat after the chat was disconnected\r\n\r\n(versions 2.1)\r\nReadded README. Queue works, usernames work, and chat works. UI not totally connected.\r\n\r\n(version 2.2)\r\nUploaded main UI for main page. Content is mostly complete aside from some images needed and minor adjustments to bottom elements.\r\nChat has been migrated to another page.\r\n\r\n(version 2.4)\r\nQueue now correctly updates on all browsers.\r\n\r\n(version 2.5)\r\nChat UI is mostly complete/Main page is completely done.\r\n\r\n(version 2.6)\r\nMinor fixes to UI/margins/buttons etc.\r\n\r\n" } ]
5
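SlugRoulette's README above describes each chat as a growing `list:string` field, and `edit_textBox` in api.py implements that as a read-modify-write on one row. A stripped-down sketch of that update in web2py DAL terms follows; note that the pattern can lose messages if two posts race on the same row, which the snapshot does not guard against.

```python
# Sketch only: assumes the db object and textBox table defined in tables.py above.
def append_message(db, chat_id, text, sender):
    box = db(db.textBox.id == chat_id).select().first()
    if box is None:
        return False
    # Read the current lists, append, and write both back in one record update.
    # web2py commits automatically at the end of a request.
    box.update_record(chat=box.chat + [text], chatter=box.chatter + [sender])
    return True
```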
guiferviz/word-count-telegram-bot
https://github.com/guiferviz/word-count-telegram-bot
480a08361c8f62f9c7a061ae6f5e86352c08d026
3f1266b8b74ed15155a2934d785cd43c0754e837
e59cc237462ecd9fd686938d667ea637f816ca0b
refs/heads/master
2016-09-02T02:52:25.096946
2015-09-07T23:40:04
2015-09-07T23:47:15
41,751,603
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5711610317230225, "alphanum_fraction": 0.5767790079116821, "avg_line_length": 29.485713958740234, "blob_id": "e0fd6ca8bee8273894b7c427eb0ce6ec405bd8e8", "content_id": "82e809e26f7cf42603478950dfdf0b3e1bda572a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2136, "license_type": "permissive", "max_line_length": 80, "num_lines": 70, "path": "/tests/handlers_tests.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n\nimport json\nfrom webtest import app\nfrom mock import patch\n\nimport main\nimport test_suite\nimport telegram_token\n\nimport utils\n\n\nBOT_URL = '/' + telegram_token.TOKEN\n\n\nclass HandlersTests(test_suite.AppEngineTestBase):\n\n def test_404_when_root_post_request(self):\n \"\"\"\n Checks telegram bot webhook is not the root path.\n \"\"\"\n try:\n self.testapp.post('/')\n raise Exception('No 404 error asking for \"/\"')\n except app.AppError as error:\n self.assertTrue('404 Not Found' in error.message)\n\n def test_empty_body_request(self):\n \"\"\"\n Request with empty JSON body.\n \"\"\"\n self.testapp.post(BOT_URL)\n\n def test_garbage_request(self):\n \"\"\"\n Not empty and invalid JSON body.\n \"\"\"\n self.testapp.post(BOT_URL, 'garbage')\n\n def test_json_garbage_request(self):\n \"\"\"\n Request with valid JSON body but not as expected.\n \"\"\"\n self.testapp.post(BOT_URL, '{}')\n\n @patch('telegram.Bot.sendMessage')\n def test_text_message(self, mock_send_message):\n \"\"\"\n Checks that the bot does an echo with all the text messages it receives.\n \"\"\"\n json_str_body = json.dumps(utils.SAMPLE_TEXT_UPDATE.to_dict())\n self.testapp.post(BOT_URL, json_str_body)\n text = main.RESPONSE_TEXT % {\n \"letters\": 9,\n \"words\": 1,\n \"lines\": 1,\n }\n mock_send_message.assert_called_once_with(chat_id=utils.SAMPLE_CHAT.id,\n text=text)\n\n @patch('telegram.Bot.sendMessage')\n def test_sticker_message(self, mock_send_message):\n \"\"\"\n Checks that the bot doesn't respond to stickers.\n \"\"\"\n json_str_body = json.dumps(utils.SAMPLE_STICKER_UPDATE.to_dict())\n self.testapp.post(BOT_URL, json_str_body)\n # self.assertFalse(mock_send_message.called)\n # We send and error message:\n mock_send_message.assert_called_once_with(chat_id=utils.SAMPLE_CHAT.id,\n text=main.RESPONSE_NO_TEXT)\n" }, { "alpha_fraction": 0.6288998126983643, "alphanum_fraction": 0.6420361399650574, "avg_line_length": 31, "blob_id": "1462e517d2d4c76c6b72cfbd45d094d5278121fa", "content_id": "46662dcc8ec95e8d766a7b561e5ba1d056d561a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1218, "license_type": "permissive", "max_line_length": 63, "num_lines": 38, "path": "/tests/services_tests.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n\nfrom service import WordCountService\n\nimport test_suite\n\n\nclass ServicesTests(test_suite.AppEngineTestBase):\n\n def test_count_letters_1(self):\n text = \"Harry??\"\n letters = WordCountService.count_letters(text)\n self.assertEqual(7, letters)\n\n def test_count_letters_2(self):\n text = \"Voldemort\\nHarry\"\n letters = WordCountService.count_letters(text)\n self.assertEqual(15, letters)\n\n def test_get_word_tokens(self):\n text = \"Hermione?? Crookshanks??? Miaaaauuu :)\"\n tokens = WordCountService.get_word_tokens(text)\n self.assertEqual(3, len(tokens))\n\n def test_get_word_symbols_tokens(self):\n text = \"Ron?? 
Scabbers??? Chiiii :3\"\n tokens = WordCountService.get_word_symbols_tokens(text)\n self.assertEqual(4, len(tokens))\n\n def test_count_lines(self):\n text = \"Voldemort\\nHarry\"\n lines = WordCountService.count_lines(text)\n self.assertEqual(2, lines)\n\n def test_count_1(self):\n text_1 = \"Hi there!\\n:)\"\n result = WordCountService.count(text_1)\n self.assertEquals(result[\"letters\"], 12)\n self.assertEquals(result[\"words\"], 3)\n self.assertEquals(result[\"lines\"], 2)\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 28.5, "blob_id": "1d0ae6b792068f7b2c6f6da8b6bd2ddc883af90d", "content_id": "5f526bf1d81f9c98e31dd5f947fb2977384b7296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "permissive", "max_line_length": 42, "num_lines": 2, "path": "/app/telegram_token.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n# Talk to @BotFather to get the bot token.\nTOKEN = '70421870:AAFodp-o5HNizUqU0964ERt8lL7zoJ3ZUaE'\n" }, { "alpha_fraction": 0.40840139985084534, "alphanum_fraction": 0.42532089352607727, "avg_line_length": 33.93877410888672, "blob_id": "7175c48d5cb0697308faeb96186ba0bd23f982df", "content_id": "056243076d8f8a267efdc90f612abccd4bbfb0d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1714, "license_type": "permissive", "max_line_length": 71, "num_lines": 49, "path": "/tests/utils.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n\nimport datetime\n\nimport telegram\n\n\nSAMPLE_USER = telegram.User(id=7,\n first_name='Harry',\n last_name='Potter',\n username='sexy_scar')\n\n# User chat, private conversation.\nSAMPLE_CHAT = SAMPLE_USER\n\nSAMPLE_DATE = datetime.date(2015, 9, 4)\n\n# TEXT\n\nSAMPLE_TEXT = 'Alohomora'\n\nSAMPLE_TEXT_MESSAGE = telegram.Message(message_id=1,\n from_user=SAMPLE_USER,\n date=SAMPLE_DATE,\n chat=SAMPLE_CHAT,\n text=SAMPLE_TEXT)\n\nSAMPLE_TEXT_UPDATE = telegram.Update(update_id=1,\n message=SAMPLE_TEXT_MESSAGE)\n\n# STICKER\n\nSAMPLE_STICKER_PHOTOSIZE = telegram.PhotoSize(file_id='photosize_id',\n width=10,\n height=10,\n file_size=100)\n\nSAMPLE_STICKER = telegram.Sticker(file_id='sticker_id',\n width=100,\n height=100,\n thumb=SAMPLE_STICKER_PHOTOSIZE,\n file_size=10000)\n\nSAMPLE_STICKER_MESSAGE = telegram.Message(message_id=1,\n from_user=SAMPLE_USER,\n date=SAMPLE_DATE,\n chat=SAMPLE_CHAT,\n sticker=SAMPLE_STICKER)\n\nSAMPLE_STICKER_UPDATE = telegram.Update(update_id=2,\n message=SAMPLE_STICKER_MESSAGE)\n" }, { "alpha_fraction": 0.6034482717514038, "alphanum_fraction": 0.6045976877212524, "avg_line_length": 23.799999237060547, "blob_id": "888187ccaca7c4a73854c7066fd63bc0f1a73f83", "content_id": "b076527838b1e563e648d01f9302f5c932b4d9d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "permissive", "max_line_length": 53, "num_lines": 35, "path": "/app/service.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n\nfrom nltk.tokenize.regexp import RegexpTokenizer\n\n\nclass WordCountService():\n\n @classmethod\n def count(cls, text):\n return {\n \"letters\": cls.count_letters(text),\n \"words\": cls.count_words(text),\n \"lines\": cls.count_lines(text),\n }\n\n @classmethod\n def count_letters(cls, text):\n return len(text)\n\n @classmethod\n 
def count_words(cls, text):\n        tokens = cls.get_word_symbols_tokens(text)\n        return len(tokens)\n\n    @classmethod\n    def count_lines(cls, text):\n        return text.count(\"\\n\") + 1\n\n    @classmethod\n    def get_word_tokens(cls, text):\n        tokenizer = RegexpTokenizer('\\w+')\n        return tokenizer.tokenize(text)\n\n    @classmethod\n    def get_word_symbols_tokens(cls, text):\n        tokenizer = RegexpTokenizer('\\s+', gaps=True)\n        return tokenizer.tokenize(text)\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 13, "blob_id": "448ea30fcf6c5ed65e47316fe406e1c869f0395f", "content_id": "71962c7b353f57edd60b785cf6e8985bae8a2d5a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 27, "license_type": "permissive", "max_line_length": 19, "num_lines": 2, "path": "/dev_requirements.txt", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "webtest\n-r requirements.txt" }, { "alpha_fraction": 0.7179487347602844, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 37.5, "blob_id": "5892a6d21a70f385f6e0e581198b92fd75bd2dc2", "content_id": "5a3c757bdf7d81fa9fa15a3e2d25e5bb3f92ab08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "permissive", "max_line_length": 42, "num_lines": 2, "path": "/telegram_token.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\n# Talk to @BotFather to get the bot token.\nTOKEN = 'YOUR-TELEGRAM-BOT-TOKEN'\n" }, { "alpha_fraction": 0.6244934797286987, "alphanum_fraction": 0.6267446875572205, "avg_line_length": 28.613332748413086, "blob_id": "e3d1b15e637f96170b7b7a00b42321c0d24f6c54", "content_id": "207717b5e87f17c2feaf477defd4679a13b3969e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "permissive", "max_line_length": 86, "num_lines": 75, "path": "/app/main.py", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "# encoding=utf-8\n\n\nimport json\nimport logging\nimport webapp2\n\nimport appengine_config\nfrom google.appengine.api import modules\nfrom google.appengine.api import app_identity\n\nimport telegram\n\nimport telegram_token\nfrom service import WordCountService\n\n\n# Creating the bot and getting basic info of it.\nBOT = telegram.Bot(token=telegram_token.TOKEN)\nBOT_ME = BOT.getMe()\n\nif not appengine_config.DEBUG:\n    # Setting the webhook (callback).\n    VERSION = modules.get_current_version_name()\n    HOST_NAME = app_identity.get_default_version_hostname()\n    HOST_URL = 'https://{}-dot-{}/{}'.format(VERSION, HOST_NAME, telegram_token.TOKEN)\n    BOT.setWebhook(HOST_URL)\n\n# Text to send back after substitution.\nRESPONSE_TEXT = \"\"\"Letters: %(letters)s\nWords: %(words)s\nLines: %(lines)s\n\"\"\"\n\nRESPONSE_NO_TEXT = \"Please, send me some text. 
\" + telegram.Emoji.PAGE_FACING_UP\n\n\nclass MainPage(webapp2.RequestHandler):\n\n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(BOT_ME)\n self.response.write('\\n')\n self.response.write(HOST_URL)\n\n def post(self):\n json_str = self.request.body\n logging.info(\"POST Body: \" + json_str)\n try:\n json_obj = json.loads(json_str)\n update = telegram.Update.de_json(json_obj)\n chat_id = update.message.chat.id\n text = update.message.text.encode('utf-8')\n if text != '':\n logging.info(\"Message Text: \" + text)\n BOT.sendMessage(chat_id=chat_id,\n text=self.get_response(text))\n else:\n logging.info(\"No Message Text.\")\n BOT.sendMessage(chat_id=chat_id,\n text=self.get_response())\n except ValueError:\n logging.error('No body or bad JSON body.')\n except AttributeError:\n logging.error('No correct attributes in JSON body.')\n\n def get_response(self, text=None):\n if text:\n return RESPONSE_TEXT % WordCountService.count(text)\n\n return RESPONSE_NO_TEXT\n\napp = webapp2.WSGIApplication([\n ('/' + telegram_token.TOKEN, MainPage),\n], debug=appengine_config.DEBUG)\n" }, { "alpha_fraction": 0.7244582176208496, "alphanum_fraction": 0.7244582176208496, "avg_line_length": 52.66666793823242, "blob_id": "a8fd4ba6c557a00efae9574934c0755e7de65813", "content_id": "5ebd178a0b0db6c97cb375e9607515ba536eb797", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 323, "license_type": "permissive", "max_line_length": 135, "num_lines": 6, "path": "/README.md", "repo_name": "guiferviz/word-count-telegram-bot", "src_encoding": "UTF-8", "text": "\nWord Count Telegram Bot\n=======================\n\n[![Build Status](https://travis-ci.org/guiferviz/word-count-telegram-bot.svg)](https://travis-ci.org/guiferviz/word-count-telegram-bot)\n\nSimple Python Telegram Bot that runs on Google AppEngine. It counts letters, words and lines of all the text messages that it receives.\n" } ]
9
4Marvin2/AskMe
https://github.com/4Marvin2/AskMe
556292c42ebd94f9fb9d6c36d8cba7b03a5aa71e
62dff59041630a906dcd2fe44cde946bb25bf4d9
03b4951e21dd77aeae8a0d519e0edca0dd2a500a
refs/heads/main
2023-05-07T12:15:20.545531
2021-04-28T14:19:44
2021-04-28T14:19:44
347,346,911
0
3
null
2021-03-13T11:04:20
2021-04-13T17:05:00
2021-04-28T14:19:44
HTML
[ { "alpha_fraction": 0.5487006902694702, "alphanum_fraction": 0.554870069026947, "avg_line_length": 33.960784912109375, "blob_id": "4e41d7dc599d4d63e942d069f059adfe7396d562", "content_id": "3308175f5f76a2dff8976c0392c2cb2370e96382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5349, "license_type": "no_license", "max_line_length": 96, "num_lines": 153, "path": "/app/management/commands/fill_db.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom app.models import Profile, Question, Answer, Tag, LikeQuestion, LikeAnswer\nfrom random import choice, sample, randint\nfrom faker import Faker\n\nf = Faker()\n\n\nclass Command(BaseCommand):\n help = 'Fill test database'\n\n def add_arguments(self, parser):\n parser.add_argument('--full', nargs='+', type=int)\n parser.add_argument('--users', nargs='+', type=int)\n parser.add_argument('--questions', nargs='+', type=int)\n parser.add_argument('--answers', nargs='+', type=int)\n parser.add_argument('--tags', nargs='+', type=int)\n parser.add_argument('--likes', nargs='+', type=int)\n\n parser.add_argument('--dusers', nargs='+', type=int)\n parser.add_argument('--dlikes', nargs='+', type=int)\n\n def handle(self, *args, **options):\n if options['full']:\n self.fill_full_db(options['full'][0])\n\n if options['users']:\n self.fill_users(options['users'][0])\n\n if options['tags']:\n self.fill_tags(options['tags'][0])\n\n if options['questions']:\n self.fill_questions(options['questions'][0])\n\n if options['answers']:\n self.fill_answers(options['answers'][0])\n\n if options['likes']:\n self.fill_likes_questions(options['likes'][0])\n self.fill_likes_answers(2 * options['likes'][0])\n\n if options['dusers']:\n self.delete_users()\n\n if options['dlikes']:\n self.delete_likes()\n\n @staticmethod\n def fill_users(count):\n for i in range(count):\n name = f.user_name()\n while User.objects.filter(username=name).exists():\n name = f.user_name()\n Profile.objects.create(\n user_id=User.objects.create_user(\n username=name,\n email=f.email(),\n password='qwert'),\n avatar=f'img/avatars/{i}.png',\n )\n\n @staticmethod\n def fill_tags(count):\n for i in range(count):\n tag = f.word()\n while Tag.objects.filter(tag=tag).exists():\n tag = f.word()\n Tag.objects.create(tag=tag)\n\n @staticmethod\n def fill_questions(count):\n profiles = list(Profile.objects.values_list('id', flat=True))\n for i in range(count):\n current_question = Question.objects.create(\n profile_id=Profile.objects.get(pk=choice(profiles)),\n title=f.sentence(),\n text=f.text(),\n )\n tags = list(Tag.objects.values_list('tag', flat=True))\n tags_for_question = sample(tags, k=randint(1, 5))\n current_question.tags.set(Tag.objects.add_tags_to_question(tags_for_question))\n\n @staticmethod\n def fill_answers(count):\n profiles = list(Profile.objects.values_list('id', flat=True))\n questions = list(Question.objects.values_list('id', flat=True))\n for i in range(count):\n Answer.objects.create(\n profile_id=Profile.objects.get(pk=choice(profiles)),\n question_id=Question.objects.get(pk=choice(questions)),\n text=f.text(),\n is_correct=randint(0, 1),\n )\n\n @staticmethod\n def fill_likes_questions(count):\n iterator = 0\n for question in Question.objects.all():\n profiles = list(Profile.objects.values_list('id', flat=True))\n current_profiles = Profile.objects.filter(id__in=sample(profiles, k=randint(0, 10)))\n for 
profile in current_profiles:\n LikeQuestion.objects.create(\n question_id=question,\n profile_id=profile,\n )\n iterator += 1\n if iterator == count:\n break\n if iterator == count:\n break\n\n @staticmethod\n def fill_likes_answers(count):\n iterator = 0\n for answer in Answer.objects.all():\n profiles = list(Profile.objects.values_list('id', flat=True))\n current_profiles = Profile.objects.filter(id__in=sample(profiles, k=randint(0, 15)))\n for profile in current_profiles:\n LikeAnswer.objects.create(\n answer_id=answer,\n profile_id=profile,\n )\n iterator += 1\n if iterator == count:\n break\n if iterator == count:\n break\n\n @staticmethod\n def delete_users():\n Profile.objects.all().delete()\n User.objects.all().delete()\n\n @staticmethod\n def delete_likes():\n LikeQuestion.objects.all().delete()\n LikeAnswer.objects.all().delete()\n\n def fill_full_db(self, count):\n self.fill_users(count)\n print('Users are created')\n self.fill_tags(count)\n print('Tags are created')\n self.fill_questions(count * 10)\n print('Questions are created')\n self.fill_answers(count * 100)\n print('Answers are created')\n self.fill_likes_questions(count * 100)\n print('Likes for questions are created')\n self.fill_likes_answers(count * 200)\n print('Likes for answers are created')\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5776397585868835, "avg_line_length": 17.941177368164062, "blob_id": "24cf1dba46efc78fef7eb59c38f149b9d5db7bf7", "content_id": "fa81ddddcab2e1170f0ab4e6836743f263d1d868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/app/migrations/0002_rename_likeanswers_likeanswer.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-14 19:54\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='LikeAnswers',\n new_name='LikeAnswer',\n ),\n ]\n" }, { "alpha_fraction": 0.5554689168930054, "alphanum_fraction": 0.5599532127380371, "avg_line_length": 46.49074172973633, "blob_id": "7c1347ac287915ce8ce03d5b01e9b37d7767134b", "content_id": "94bd9955af01903a5fa6427ce5426a2cd492096c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5416, "license_type": "no_license", "max_line_length": 148, "num_lines": 108, "path": "/app/migrations/0001_initial.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-14 19:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('text', models.TextField(verbose_name='Текст ответа')),\n ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),\n ('is_correct', models.BooleanField(default=False, verbose_name='Флаг правильного ответа')),\n ('rating', models.IntegerField(default=0, verbose_name='Рейтинг')),\n ],\n options={\n 'verbose_name': 'Ответ',\n 'verbose_name_plural': 'Ответы',\n },\n ),\n 
migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('avatar', models.ImageField(upload_to='avatars/%y/%m/%d', verbose_name='Аватар')),\n ('rating', models.IntegerField(default=0, verbose_name='Рейтинг')),\n ('user_id', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Профиль')),\n ],\n options={\n 'verbose_name': 'Профиль',\n 'verbose_name_plural': 'Профили',\n },\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('tag', models.CharField(max_length=16, unique=True, verbose_name='Тег')),\n ('rating', models.IntegerField(default=0, verbose_name='Рейтинг')),\n ],\n options={\n 'verbose_name': 'Тег',\n 'verbose_name_plural': 'Теги',\n },\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255, verbose_name='Заголовок')),\n ('text', models.TextField(verbose_name='Текст вопроса')),\n ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),\n ('rating', models.IntegerField(default=0, verbose_name='Рейтинг')),\n ('profile_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.profile', verbose_name='Автор')),\n ('tags', models.ManyToManyField(to='app.Tag', verbose_name='Теги')),\n ],\n options={\n 'verbose_name': 'Вопрос',\n 'verbose_name_plural': 'Вопросы',\n },\n ),\n migrations.CreateModel(\n name='LikeQuestion',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_like', models.BooleanField(default=True, verbose_name='Лайк или дизлайк')),\n ('profile_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.profile', verbose_name='Профиль')),\n ('question_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.question', verbose_name='Вопрос')),\n ],\n options={\n 'verbose_name': 'Лайк вопроса',\n 'verbose_name_plural': 'Лайки вопросов',\n },\n ),\n migrations.CreateModel(\n name='LikeAnswers',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_like', models.BooleanField(default=True, verbose_name='Лайк или дизлайк')),\n ('answer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.answer', verbose_name='Ответ')),\n ('profile_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.profile', verbose_name='Профиль')),\n ],\n options={\n 'verbose_name': 'Лайк ответа',\n 'verbose_name_plural': 'Лайки ответов',\n },\n ),\n migrations.AddField(\n model_name='answer',\n name='profile_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.profile', verbose_name='Профиль'),\n ),\n migrations.AddField(\n model_name='answer',\n name='question_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.question', verbose_name='Вопрос'),\n ),\n ]\n" }, { "alpha_fraction": 0.6159749627113342, "alphanum_fraction": 0.620516836643219, "avg_line_length": 30.924999237060547, "blob_id": "8fd40798e24f0b0b2aec21006a1477b2ad3d6ca1", "content_id": "3607c496af7fe50edc1e206b19a0a93890696249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 6734, "license_type": "no_license", "max_line_length": 100, "num_lines": 200, "path": "/app/models.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass ProfileManager(models.Manager):\n def best_members(self):\n return self.order_by('-rating')[:10]\n\n\nclass Profile(models.Model):\n user_id = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='Профиль')\n avatar = models.ImageField(upload_to='avatars/%y/%m/%d', verbose_name='Аватар')\n rating = models.IntegerField(default=0, verbose_name='Рейтинг')\n\n objects = ProfileManager()\n\n def __str__(self):\n return self.user_id.get_username()\n\n class Meta:\n verbose_name = 'Профиль'\n verbose_name_plural = 'Профили'\n\n\nclass TagManager(models.Manager):\n def add_tags_to_question(self, added_tags):\n tags = self.filter(tag__in=added_tags)\n for tag in tags:\n tag.rating += 1\n tag.save()\n return tags\n\n def popular_tags(self):\n return self.order_by('-rating')[:15]\n\n\nclass Tag(models.Model):\n tag = models.CharField(unique=True, max_length=16, verbose_name='Тег')\n rating = models.IntegerField(default=0, verbose_name='Рейтинг')\n\n objects = TagManager()\n\n def __str__(self):\n return self.tag\n\n class Meta:\n verbose_name = 'Тег'\n verbose_name_plural = 'Теги'\n\n\nclass QuestionManager(models.Manager):\n def new_questions(self):\n return self.order_by('-date_joined')\n\n def hot_questions(self):\n return self.order_by('-rating')\n\n def by_tag(self, tag):\n return self.filter(tags__tag=tag).order_by('-rating')\n\n\nclass Question(models.Model):\n profile_id = models.ForeignKey('Profile', on_delete=models.CASCADE, verbose_name='Автор')\n title = models.CharField(max_length=255, verbose_name='Заголовок')\n text = models.TextField(verbose_name='Текст вопроса')\n date_joined = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')\n tags = models.ManyToManyField(Tag, verbose_name='Теги')\n rating = models.IntegerField(default=0, verbose_name='Рейтинг')\n number_of_answers = models.IntegerField(default=0, verbose_name='Количество ответов')\n\n objects = QuestionManager()\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Вопрос'\n verbose_name_plural = 'Вопросы'\n\n\nclass AnswerManager(models.Manager):\n def by_question(self, pk):\n return self.filter(question_id=pk).order_by('-rating')\n\n\nclass Answer(models.Model):\n profile_id = models.ForeignKey('Profile', on_delete=models.CASCADE, verbose_name='Профиль')\n question_id = models.ForeignKey('Question', on_delete=models.CASCADE, verbose_name='Вопрос')\n text = models.TextField(verbose_name='Текст ответа')\n date_joined = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')\n is_correct = models.BooleanField(default=False, verbose_name='Флаг правильного ответа')\n rating = models.IntegerField(default=0, verbose_name='Рейтинг')\n\n objects = AnswerManager()\n\n def __str__(self):\n return self.text\n\n def save(self, *args, **kwargs):\n if not self.pk:\n self.question_id.number_of_answers += 1\n self.question_id.save()\n super(Answer, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n self.question_id.number_of_answers -= 1\n self.question_id.save()\n super(Answer, self).delete(*args, **kwargs)\n\n def change_flag_is_correct(self):\n self.is_correct = not self.is_correct\n self.save()\n\n class Meta:\n verbose_name = 'Ответ'\n verbose_name_plural = 'Ответы'\n\n\nclass 
LikeQuestion(models.Model):\n question_id = models.ForeignKey('Question', on_delete=models.CASCADE, verbose_name='Вопрос')\n profile_id = models.ForeignKey('Profile', on_delete=models.CASCADE, verbose_name='Профиль')\n is_like = models.BooleanField(default=True, verbose_name='Лайк или дизлайк')\n\n def __str__(self):\n action = 'дизлайкнул'\n if self.is_like:\n action = 'лайкнул'\n return f'{self.profile_id.user_id.get_username()} {action} вопрос: {self.question_id.title}'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n if self.is_like:\n self.question_id.rating += 1\n else:\n self.question_id.rating -= 1\n self.question_id.save()\n super(LikeQuestion, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n if self.is_like:\n self.question_id.rating -= 1\n else:\n self.question_id.rating += 1\n self.question_id.save()\n super(LikeQuestion, self).delete(*args, **kwargs)\n\n def change_flag_is_like(self):\n if self.is_like:\n self.question_id.rating += 2\n else:\n self.question_id.rating -= 2\n self.is_like = not self.is_like\n self.save()\n self.question_id.save()\n\n class Meta:\n verbose_name = 'Лайк вопроса'\n verbose_name_plural = 'Лайки вопросов'\n\n\nclass LikeAnswer(models.Model):\n answer_id = models.ForeignKey('Answer', on_delete=models.CASCADE, verbose_name='Ответ')\n profile_id = models.ForeignKey('Profile', on_delete=models.CASCADE, verbose_name='Профиль')\n is_like = models.BooleanField(default=True, verbose_name='Лайк или дизлайк')\n\n def __str__(self):\n action = 'дизлайкнул'\n if self.is_like:\n action = 'лайкнул'\n return f'{self.profile_id.user_id.get_username()} {action} ответ: {self.answer_id.text}'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n if self.is_like:\n self.answer_id.rating += 1\n else:\n self.answer_id.rating -= 1\n self.answer_id.save()\n super(LikeAnswer, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n if self.is_like:\n self.answer_id.rating -= 1\n else:\n self.answer_id.rating += 1\n self.answer_id.save()\n super(LikeAnswer, self).delete(*args, **kwargs)\n\n def change_flag_is_like(self):\n if self.is_like:\n self.answer_id.rating += 2\n else:\n self.answer_id.rating -= 2\n self.is_like = not self.is_like\n self.save()\n self.answer_id.save()\n\n class Meta:\n verbose_name = 'Лайк ответа'\n verbose_name_plural = 'Лайки ответов'\n" }, { "alpha_fraction": 0.5779076218605042, "alphanum_fraction": 0.5812464952468872, "avg_line_length": 36.4375, "blob_id": "376f977b929204498de2888cf0c1c900d3d3219d", "content_id": "70420ef4e8c3567394a7173ed8907580546a30b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3594, "license_type": "no_license", "max_line_length": 80, "num_lines": 96, "path": "/app/views.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom app.models import Profile, Question, Answer, Tag, LikeQuestion, LikeAnswer\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.http import Http404\nfrom django.shortcuts import render\nimport random\n\n\ndef paginate(objects_list, request, per_page=10):\n paginator = Paginator(objects_list, per_page)\n page = request.GET.get('page')\n objects = paginator.get_page(page)\n\n return objects\n\n\ndef index(request):\n questions = paginate(Question.objects.new_questions(), request)\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 
'index.html', {'questions': questions,\n 'popular_tags': popular_tags,\n 'best_members': best_members})\n\n\ndef hot_questions(request):\n questions = paginate(Question.objects.hot_questions(), request)\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'hot_questions.html', {'questions': questions,\n 'popular_tags': popular_tags,\n 'best_members': best_members})\n\n\ndef tag_questions(request, name):\n try:\n tag = Tag.objects.get(tag=name)\n questions = paginate(Question.objects.by_tag(name), request)\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'tag.html', {'tag': tag,\n 'questions': questions,\n 'popular_tags': popular_tags,\n 'best_members': best_members})\n except Tag.DoesNotExist:\n raise Http404\n\n\ndef answers_for_question(request, pk):\n try:\n question = Question.objects.get(pk=pk)\n question_answers = paginate(Answer.objects.by_question(pk), request, 3)\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'question.html', {'question': question,\n 'answers': question_answers,\n 'popular_tags': popular_tags,\n 'best_members': best_members})\n except Question.DoesNotExist:\n raise Http404\n\n\ndef login(request):\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'login.html', {'popular_tags': popular_tags,\n 'best_members': best_members})\n\n\ndef signup(request):\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'signup.html', {'popular_tags': popular_tags,\n 'best_members': best_members})\n\n\ndef ask(request):\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'ask.html', {'popular_tags': popular_tags,\n 'best_members': best_members})\n\n\ndef settings(request):\n popular_tags = Tag.objects.popular_tags()\n best_members = Profile.objects.best_members()\n\n return render(request, 'settings.html', {'popular_tags': popular_tags,\n 'best_members': best_members})\n" }, { "alpha_fraction": 0.5733944773674011, "alphanum_fraction": 0.6169725060462952, "avg_line_length": 23.22222137451172, "blob_id": "74df3ba5d54c1fdbd9cbf6dd778b2f6cfcf7d9e4", "content_id": "15c7d844c7a7a4243b8e399680b902b988c6eb24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 453, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/app/migrations/0003_question_number_of_answers.py", "repo_name": "4Marvin2/AskMe", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-16 21:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0002_rename_likeanswers_likeanswer'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='question',\n name='number_of_answers',\n field=models.IntegerField(default=0, verbose_name='Количество ответов'),\n ),\n ]\n" } ]
6
kimsh0117/codewars
https://github.com/kimsh0117/codewars
cefe4aa97a62ff0aaf53273ae7d077183a49a2ce
d66e81d4d099ab1314730e1168b4aec591b66362
e605ff920499ec1d3a339cd744d51065aa476a04
refs/heads/master
2020-03-07T19:51:47.304087
2018-04-11T05:50:27
2018-04-11T05:50:27
127,681,908
0
0
null
2018-04-02T00:13:48
2018-04-10T05:05:25
2018-04-11T05:50:28
Python
[ { "alpha_fraction": 0.7256637215614319, "alphanum_fraction": 0.7256637215614319, "avg_line_length": 21.600000381469727, "blob_id": "74ee25042f8a2588073fc77ad9ae59556920027d", "content_id": "fc80a4fa3d7b481cc1939af2d4d221211bffeaf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/8kyu/01-Opposite number/Opposite number.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# Very simple, given a number, find its opposite.\n\ndef opposite(number):\n # your solution here\n return -number\n" }, { "alpha_fraction": 0.5211132168769836, "alphanum_fraction": 0.5479846596717834, "avg_line_length": 25.367088317871094, "blob_id": "586254381ed9977f047faf8932fac9d037c6d080", "content_id": "56427982b8c676ce1c3c082addb271d98492afdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2094, "license_type": "no_license", "max_line_length": 131, "num_lines": 79, "path": "/4kyu/01-Next bigger number with the same digits/Next bigger number with the same digits.js", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "// You have to create a function that takes a positive integer number and returns the next bigger number formed by the same digits:\n\n// nextBigger(12)==21\n// nextBigger(513)==531\n// nextBigger(2017)==2071\n// If no bigger number can be composed using those digits, return -1:\n\n// nextBigger(9)==-1\n// nextBigger(111)==-1\n// nextBigger(531)==-1\n\n// function nextBigger(n) {\n// const arr = toStr(n);\n// const permutation = perm(arr).map(x => Number(x.join(''))).sort((a,b) => a - b).filter(x => x > n)\n// return arr.length === 1 || !permutation.length ? -1 : permutation[0]\n// }\n// function toStr(n) {\n// return n.toString(10).split('').map(x => parseInt(x))\n// }\n// function perm(xs) {\n// let ret = [];\n// for (let i = 0; i < xs.length; i = i + 1) { \n// let rest = perm(xs.slice(0, i).concat(xs.slice(i + 1))); \n// if (!rest.length) {\n// ret.push([xs[i]])\n// } else { \n// for (let j = 0; j < rest.length; j = j + 1) {\n// ret.push([xs[i]].concat(rest[j])) \n// } \n// } \n// } \n// return ret\n// };\n// function nextBigger(n) {\n// const leng = splitNumber(n).length;\n// const sw = swap(n);\n// return leng === 1 || sw === n ? 
-1 : sw;\n// }\n// function splitNumber(n) {\n//   return n.toString(10).split('').map(x => parseInt(x))\n// }\n// function swap(n) {\n//   let toArr = splitNumber(n);\n//   let len = toArr.length-2;\n//   return Number(toArr.slice(0,len).concat(toArr[toArr.length-1], toArr[toArr.length-2]).join(''));\n// }\n\n// 진식스 버전\nconst nextBigger = function(number) {\n  const numJoin = array => {\n    return parseInt(array.join(\"\"));\n  };\n  const numSplit = num => {\n    return num.toString().split(\"\");\n  };\n\n  const num = numSplit(number);\n  let result = -1;\n  let a, b, len = num.length;\n\n  if (len < 2) return result;\n  else len = len - 1;\n\n  while (len) {\n    a = num[len - 1];\n    b = num[len];\n    if (a < b) {\n      [a, b] = [b, a];\n      num[len - 1] = a;\n      num[len] = b;\n      result = numJoin(num);\n      if (number < result) return result;\n    }\n    len--;\n  }\n\n  return result;\n};\n// console.log(swap(414))\n\n" }, { "alpha_fraction": 0.6513410210609436, "alphanum_fraction": 0.6513410210609436, "avg_line_length": 25.200000762939453, "blob_id": "c045a1696ef7e6362237aeec860705082b027ff3", "content_id": "e35556da05348dd19271402086dcc235cab228ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 112, "num_lines": 10, "path": "/8kyu/05-Convert boolean values to strings 'Yes' or 'No'/Convert boolean values to strings 'Yes' or 'No'.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# Complete the method that takes a boolean value and return a \"Yes\" string for true, or a \"No\" string for false.\n\ndef bool_to_word(boolean):\n    return \"Yes\" if boolean == True else \"No\"\n\n# 다른 사람 풀이\n'''\ndef bool_to_word(bool):\n    return ['No', 'Yes'][bool]\n'''" }, { "alpha_fraction": 0.5354932116508484, "alphanum_fraction": 0.5525423884391785, "avg_line_length": 18.941177368164062, "blob_id": "24cf1dba46efc78fef7eb59c38f149b9d5db7bf7", "content_id": "fa81ddddcab2e1170f0ab4e6836743f263d1d868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 295, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/6kyu/01-PersistentBugger/PersistentBugger.js", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "function persistence(num) {\n  let count = 0;\n  function number(n) {\n    n = n.toString().split('');\n    if(n.length === 1) return;\n    count += 1;\n    n = n.reduce((a,b)=>{\n      return Number(a) * Number(b)\n    });\n    number(n);\n  }\n  number(num)\n  return count;\n}\nconsole.log(persistence(39))" }, { "alpha_fraction": 0.568493127822876, "alphanum_fraction": 0.5993150472640991, "avg_line_length": 20.629629135131836, "blob_id": "d7a860e962fccb17064201e6dba7bb8679891b44", "content_id": "4421d09a4690d4e950fd00f173fbd4ae7aeec51f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 73, "num_lines": 27, "path": "/7kyu/04-Vowel Count/Vowel Count.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n# Return the number (count) of vowels in the given string.\n\n# We will consider a, e, i, o, and u as vowels for this Kata.\n\n# The input string will only consist of lower case letters and/or spaces.\n'''\nlst = ['a', 'b', 'c']\n\nif 'a' in lst:\n    print('포함')\nelse:\n    print('미포함')\n'''\ndef getCount(inputStr):\n    num_vowels = 0\n    asc = [97,101,105,111,117]\n    for i in inputStr:\n        if ord(i) in
asc:\n num_vowels += 1\n return num_vowels\n\n# 다른사람 코드\n'''\ndef getCount(inputStr):\n return sum(1 for let in inputStr if let in \"aeiouAEIOU\")\n'''\n" }, { "alpha_fraction": 0.6408450603485107, "alphanum_fraction": 0.6502347588539124, "avg_line_length": 21.421052932739258, "blob_id": "01aaf6e4e51e6ca7adc8a32d2c04ff93a0aa4209", "content_id": "a833e37ef7bf140d758ca0019feaadf011e53f58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 90, "num_lines": 19, "path": "/8kyu/04-String repeat/String_repeat.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n# Write a function called repeatStr which repeats the given string string exactly n times.\n\n# repeatStr(6, \"I\") // \"IIIIII\"\n# repeatStr(5, \"Hello\") // \"HelloHelloHelloHelloHello\"\n\ndef repeat_str(repeat, string):\n return string * repeat\n\nrepeat_str(6, 'I')\n\n# 다른사람 풀이\n'''\ndef repeat_str(repeat, string):\n solution = \"\"\n for i in range(repeat):\n solution += string\n return solution\n'''\n" }, { "alpha_fraction": 0.5546038746833801, "alphanum_fraction": 0.5931477546691895, "avg_line_length": 18.5, "blob_id": "abbb97d6ba98a23dc5b95bb0a0b06ea7ecb66765", "content_id": "9e6d6df103a0a93f7f09fc3529f5d87fb759fb97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 75, "num_lines": 24, "path": "/8kyu/07-Sum of positive/Sum of positive.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# You get an array of numbers, return the sum of all of the positives ones.\n\n# Example [1,-4,7,12] => 1 + 7 + 12 = 20\n\n# Note: array may be empty, in this case return 0.\n\ndef positive_sum(arr):\n result = 0\n if len(arr) == 0:\n return 0\n else:\n for i in arr:\n if i > 0:\n result += i\n return result\n\n# 다른사람 풀이\n'''\ndef positive_sum(arr):\n return sum(x for x in arr if x > 0)\n\ndef positive_sum(arr):\n return sum(filter(lambda x: x > 0,arr))\n'''" }, { "alpha_fraction": 0.6633380651473999, "alphanum_fraction": 0.6776034235954285, "avg_line_length": 22.399999618530273, "blob_id": "8c25a5a3bc5aeeba25df79f15ae2f3c30cf8dd92", "content_id": "0b15e6d97c1532a16bdd5a1bb6ff8ad5096826a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 803, "license_type": "no_license", "max_line_length": 80, "num_lines": 30, "path": "/8kyu/03-Return Negative/Return Negative.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# In this simple assignment you are given a number and have to make it negative.\n# But maybe the number is already negative?\n\n# Example:\n\n# make_negative(1); return -1\n# make_negative(-5); return -5\n# make_negative(0); return 0\n# Notes:\n\n# The number can be negative already, in which case no change is required.\n# Zero (0) can't be negative, see examples above.\n\ndef make_negative( number ):\n if number == 0 or number < 0:\n return number\n else:\n return -number\n\n# 기초 문법\n# 파이썬에는 &&라는 연산자는 없습니다. 대신 and가 해당 기능을 대신합니다. 
그 외에도 논리적 OR(||)대신 or를 쓰지요.\n\n# 다른사람 풀이\n'''\ndef make_negative( number ):\n    return -abs(number)\n\ndef make_negative( number ):\n    return -number if number > 0 else number\n'''" }, { "alpha_fraction": 0.6203208565711975, "alphanum_fraction": 0.6452763080596924, "avg_line_length": 25.761905670166016, "blob_id": "183b4ef20577664a1340c75dc85c774ceefb897e", "content_id": "b74ac13a61121770736345402ac689a62a81e296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 92, "num_lines": 21, "path": "/7kyu/03-Odd or Even/Odd or Even.py", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "# Given an array of numbers, determine whether the sum of all of the numbers is odd or even.\n\n# Give your answer in string format as 'odd' or 'even'.\n\n# If the input array is empty consider it as: [0] (array with a zero).\n\n# Example:\n\n# oddOrEven([0]) returns \"even\"\n# oddOrEven([2, 5, 34, 6]) returns \"odd\"\n# oddOrEven([0, -1, -5]) returns \"even\"\nfrom functools import reduce\n\ndef oddOrEven(arr):\n    return 'even' if reduce((lambda x, y: x + y), arr, 0) % 2 == 0 else 'odd'\n\n#다른 사람 풀이\n'''\ndef oddOrEven(arr):\n    return 'even' if sum(arr) % 2 == 0 else 'odd'\n'''" }, { "alpha_fraction": 0.6524973511695862, "alphanum_fraction": 0.6769394278526306, "avg_line_length": 36.63999938964844, "blob_id": "ce00441974c1b95d7c133bf6ab5afd42e71edb82", "content_id": "391c1f2f449dc94b2075a477707862834532c7bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 955, "license_type": "no_license", "max_line_length": 200, "num_lines": 25, "path": "/6kyu/02-Multiples of 3 or 5/Multiples of 3 or 5.js", "repo_name": "kimsh0117/codewars", "src_encoding": "UTF-8", "text": "//If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.\n\n//Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in.\n\n//Note: If the number is a multiple of both 3 and 5, only count it once.\n\n//배수판정법 공부\n\nfunction solution(number){\n let arr = [];\n for(let i = 1; i < number; i+=1) {\n arr.push(i);\n }\n return arr.filter((number)=>divisibilityThree(number) || divisibilityFive(number)).length > 0 ? arr.filter((number)=>divisibilityThree(number) || divisibilityFive(number)).reduce((a,b)=> a + b) : 0;\n}\n\nfunction divisibilityThree(number) {\n return number.toString().split('').reduce((a,b)=> Number(a) + Number(b)) % 3 ? false : true;\n}\n\nfunction divisibilityFive(number) {\n let temp = number.toString().split('');\n if(temp[temp.length-1] == 5 || temp[temp.length-1] == 0) return true;\n else return false;\n}\n" } ]
11
michaelverano/data_collect
https://github.com/michaelverano/data_collect
64864d4c84087dd58f5e03a6664c0f8958458d73
8bc6998df1452e6c23aa9e70d51f4d39a3837dd8
472c8a815202cbe3430dc8249c958c58cc682071
refs/heads/master
2020-03-27T04:16:40.633315
2018-08-24T01:57:42
2018-08-24T01:57:42
145,926,977
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7603833675384521, "alphanum_fraction": 0.7603833675384521, "avg_line_length": 23.115385055541992, "blob_id": "899a000b7d2be5be4d201896087cfa87cb228cf3", "content_id": "783103ffbfae48067fab5194387f315b11f3aa3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 626, "license_type": "no_license", "max_line_length": 76, "num_lines": 26, "path": "/README.md", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "data_collect.py\n\ncollects data and stores in csv file for later use.\njson file containing data is stored in their own folders.\ni.e. BTC folder, ETH, LTC, and XRP.\n\n\n\npython packages used:\n- requests [requires pip]\n- gdax [requires pip] [discontinued]\n- csv\n- datetime\n\nExchanges where data is pulled from:\nx Binance\nx Bitstamp\nx GDAX (Need to complete ID registration) [discontinued until XRP available]\nx CoinMarketCap\n\n#### BOOKMARK ####\nx Start pull data from binance API\nx start pulling data from Coin Market Cap (is there an API)\nx start pulling data from gdax [discontinued until XRP available]\n\n- write log for program." }, { "alpha_fraction": 0.7569786310195923, "alphanum_fraction": 0.7569786310195923, "avg_line_length": 26.68181800842285, "blob_id": "4dcedf0baadbce302ca63a422b3966470cd7d423", "content_id": "3d154dca2b04e84004e69d260f8f418487f87fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/main.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/home/mverano/data_collect/bin/python\n# main.py - controller for data_collect.py\n\nfrom modules import *\nimport requests\n\n\n### COLLECT DATA FROM DIFFERENT EXCHANGES ###\nbitstamp_data = bitstamp.main(requests)\nbinance_data = binance.main(requests)\nmarket_cap = coin_market_cap.get_market_caps(requests)\n\n\nwrite_data.navigate_to_data_folder()\n\n### WRITE DATA TO CSV ###\nwrite_data.bitstamp_to_csv(bitstamp_data, headers.bitstamp_headings)\nwrite_data.binance_to_csv(binance_data, headers.binance_headings)\nwrite_data.coin_market_cap(market_cap, headers.coin_market_cap_headings)\n\n### TO DO ###\nlog.create_log()\n" }, { "alpha_fraction": 0.3396226465702057, "alphanum_fraction": 0.3396226465702057, "avg_line_length": 21.714284896850586, "blob_id": "3f07301328981cd3ba16d71452d7fb8230942043", "content_id": "513ca57fec9cd0951525e25980c5417a15c46471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/modules/__init__.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "__all__ = ['bitstamp',\n 'write_data',\n 'binance',\n 'gdax',\n 'coin_market_cap',\n 'headers',\n 'log']\n" }, { "alpha_fraction": 0.5786407589912415, "alphanum_fraction": 0.5805824995040894, "avg_line_length": 22.409090042114258, "blob_id": "161cb3d9793bfbbc7780189178876c89942d0389", "content_id": "e4b9a0b9353c44a9752e8116834629e28cc0a70a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/modules/bitstamp.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# bitstamp.py - controller for using the bitstamp API.\n\n# 
collects data only.\n\ndef main(requests):\n\n coins = ['btcusd', 'ethusd', 'ltcusd', 'xrpusd']\n\n json_data = {}\n for coin in coins:\n # json_data.append(coin) # test\n #print('getting requests')\n r = requests.get('https://www.bitstamp.net/api/v2/ticker/' + coin)\n\n #print('appending json_data')\n json_data[coin] = r.json()\n\n #print('appending complete')\n \n \n return json_data\n" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 20.66666603088379, "blob_id": "787f906355da307d64b5d3527c7a34add27d8c9e", "content_id": "3d04b9149a23bc72ea999ba5644d64c79a0efaf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 88, "num_lines": 27, "path": "/modules/log.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#log.py - creates a log file for logging when program initiates.\n\nimport os\nimport datetime\nimport csv\n\ndef create_log():\n \"\"\"\n \n ###\n ### CHANGE THIS DIRECTORY ####\n ### os.chdir('YOUR DIRECTORY')\n ###\n\n if 'logs' not in os.listdir():\n os.makedirs('logs')\n \n os.chdir('./logs')\n \n log_file = open('log.csv', 'a')\n log_writer = csv.writer(log_file)\n log_writer.writerow(['Program Initiated - {}'.format(str(datetime.datetime.now()))])\n log_file.close()\n \"\"\"\n\n raise \"look at the log.py and change the directory\"\n" }, { "alpha_fraction": 0.6267605423927307, "alphanum_fraction": 0.6361502408981323, "avg_line_length": 37.727272033691406, "blob_id": "e9cdce3a63643ef4b2ce607318fed830221acc4e", "content_id": "d6ebb78fbfcbc2ffa25b8ff841e0659a3023560b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 96, "num_lines": 11, "path": "/modules/headers.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#headers.py - headers for writing CSV files.\n\nbitstamp_headings = ['low', 'high', 'last', 'open', 'bid', 'ask', 'volume', 'vwap', 'timestamp']\n\nbinance_headings = ['price', 'time']\n\ncoin_market_cap_headings = [\n 'name', 'id', 'symbol', 'price_usd', 'market_cap_usd',\n 'percent_change_1h', 'percent_change_7d', 'percent_change_24h',\n 'total_supply', 'max_supply', 'available_supply', 'last_updated']\n" }, { "alpha_fraction": 0.6991525292396545, "alphanum_fraction": 0.6991525292396545, "avg_line_length": 20.272727966308594, "blob_id": "a047fbbdbc7302005aa9fb57a24c006a85f89da8", "content_id": "fe70e209d611d26b87222b931dae0ad7d001ba45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 70, "num_lines": 11, "path": "/modules/gdax.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#gdax.py - get btc, ltc, etc, over usd prices from gdax.\n\n# import gdax\n# public_client = gdax.PublicClient()\n\n# public_client.get_products()\n\n\n\n### GDAX/COINBASE DATA WILL NOT BE COLLECTED UNTIL XRP IS AVAILABLE###\n\n\n" }, { "alpha_fraction": 0.6711409687995911, "alphanum_fraction": 0.6711409687995911, "avg_line_length": 23.83333396911621, "blob_id": "495221c3d9c6edc7b6bab7f55a697e908ff63039", "content_id": "de3a05cff742a2154755e5758a24c8d373e374e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, 
"license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/test.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "import datetime\n\ntest_file = open('test.txt', 'a')\ntest_file.write('\\ntest ' + str(datetime.datetime.now()))\ntest_File.write(' ')\ntest_file.close()\n" }, { "alpha_fraction": 0.5818540453910828, "alphanum_fraction": 0.5850098729133606, "avg_line_length": 29.54216957092285, "blob_id": "b6c9a8a7e2739589379e58c0fdd5e577b1429c42", "content_id": "69b31bf2a1c3e16fa174811e06620cd674d017de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2535, "license_type": "no_license", "max_line_length": 94, "num_lines": 83, "path": "/modules/write_data.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# write_data.py - controller for writing data from collected data.\n\nimport csv\nimport os\n\ndef write_headers(file_name, headers):\n new_file = open(file_name, 'a')\n csv_writer = csv.writer(new_file)\n csv_writer.writerow(headers)\n new_file.close()\n\ndef navigate_to_data_folder():\n raise \"change the directory on the write_data.navigate_to_data_folder\"\n #os.chdir('<YOUR DIRECTORY HERE')\n \n\ndef bitstamp_to_csv(bitstamp_data, headers):\n for coin in bitstamp_data:\n file_name = coin+'.csv'\n if file_name not in os.listdir():\n write_headers(file_name, headers)\n \n coin_data = open(file_name, 'a')\n\n csv_writer = csv.writer(coin_data)\n\n headers = ['low', 'high', 'last', 'open', 'bid', 'ask', 'volume', 'vwap', 'timestamp']\n items = [bitstamp_data[coin][header] for header in headers]\n\n #csv_writer.writerow(order)\n csv_writer.writerow(items)\n \n coin_data.close()\n\ndef binance_to_csv(binance_data, headers):\n coin_of_interest = ['BTCUSDT', 'LTCUSDT', 'ETHUSDT', 'XRPUSDT']\n api_time = binance_data['time']\n \n for coin in coin_of_interest:\n file_name = coin+'.csv'\n if file_name not in os.listdir():\n write_headers(file_name, headers)\n\n coin_data = open(file_name, 'a')\n csv_writer = csv.writer(coin_data)\n\n ### HEADINGS ###\n # price, time\n items = [binance_data[coin], api_time]\n\n #csv_writer.writerow(['price', 'time'])\n csv_writer.writerow(items)\n\n coin_data.close()\n\ndef coin_market_cap(market_cap, headings):\n for coin in market_cap:\n\n file_name = coin+'_market_cap.csv'\n if file_name not in os.listdir():\n write_headers(file_name, headings)\n\n coin_data = open(file_name, 'a')\n\n csv_writer = csv.writer(coin_data)\n\n # HEADINGS\n # name, id, price_usd, marketcap_usd,\n # percent_change_1h, percent_change_7d, percent_change_24h,\n # total_supply, max_supply, available_supply\n # last updated, symbol, price_usd\n \n # headings = ['name', 'id', 'symbol', 'price_usd', 'market_cap_usd',\n # 'percent_change_1h', 'percent_change_7d', 'percent_change_24h',\n # 'total_supply', 'max_supply', 'available_supply', 'last_updated',]\n \n row = []\n for heading in headings:\n row.append(market_cap[coin][heading])\n\n csv_writer.writerow(row)\n coin_data.close()\n" }, { "alpha_fraction": 0.7279411554336548, "alphanum_fraction": 0.7279411554336548, "avg_line_length": 18.428571701049805, "blob_id": "4e66dffc491a3d9c53e6c230ce5a2fb525da1a00", "content_id": "c390bf0d95f49f3b2a06886bb5aff53845b0d552", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 98, "num_lines": 14, "path": "/modules/timestamp_convert.py", 
"repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# timestamp_convert.py - converts the timestamp on collected data and turns it into readable time.\n\ndef bitstamp_time(timestamp):\n pass\n\ndef binance_time(timestamp):\n pass\n\ndef gdax_time(timestamp):\n pass\n\ndef coinbase_time(timestamp):\n pass\n" }, { "alpha_fraction": 0.6147757172584534, "alphanum_fraction": 0.6200527548789978, "avg_line_length": 26.071428298950195, "blob_id": "5fbd7145b70f9b7beeb679b2a5212cabd69455b6", "content_id": "a9e9a2547d938e1b39da2480e65ed0d4afeac1de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 84, "num_lines": 14, "path": "/modules/coin_market_cap.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#coin_market_cap.py - collects data for btc, eth, ltc, and xrp from coin market cap.\n\ndef get_market_caps(requests):\n crypto = ['bitcoin', 'litecoin', 'ethereum', 'ripple']\n link = 'https://api.coinmarketcap.com/v1/ticker/'\n\n data = {}\n for coins in crypto:\n r = requests.get(link + coins)\n\n data[coins] = r.json()[0]\n\n return data\n" }, { "alpha_fraction": 0.6506129503250122, "alphanum_fraction": 0.6514886021614075, "avg_line_length": 28.256410598754883, "blob_id": "1339a4671acbbcc72a3a30a1b1103b24b2e7830c", "content_id": "855e6c219a84aadbfbad9760ad1d0e26dedfbff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1142, "license_type": "no_license", "max_line_length": 76, "num_lines": 39, "path": "/modules/binance.py", "repo_name": "michaelverano/data_collect", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#binance.py - collects data from binance API (i.e. just the link).\n\nfrom datetime import datetime\n\n\ndef main(requests):\n binance_link = 'https://api.binance.com/api/v1/ticker/price?symbol='\n data = collect_data(requests, binance_link)\n data['time'] = str(datetime.now())\n \n json_xrpbtc = collect_xrp(requests, binance_link)\n data['XRPUSDT'] = convert_xrpbtc(json_xrpbtc['XRPBTC'], data['BTCUSDT'])\n\n return data\n \ndef collect_data(requests, binance_link):\n listed_data = [\n requests.get(binance_link+'BTCUSDT'),\n requests.get(binance_link+'LTCUSDT'),\n requests.get(binance_link+'ETHUSDT'),]\n\n data = {}\n\n for items in listed_data:\n data[items.json()['symbol']] = float(items.json()['price'])\n \n return data\n\n \n# Collect XRP/BTC data.\ndef collect_xrp(requests, binance_link):\n json_xrpbtc = requests.get(binance_link+'XRPBTC').json()\n return {json_xrpbtc['symbol'] : float(json_xrpbtc['price'])}\n \n# convert XRP/BTC to XRP BTC.\ndef convert_xrpbtc(json_xrpbtc, btc_price):\n \"\"\"Convert xrpbtc to xrpusdt\"\"\"\n return json_xrpbtc * btc_price \n" } ]
12
lollipas/ussimang
https://github.com/lollipas/ussimang
a421639fe18522d63a312524196c79b824bf98a0
8de90df823551b67213a585174489ba6eb0e56a1
1bbc726815349b1602232cc9092bd8bead4f0620
refs/heads/master
2022-11-24T11:51:48.881153
2020-08-03T12:28:18
2020-08-03T12:28:18
261,684,941
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5475308895111084, "alphanum_fraction": 0.5648148059844971, "avg_line_length": 32.8776969909668, "blob_id": "8992a16f3c871a376879b8c17b967c93f9b8ef6f", "content_id": "19f5619d585a3804a1c48e2b84a8ee3b58997805", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4875, "license_type": "no_license", "max_line_length": 116, "num_lines": 139, "path": "/ussimang.py", "repo_name": "lollipas/ussimang", "src_encoding": "UTF-8", "text": "\r\nimport tkinter\r\nfrom tkinter import messagebox\r\nimport random\r\nimport time\r\nploki_suurus = 20\r\n\r\n\r\n#NB!\r\n#USS SOOB TOIDU AINULT SIIS ARA KUI TA LAHEB KOGU PEA OSAS SELLE VASTU!\r\n\r\n\r\n\r\n\r\nclass Uss:\r\n def __init__(self,aken,varv):\r\n self.aken = aken\r\n \r\n \r\n self.keha_x=0\r\n self.keha_y=0\r\n self.pea_x= 0\r\n self.pea_y = 0\r\n self.ussi_positsioonid=[(100,100),(80,100),(60,100)] \r\n self.ouna_positsioonid = self.ouna_spawn()\r\n self.skoor = 0\r\n self.uss_pilt = tkinter.PhotoImage(file=\"uss.ppm\") \r\n self.oun_pilt = tkinter.PhotoImage(file = \"oun.ppm\")\r\n self.coords = self.aken.coords\r\n aken.create_image(*self.ouna_positsioonid, image = self.oun_pilt, tag=\"oun\") \r\n self.aken.height = self.aken.winfo_height()\r\n self.aken.width = self.aken.winfo_width()\r\n self.liikumis_suund = \"Right\"\r\n self.aken.bind_all(\"<Key>\", self.klaviatuur)\r\n self.kaotus = False\r\n\r\n \r\n for xpositsioon1,ypositsioon1 in self.ussi_positsioonid: \r\n self.uss = aken.create_image(xpositsioon1,ypositsioon1, image=self.uss_pilt, tags = \"uss\")\r\n \r\n\r\n \r\n \r\n def ouna_soomine(self):\r\n print(self.ussi_positsioonid,self.ouna_positsioonid) \r\n if self.ussi_positsioonid[0] == self.ouna_positsioonid: \r\n self.ussi_positsioonid.append(self.ussi_positsioonid[-1])\r\n self.skoor += 1\r\n aken.create_image(*self.ussi_positsioonid[-1], image=self.uss_pilt, tags = \"uss\") \r\n self.ouna_positsioonid = self.ouna_spawn()\r\n self.coords(aken.find_withtag(\"oun\"), *self.ouna_positsioonid)\r\n \r\n\r\n\r\n def ouna_spawn(self):\r\n while True:\r\n x_positsioon = random.randint(1, 29) * 10\r\n y_positsioon = random.randint(3, 30) * 10\r\n ouna_positsioon = (x_positsioon, y_positsioon)\r\n\r\n if ouna_positsioon not in self.ussi_positsioonid:\r\n return ouna_positsioon\r\n\r\n \r\n\r\n def liigu(self):\r\n \r\n \r\n self.uss_pea_asukohtX1,self.uss_pea_asukohtY1 = self.ussi_positsioonid[0]\r\n\r\n \r\n if self.liikumis_suund == \"Up\":\r\n uus_pea_positsioon = (self.uss_pea_asukohtX1, self.uss_pea_asukohtY1 - 10)\r\n \r\n elif self.liikumis_suund == \"Down\":\r\n uus_pea_positsioon = (self.uss_pea_asukohtX1, self.uss_pea_asukohtY1 + 10)\r\n \r\n elif self.liikumis_suund == \"Right\":\r\n uus_pea_positsioon = (self.uss_pea_asukohtX1 + 10, self.uss_pea_asukohtY1)\r\n \r\n elif self.liikumis_suund == \"Left\":\r\n uus_pea_positsioon = (self.uss_pea_asukohtX1 - 10, self.uss_pea_asukohtY1)\r\n \r\n \r\n self.ussi_positsioonid = [uus_pea_positsioon] + self.ussi_positsioonid[:-1]\r\n \r\n for osa, positsioon in zip(aken.find_withtag(\"uss\"), self.ussi_positsioonid):\r\n aken.coords(osa, positsioon)\r\n \r\n\r\n if self.uss_pea_asukohtX1>= self.aken.width:\r\n self.kaotus = True \r\n \r\n tkinter.messagebox.showinfo('Kaotus', 'Läksid mängualast välja! ' 'Punktide arv: {}'.format(self.skoor))\r\n\r\n if self.uss_pea_asukohtX1<= 0:\r\n self.kaotus = True \r\n tkinter.messagebox.showinfo('Kaotus', 'Läksid mängualast välja! 
' 'Punktide arv: {}'.format(self.skoor))\r\n \r\n if self.uss_pea_asukohtY1 >= self.aken.height:\r\n self.kaotus = True\r\n tkinter.messagebox.showinfo('Kaotus', 'Läksid mängualast välja! ' 'Punktide arv: {}'.format(self.skoor))\r\n if self.uss_pea_asukohtY1 <= 0:\r\n self.kaotus = True\r\n tkinter.messagebox.showinfo('Kaotus', 'Läksid mängualast välja! ' 'Punktide arv: {}'.format(self.skoor))\r\n if (self.uss_pea_asukohtX1,self.uss_pea_asukohtY1) in self.ussi_positsioonid[2:]:\r\n self.kaotus = True\r\n tkinter.messagebox.showinfo('Kaotus', 'Sõid ennast ära! Punktide arv: {}'.format(self.skoor))\r\n \r\n\r\n def klaviatuur(self, e):\r\n uus_suund = e.keysym\r\n\r\n k6ik_suunad= (\"Up\", \"Down\", \"Left\", \"Right\")\r\n vastas_suunad = ({\"Up\", \"Down\"}, {\"Left\", \"Right\"})\r\n\r\n if (\r\n uus_suund in k6ik_suunad\r\n and {uus_suund, self.liikumis_suund} not in vastas_suunad\r\n ):\r\n self.liikumis_suund = uus_suund\r\n\r\n\r\ntk = tkinter.Tk()\r\ntk.title(\"Ussimäng\")\r\ntk.resizable(0,0)\r\ntk.wm_attributes(\"-topmost\", 1) \r\naken = tkinter.Canvas(tk,width=400,height=500)\r\naken.pack()\r\ntk.update()\r\naken_width = aken.winfo_width()\r\n\r\nuss=(Uss(aken,'black'))\r\n\r\nwhile uss.kaotus == False:\r\n uss.liigu()\r\n uss.ouna_soomine()\r\n tk.update_idletasks()\r\n tk.update()\r\n time.sleep(0.05)\r\n \r\n " }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 18.5, "blob_id": "753db5e2b8f39ca6e296b76fd0f46dab0cbe8af0", "content_id": "4310b458f0bf8a3a1f1f4faa529dcfc00db295af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "lollipas/ussimang", "src_encoding": "UTF-8", "text": "# ussimang\nA snake game made in python\n" } ]
2
Noorul834/PIAIC
https://github.com/Noorul834/PIAIC
8d4fa1372fbe0ecf766e11eb1cf7c58dec2bbcca
5ee1a4ab15c21ba7a01d7806f61e0d00ee6f6f6a
07d4100fbbc03b9fa9ca2ea7e37c8f2baab97255
refs/heads/master
2020-06-15T23:16:13.025354
2019-07-05T14:00:26
2019-07-05T14:00:26
195,418,106
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.681034505367279, "avg_line_length": 29.81818199157715, "blob_id": "ea61fda8ac8c20b821e48035d4b26ffc407c0ba5", "content_id": "5a26b79736531acca18ec76e580142bd836207b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 64, "num_lines": 11, "path": "/assignment13.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 13. Sum of n Positive Integers\r\n# Write a python program to sum of the first n positive integers\r\n# Program Console Sample 1:\r\n# Enter value of n: 5\r\n# Sum of n Positive integers till 5 is 15\r\n\r\nnum=0\r\nvalue=int(input(\"Enter the value of n: \"))\r\nfor i in range(value+1):\r\n num+=i\r\n print(f\"sum of n positive integers till {value} is {num}\")" }, { "alpha_fraction": 0.6965620517730713, "alphanum_fraction": 0.726457417011261, "avg_line_length": 37.47058868408203, "blob_id": "6f8efad9b8fb1792c14a4af94707d6a9f1cb3d3a", "content_id": "de04e89de3e1dd170fb6d51a83bc24c202683e0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 133, "num_lines": 17, "path": "/assignment03.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 3. Divisibility Check of two numbers\r\n# Write a Python program to check whether a number is completely divisible by another number. Accept two integer values form the user\r\n# Program Console Sample Output 1:\r\n# Enter numerator: 4\r\n# Enter Denominator: 2\r\n# Number 4 is Completely divisible by 2\r\n# Program Console Sample Output 2:\r\n# Enter numerator: 7\r\n# Enter Denominator: 4\r\n# Number 7 is not Completely divisible by 4\r\n\r\nnum1=int(input(\"Enter the numerator: \"))\r\nnum2=int(input(\"Enter the denominator: \"))\r\nif num1%num2==0:\r\n print(f\"number {num1} is completely divisible by {num2}.\")\r\nelse:\r\n print(f\"number {num1} is not completely divisible by {num2}.\")" }, { "alpha_fraction": 0.6721311211585999, "alphanum_fraction": 0.7076502442359924, "avg_line_length": 31.454545974731445, "blob_id": "221a6b83aabaac9c9ecb55437b58682c9cabda85", "content_id": "4736537e76a60f6f0f104bd07b4cd2ededce2d02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/assignment11.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 11. Feet to Centimeter Converter\r\n# Write a Python program to convert height in feet to centimetres.\r\n# Program Console Sample 1:\r\n# Enter Height in Feet: 5\r\n# There are 152.4 Cm in 5 ft\r\n# Reference:\r\n# https://www.rapidtables.com/convert/length/feet-to-cm.html\r\n\r\nfeet=int(input(\"Enter height in feet: \"))\r\ncm=feet*30.48\r\nprint(f\"There are {cm} cm in {feet} ft\")" }, { "alpha_fraction": 0.6569506525993347, "alphanum_fraction": 0.665919303894043, "avg_line_length": 32.46154022216797, "blob_id": "f205c7a68154a186a1e050cf3b5a82aff73ed7b7", "content_id": "ab52cb7266902cd10b5622be77aee3c34133ec3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 102, "num_lines": 13, "path": "/assignment05.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 5. 
Copy string 'n' times\r\n# Write a Python program to get a string which is 'n' (non-negative integer) copies of a given string.\r\n# Program Console Output:\r\n# Enter String: Hi\r\n# How many copies of String you need: 4\r\n# 4 Copies of Hi are HiHiHiHi\r\n\r\nstring=input(\"Enter a string: \")\r\nn=int(input(\"How many of copies of string you want?: \"))\r\nif n>0:\r\n print(f\"{n} copies of string {string} are: {string*n}\")\r\nelse:\r\n print(\"Invalid\")" }, { "alpha_fraction": 0.6860465407371521, "alphanum_fraction": 0.7325581312179565, "avg_line_length": 47.14285659790039, "blob_id": "3971d172cebfe3ff4b93b4dc0516b688b8a886b4", "content_id": "3fb6637d24c8b9b7352693cbeeaf66b147fc225a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 688, "license_type": "no_license", "max_line_length": 125, "num_lines": 14, "path": "/assignment09.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 9. Calculate Interest\r\n# Write a Python program to compute the future value of a specified principal amount, rate of interest, and a number of years\r\n# Program Console Sample 1:\r\n# Please enter principal amount: 10000\r\n# Please Enter Rate of interest in %: 0.1\r\n# Enter number of years for investment: 5\r\n# After 5 years your principal amount 10000 over an interest rate of 0.1 % will be 16105.1\r\n\r\n\r\nAmount = int(input(\"Enter any number: \"))\r\nInterest_percentage = float(input(\"Enter rate of interest in percentage: \"))\r\nyears = int(input(\"Enter number of year/years for investment: \"))\r\nfuture_value = Amount*((1+(0.01*Interest_percentage*100))**years)\r\nprint(round(future_value,2))\r\n" }, { "alpha_fraction": 0.6324324607849121, "alphanum_fraction": 0.6810810565948486, "avg_line_length": 26.461538314819336, "blob_id": "1e6e4c297a25c4f88d03b04186955ad94506eed5", "content_id": "a2a3c4c0a090649b54f869bd8ec677a48c10cd70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/assignment12.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 12. BMI Calculator\r\n# Write a Python program to calculate body mass index\r\n# Program Console Sample 1:\r\n# Enter Height in Cm: 180\r\n# Enter Weight in Kg: 75\r\n# Your BMI is 23.15\r\n\r\nimport math\r\nheight=int(input(\"Enter height in cm: \"))\r\nweight=int(input(\"Enter weight in kg: \"))\r\nBMI=(weight/(math.pow(height*0.012,2)))\r\nBMI= round(BMI,2)\r\nprint(f\"Your BMI is:{BMI}\")\r\n" }, { "alpha_fraction": 0.6051703691482544, "alphanum_fraction": 0.6603995561599731, "avg_line_length": 38.42856979370117, "blob_id": "443badce57f18274ee77c384009d1994867a05f8", "content_id": "1f3320d13c3180462268ce4d850feffe47763511", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 90, "num_lines": 21, "path": "/assignment10.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 10. 
Euclidean distance\r\n# write a Python program to compute the distance between the points (x1, y1) and (x2, y2).\r\n# Program Console Sample 1:\r\n# Enter Co-ordinate for x1: 2\r\n# Enter Co-ordinate for x2: 4\r\n# Enter Co-ordinate for y1: 4\r\n# Enter Co-ordinate for y2: 4\r\n# Distance between points (2, 4) and (4, 4) is 2\r\n# ###### Reference:\r\n# https://en.wikipedia.org/wiki/Euclidean_distance\r\n\r\nx1=int(input(\"Enter Co-ordinate for x1: \"))\r\nx2=int(input(\"Enter Co-ordinate for x2: \"))\r\ny1=int(input(\"Enter Co-ordinate for y1: \"))\r\ny2=int(input(\"Enter Co-ordinate for y2: \"))\r\nimport math\r\nprint(f\"(x1,y1))=({x1},{y1})\")\r\nprint(f\"(x2,y2))=({x2},{y2})\")\r\ndistance_formula = math.sqrt(pow(x2-x1,2) + pow(y2-y1,2))\r\ndistance_formula = round(distance_formula,2)\r\nprint(f\"The Distance Between Points ({x1},{y1}) and ({x2},{y2}) are: {distance_formula}\")\r\n\r\n" }, { "alpha_fraction": 0.589531660079956, "alphanum_fraction": 0.64462810754776, "avg_line_length": 24.071428298950195, "blob_id": "cd0ba64c9f98c774487c499f79abb75cd170cbb5", "content_id": "276a3bca477d077f9a0e8021808c4d53d1fea968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/assignment14.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 14. Digits Sum of a Number\r\n# Write a Python program to calculate the sum of the digits in an integer\r\n# Program Console Sample 1:\r\n# Enter a number: 15\r\n# Sum of 1 + 5 is 6\r\n# Program Console Sample 2:\r\n# Enter a number: 1234\r\n# Sum of 1 + 2 + 3 + 4 is 10\r\n\r\nnum=input(\"Enter any num: \")\r\nsum=0\r\nfor s in num:\r\n sum+=int(s)\r\nprint(f\"The sum of num is:\",sum)" }, { "alpha_fraction": 0.646789014339447, "alphanum_fraction": 0.6674311757087708, "avg_line_length": 29.14285659790039, "blob_id": "557f95dbadf7648ee07d391df2cf86625e91f5d6", "content_id": "c46509cbcf144514b95a3939016dc0e622e22cc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 139, "num_lines": 14, "path": "/assignment06.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 6. Check if number is Even or Odd\r\n# Write a Python program to find whether a given number (accept from the user) is even or odd, print out an appropriate message to the user\r\n# Program Console Output 1:\r\n# Enter Number: 4\r\n# 4 is Even\r\n# Program Console Output 2:\r\n# Enter Number: 9\r\n# 9 is Odd\r\n\r\nnum=int(input(\"Enter any num: \"))\r\nif num%2==0:\r\n print(f\"{num} is an even number.\")\r\nelse:\r\n print(f\"{num} is an odd number.\")\r\n" }, { "alpha_fraction": 0.7159090638160706, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 33.400001525878906, "blob_id": "f4b7c186948ad040bffbdd249722861d47801140", "content_id": "4581b72365cb9ed55c5441cb36e640873939894b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 96, "num_lines": 10, "path": "/assignment08.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 8. 
Triangle area\r\n# Write a Python program that will accept the base and height of a triangle and compute the area\r\n# Reference:\r\n# https://www.mathgoodies.com/lessons/vol1/area_triangle\r\n# Area of triangle\r\n\r\nbase=float(input(\"Enter the base: \"))\r\nheight=float(input(\"Enter the height: \"))\r\narea=1/2*base*height\r\nprint(f\"Area of triangle is {area}\")" }, { "alpha_fraction": 0.6703096628189087, "alphanum_fraction": 0.686703085899353, "avg_line_length": 27, "blob_id": "1557b25c3d206129f95667c1a23055b93fa5dc97", "content_id": "e748cbd674593c2b01364da605c3fae42e0a430d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 75, "num_lines": 19, "path": "/assignment02.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 2. Check the number either it's positive, negative or zero\r\n# Write a Python program to check if a number is positive, negative or zero\r\n# Program Console Sample Output 1:\r\n# Enter Number: -1\r\n# Negative Number Entered\r\n# Program Console Sample Output 2:\r\n# Enter Number: 2\r\n# Positive Number Entered\r\n# Program Console Sample Output 3:\r\n# Enter Number: 0\r\n# Zero Entered\r\n\r\nnum = int(input(\"Enter any number: \"))\r\nif num == 0:\r\n print(\"Number is zero.\")\r\nelif num > 0:\r\n print(\"Number is positive.\")\r\nelse:\r\n print(\"Number is negative.\")" }, { "alpha_fraction": 0.6320939064025879, "alphanum_fraction": 0.6399217247962952, "avg_line_length": 26.5, "blob_id": "f343cdeb3af9fb820fea5740fe47aad9a5e277c0", "content_id": "6c8e52af1bd8d4c36681aad73cb24b04697fdf8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/assignment07.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 7. 
Vowel Tester\r\n# Write a Python program to test whether a passed letter is a vowel or not\r\n# Program Console Output 1:\r\n# Enter a character: A\r\n# Letter A is Vowel\r\n# Program Console Output 2:\r\n# Enter a character: e\r\n# Letter e is Vowel\r\n# Program Console Output 2:\r\n# Enter a character: N\r\n# Letter N is not Vowel\r\n\r\nvowels=[\"a\", \"A\",\"e\", \"E\",\"i\",\"I,\",\"o\",\"O\",\"u\",\"U\"]\r\nletter=input(\"Enter a sound: \")\r\nif letter in vowels :\r\n print(f\"{letter} is a vowel\")\r\nelse:\r\n print(f\"{letter} is not a vowel\")" }, { "alpha_fraction": 0.7837837934494019, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 17.5, "blob_id": "3e99fd0d4cab44fc696f8ad9bf908d339b894c3c", "content_id": "58256689e0487268b4a774e5856c513414e8924b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# PIAIC\nAll the assignments of PIAIC\n" }, { "alpha_fraction": 0.6925064325332642, "alphanum_fraction": 0.7286821603775024, "avg_line_length": 41.22222137451172, "blob_id": "2f5e16fd9774d91745cb9f4ca7e97ed58fca767d", "content_id": "728502d24c48aa50b4e0d0d8956187b3f5512740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 97, "num_lines": 9, "path": "/assignment04.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 4. Calculate the Volume of a sphere\r\n# Write a Python program to get the volume of a sphere, please take the radius as input from user\r\n# Program Console Output:\r\n# Enter Radius of Sphere: 1\r\n# Volume of the Sphere with Radius 1 is 4.18\r\n\r\nradius=float(input(\"Enter the radius of the sphere: \"))\r\nvolume=1.33*3.142*radius**3\r\nprint(f\"volume of sphere with radius {radius} is {volume}\")" }, { "alpha_fraction": 0.6658097505569458, "alphanum_fraction": 0.7223650217056274, "avg_line_length": 37.099998474121094, "blob_id": "7f46738e623cf130cce2c48686811d6678436897", "content_id": "13b205ec3e6f0103f567e9bafbcbacdeb4cd4ee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 97, "num_lines": 10, "path": "/assignment01.py", "repo_name": "Noorul834/PIAIC", "src_encoding": "UTF-8", "text": "# 1. Calculate Area of a Circle\r\n# Write a Python program which accepts the radius of a circle from the user and compute the area.\r\n# Program Console Sample Output 1:\r\n# Input Radius: 0.5\r\n# Area of Circle with radius 0.5 is 0.7853981634\r\n#Solution:\r\n\r\nradius=float(input(\"Enter the radius of the circle: \"))\r\narea=3.142*radius**2\r\nprint(f\"Area of circle with radius {radius} is= {area}.\")" } ]
15
Junxi3166/python_study
https://github.com/Junxi3166/python_study
9c7366c6cd44c926a0376e16bf94d0a9a1da3fca
5792c054f9504cd75a2a92df3309891f690bfae3
33a7e619f11e5a89840274adb8afdf22ad8b601e
refs/heads/master
2021-04-29T06:27:09.705993
2020-09-09T07:55:16
2020-09-09T07:55:16
77,968,158
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4752475321292877, "alphanum_fraction": 0.5338283777236938, "avg_line_length": 17.58461570739746, "blob_id": "948fde651bb0e4ecece9a012bc079a7ac361678e", "content_id": "62a188f949375dddbed78dab062bc47c2dfd58a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 47, "num_lines": 65, "path": "/python_practice/other/sort.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = 'junxi'\n\n\n\nlist1 = [42, 20, 17, 13, 28, 14, 23, 15]\n# 插入排序\n# def insert_sort(lists):\n# \t# 列表长度\n# \tcount = len(lists)\n# \tfor i in range(1, count): # 100 1-99 0-99\n# \t\tkey = lists[i] # i指列表下表\n# \t\tj = i - 1\n# \t\twhile j >= 0:\n# \t\t\tif lists[j] > key:\n# \t\t\t\tlists[j + 1] = lists[j]\n# \t\t\t\tlists[j] = key\n# \t\t\tj -= 1\n# \treturn lists\n#\n# print('插入排序结果:', insert_sort(list1))\n\ndef insert_sort(lists):\n\t# 列表长度\n\tcount = len(lists)\n\tfor i in range(1, count): # 100 1-99 0-99\n\t\tkey = lists[i] # i指列表下表\n\t\tj = i - 1\n\t\twhile j >= 0:\n\t\t\tif key < lists[j]:\n\t\t\t\tlists[j + 1] = lists[j]\n\t\t\t\tlists[j] = key\n\t\t\tj -= 1\n\treturn lists\n\nprint('插入排序结果:', insert_sort(list1))\n\n\n\nlist2 = [59, 20, 17, 13, 28, 14, 23, 83]\n# 希尔排序\ndef shell_sort(lists):\n\tcount = len(lists)\n\t# 增量缩减值 2倍\n\tstep = 2\n\t# 初始增量值\n\tgroup = int(count / step)\n\t# print(group)\n\twhile group > 0:\n\t\tfor i in range(0, group):\n\t\t\tj = i + group\n\t\t\twhile j < count:\n\t\t\t\tk = j - group\n\t\t\t\tkey = lists[j]\n\t\t\t\twhile k >= 0:\n\t\t\t\t\tif lists[k] > key:\n\t\t\t\t\t\tlists[k + group] = lists[k]\n\t\t\t\t\t\tlists[k] = key\n\t\t\t\t\tk -= group\n\t\t\t\tj += group\n\t\tgroup = int(group / step)\n\treturn lists\n\nprint('希尔排序结果:', shell_sort(list2))\n\n\n\n\n" }, { "alpha_fraction": 0.5958646535873413, "alphanum_fraction": 0.6296992301940918, "avg_line_length": 22.086956024169922, "blob_id": "9b02caf5ac963b7ed2c59b73ebd9df6f497c24f7", "content_id": "ea98ba9d903de5f58711d3e465c388329042e775", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 62, "num_lines": 23, "path": "/python_practice/other/map.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = 'junxi'\n\n\nimport matplotlib.pyplot as plt\n\n'''matplotlib 绘图'''\n\n# 文字 财务报表\n# labels = '一季度', '二季度', '三季度', '四季度'\nlabels = 'one', 'two', 'three', 'four'\nsizes = [20, 30, 25, 25]\ncolors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\nexplode = (0, 0, 0, 0)\n\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90)\n\nplt.axis('equal')\n\nplt.savefig('C://Users/xiaoxinsoso/Pictures/python/pie1.png')\nplt.show()\n\n" }, { "alpha_fraction": 0.650130569934845, "alphanum_fraction": 0.6579634547233582, "avg_line_length": 17.238094329833984, "blob_id": "fda162efc75236c284c5a336a47c98290caaa33a", "content_id": "90785acbf929d80108a3c51fa286baf54c5759e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/python_practice/game/石头剪刀布.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#引入模块'\nimport 
random\nimport sys\n\n#游戏规则\nwinlist = [['石头','剪刀'],['剪刀','布'],['布','石头']]\n#选择列表\nchoicelist = ['石头','剪刀','布']\n#用户提示\nprompt = '''可选项如下,\n(0)石头\n(1)剪刀\n(2)布\n(3)退出\n请输入你的选择(输入数字即可) '''\n\nwhile True:\n\ttry:\n\t\t#保存用户输入的数字\n\t\tchoicenum = int(input(prompt))\n\t\t#用户退出\n\t\tif choicenum == 3:\n\t\t\t#执行用户退出 跳出循环体\n\t\t\tbreak\n\t\t#用户选择之后和电脑比较 电脑的比较数据从哪来 随机产出数据\n\t\tcomchoice = random.choice(choicelist)\n\t\tuserchoice = choicelist[choicenum]\n\t\tbothchoice = [userchoice,comchoice]\n\n\t\tprint('你选择了%s, 电脑选择了%s'%(userchoice,comchoice))\n\t\t#判断输赢\n\t\tif userchoice == comchoice:\n\t\t\tprint('打成平手')\n\t\telif bothchoice in winlist:\n\t\t\tprint('你赢了, 你厉害')\n\t\telse:\n\t\t\tprint('你输了, 要不要再来一局')\n\texcept(KeyboardInterrupt,EOFError,ValueError,IndexError):\n\t\tprint('输入错误, 请重新输入')\n\t\t# sys.exit()\n\t\tpass\n" }, { "alpha_fraction": 0.611599326133728, "alphanum_fraction": 0.6274164915084839, "avg_line_length": 30.61111068725586, "blob_id": "5b403cb14d6630d335d1bc548e396c144338f96b", "content_id": "8a8b625f1687d07cd6fdd74be015ebc82917ce7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 140, "num_lines": 18, "path": "/python_practice/other/send_mail.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = 'junxi'\n\nimport smtplib\nfrom email.mime.text import MIMEText\n\n# msg = MIMEText('send by python...', 'plain', 'utf-8')\nmsg = MIMEText('<html><body><h1>Hello</h1>' + '<p>send by <a href=\"http://www.xuegod-for.cn/yum\">python</a></body></html>', 'html', 'utf-8')\nmsg['From'] = \"[email protected]\"\nmsg[\"To\"] = \"[email protected]\"\nmsg[\"Subject\"] = \"python test\"\n\nserver = smtplib.SMTP_SSL('smtp.qq.com', 465)\nserver.set_debuglevel(1)\nserver.login(\"[email protected]\", \"xxxxxx\")\nserver.sendmail(\"[email protected]\",[\"[email protected]\"],msg.as_string())\nserver.quit()\n" }, { "alpha_fraction": 0.6663255095481873, "alphanum_fraction": 0.6673490405082703, "avg_line_length": 18.13725471496582, "blob_id": "bb44a6a9bb44f2dbec3891842c1f53bb65275641", "content_id": "e7e80572e348e1c766c483d55b190166de5c02e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 985, "license_type": "no_license", "max_line_length": 91, "num_lines": 51, "path": "/python_practice/other/用pyinotify监控Linux文件系统.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = 'junxi'\n\nimport os\n\nfrom pyinotify import WatchManager, Notifier, ProcessEvent, IN_DELETE, IN_CREATE, IN_MODIFY\n\n\nclass EventHandler(ProcessEvent):\n\t\"\"\"事件处理\"\"\"\n\n\tdef process_IN_CREATE(self, event):\n\t\tprint(\"Create file: % s\" % os.path.join(event.path, event.name))\n\n\tdef process_IN_DELETE(self, event):\n\t\tprint(\"Deletefile: % s\" % os.path.join(event.path, event.name))\n\n\tdef process_IN_MODIFY(self, event):\n\t\tprint(\"Modifyfile: % s\" % os.path.join(event.path, event.name))\n\n\ndef FSMonitor(path):\n\twm = WatchManager()\n\n\tmask = IN_DELETE | IN_CREATE | IN_MODIFY\n\n\tnotifier = Notifier(wm, EventHandler())\n\n\twm.add_watch(path, mask, auto_add=True, rec=True)\n\n\tprint('now starting monitor % s' % (path))\n\n\n\twhile True:\n\n\t\ttry:\n\t\t\tnotifier.process_events()\n\n\t\t\tif notifier.check_events():\n\n\t\t\t\tnotifier.read_events()\n\n\t\texcept 
KeyboardInterrupt:\n\n\t\t\tnotifier.stop()\n\n\t\t\tbreak\n\nif __name__ == \"__main__\":\n\tFSMonitor('/root')\n\n" }, { "alpha_fraction": 0.68544602394104, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 13.199999809265137, "blob_id": "82d9b2d7a00dadd991963f9f43daafee62a9fca0", "content_id": "ff6ea075178bc32079676989c2b4cd1a5a3c9e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/python_practice/other/tab_w.py", "repo_name": "Junxi3166/python_study", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n# __created by junxi__\n\ntry:\n\n import readline\n\nexcept ImportError:\n\n import pyreadline as readline\n\nimport rlcompleter\n\nreadline.parse_and_bind('tab: complete')\n" } ]
6
foxfluff/mandelbrot-py
https://github.com/foxfluff/mandelbrot-py
b5de91904d77510fc4321e0311e3c987601b58db
33115fd2ce3bad6619cd0d38e3f687bbf14a8b2e
4caca6bc33c99af4c26893ab40a0aa30f2762572
refs/heads/master
2020-03-27T22:24:34.138448
2016-06-19T05:09:45
2016-06-19T05:09:45
61,377,417
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5139616131782532, "alphanum_fraction": 0.5375218391418457, "avg_line_length": 27.649999618530273, "blob_id": "6019e70ecde421255bac9998ff7c95ec3eb16fba", "content_id": "1a7fa1987adb6296cdb6028165089f924743897f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1146, "license_type": "permissive", "max_line_length": 77, "num_lines": 40, "path": "/test.py", "repo_name": "foxfluff/mandelbrot-py", "src_encoding": "UTF-8", "text": "import mandelbrot\nimport math\n\niterations = 50\ntest_set = mandelbrot.mandelbrot(iterations)\n\n# just going to use console output, printing something like '#' for points in\n# the set. Assuming default console size of 80x25 (typical for Windows\n# systems). 'Minor' corrections for correct aspect ratio\n\nxmin, xmax = -2.0, 1.0\nymin, ymax = -1.0, 1.0\n\nchars = ('#', ' ', '-', ';', '*')\nupper_bound = len(chars) - 1\nlog_scale = math.log(iterations, upper_bound)\n\nwidth = 36 * 2 #correction for font not being square, mine happens to be 2:1\nheight = 24 * 1\n\nfor y in range(height):\n row = \"\"\n for x in range(width):\n real_x, real_y = (\n xmin + x * (xmax - xmin) / width,\n ymax - y * (ymax - ymin) / height\n )\n #print real_x, real_y\n in_set, iter = test_set.calc_point(complex(real_x, real_y))\n if in_set:\n row += chars[0]\n else:\n if iter == 0:\n i = 0\n else:\n i = int(upper_bound\n * math.log(iter, upper_bound)\n / log_scale)\n row += chars[i+1]\n print row\n" }, { "alpha_fraction": 0.828125, "alphanum_fraction": 0.828125, "avg_line_length": 31, "blob_id": "17cae9c39922422f5d488708753fd0964ae9a03f", "content_id": "4140d06574da38bb0366bdea7e4a684d5d470ab0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "permissive", "max_line_length": 47, "num_lines": 2, "path": "/README.md", "repo_name": "foxfluff/mandelbrot-py", "src_encoding": "UTF-8", "text": "# mandelbrot-py\nSimple class for calculating the Mandelbrot set\n" }, { "alpha_fraction": 0.5883142948150635, "alphanum_fraction": 0.5950302481651306, "avg_line_length": 27.09433937072754, "blob_id": "f266a5fcc8d7660ea24f45c9e2d6476731f8d0af", "content_id": "e93e5b1e08fc2c529f9a0705e020c94fc45d03b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "permissive", "max_line_length": 77, "num_lines": 53, "path": "/mandelbrot.py", "repo_name": "foxfluff/mandelbrot-py", "src_encoding": "UTF-8", "text": "\nclass mandelbrot(object):\n\n def __init__(self, iterations=50, cache=False):\n self.iterations = iterations\n self.cache = cache\n\n def calc_point(self, coordinate):\n z, iterinit = self.cache_retr(coordinate)\n # I sense that my future is full of grey hair and edge cases\n for iteration in xrange(iterinit, self.iterations):\n z = z**2 + coordinate\n if abs(z.real) > 2: break\n\n self.cache_point(coordinate, iteration, z) #inb4 edge cases\n if iteration == self.iterations - 1 and z.real < 2:\n result = True\n else:\n result = False\n #print 'false, %i' %iteration\n\n return result, iteration\n\n def cache_point(self, coordinate, iterations, value):\n if not self.cache:\n return False\n # actual caching implementation goes here :V\n\n def cache_retr(self, coordinate):\n # no cache exists yet sooooooooooo gonna just return as if nothing is\n # cached\n return complex(0, 0), 0\n\n # Properties\n\n @property\n def iterations(self):\n return 
self._iterations\n\n @iterations.setter\n def iterations(self, other):\n if not isinstance(other, int):\n raise TypeError\n self._iterations = other\n\n @property\n def cache(self):\n return self._cache\n\n @cache.setter\n def cache(self, other):\n if not isinstance(other, bool):\n raise TypeError\n self._cache = other" } ]
3
DanielGorgis/excel_reader
https://github.com/DanielGorgis/excel_reader
745205d9b029f2727a64d6e06eb31905aa563fcf
b7cbdd0db466d2d3398a9ee5440df65cf5129c13
b1620da9d1534add963c5dc1d3967010187b84f2
refs/heads/master
2020-09-07T15:17:42.950642
2020-01-26T00:45:44
2020-01-26T00:45:44
220,824,839
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44679298996925354, "alphanum_fraction": 0.4504373073577881, "avg_line_length": 31.512195587158203, "blob_id": "bd2a9d499743849c691703acd769118b68152845", "content_id": "64367923e24c03ebc5b0f88a5d346280eadeab2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1372, "license_type": "no_license", "max_line_length": 149, "num_lines": 41, "path": "/base.html", "repo_name": "DanielGorgis/excel_reader", "src_encoding": "UTF-8", "text": "<html>\r\n <head>\r\n {% block head %}\r\n <link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\" integrity=\"sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T\" crossorigin=\"anonymous\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='style.css') }}\">\r\n \r\n {% endblock %}\r\n </head>\r\n {% block body %}\r\n <body>\r\n\r\n <div class=\"container\">\r\n <div class=\"header clearfix\">\r\n <nav>\r\n <ul class=\"nav nav-pills float-right\">\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link active\" href=\"Home\">Home <span class=\"sr-only\">(current)</span></a>\r\n </li>\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"Guide\">Guide</a>\r\n </li>\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"Contact\">Contact</a>\r\n </li>\r\n <li class=\"nav-item\">\r\n <a class=\"nav-link\" href=\"Program\">Read excel</a>\r\n </li>\r\n </ul>\r\n </nav>\r\n <h3 class=\"text-muted\">IBMCIC excel reader <br></h3>\r\n </div>\r\n </body>\r\n {% endblock %}\r\n <div id=\"content\">{% block content %}{% endblock %}</div>\r\n <div id=\"footer\">\r\n {% block footer %}\r\n \r\n {% endblock %}\r\n </div>\r\n \r\n</html>" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 9.333333015441895, "blob_id": "1f96e9d7d2fa29061892adaf0497d5b74352be05", "content_id": "beb7af450c1bb90b4a93bd23079a14415d1d8005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 36, "license_type": "no_license", "max_line_length": 14, "num_lines": 3, "path": "/requirements.txt", "repo_name": "DanielGorgis/excel_reader", "src_encoding": "UTF-8", "text": "Flask == 1.1.1\r\npandas\r\nwerkzeug\r\n\r\n" }, { "alpha_fraction": 0.536740243434906, "alphanum_fraction": 0.5515573024749756, "avg_line_length": 26.756521224975586, "blob_id": "7708b042ce362c3baf60ca25c6940af642e687c7", "content_id": "90fa232d5d5c2391334a973aaf39ff0355c7cf60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 128, "num_lines": 115, "path": "/app.py", "repo_name": "DanielGorgis/excel_reader", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, send_file\r\napp = Flask(__name__)\r\nimport pandas as pd\r\nfrom werkzeug import secure_filename\r\n\r\n\r\n\r\n\r\[email protected]('/Menu', methods=['GET','POST'])\r\ndef Menu_func():\r\n return render_template('Menu.html')\r\n\r\n\r\[email protected]('/Guide', methods=['GET','POST'])\r\ndef index_test():\r\n return render_template('Guide.html')\r\n\r\n\r\[email protected]('/', methods=['GET','POST'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\[email protected]('/Program', methods=['GET','POST'])\r\ndef home():\r\n if request.method == 'POST':\r\n \r\n #request form fields \r\n #request form excel files\r\n f1 = request.files['file1']\r\n f2 
= request.files['file2']\r\n f3 = request.files['file3']\r\n\r\n #request form column names\r\n f1_column = request.form['ColumnnameFile1']\r\n f2_column = request.form['ColumnnameFile2']\r\n filter_column = request.form['ColumnnameFile3']\r\n #request form sheet names\r\n f1_sheet = request.form['Sheetname1']\r\n f2_sheet = request.form['Sheetname2']\r\n filter_sheet = request.form['Sheetname3']\r\n \r\n #request form with searchable cols\r\n split_string = request.form['SearchCols']\r\n List_Of_Cols_To_Search = [x.strip() for x in split_string.split(',')]\r\n\r\n\r\n #save files\r\n f1.save(secure_filename(f1.filename))\r\n f2.save(secure_filename(f2.filename))\r\n f3.save(secure_filename(f3.filename))\r\n \r\n #use pd pandas to read .xlsx\r\n df1 = pd.read_excel(f1, sheet_name = f1_sheet)\r\n df2 = pd.read_excel(f2, sheet_name = f2_sheet)\r\n df3 = pd.read_excel(f3, sheet_name = filter_sheet)\r\n\r\n #---------------------------#---------------------#--------------------------#--------------------#--------------------#\r\n #Convert DF vars' to str. And to uppercase letters\r\n df1[f1_column] = df1[f1_column].str.upper() \r\n df2[f2_column] = df2[f2_column].str.upper()\r\n df3[filter_column] = df3[filter_column].str.upper()\r\n\r\n #Remove all hosts that aren't unique from df1\r\n filtered_unique_hosts_DF = df1[df1[f1_column].isin(df2[f2_column])]\r\n\r\n #get dataframe column to list \r\n filter_list = []\r\n\r\n for i in df3[filter_column]:\r\n filter_list.append(str(i))\r\n \r\n\r\n #Loop through all searchable cols\r\n for i in List_Of_Cols_To_Search:\r\n Last_Result_DF = filtered_unique_hosts_DF[filtered_unique_hosts_DF[i].isin(filter_list)]\r\n\r\n\r\n\r\n\r\n #Convert variable to excel for download\r\n Last_Result_DF.to_excel('LastResult.xlsx')\r\n\r\n\r\n return send_file('LastResult.xlsx') \r\n \r\n\r\n return render_template('Program.html')\r\n\r\n\r\[email protected]('/upload', methods=['GET','POST'])\r\n\r\ndef send():\r\n \r\n if request.method == 'POST':\r\n f = request.files['file1']\r\n f.save(secure_filename(f.filename))\r\n\r\n df = pd.read_excel(f)\r\n\r\n oneList = []\r\n for i in df['HOST ID']:\r\n oneList.append(str(i))\r\n print(oneList[5])\r\n\r\n df.to_excel('SomeFile.xlsx')\r\n \r\n \r\n return send_file('SomeFile.xlsx') \r\n \r\n\r\n return render_template('home.html')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n" }, { "alpha_fraction": 0.7207792401313782, "alphanum_fraction": 0.7445887327194214, "avg_line_length": 18.173913955688477, "blob_id": "c82efb72e7a2df5275a435ace6fdd73f096f78f2", "content_id": "d081afab355cdbfb0a20606c6fe3cd9208300686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 462, "license_type": "no_license", "max_line_length": 56, "num_lines": 23, "path": "/Dockerfile", "repo_name": "DanielGorgis/excel_reader", "src_encoding": "UTF-8", "text": "# Inherit python image\r\nFROM python:3.6-slim\r\n\r\n# Set up directories\r\nRUN mkdir /application\r\nWORKDIR /application\r\n\r\n# Copy python dependencies and install these\r\nCOPY requirements.txt .\r\nRUN pip install -r requirements.txt\r\n\r\n# Copy the rest of the application\r\nCOPY . .\r\n\r\n# Environment variables\r\nENV PYTHONUNBUFFERED 1\r\n\r\n# EXPOSE port 8000 to allow communication to/from server\r\nEXPOSE 8001\r\nSTOPSIGNAL SIGINT\r\n\r\nENTRYPOINT [\"python\"]\r\nCMD [\"flask_app.py\"]" } ]
4
leibowitz/graphene-django-extras
https://github.com/leibowitz/graphene-django-extras
d1674ab55f98218af359919e711a88e095d6ae63
34d525355d96cace8a695f38f2f0d324d1ba9ca0
fc2fddeb2d51989814e46e00a39f936fb758d957
refs/heads/master
2021-07-24T09:03:31.153541
2017-11-05T11:52:34
2017-11-05T11:52:34
109,574,849
0
0
null
2017-11-05T11:50:25
2017-11-05T00:13:59
2017-11-03T17:14:21
null
[ { "alpha_fraction": 0.592872679233551, "alphanum_fraction": 0.5929872989654541, "avg_line_length": 37.31081008911133, "blob_id": "c098cdd19822082672ee668eb2833e8271c3ccab", "content_id": "88364e9c8dbe2784c38375f660aff66353cf860e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8727, "license_type": "permissive", "max_line_length": 115, "num_lines": 222, "path": "/graphene_django_extras/types.py", "repo_name": "leibowitz/graphene-django-extras", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nfrom collections import OrderedDict\r\n\r\nfrom django.db.models import QuerySet\r\nfrom django.utils.functional import SimpleLazyObject\r\nfrom graphene import Field, InputField, ObjectType, Int\r\nfrom graphene.types.base import BaseOptions\r\nfrom graphene.types.inputobjecttype import InputObjectType, InputObjectTypeContainer\r\nfrom graphene.types.utils import yank_fields_from_attrs\r\nfrom graphene_django.fields import DjangoListField\r\nfrom graphene_django.utils import is_valid_django_model, DJANGO_FILTER_INSTALLED\r\n\r\nfrom .base_types import generic_django_object_type_factory\r\nfrom .converter import construct_fields\r\nfrom .registry import get_global_registry, Registry\r\nfrom .settings import graphql_api_settings\r\n\r\n__all__ = ('DjangoObjectType', 'DjangoInputObjectType', 'DjangoListObjectType')\r\n\r\n\r\nclass DjangoObjectOptions(BaseOptions):\r\n fields = None\r\n input_fields = None\r\n interfaces = ()\r\n model = None\r\n queryset = None\r\n registry = None\r\n connection = None\r\n create_container = None\r\n results_field_name = None\r\n filter_fields = ()\r\n input_for = None\r\n\r\n\r\nclass DjangoObjectType(ObjectType):\r\n @classmethod\r\n def __init_subclass_with_meta__(cls, model=None, registry=None, skip_registry=False,\r\n only_fields=(), exclude_fields=(), filter_fields=None,\r\n interfaces=(), **options):\r\n assert is_valid_django_model(model), (\r\n 'You need to pass a valid Django Model in {}.Meta, received \"{}\".'\r\n ).format(cls.__name__, model)\r\n\r\n if not registry:\r\n registry = get_global_registry()\r\n\r\n assert isinstance(registry, Registry), (\r\n 'The attribute registry in {} needs to be an instance of '\r\n 'Registry, received \"{}\".'\r\n ).format(cls.__name__, registry)\r\n\r\n if not DJANGO_FILTER_INSTALLED and filter_fields:\r\n raise Exception(\"Can only set filter_fields if Django-Filter is installed\")\r\n\r\n django_fields = yank_fields_from_attrs(\r\n construct_fields(model, registry, only_fields, exclude_fields),\r\n _as=Field,\r\n )\r\n\r\n _meta = DjangoObjectOptions(cls)\r\n _meta.model = model\r\n _meta.registry = registry\r\n _meta.filter_fields = filter_fields\r\n _meta.fields = django_fields\r\n\r\n super(DjangoObjectType, cls).__init_subclass_with_meta__(_meta=_meta, interfaces=interfaces, **options)\r\n\r\n if not skip_registry:\r\n registry.register(cls)\r\n\r\n def resolve_id(self, info):\r\n return self.pk\r\n\r\n @classmethod\r\n def is_type_of(cls, root, info):\r\n if isinstance(root, SimpleLazyObject):\r\n root._setup()\r\n root = root._wrapped\r\n if isinstance(root, cls):\r\n return True\r\n if not is_valid_django_model(type(root)):\r\n raise Exception((\r\n 'Received incompatible instance \"{}\".'\r\n ).format(root))\r\n model = root._meta.model\r\n return model == cls._meta.model\r\n\r\n @classmethod\r\n def get_node(cls, info, id):\r\n try:\r\n return cls._meta.model.objects.get(pk=id)\r\n except cls._meta.model.DoesNotExist:\r\n return 
None\r\n\r\n\r\nclass DjangoInputObjectType(InputObjectType):\r\n @classmethod\r\n def __init_subclass_with_meta__(cls, model=None, container=None, registry=None, skip_registry=False,\r\n connection=None, use_connection=None, only_fields=(), exclude_fields=(),\r\n filter_fields=None, input_for=\"create\", nested_fields=False, **options):\r\n assert is_valid_django_model(model), (\r\n 'You need to pass a valid Django Model in {}.Meta, received \"{}\".'\r\n ).format(cls.__name__, model)\r\n\r\n if not registry:\r\n registry = get_global_registry()\r\n\r\n assert isinstance(registry, Registry), (\r\n 'The attribute registry in {} needs to be an instance of '\r\n 'Registry, received \"{}\".'\r\n ).format(cls.__name__, registry)\r\n\r\n assert input_for.lower not in ('create', 'delete', 'update'), (\r\n 'You need to pass a valid input_for value in {}.Meta, received \"{}\".'\r\n ).format(cls.__name__, input_for)\r\n\r\n input_for = input_for.lower()\r\n\r\n if not DJANGO_FILTER_INSTALLED and filter_fields:\r\n raise Exception(\"Can only set filter_fields if Django-Filter is installed\")\r\n\r\n django_fields = yank_fields_from_attrs(\r\n construct_fields(model, registry, only_fields, exclude_fields, None, nested_fields),\r\n _as=Field, sort=False\r\n )\r\n\r\n django_input_fields = yank_fields_from_attrs(\r\n construct_fields(model, registry, only_fields, exclude_fields, input_for, nested_fields),\r\n _as=InputField, sort=False\r\n )\r\n\r\n if container is None:\r\n container = type(cls.__name__, (InputObjectTypeContainer, cls), {})\r\n\r\n _meta = DjangoObjectOptions(cls)\r\n _meta.by_polar = True\r\n _meta.model = model\r\n _meta.registry = registry\r\n _meta.filter_fields = filter_fields\r\n # _meta.fields = django_fields\r\n _meta.fields = django_input_fields\r\n _meta.input_fields = django_input_fields\r\n _meta.container = container\r\n _meta.connection = connection\r\n _meta.input_for = input_for\r\n\r\n super(InputObjectType, cls).__init_subclass_with_meta__(_meta=_meta, **options)\r\n\r\n if not skip_registry:\r\n registry.register(cls, for_input=input_for)\r\n\r\n @classmethod\r\n def get_type(cls):\r\n \"\"\"\r\n This function is called when the unmounted type (InputObjectType instance)\r\n is mounted (as a Field, InputField or Argument)\r\n \"\"\"\r\n return cls\r\n\r\n\r\nclass DjangoListObjectType(ObjectType):\r\n\r\n class Meta:\r\n abstract = True\r\n\r\n @classmethod\r\n def __init_subclass_with_meta__(cls, model=None, results_field_name=None, pagination=None,\r\n only_fields=(), exclude_fields=(), filter_fields=None,\r\n queryset=None, interfaces=(), **options):\r\n\r\n assert is_valid_django_model(model), (\r\n 'You need to pass a valid Django Model in {}.Meta, received \"{}\".'\r\n ).format(cls.__name__, model)\r\n\r\n if not DJANGO_FILTER_INSTALLED and filter_fields:\r\n raise Exception(\"Can only set filter_fields if Django-Filter is installed\")\r\n\r\n assert isinstance(queryset, QuerySet) or queryset is None, (\r\n 'The attribute queryset in {} needs to be an instance of '\r\n 'Django model queryset, received \"{}\".'\r\n ).format(cls.__name__, queryset)\r\n\r\n results_field_name = results_field_name or 'results'\r\n\r\n baseType = get_global_registry().get_type_for_model(model)\r\n\r\n if not baseType:\r\n baseType = generic_django_object_type_factory(DjangoObjectType, new_model=model,\r\n new_only_fields=only_fields,\r\n new_exclude_fields=exclude_fields,\r\n new_filter_fields=filter_fields)\r\n filter_fields = filter_fields or baseType._meta.filter_fields\r\n\r\n if 
pagination:\r\n result_container = pagination.get_pagination_field(baseType)\r\n else:\r\n global_paginator = graphql_api_settings.DEFAULT_PAGINATION_CLASS\r\n if global_paginator:\r\n global_paginator = global_paginator()\r\n description = '{} list, paginated by {}'.format(model.__name__, global_paginator.__name__)\r\n result_container = global_paginator.get_field(baseType, description=description)\r\n else:\r\n result_container = DjangoListField(baseType)\r\n\r\n _meta = DjangoObjectOptions(cls)\r\n _meta.model = model\r\n _meta.queryset = queryset\r\n _meta.baseType = baseType\r\n _meta.results_field_name = results_field_name\r\n _meta.filter_fields = filter_fields\r\n _meta.exclude_fields = exclude_fields\r\n _meta.only_fields = only_fields\r\n _meta.fields = OrderedDict([\r\n (results_field_name, result_container),\r\n ('count', Field(Int, name='totalCount', required=True, description=\"Total count of matches elements\"))\r\n ])\r\n\r\n super(DjangoListObjectType, cls).__init_subclass_with_meta__(_meta=_meta, interfaces=interfaces, **options)\r\n\r\n @classmethod\r\n def getOne(cls):\r\n return cls._meta.baseType\r\n" }, { "alpha_fraction": 0.7040039300918579, "alphanum_fraction": 0.7165315747261047, "avg_line_length": 28.55172348022461, "blob_id": "e29c6d4bf9b8c721e0311cc0b06d736b0163680e", "content_id": "94a525d64be38e38ed45d005b56226697c2ad61b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16284, "license_type": "permissive", "max_line_length": 202, "num_lines": 551, "path": "/README.md", "repo_name": "leibowitz/graphene-django-extras", "src_encoding": "UTF-8", "text": "\n---\n\n# ![Graphene Logo](http://graphene-python.org/favicon.png) Graphene-Django-Extras [![PyPI version](https://badge.fury.io/py/graphene-django-extras.svg)](https://badge.fury.io/py/graphene-django-extras) \n\n\nThis package add some extra functionalities to graphene-django to facilitate the graphql use without Relay:\n 1. Allows pagination and filtering on Queries.\n 2. Allows to define DjangoRestFramework serializers based Mutations.\n 3. Adds support to Subscription's requests and its integration with websockets using Channels package. :muscle:\n\n## Installation\n\nFor installing graphene-django-extras, just run this command in your shell:\n\n```bash\npip install graphene-django-extras\n```\n\n## Documentation:\n\n### Extra functionalities:\n **Fields:**\n 1. DjangoObjectField\n 2. DjangoFilterListField\n 3. DjangoFilterPaginateListField\n 4. DjangoListObjectField (*Recommended for Queries definition*)\n\n **Mutations:**\n 1. DjangoSerializerMutation (*Recommended for Mutations definition*)\n\n **Types:**\n 1. DjangoListObjectType (*Recommended for Types definition*)\n 2. DjangoInputObjectType\n\n **Paginations:**\n 1. LimitOffsetGraphqlPagination\n 2. PageGraphqlPagination\n 3. CursorGraphqlPagination (*coming soon*)\n\n **Subscriptions:**\n 1. Subscription (*Abstract class to define subscriptions to a DjangoSerializerMutation class*)\n 2. GraphqlAPIDemultiplexer (*Custom WebSocket consumer subclass that handles demultiplexing streams*)\n\n\n### Queries and Mutations examples:\n\nThis is a basic example of graphene-django-extras package use. 
You can configure global params\nfor DjangoListObjectType classes pagination definitions on settings.py like this:\n\n```python\n GRAPHENE_DJANGO_EXTRAS = {\n 'DEFAULT_PAGINATION_CLASS': 'graphene_django_extras.paginations.LimitOffsetGraphqlPagination',\n 'DEFAULT_PAGE_SIZE': 20,\n 'MAX_PAGE_SIZE': 50,\n }\n```\n\n#### 1- Types Definition:\n\n```python\nfrom django.contrib.auth.models import User\nfrom graphene_django import DjangoObjectType\nfrom graphene_django_extras import DjangoListObjectType\nfrom graphene_django_extras.pagination import LimitOffsetGraphqlPagination\n\n\nclass UserType(DjangoObjectType):\n class Meta:\n model = User\n description = \" Type definition for a single user \"\n filter_fields = {\n 'id': ['exact', ],\n 'first_name': ['icontains', 'iexact'],\n 'last_name': ['icontains', 'iexact'],\n 'username': ['icontains', 'iexact'],\n 'email': ['icontains', 'iexact']\n }\n\n\nclass UserListType(DjangoListObjectType):\n class Meta:\n description = \" Type definition for user list \"\n model = User\n pagination = LimitOffsetGraphqlPagination(page_size=20)\n```\n\n#### 2- You can to define InputTypes for use on mutations:\n\n```python\nfrom graphene_django_extras import DjangoInputObjectType\n\n\nclass UserInput(DjangoInputObjectType):\n class Meta:\n description = \" User InputType definition to use as input on an Arguments class on traditional Mutations \"\n model = User\n```\n\n#### 3- You can define traditional mutations that use InputTypes or Mutations based on DRF serializers:\n\n```python\nimport graphene\nfrom graphene_django_extras import DjangoSerializerMutation\n\nfrom .serializers import UserSerializer\nfrom .types import UserType\nfrom .input_types import UserInputType\n\n\nclass UserSerializerMutation(DjangoSerializerMutation):\n \"\"\"\n DjangoSerializerMutation auto implement Create, Delete and Update functions\n \"\"\"\n class Meta:\n description = \" DRF serializer based Mutation for Users \"\n serializer_class = UserSerializer\n\n\nclass UserMutation(graphene.Mutation):\n \"\"\"\n On traditional mutation classes definition you must implement the mutate function\n \"\"\"\n\n user = graphene.Field(UserType, required=False)\n\n class Arguments:\n new_user = graphene.Argument(UserInput)\n\n class Meta:\n description = \" Graphene traditional mutation for Users \"\n\n @classmethod\n def mutate(cls, root, info, *args, **kwargs):\n ...\n```\n\n#### 4- Defining the Scheme file:\n\n```python\nimport graphene\nfrom graphene_django_extras import DjangoObjectField, DjangoListObjectField, DjangoFilterPaginateListField,\nDjangoFilterListField, LimitOffsetGraphqlPagination\nfrom .types import UserType, UserListType\nfrom .mutations import UserMutation, UserSerializerMutation\n\n\nclass Queries(graphene.ObjectType):\n # Possible User list queries definitions\n all_users = DjangoListObjectField(UserListType, description=_('All Users query'))\n all_users1 = DjangoFilterPaginateListField(UserType, pagination=LimitOffsetGraphqlPagination())\n all_users2 = DjangoFilterListField(UserType)\n all_users3 = DjangoListObjectField(UserListType, filterset_class=UserFilter, description=_('All Users query'))\n\n # Defining a query for a single user\n # The DjangoObjectField have a ID type input field, that allow filter by id and is't necessary to define resolve function\n user = DjangoObjectField(UserType, description=_('Single User query'))\n\n # Another way to define a query to single user\n user1 = DjangoObjectField(UserListType.getOne(), description=_('User list with pagination and 
filtering'))\n\n\nclass Mutations(graphene.ObjectType):\n user_create = UserSerializerMutation.CreateField(deprecation_reason='Some one deprecation message')\n user_delete = UserSerializerMutation.DeleteField()\n user_update = UserSerializerMutation.UpdateField()\n\n traditional_user_mutation = UserMutation.Field()\n```\n\n#### 5- Queries's examples:\n```js\n{\n allUsers(username_Icontains:\"john\"){\n results(limit:5, offset:5){\n id\n username\n firstName\n lastName\n }\n totalCount\n }\n\n allUsers1(lastName_Iexact:\"Doe\", limit:5, offset:0){\n id\n username\n firstName\n lastName\n }\n\n allUsers2(firstName_Icontains: \"J\"){\n id\n username\n firstName\n lastName\n }\n\n user(id:2){\n id\n username\n firstName\n }\n\n user1(id:2){\n id\n username\n firstName\n }\n}\n```\n\n#### 6- Mutations's examples:\n\n```js\nmutation{\n userCreate(newUser:{username:\"test\", password:\"test*123\"}){\n user{\n id\n username\n firstName\n lastName\n }\n ok\n errors{\n field\n messages\n }\n }\n\n userDelete(id:1){\n ok\n errors{\n field\n messages\n }\n }\n\n userUpdate(newUser:{id:1, username:\"John\"}){\n user{\n id\n username\n }\n ok\n errors{\n field\n messages\n }\n }\n}\n```\n\n### **Subscriptions:**\n\nThis first approach to add Graphql subscriptions support with Channels in **graphene-django-extras**, use **channels-api** package.\n\n#### 1- Defining custom Subscriptions classes:\n\nYou must to have defined a DjangoSerializerMutation class for each model that you want to define a Subscription class:\n\n```python\n# app/graphql/subscriptions.py\nimport graphene\nfrom graphene_django_extras.subscription import Subscription\nfrom .mutations import UserMutation, GroupMutation\n\n\nclass UserSubscription(Subscription):\n class Meta:\n mutation_class = UserMutation\n stream = 'users'\n description = 'User Subscription'\n\n\nclass GroupSubscription(Subscription):\n class Meta:\n mutation_class = GroupMutation\n stream = 'groups'\n description = 'Group Subscription'\n\n```\n\nAdd the subscriptions definitions into your app's schema:\n\n```python\n# app/graphql/schema.py\nimport graphene\nfrom .subscriptions import UserSubscription, GroupSubscription\n\n\nclass Subscriptions(graphene.ObjectType):\n user_subscription = UserSubscription.Field()\n GroupSubscription = PersonSubscription.Field()\n```\n\nAdd the app's schema into your project root schema:\n\n```python\n# schema.py\nimport graphene\nimport custom.app.route.graphql.schema\n\n\nclass RootQuery(custom.app.route.graphql.schema.Query, graphene.ObjectType):\n class Meta:\n description = 'The project root query definition'\n\n\nclass RootSubscription(custom.app.route.graphql.schema.Mutation, graphene.ObjectType):\n class Meta:\n description = 'The project root mutation definition'\n\n\nclass RootSubscription(custom.app.route.graphql.schema.Subscriptions, graphene.ObjectType):\n class Meta:\n description = 'The project root subscription definition'\n\n\nschema = graphene.Schema(\n query=RootQuery,\n mutation=RootMutation,\n subscription=RootSubscription\n)\n```\n\n#### 2- Defining Channels settings and custom routing config ( *For more information see Channels documentation* ):\n\nWe define app routing, as if they were app urls:\n\n```python\n# app/routing.py\nfrom graphene_django_extras.subscriptions import GraphqlAPIDemultiplexer\nfrom channels.routing import route_class\nfrom .graphql.subscriptions import UserSubscription, GroupSubscription\n\n\nclass CustomAppDemultiplexer(GraphqlAPIDemultiplexer):\n consumers = {\n 'users': 
UserSubscription.get_binding().consumer,\n 'groups': GroupSubscription.get_binding().consumer\n }\n\n\napp_routing = [\n route_class(CustomAppDemultiplexer)\n]\n```\n\nWe define project routing, as if they were project urls:\n\n```python\n# project/routing.py\nfrom channels import include\n\n\nproject_routing = [\n include(\"custom.app.folder.routing.app_routing\", path=r\"^/custom_websocket_path\"),\n]\n\n```\n\nYou should put into your INSTALLED_APPS the **channels** and **channels_api** modules and you must to add your project's routing definition into the CHANNEL_LAYERS setting:\n\n```python\n# settings.py\n...\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n ...\n 'channels',\n 'channels_api',\n\n 'custom_app'\n)\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgiref.inmemory.ChannelLayer\",\n \"ROUTING\": \"myproject.routing.project_routing\", # Our project routing\n },\n}\n...\n```\n\n#### 3- Subscription's examples:\n\nIn your WEB client you must define websocket connection to: *'ws://host:port/custom_websocket_path'*.\nWhen the connection is established, the server return a websocket's message like this:\n*{\"channel_id\": \"GthKdsYVrK!WxRCdJQMPi\", \"connect\": \"success\"}*, where you must store the **channel_id** value to later use in your graphql subscriptions request for subscribe or unsubscribe operations.\n\nThe graphql's subscription request accept five possible parameters:\n 1. **operation**: Operation to perform: subscribe or unsubscribe. (*required*)\n 2. **action**: Action to which you wish to subscribe: create, update, delete or all_actions. (*required*)\n 3. **channelId**: Identification of the connection by websocket. (*required*)\n 4. **id**: Object's ID field value that you wish to subscribe to. (*optional*)\n 5. **data**: Model's fields that you want to appear in the subscription notifications. (*optional*)\n\n```js\nsubscription{\n userSubscription(\n action: UPDATE,\n operation: SUBSCRIBE,\n channelId: \"GthKdsYVrK!WxRCdJQMPi\",\n id: 5,\n data: [ID, USERNAME, FIRST_NAME, LAST_NAME, EMAIL, IS_SUPERUSER]\n ){\n ok\n error\n stream\n }\n}\n```\n\nIn this case, the subscription request sent return a websocket message to client like this:\n*{\"action\": \"update\", \"operation\": \"subscribe\", \"ok\": true, \"stream\": \"users\", \"error\": null}*\nand from that moment each time than the user with id=5 get modified, you will receive a message through websocket's connection with the following format:\n\n```js\n{\n \"stream\": \"users\",\n \"payload\": {\n \"action\": \"update\",\n \"model\": \"auth.user\",\n \"data\": {\n \"id\": 5,\n \"username\": \"meaghan90\",\n \"first_name\": \"Meaghan\",\n \"last_name\": \"Ackerman\",\n \"email\": \"[email protected]\",\n \"is_superuser\": false\n }\n }\n}\n```\n\nFor unsubscribe you must send a graphql request like this:\n\n```js\nsubscription{\n userSubscription(\n action: UPDATE,\n operation: UNSUBSCRIBE,\n channelId: \"GthKdsYVrK!WxRCdJQMPi\",\n id: 5\n ){\n ok\n error\n stream\n }\n}\n```\n\n**NOTE:** Each time than the graphql's server restart, you must to reestablish the websocket connection and resend the graphql's subscription request with the new websocket connection id.\n\n\n## Change Log:\n\n#### v0.1.0-alpha12:\n 1. Added new settings param: MAX_PAGE_SIZE, to use on GRAPHENE_DJANGO_EXTRAS\n configuration dict for better customize DjangoListObjectType's pagination.\n 2. Added support to Django's field: GenericRel.\n 3. 
Improve model's fields calculation for to add all possible related and reverse fields.\n 4. Improved documentation translation.\n\n#### v0.1.0-alpha11:\n 1. Improved ordering for showed fields on graphqli's IDE.\n 2. Added better descriptions for auto generated fields.\n\n#### v0.1.0-alpha10:\n 1. Improve converter.py file to avoid create field for auto generate OneToOneField\n product of an inheritance.\n 2. Fixed bug in Emun generation for fields with choices of model inheritance child.\n\n#### v0.1.0-alpha9:\n 1. Fixed bug on GenericType and GenericInputType generations for\n Queries list Type and Mutations.\n\n#### v0.1.0-alpha6:\n 1. Fixed with exclude fields and converter function.\n\n#### v0.1.0-alpha5:\n 1. Updated dependencies to graphene-django>=2.0.\n 2. Fixed minor bugs on queryset_builder performance.\n\n#### v0.1.0-alpha4:\n 1. Add queryset options to DjangoListObjectType Meta class for specify wanted model queryset.\n 2. Add AuthenticatedGraphQLView on graphene_django_extras.views for use\n 'permission', 'authorization' and 'throttle' classes based on the DRF settings. Special thanks to\n [@jacobh](https://github.com/jacobh) for this\n [comment](https://github.com/graphql-python/graphene/issues/249#issuecomment-300068390)\n\n#### v0.1.0-alpha3:\n 1. Fixed bug on subscriptions when not specified any field in \"data\" parameter to bean return on notification\n message.\n\n#### v0.1.0-alpha2:\n 1. Fixed bug when subscribing to a given action (create, update pr delete).\n 2. Added intuitive and simple web tool to test notifications of graphene-django-extras subscription.\n\n#### v0.1.0-alpha1:\n 1. Added support to multiselect choices values for models.CharField with choices attribute,\n on queries and mutations. Example: Integration with django-multiselectfield package.\n 2. Added support to GenericForeignKey and GenericRelation fields, on queries and mutations.\n 3. Added first approach to support Subscriptions with Channels, with subscribe and unsubscribe operations.\n Using channels-api package.\n 4. Fixed minors bugs.\n\n#### v0.0.4:\n 1. Fix error on DateType encode.\n\n#### v0.0.3:\n 1. Implement custom implementation of DateType for use converter and avoid error on Serializer Mutation.\n\n#### v0.0.2:\n 1. Changed dependency of DRF to 3.6.4 on setup.py file, to avoid an import error produced by some changes in\n new version of DRF=3.7.0 and because DRF 3.7.0 dropped support to Django versions < 1.10.\n\n#### v0.0.1:\n 1. Fixed bug on DjangoInputObjectType class that refer to unused interface attribute.\n 2. Added support to create nested objects like in\n [DRF](http://www.django-rest-framework.org/api-guide/serializers/#writable-nested-representations),\n it's valid to SerializerMutation and DjangoInputObjectType, only is necessary to specify nested_fields=True\n on its Meta class definition.\n 3. Added support to show, only in mutations types to create objects and with debug=True on settings,\n inputs autocomplete ordered by required fields first.\n 4. Fixed others minors bugs.\n\n#### v0.0.1-rc.2:\n 1. Make queries pagination configuration is more friendly.\n\n#### v0.0.1-rc.1:\n 1. Fixed a bug with input fields in the converter function.\n\n#### v0.0.1-beta.10:\n 1. Fixed bug in the queryset_factory function because it did not always return a queryset.\n\n#### v0.0.1-beta.9:\n 1. Remove hard dependence with psycopg2 module.\n 2. Fixed bug that prevented use queries with fragments.\n 3. 
Fixed bug relating to custom django_filters module and ordering fields.\n\n#### v0.0.1-beta.6:\n 1. Optimizing imports, fix some minors bugs and working on performance.\n\n#### v0.0.1-beta.5:\n 1. Repair conflict on converter.py, by the use of get_related_model function with: OneToOneRel,\n ManyToManyRel and ManyToOneRel.\n\n#### v0.0.1-beta.4:\n 1. First commit.\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.728723406791687, "avg_line_length": 30.33333396911621, "blob_id": "d817c8dcf14249477565d34d0922c633913a64a1", "content_id": "cd0a89bc4e4db8ec7bf39f5baae46ec5509375be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "permissive", "max_line_length": 53, "num_lines": 6, "path": "/graphene_django_extras/subscriptions/__init__.py", "repo_name": "leibowitz/graphene-django-extras", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom .consumers import GraphqlAPIDemultiplexer\nfrom .subscription import Subscription\n\n__author__ = 'Ernesto'\n__all__ = ('Subscription', 'GraphqlAPIDemultiplexer')\n" }, { "alpha_fraction": 0.618020236492157, "alphanum_fraction": 0.6288345456123352, "avg_line_length": 29.372358322143555, "blob_id": "826974cdbf190d57f001255e7f4805f6f998af83", "content_id": "c8571ac679c14601b5ac495abbbd3854e16c15e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 18679, "license_type": "permissive", "max_line_length": 340, "num_lines": 615, "path": "/README.rst", "repo_name": "leibowitz/graphene-django-extras", "src_encoding": "UTF-8", "text": "\nGraphene-Django-Extras\n======================\n\nThis package add some extra functionalities to **graphene-django** to facilitate the graphql use without Relay:\n 1. Allows pagination and filtering on Queries.\n 2. Allows to define DjangoRestFramework serializers based Mutations.\n 3. Adds support to Subscription's requests and its integration with websockets using **Channels** package.\n\nInstallation:\n-------------\n\nFor installing graphene-django-extras, just run this command in your shell:\n\n.. code:: bash\n\n pip install \"graphene-django-extras\"\n\nDocumentation:\n--------------\n\n**********************\nExtra functionalities:\n**********************\n **Fields:**\n 1. DjangoObjectField\n 2. DjangoFilterListField\n 3. DjangoFilterPaginateListField\n 4. DjangoListObjectField (Recommended for Queries definition)\n\n **Mutations:**\n 1.\tDjangoSerializerMutation (Recommended for Mutations definition)\n\n **Types:**\n 1.\tDjangoListObjectType (Recommended for Types definition)\n 2.\tDjangoInputObjectType\n\n **Paginations:**\n 1.\tLimitOffsetGraphqlPagination\n 2.\tPageGraphqlPagination\n 3.\tCursorGraphqlPagination (coming soon)\n\n **Subscriptions:**\n 1. Subscription (Abstract class to define subscriptions to a DjangoSerializerMutation)\n 2. GraphqlAPIDemultiplexer (Custom WebSocket consumer subclass that handles demultiplexing streams)\n\nQueries and Mutations examples:\n-------------------------------\n\nThis is a basic example of graphene-django-extras package use. You can configure global params for\nDjangoListObjectType classes pagination definitions on settings.py like this:\n\n.. 
code:: python\n\n GRAPHENE_DJANGO_EXTRAS = {\n 'DEFAULT_PAGINATION_CLASS': 'graphene_django_extras.paginations.LimitOffsetGraphqlPagination',\n 'DEFAULT_PAGE_SIZE': 20,\n 'MAX_PAGE_SIZE': 50,\n }\n\n********************\n1- Types Definition:\n********************\n\n\n.. code:: python\n\n from django.contrib.auth.models import User\n from graphene_django import DjangoObjectType\n from graphene_django_extras import DjangoListObjectType\n from graphene_django_extras.pagination import LimitOffsetGraphqlPagination\n\n\n class UserType(DjangoObjectType):\n class Meta:\n model = User\n description = \" Type definition for a single user object \"\n filter_fields = {\n 'id': ['exact', ],\n 'first_name': ['icontains', 'iexact'],\n 'last_name': ['icontains', 'iexact'],\n 'username': ['icontains', 'iexact'],\n 'email': ['icontains', 'iexact']\n }\n\n\n class UserListType(DjangoListObjectType):\n class Meta:\n description = \" Type definition for users objects list \"\n model = User\n pagination = LimitOffsetGraphqlPagination()\n\n\n*****************************************************\n2- You can to define InputTypes for use on mutations:\n*****************************************************\n\n.. code:: python\n\n from graphene_django_extras import DjangoInputObjectType\n\n\n class UserInput(DjangoInputObjectType):\n class Meta:\n description = \" User InputType definition to use as input on an Arguments class on traditional Mutations \"\n model = User\n\n\n**********************\n3- Defining Mutations:\n**********************\n\nYou can define traditional mutations that use InputTypes or Mutations based on DRF serializers:\n\n\n.. code:: python\n\n import graphene\n from .serializers import UserSerializer\n from graphene_django_extras import DjangoSerializerMutation\n from .types import UserType\n from .input_types import UserInputType\n\n\n class UserSerializerMutation(DjangoSerializerMutation):\n \"\"\"\n DjangoSerializerMutation auto implement Create, Delete and Update functions\n \"\"\"\n class Meta:\n description = \" DRF serializer based Mutation for Users \"\n serializer_class = UserSerializer\n\n\n class UserMutation(graphene.Mutation):\n \"\"\"\n On traditional mutation classes definition you must implement the mutate function\n \"\"\"\n\n user = graphene.Field(UserType, required=False)\n\n class Arguments:\n new_user = graphene.Argument(UserInput)\n\n class Meta:\n description = \" Graphene traditional mutation for Users \"\n\n @classmethod\n def mutate(cls, root, info, *args, **kwargs):\n ...\n\n\n********************\n4- Defining schemes:\n********************\n\n.. 
code:: python\n\n import graphene\n from graphene_django_extras import DjangoObjectField, DjangoListObjectField, DjangoFilterPaginateListField, DjangoFilterListField, LimitOffsetGraphqlPagination\n from .types import UserType, UserListType\n from .mutations import UserMutation, UserSerializerMutation\n\n\n class Queries(graphene.ObjectType):\n # Possible User list queries definitions\n all_users = DjangoListObjectField(UserListType, description=_('All Users query'))\n all_users1 = DjangoFilterPaginateListField(UserType, pagination=LimitOffsetGraphqlPagination())\n all_users2 = DjangoFilterListField(UserType)\n all_users3 = DjangoListObjectField(UserListType, filterset_class=UserFilter, description=_('All Users query'))\n\n # Defining a query for a single user\n # The DjangoObjectField have a ID type input field, that allow filter by id and is't necessary to define resolve function\n user = DjangoObjectField(UserType, description=_('Single User query'))\n\n # Another way to define a query to single user\n user1 = DjangoObjectField(UserListType.getOne(), description=_('User List with pagination and filtering'))\n\n\n class Mutations(graphene.ObjectType):\n user_create = UserSerializerMutation.CreateField(deprecation_reason='Some one deprecation message')\n user_delete = UserSerializerMutation.DeleteField()\n user_update = UserSerializerMutation.UpdateField()\n\n traditional_user_mutation = UserMutation.Field()\n\n\n**********************\n5- Queries's examples:\n**********************\n\n.. code:: python\n\n {\n allUsers(username_Icontains:\"john\"){\n results(limit:5, offset:5){\n id\n username\n firstName\n lastName\n }\n totalCount\n }\n\n allUsers1(lastName_Iexact:\"Doe\", limit:5, offset:0){\n id\n username\n firstName\n lastName\n }\n\n allUsers2(firstName_Icontains: \"J\"){\n id\n username\n firstName\n lastName\n }\n\n user(id:2){\n id\n username\n firstName\n }\n\n user1(id:2){\n id\n username\n firstName\n }\n }\n\n\n************************\n6- Mutations's examples:\n************************\n\n.. code:: python\n\n mutation{\n userCreate(newUser:{password:\"test*123\", email: \"[email protected]\", username:\"test\"}){\n user{\n id\n username\n firstName\n lastName\n }\n ok\n errors{\n field\n messages\n }\n }\n\n userDelete(id:1){\n ok\n errors{\n field\n messages\n }\n }\n\n userUpdate(newUser:{id:1, username:\"John\"}){\n user{\n id\n username\n }\n ok\n errors{\n field\n messages\n }\n }\n }\n\nSubscriptions:\n--------------\n\nThis first approach to add Graphql subscriptions support with Channels in graphene-django-extras, use channels-api package.\n\n*****************************************\n1- Defining custom Subscriptions classes:\n*****************************************\n\nYou must to have defined a DjangoSerializerMutation class for each model that you want to define a Subscription class:\n\n.. code:: python\n\n # app/graphql/subscriptions.py\n import graphene\n from graphene_django_extras.subscription import Subscription\n from .mutations import UserMutation, GroupMutation\n\n\n class UserSubscription(Subscription):\n class Meta:\n mutation_class = UserMutation\n stream = 'users'\n description = 'User Subscription'\n\n\n class GroupSubscription(Subscription):\n class Meta:\n mutation_class = GroupMutation\n stream = 'groups'\n description = 'Group Subscription'\n\n\nAdd the subscriptions definitions into your app's schema:\n\n.. 
code:: python\n\n # app/graphql/schema.py\n import graphene\n from .subscriptions import UserSubscription, GroupSubscription\n\n\n class Subscriptions(graphene.ObjectType):\n user_subscription = UserSubscription.Field()\n GroupSubscription = PersonSubscription.Field()\n\n\nAdd the app's schema into your project root schema:\n\n.. code:: python\n\n # schema.py\n import graphene\n import custom.app.route.graphql.schema\n\n\n class RootQuery(custom.app.route.graphql.schema.Query, graphene.ObjectType):\n class Meta:\n description = 'The project root query definition'\n\n\n class RootSubscription(custom.app.route.graphql.schema.Mutation, graphene.ObjectType):\n class Meta:\n description = 'The project root mutation definition'\n\n\n class RootSubscription(custom.app.route.graphql.schema.Subscriptions, graphene.ObjectType):\n class Meta:\n description = 'The project root subscription definition'\n\n\n schema = graphene.Schema(\n query=RootQuery,\n mutation=RootMutation,\n subscription=RootSubscription\n )\n\n\n********************************************************\n2- Defining Channels settings and custom routing config:\n********************************************************\n**Note**: For more information about this step see Channels documentation.\n\nYou must to have defined a DjangoSerializerMutation class for each model that you want to define a Subscription class:\n\nWe define app routing, as if they were app urls:\n\n.. code:: python\n\n # app/routing.py\n from graphene_django_extras.subscriptions import GraphqlAPIDemultiplexer\n from channels.routing import route_class\n from .graphql.subscriptions import UserSubscription, GroupSubscription\n\n\n class CustomAppDemultiplexer(GraphqlAPIDemultiplexer):\n consumers = {\n 'users': UserSubscription.get_binding().consumer,\n 'groups': GroupSubscription.get_binding().consumer\n }\n\n\n app_routing = [\n route_class(CustomAppDemultiplexer)\n ]\n\n\nDefining our project routing, like custom root project urls:\n\n.. code:: python\n\n # project/routing.py\n from channels import include\n\n project_routing = [\n include(\"custom.app.folder.routing.app_routing\", path=r\"^/custom_websocket_path\"),\n ]\n\n\nYou should put into your INSTALLED_APPS the channels and channels_api modules and you must to add your project's routing definition into the CHANNEL_LAYERS setting:\n\n.. code:: python\n\n # settings.py\n ...\n INSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n ...\n 'channels',\n 'channels_api',\n\n 'custom_app'\n )\n\n CHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgiref.inmemory.ChannelLayer\",\n \"ROUTING\": \"myproject.routing.project_routing\", # Our project routing\n },\n }\n ...\n\n\n***************************\n3- Subscription's examples:\n***************************\n\nIn your WEB client you must define websocket connection to: 'ws://host:port/custom_websocket_path'.\nWhen the connection is established, the server return a websocket's message like this:\n{\"channel_id\": \"GthKdsYVrK!WxRCdJQMPi\", \"connect\": \"success\"}, where you must store the channel_id value to later use in your graphql subscriptions request for subscribe or unsubscribe operations.\n\nThe graphql's subscription request accept five possible parameters:\n1. **operation**: Operation to perform: subscribe or unsubscribe. (required)\n2. **action**: Action to which you wish to subscribe: create, update, delete or all_actions. (required)\n3. 
**channelId**: Identification of the connection by websocket. (required)\n4. **id**: Object's ID field value that you wish to subscribe to. (optional)\n5. **data**: Model's fields that you want to appear in the subscription notifications. (optional)\n\n.. code:: python\n\n subscription{\n userSubscription(\n action: UPDATE,\n operation: SUBSCRIBE,\n channelId: \"GthKdsYVrK!WxRCdJQMPi\",\n id: 5,\n data: [ID, USERNAME, FIRST_NAME, LAST_NAME, EMAIL, IS_SUPERUSER]\n ){\n ok\n error\n stream\n }\n }\n\n\nIn this case, the subscription request sent return a websocket message to client like this: *{\"action\": \"update\", \"operation\": \"subscribe\", \"ok\": true, \"stream\": \"users\", \"error\": null}* and from that moment each time than the user with id=5 get modified, you will receive a message through websocket's connection with the following format:\n\n.. code:: python\n\n {\n \"stream\": \"users\",\n \"payload\": {\n \"action\": \"update\",\n \"model\": \"auth.user\",\n \"data\": {\n \"id\": 5,\n \"username\": \"meaghan90\",\n \"first_name\": \"Meaghan\",\n \"last_name\": \"Ackerman\",\n \"email\": \"[email protected]\",\n \"is_superuser\": false\n }\n }\n }\n\n\nFor unsubscribe you must send a graphql request like this:\n\n.. code:: python\n\n subscription{\n userSubscription(\n action: UPDATE,\n operation: UNSUBSCRIBE,\n channelId: \"GthKdsYVrK!WxRCdJQMPi\",\n id: 5\n ){\n ok\n error\n stream\n }\n }\n\n\n*NOTE*: Each time than the graphql's server restart, you must to reestablish the websocket connection and resend the graphql's subscription request with the new websocket connection id.\n\n\nChange Log:\n-----------\n\n***************\nv0.1.0-alpha12:\n***************\n1. Added new settings param: MAX_PAGE_SIZE, to use on GRAPHENE_DJANGO_EXTRAS configuration dict for better customize DjangoListObjectType's pagination.\n2. Added support to Django's field: GenericRel.\n3. Improve model's fields calculation for to add all possible related and reverse fields.\n4. Improved documentation translation.\n\n***************\nv0.1.0-alpha11:\n***************\n1. Improved ordering for showed fields on graphqli's IDE.\n2. Added better descriptions for auto generated fields.\n\n***************\nv0.1.0-alpha10:\n***************\n1. Improve converter.py file to avoid create field for auto generate OneToOneField product of an inheritance.\n2. Fixed bug in Emun generation for fields with choices of model inheritance child.\n\n**************\nv0.1.0-alpha9:\n**************\n1. Fixed bug on GenericType and GenericInputType generations for Queries list Type and Mutations.\n\n**************\nv0.1.0-alpha6:\n**************\n1. Fixed with exclude fields and converter function.\n\n**************\nv0.1.0-alpha5:\n**************\n1. Updated to graphene-django>=2.0.\n2. Fixed minor bugs on queryset_builder performance.\n\n**************\nv0.1.0-alpha4:\n**************\n1. Add **queryset** options to **DjangoListObjectType** Meta class for specify wanted model queryset.\n2. Add AuthenticatedGraphQLView on graphene_django_extras.views for use 'permission', 'authorization' and 'throttle' classes based on the DRF settings. Special thanks to `@jacobh <https://github.com/jacobh>`_ for this `comment <https://github.com/graphql-python/graphene/issues/249#issuecomment-300068390>`_.\n\n**************\nv0.1.0-alpha3:\n**************\n1. Fixed bug on subscriptions when not specified any field in \"data\" parameter to bean return on notification message.\n\n**************\nv0.1.0-alpha2:\n**************\n1. 
Fixed bug when subscribing to a given action (create, update pr delete).\n2. Added intuitive and simple web tool to test notifications of graphene-django-extras subscription.\n\n**************\nv0.1.0-alpha1:\n**************\n1. Added support to multiselect choices values for models.CharField with choices attribute, on queries and mutations. Example: Integration with django-multiselectfield package.\n2. Added support to GenericForeignKey and GenericRelation fields, on queries and mutations.\n3. Added first approach to support Subscriptions with **Channels**, with subscribe and unsubscribe operations. Using **channels-api** package.\n4. Fixed minors bugs.\n\n*******\nv0.0.4:\n*******\n1. Fix error on DateType encode.\n\n*******\nv0.0.3:\n*******\n1. Implement custom implementation of DateType for use converter and avoid error on Serializer Mutation.\n\n*******\nv0.0.2:\n*******\n1. Changed dependency of DRF to 3.6.4 on setup.py file, to avoid an import error produced by some changes in new version of DRF=3.7.0 and because DRF 3.7.0 dropped support to Django versions < 1.10.\n\n*******\nv0.0.1:\n*******\n1. Fixed bug on DjangoInputObjectType class that refer to unused interface attribute.\n2. Added support to create nested objects like in `DRF <http://www.django-rest-framework.org/api-guide/serializers/#writable-nested-representations>`, it's valid to SerializerMutation and DjangoInputObjectType, only is necessary to specify nested_fields=True on its Meta class definition.\n3. Added support to show, only in mutations types to create objects and with debug=True on settings, inputs autocomplete ordered by required fields first.\n4. Fixed others minors bugs.\n\n************\nv0.0.1-rc.2:\n************\n1. Make queries pagination configuration is more friendly.\n\n************\nv0.0.1-rc.1:\n************\n1. Fixed a bug with input fields in the converter function.\n\n***************\nv0.0.1-beta.10:\n***************\n1. Fixed bug in the queryset_factory function because it did not always return a queryset.\n\n**************\nv0.0.1-beta.9:\n**************\n1. Remove hard dependence with psycopg2 module.\n2. Fixed bug that prevented use queries with fragments.\n3. Fixed bug relating to custom django_filters module and ordering fields.\n\n**************\nv0.0.1-beta.6:\n**************\n1. Optimizing imports, fix some minors bugs and working on performance.\n\n**************\nv0.0.1-beta.5:\n**************\n1. Repair conflict on converter.py, by the use of get_related_model function with: OneToOneRel, ManyToManyRel and ManyToOneRel.\n\n**************\nv0.0.1-beta.4:\n**************\n1. First commit" } ]
4
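The graphene-django-extras record above documents, in both of its README blobs, an `allUsers` query whose `results` field is paginated with `limit`/`offset`. As a minimal sketch of how that documented schema could be exercised — assuming a fully configured Django project, and with `myapp.schema` as a purely hypothetical import path for the `Queries` and `Mutations` classes shown in the record:

```python
import graphene

# Hypothetical import path; the Queries/Mutations classes themselves are the
# ones defined in the record's README, not something new.
from myapp.schema import Queries, Mutations

schema = graphene.Schema(query=Queries, mutation=Mutations)

# Filtered, paginated query exactly as documented in the record: filter users
# by username, then slice the result list with limit/offset inside `results`.
result = schema.execute("""
{
  allUsers(username_Icontains: "john") {
    results(limit: 5, offset: 5) { id username }
    totalCount
  }
}
""")
print(result.errors or result.data)
```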
roinaveiro/ads_trusto
https://github.com/roinaveiro/ads_trusto
b8bbfcf1fda4bfea2d14015fea72bd4ba711b0bb
af2dbf1980dac9a9396c115c924e1c57c3be598d
f858d1a4422a0d67d6d26e8c6e657b708fcdd599
refs/heads/main
2023-06-20T03:21:07.118459
2021-07-20T15:01:12
2021-07-20T15:01:12
305,450,022
0
0
null
2020-10-19T16:41:56
2020-11-13T09:58:24
2020-11-13T10:00:08
Jupyter Notebook
[ { "alpha_fraction": 0.4932481348514557, "alphanum_fraction": 0.51458740234375, "avg_line_length": 35.272178649902344, "blob_id": "aecf788e9bb0be721246f32c4fd7b568733fa0f0", "content_id": "26793d6bce91c316a5ccde1c7c029197cf652e97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17995, "license_type": "permissive", "max_line_length": 130, "num_lines": 496, "path": "/ads.py", "repo_name": "roinaveiro/ads_trusto", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom scipy.stats import beta\n\n\nclass ADS:\n \"\"\"\n Class to simulate road\n Args:\n l (int): road lenght\n \"\"\"\n\n def __init__(self, road, char, driver, driver_char, driver_state_evol):\n\n # Road details and driver state\n self.road = road\n self.N = len(road)\n self.driver = driver\n\n # Init ads\n self.current_cell = 0 ## Current cell\n self.next_cell = self.road[self.current_cell + 1] ## I see CONTENT of next cell\n # self.mode = \"AUTON\" ## Start with auton\n\n # ODD and ENV variables\n self.Dir = np.ones([3,3])\n self.Dir[self.next_cell, self.current_cell] += 1 ## I update my knowledge, coz I see the next cell\n\n # Driver state variables\n self.driver_char = driver_char \n self.driver_state_evol = driver_state_evol\n self.char = char\n self.prior_driver_state = np.array([0.9, 0.1]) ## For every possible initial road state\n ## This is p(driver_state | char, road state)\n self.prob_driver_state = self.normalize(self.driver_char[str(self.char[0])].values \\\n * self.prior_driver_state)\n\n ## Relevant parameters\n self.env_states = self.driver_state_evol.index.unique(\"Obstacle\").values\n self.driver_states = self.driver_state_evol.index.unique(\"Current\").values\n\n self.content_current_cell = np.zeros(len(self.env_states))\n self.content_current_cell[ self.road[self.current_cell] ] = 1.0\n\n # Trajectory planning and utilities\n self.v_auton = {0:0, 1:2, 2:3} # Obstacle: velocity (AUTON mode)\n self.v_manual = {0:0, 1:1, 2:4} # Obstacle: velocity (MANUAL mode)\n self.u_d = {0:0.0, 1:0.1, 2:0.2, 3:0.3, 4:0.5}\n\n # Warnings\n self.driver_state_threshold = 0.85\n self.env_state_threshold_rock = 0.15\n self.env_state_threshold_puddle = 0.25\n ##\n self.rock_warnings = np.zeros(self.N) + 100\n self.puddle_warnings = np.zeros(self.N) + 100\n self.state_warnings = np.zeros(self.N) + 100\n\n # Issue first warnings\n ## Forecasts - Driver state\n state_pred = self.predict_driver_state()\n self.state_pred = np.vstack(list(state_pred.values()))\n\n ## Forecasts - Environment state\n env_pred = self.predict_env()\n self.env_pred = np.vstack(list(env_pred.values()))\n\n ## Issue warnings\n self.issue_warnings()\n \n # Decisions made and modes\n self.modes = np.array(['AUTON' for _ in range(self.N)], dtype=object)\n self.decision_auton = np.zeros(self.N) + 100\n self.decision_manual = np.zeros(self.N) + 100\n self.decision_manual_aware = np.zeros(self.N) + 100\n self.decision_manual_dist = np.zeros(self.N) + 100\n ##\n self.decision_auton[self.current_cell] = self.trajectory_planning(\"AUTON\")[0]\n self.decision_manual_aware[self.current_cell] = self.trajectory_planning(\"MANUAL_AWARE\")[0]\n self.decision_manual_dist[self.current_cell] = self.trajectory_planning(\"MANUAL_DIST\")[0]\n self.decision_manual[self.current_cell] = self.decision_manual_aware[self.current_cell]\n\n # Utilities attained\n self.utilities = np.zeros(self.N)\n self.utilities[self.current_cell] = self.compute_cell_utility('AUTON', \n 
self.decision_auton[self.current_cell])\n\n # Counters\n self.RtI = 0\n self.prop_RtI = 0\n self.emergency = 0\n self.crashes = 0\n self.skids = 0\n\n # For DIPA\n self.underperformance = 0\n self.dipa_alpha = 20\n self.dipa_beta = 20\n\n\n ##\n def move(self):\n\n # Update\n self.update()\n\n # Move \n self.current_cell += 1\n self.content_current_cell = np.zeros(len(self.env_states))\n self.content_current_cell[ self.road[self.current_cell] ] = 1.0\n self.next_cell = self.road[self.current_cell + 1] \n \n\n ## Forecasts - Driver state\n state_pred = self.predict_driver_state()\n self.state_pred = np.vstack(list(state_pred.values()))\n\n ## Forecasts - Environment state\n env_pred = self.predict_env()\n self.env_pred = np.vstack(list(env_pred.values()))\n\n # Issue warnings\n self.issue_warnings()\n\n # Make decisions\n self.decide()\n\n # Evaluate driving modes\n if self.modes[self.current_cell] == \"AUTON\":\n self.eval_RtI()\n self.utilities[self.current_cell] = self.compute_cell_utility('AUTON', \n self.decision_auton[self.current_cell])\n\n else:\n\n self.utilities[self.current_cell] = self.compute_cell_utility('MANUAL', \n self.decision_manual[self.current_cell])\n\n ###########################################################################################\n # For DIPA ###############################################################################\n ###########################################################################################\n d = self.decision_manual_aware[self.current_cell]\n exp_utility_aware = self.u_d[d] + (1 if self.road[self.current_cell] == 1 else 0) * -10 * \\\n (0.5 if d==2 else 0.8 if d==3 else 0.85 if d==4 else 0) + \\\n (1 if self.road[self.current_cell] == 0 else 0) * (-100 if d!=0 else 0)\n\n d = self.decision_manual_dist[self.current_cell]\n exp_utility_dist = self.u_d[d] + (1 if self.road[self.current_cell] == 1 else 0) * -10 * \\\n (0.5 if d==2 else 0.8 if d==3 else 0.85 if d==4 else 0) + \\\n (1 if self.road[self.current_cell] == 0 else 0) * (-100 if d!=0 else 0)\n\n exp_utility = self.prob_driver_state[0]*exp_utility_aware + \\\n self.prob_driver_state[1]*exp_utility_dist\n\n if self.utilities[self.current_cell] < exp_utility:\n # Underperformance\n self.underperformance += 1\n self.dipa_alpha += 1\n\n else:\n self.dipa_beta += 1\n\n\n\n ###########################################################################################\n ###########################################################################################\n\n\n self.counter_manual += 1 \n if self.prob_driver_state[1] > 0.75 or self.counter_manual >= 10:\n self.modes[self.current_cell + 1] = \"AUTON\"\n else:\n self.modes[self.current_cell + 1] = \"MANUAL\"\n \n\n def update(self):\n\n ## Update environment knowledge\n observed_cell = self.road[self.current_cell + 2]\n self.Dir[observed_cell, self.next_cell] += 1\n\n ## Update driver state knowledge\n aux = self.driver_state_evol.xs(self.next_cell, level=\"Obstacle\").values.T\n aux = np.dot(aux, self.prob_driver_state.T)\n self.prob_driver_state = self.normalize(self.driver_char\\\n [str(self.char[self.current_cell+1])].values * aux)\n\n\n def trajectory_planning(self, mode):\n\n if mode == \"AUTON\": \n max_env_pred = np.argmax(self.env_pred, axis=1)\n trajectory = np.vectorize(self.v_auton.get)(max_env_pred)\n return np.append(self.v_auton[self.road[self.current_cell]], trajectory)\n '''\n trajectory = np.zeros(5)\n for j in range(5):\n aux = [ self.compute_cell_exp_utility(\"AUTON\", self.env_pred[j,:], i) for i in [0,1,2,3] ]\n 
trajectory[j] = np.argmax(np.array(aux))\n\n current_decision = np.argmax(np.array([self.compute_cell_exp_utility(\"AUTON\", \\\n self.content_current_cell, i) for i in [0,1,2,3]]))\n\n return np.append(current_decision, trajectory)\n '''\n\n elif mode == \"MANUAL_AWARE\":\n env_pred = self.road[self.current_cell:self.current_cell+6]\n return np.vectorize(self.v_manual.get)(env_pred)\n\n else:\n env_pred = np.append(self.road[self.current_cell], np.array([2,2,2,2,2]))\n\n if self.road[self.current_cell + 1] == 0: \n env_pred[1] = 0\n\n if self.road[self.current_cell + 2] == 0:\n env_pred[2] = 0\n \n return np.vectorize(self.v_manual.get)(env_pred)\n\n def decide(self):\n\n # Make speed decisions and trajectory planning\n traj_auton = self.trajectory_planning(\"AUTON\")\n traj_manual_aware = self.trajectory_planning(\"MANUAL_AWARE\")\n traj_manual_dist = self.trajectory_planning(\"MANUAL_DIST\")\n\n self.traj_plan_auton = traj_auton[1:]\n self.traj_plan_manual_aware = traj_manual_aware[1:]\n self.traj_plan_manual_dist = traj_manual_dist[1:]\n\n self.decision_auton[self.current_cell] = traj_auton[0]\n self.decision_manual_aware[self.current_cell] = traj_manual_aware[0]\n self.decision_manual_dist[self.current_cell] = traj_manual_dist[0]\n\n if self.driver[self.current_cell - 1] == 0:\n\n if self.driver[self.current_cell] == 0:\n self.decision_manual[self.current_cell] = self.decision_manual_aware[self.current_cell]\n else:\n self.decision_manual[self.current_cell] = self.decision_manual_aware[self.current_cell-1] \n\n else:\n \n self.decision_manual[self.current_cell] = \\\n self.decision_manual_dist[self.current_cell -1]\n\n \n\n def predict_driver_state(self):\n\n predictions = {}\n env_pred = self.predict_env()\n\n\n ## One cell ahead.\n nstate_nobs = np.zeros( [len(self.driver_states), len(self.env_states)] )\n for i in self.env_states:\n aux = self.driver_state_evol.xs(i, level=\"Obstacle\").values.T\n nstate_nobs[:, i] = np.dot(aux, self.prob_driver_state.T)\n predictions[\"1\"] = np.dot(nstate_nobs, env_pred[\"1\"])\n\n for k in [2,3,4,5]:\n ## Step 1\n y_bwd = self.normalize_arr( ( self.normalize_arr(self.Dir) * env_pred[str(k-1)] ).T ) \n\n ## Step 2\n state_nobs = np.dot(nstate_nobs, y_bwd)\n\n ## Step 3\n nstate_nobs = np.zeros( [len(self.driver_states), len(self.env_states)] )\n for i in self.env_states:\n aux = self.driver_state_evol.xs(i, level=\"Obstacle\").values.T\n nstate_nobs[:, i] = np.dot(aux, state_nobs[:,i])\n\n ## Step 4\n predictions[str(k)] = np.dot(nstate_nobs, env_pred[str(k)])\n\n return(predictions)\n \n\n def predict_env(self):\n\n predictions = {}\n\n ## One cell ahead. 
This is observed\n if self.next_cell == 0:\n predictions[\"1\"] = np.array([1,0,0])\n elif self.next_cell == 1:\n predictions[\"1\"] = np.array([0,1,0])\n else:\n predictions[\"1\"] = np.array([0,0,1])\n\n\n ## Two cells ahead\n if self.road[self.current_cell + 2] == 0: ## Then I see the rock\n predictions[\"2\"] = np.array([1,0,0])\n else:\n prob = self.normalize( self.Dir[:, self.next_cell] )\n predictions[\"2\"] = np.append( 0, self.normalize(prob[1:]) )\n\n ## Three cells ahead\n if self.road[self.current_cell + 3] == 0: ## Then I see the rock\n predictions[\"3\"] = np.array([1,0,0])\n\n elif self.road[self.current_cell + 2] == 0: ## Previous cell was rock\n prob = self.normalize( self.Dir[:, self.road[self.current_cell + 2] ] )\n predictions[\"3\"] = np.append( 0, self.normalize(prob[1:]) )\n \n else: ## previous cell either clean or puddle\n aux = self.normalize_arr( self.Dir[1:, 1:] )\n predictions[\"3\"] = np.append(0, np.dot(aux, predictions[\"2\"][1:].T) )\n\n ## Four cells ahead\n if self.road[self.current_cell + 3] == 0: ## Previous cell was rock\n predictions[\"4\"] = self.normalize(self.Dir[:, 0])\n\n else: ## Previous was either puddle or clean\n predictions[\"4\"] = np.dot( self.normalize_arr(self.Dir[:,1:]), predictions[\"3\"][1:].T )\n\n ## Five cells ahead\n predictions[\"5\"] = np.dot( self.normalize_arr(self.Dir), predictions[\"4\"].T )\n\n return(predictions)\n\n\n def issue_warnings(self):\n\n ## Driver state warnings\n self.state_warnings[self.current_cell] = np.any(self.state_pred[:,1] > self.driver_state_threshold)\n\n ## Env state warning Rock\n self.rock_warnings[self.current_cell] = np.any(self.env_pred[3:,0] > self.env_state_threshold_rock)\n\n ## Env state warning Puddle\n self.puddle_warnings[self.current_cell] = np.any(self.env_pred[1:,1] > self.env_state_threshold_puddle)\n\n\n def compute_cell_utility(self, mode, d):\n\n if mode == \"AUTON\":\n\n if self.road[self.current_cell] == 0: #If rock\n if d!=0:\n self.crashes += 1\n ut_obstacle = -100 \n else:\n ut_obstacle = 0\n\n elif self.road[self.current_cell] == 1: #If puddle\n\n if d != 3:\n ut_obstacle = 0\n else:\n if np.random.random() < 0.95: # Skid!!\n self.skids += 1\n ut_obstacle = -10\n else:\n ut_obstacle = 0 \n\n else:\n ut_obstacle = 0\n\n ut = 0.1 + self.u_d[d] + ut_obstacle\n \n else:\n\n if self.road[self.current_cell] == 0: #If rock\n if d!=0:\n self.crashes += 1\n ut_obstacle = -100 \n else:\n ut_obstacle = 0\n\n elif self.road[self.current_cell] == 1: #If puddle\n\n if d == 2:\n\n if np.random.random() < 0.5: # Skid!!\n self.skids += 1\n ut_obstacle = -10\n else:\n ut_obstacle = 0\n\n elif d==3:\n\n if np.random.random() < 0.80: # Skid!!\n self.skids += 1\n ut_obstacle = -10\n else:\n ut_obstacle = 0\n\n elif d==4:\n\n if np.random.random() < 0.85: # Skid!!\n self.skids += 1\n ut_obstacle = -10\n else:\n ut_obstacle = 0\n\n else:\n ut_obstacle = 0\n \n \n else:\n ut_obstacle = 0.0\n\n ut = self.u_d[d] + ut_obstacle\n\n return ut\n\n\n def compute_cell_exp_utility(self, mode, env_pred, d):\n\n if mode == \"AUTON\":\n eut = 0.1 + self.u_d[d] + env_pred[1] * (-10*0.95 if d==3 else 0) + \\\n env_pred[0] * (-100 if d!=0 else 0)\n else:\n eut = self.u_d[d] + env_pred[1] * -10 * \\\n (0.5 if d==2 else 0.8 if d==3 else 0.85 if d==4 else 0) + \\\n env_pred[0] * (-100 if d!=0 else 0)\n\n return eut\n\n def evaluate_driving_modes(self):\n\n eut_auton = 0\n eut_manual_aware = 0\n eut_manual_dist = 0\n\n for i in range(5):\n\n eut_auton += self.compute_cell_exp_utility(\"AUTON\", self.env_pred[i,:], 
self.traj_plan_auton[i] )\n eut_manual_aware += self.compute_cell_exp_utility(\"MANUAL_AWARE\", self.env_pred[i,:], self.traj_plan_manual_aware[i] )\n eut_manual_dist += self.compute_cell_exp_utility(\"MANUAL_DIS\", self.env_pred[i,:], self.traj_plan_manual_dist[i] )\n\n\n return eut_auton, self.prob_driver_state[0] * eut_manual_aware + self.prob_driver_state[1] * eut_manual_dist\n\n def eval_RtI(self):\n\n if self.rock_warnings[self.current_cell] == 1 or self.puddle_warnings[self.current_cell] == 1:\n self.prop_RtI += 1\n eut_auton, eut_manual = self.evaluate_driving_modes()\n if eut_manual > eut_auton:\n self.RtI += 1\n if self.prob_driver_state[1] > 0.95:\n self.emergency +=1\n else:\n self.counter_manual = 0\n if self.driver[self.current_cell] == 1: \n self.modes[self.current_cell + 1] = \"MANUAL\"\n else:\n self.modes[self.current_cell + 3] = \"MANUAL\"\n\n if self.prob_driver_state[1] > 0.5:\n self.state_warnings[self.current_cell] += 1\n\n\n\n def complete_road(self):\n for i in range(self.N-6):\n self.move()\n\n def get_info(self):\n\n info = {}\n info[\"prop_manual\"] = np.sum(self.modes[:self.N-5] == \"MANUAL\") / (self.N-5)\n info[\"n_RtI\"] = self.RtI\n info[\"n_emergency\"] = self.emergency\n info[\"prop_rejected_RtI\"] = (self.prop_RtI - self.RtI) / self.prop_RtI\n condition = self.modes == \"MANUAL\"\n info[\"avg_len_man\"] = np.mean( np.diff(np.where(np.concatenate(([condition[0]],\n condition[:-1] != condition[1:],\n [True])))[0])[::2] )\n \n info[\"state_warnings\"] = np.sum(self.state_warnings[:self.N-5] != 0)\n info[\"rock_warnings\"] = np.sum(self.rock_warnings == 1)\n info[\"puddle_warnings\"] = np.sum(self.puddle_warnings == 1)\n info[\"crashes\"] = self.crashes\n info[\"skids\"] = self.skids\n info[\"utility\"] = np.mean(self.utilities[:self.N-5])\n\n info[\"prob_underp_above_th\"] = 1 - beta.cdf(0.3, self.dipa_alpha, self.dipa_beta)\n\n return info\n \n\n @staticmethod\n def normalize(arr):\n return arr / np.sum(arr)\n\n @staticmethod\n def normalize_arr(arr):\n return arr / np.sum(arr, axis=0)\n\n\n\n\n" }, { "alpha_fraction": 0.5397754907608032, "alphanum_fraction": 0.5461200475692749, "avg_line_length": 29.597015380859375, "blob_id": "9cac5af5137257db43de9b0069e6e6e3120f8e18", "content_id": "5583ad2ab6202846c072e2e2ecc4b98c2e7af05b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2049, "license_type": "permissive", "max_line_length": 105, "num_lines": 67, "path": "/simulator.py", "repo_name": "roinaveiro/ads_trusto", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n\n\nclass simulator:\n \"\"\"\n Class to simulate road\n Args:\n l (int): road lenght\n \"\"\"\n\n def __init__(self, l, dynamics=None):\n self._l = l\n ##\n if dynamics is None:\n self._road_dynamics = pd.read_csv(\"data/road_state_evol\", index_col=0, delim_whitespace=True)\n ##\n self._driver_dynamics = pd.read_csv(\"data/driver_state_evol\", delim_whitespace=True)\n self._driver_dynamics.set_index([\"Current\", \"Obstacle\"], inplace=True)\n ##\n self._driver_char = pd.read_csv(\"data/driver_char\", index_col=0, delim_whitespace=True)\n else:\n self._road_dynamics = dynamics[0]\n ##\n self._driver_dynamics = dynamics[1]\n ##\n self._driver_char = dynamics[2]\n\n def simulate_road(self):\n\n road = np.empty(self._l, dtype=int)\n road[0] = 2\n\n for i in range(1, self._l):\n p = self._road_dynamics.loc[ road[i-1] ]\n road[i] = np.random.choice(self._road_dynamics.columns, p = p)\n\n return road\n\n def 
simulate_driver_state(self, road):\n\n driver = np.empty(self._l, dtype=int)\n driver[0] = 0\n\n for i in range(1, self._l):\n p = self._driver_dynamics.loc[ (driver[i-1], road[i]) ]\n driver[i] = np.random.choice(self._driver_dynamics.columns, p = p)\n\n return driver\n \n def simulate_driver_char(self, driver):\n\n driver_char = np.empty(self._l, dtype=int)\n \n for i in range(self._l):\n p = self._driver_char.loc[ driver[i] ]\n driver_char[i] = np.random.choice(self._driver_char.columns.astype(int), p = p)\n \n return driver_char\n\n def simulate_environment(self):\n \n road = self.simulate_road()\n driver = self.simulate_driver_state(road)\n driver_char = self.simulate_driver_char(driver)\n\n return {\"road\" : road, \"driver\" : driver, \"driver_char\" : driver_char}" }, { "alpha_fraction": 0.6013234257698059, "alphanum_fraction": 0.6319271922111511, "avg_line_length": 29.200000762939453, "blob_id": "c6e565f53b84234cbfeb348e111b13bf731dfe3e", "content_id": "ed37546b7ef83123846c0473c1045da04b747ae3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "permissive", "max_line_length": 92, "num_lines": 40, "path": "/experiments2.py", "repo_name": "roinaveiro/ads_trusto", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom simulator import simulator\nfrom ads import ADS\n\nN_sim = 1000\nresults = []\n\n\nroad_dynamics = pd.read_csv(\"data/road_state_evol\", index_col=0, delim_whitespace=True)\ndriver_dynamics = pd.read_csv(\"data/driver_state_evol\", delim_whitespace=True)\ndriver_dynamics.set_index([\"Current\", \"Obstacle\"], inplace=True)\ndriver_char = pd.read_csv(\"data/driver_char\", index_col=0, delim_whitespace=True)\n\ngrid = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\nresults = []\n\nfor i in range(N_sim):\n print(i)\n for j, pr in enumerate(grid):\n\n road_dynamics.loc[2][0] = pr\n road_dynamics.loc[2][2] = 1.0 - (road_dynamics.loc[2][0] + road_dynamics.loc[2][1]) \n\n sim = simulator(1000, [road_dynamics, driver_dynamics, driver_char])\n env = sim.simulate_environment()\n road = env[\"road\"]\n char = env[\"driver_char\"]\n driver = env[\"driver\"]\n\n ads = ADS(road, char, driver, driver_char, driver_dynamics)\n ads.complete_road()\n dirr = ads.get_info()\n dirr['n_exp'] = i\n dirr['pr_rock'] = pr\n results.append(dirr)\n \n\ndf = pd.DataFrame(results)\ndf.to_csv(\"results/sim3_prop_rock_1000.csv\", index=False)\n\n" }, { "alpha_fraction": 0.6671159267425537, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 26.518518447875977, "blob_id": "5de47adae8be1a3cff038d2822349a2a49394c35", "content_id": "b38e3af881b7747b070952f940f4607f8c61f9d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "permissive", "max_line_length": 85, "num_lines": 27, "path": "/experiments1.py", "repo_name": "roinaveiro/ads_trusto", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom simulator import simulator\nfrom ads import ADS\n\nN_sim = 1000\nresults = []\n\nfor i in range(N_sim):\n print(i)\n sim = simulator(1000)\n env = sim.simulate_environment()\n road = env[\"road\"]\n char = env[\"driver_char\"]\n driver = env[\"driver\"]\n\n driver_state_evol = pd.read_csv(\"data/driver_state_evol\", delim_whitespace=True)\n driver_state_evol.set_index([\"Current\", \"Obstacle\"], inplace=True)\n driver_char = pd.read_csv(\"data/driver_char\", index_col=0, 
delim_whitespace=True)\n\n ads = ADS(road, char, driver, driver_char, driver_state_evol)\n ads.complete_road()\n\n results.append(ads.get_info())\n\ndf = pd.DataFrame(results)\ndf.to_csv(\"results/sim_dipa.csv\", index=False)" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 54, "blob_id": "6cfb507c57767aae5b2fc08b73c60d3d609149a5", "content_id": "8d2907eb74100a6305199d0e1a8aa51a6ba99262", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "permissive", "max_line_length": 108, "num_lines": 3, "path": "/README.md", "repo_name": "roinaveiro/ads_trusto", "src_encoding": "UTF-8", "text": "# Managing driving modes in autonomous driving systems\n\nCode for the paper *Managing driving modes in autonomous driving systems*, by Ríos Insua, Caballero and Naveiro.\n" } ]
5
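In `ads.py` above, environment learning reduces to a Dirichlet-style count matrix: each newly observed cell increments `Dir[observed_content, previous_content]`, and the `normalize`/`normalize_arr` static methods turn those counts into conditional probabilities. A self-contained sketch of that pattern, with the two helpers copied from the class and a made-up observation sequence (cell codes as in the comments of `ads.py`: 0 = rock, 1 = puddle, 2 = clear):

```python
import numpy as np

def normalize(arr):
    # Same helper as ADS.normalize: scale a vector to sum to 1.
    return arr / np.sum(arr)

def normalize_arr(arr):
    # Same helper as ADS.normalize_arr: scale each column to sum to 1.
    return arr / np.sum(arr, axis=0)

# Laplace-style prior of one pseudo-count per (next, current) pair, as in ADS.
Dir = np.ones((3, 3))

# Hypothetical observations: (current cell content, next cell content).
for current, nxt in [(2, 2), (2, 1), (1, 2), (2, 2), (2, 0)]:
    Dir[nxt, current] += 1

print(normalize_arr(Dir))    # column c estimates p(next content | current = c)
print(normalize(Dir[:, 2]))  # p(next content | current cell is clear)
```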
Whalepool/LeoBurnRatio
https://github.com/Whalepool/LeoBurnRatio
90480a1f03a4d5a2d35d59b5cad5c019b0581816
bed39e434f005d7c7974d20e394c91e7a7253a0a
1b7b6c53b2c2df35769f810381ff03c4aa3aa2d1
refs/heads/master
2021-05-20T08:51:27.601573
2020-04-01T15:39:54
2020-04-01T15:39:54
252,208,006
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6898042559623718, "alphanum_fraction": 0.7134729027748108, "avg_line_length": 33.3359375, "blob_id": "b5f825c6635c4dd6676d6e99119cef5f501b0d84", "content_id": "441725d5c6ffbcd8ddb25795665d9bdb5c112450", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4394, "license_type": "no_license", "max_line_length": 114, "num_lines": 128, "path": "/calculate.py", "repo_name": "Whalepool/LeoBurnRatio", "src_encoding": "UTF-8", "text": "import pandas as pd \nfrom pprint import pprint\nfrom datetime import datetime\nimport talib as ta\n\nema_period = 50\nrolling_ema_lookback = 20\ndataset = []\nnode = []\ncounter = 0 \n\nfname = 'data/leoburns.data'\nwith open(fname) as f:\n lines = f.read().splitlines() \n\nfor l in lines: \n counter += 1 \n if counter == 4:\n counter = 1\n dataset.append(node)\n node = []\n\n if counter == 1:\n node.append(datetime.strptime(l, '%Y-%m-%d %H:%M:%S'))\n if counter == 2:\n node.append(l)\n if counter == 3:\n node.append(float(l.replace(',','')))\n\n\nleodf = pd.DataFrame(dataset)\nleodf.columns = ['Timestamp','TxId','leo_burn_amount']\nleodf.set_index(leodf['Timestamp'], inplace=True)\nleodf.sort_index(inplace=True)\n\nleodf_resampled = leodf.resample('3H', closed='left', label='left').mean()\nleodf_resampled = leodf_resampled.shift(-1)\n\n\nleo_candles = pd.read_csv('data/LEOUSD_3H_2018-01-01-present.csv', parse_dates=[0], infer_datetime_format=True)\nleo_candles.set_index(leo_candles['timestamp'], inplace=True)\nleo_candles.sort_index(inplace=True)\nleo_candles.rename(columns={'open': 'leo_open', 'volume': 'leo_volume'}, inplace=True)\nleo_candles.drop(['timestamp','high','low','close'], axis=1, inplace=True)\n\n\noutput = pd.concat([leo_candles, leodf_resampled], axis=1)\noutput['burn_amount_usd'] = output['leo_burn_amount'] * output['leo_open']\n\n\nbtc_candles = pd.read_csv('data/BTCUSD_3H_2018-01-01-present.csv', parse_dates=[0], infer_datetime_format=True)\nbtc_candles.set_index(btc_candles['timestamp'], inplace=True)\nbtc_candles.sort_index(inplace=True)\nbtc_candles.rename(columns={'open': 'btc_open', 'volume': 'btc_volume'}, inplace=True)\nbtc_candles.drop(['timestamp','high','low','close'], axis=1, inplace=True)\nbtc_candles['btc_usd_volume'] = btc_candles['btc_volume'] * btc_candles['btc_open']\n\n\noutput = pd.concat([output, btc_candles], axis=1)\noutput['burn_amount_usd_EMA'] = ta.EMA(output['burn_amount_usd'].ffill(), ema_period)\noutput['btc_usd_volume_EMA'] = ta.EMA(output['btc_usd_volume'], ema_period)\n\n\n\nimport matplotlib \nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle, Patch\nimport matplotlib.patches as mpatches\n# from matplotlib.finance import candlestick_ohlc\nfrom matplotlib.ticker import ScalarFormatter, NullFormatter\n\n\noutput['m2dates'] = output.index.map(mdates.date2num)\nnumber_correlation = output['burn_amount_usd_EMA'].corr(output['btc_usd_volume_EMA'])\noutput['corr'] = output['burn_amount_usd_EMA'].rolling(rolling_ema_lookback).corr(output['btc_usd_volume_EMA'])\noutput['corr_smoothed'] = ta.EMA(output['corr'], ema_period)\noutput = output['2019-06-01 00:00':]\n\npprint(output.to_csv('output.csv', encoding='utf-8'))\n\npprint(output.tail(20))\n\nfig = plt.figure(facecolor='black', figsize=(22, 12), dpi=100)\n\n# Plot into a rectangle\nrect1 = [0.1, 0.1, 1, 1]\n\n# Add this rectangle to the figure\nax1 = fig.add_axes(rect1, 
facecolor='#f6f6f6') \n\n# Add the title to the axis\nax1.set_title( 'LEO Burns to BTC USD Price correlation: '+str(number_correlation), fontsize=20, fontweight='bold')\n\n# Set the date as the x axis \nax1.xaxis_date()\nfig.autofmt_xdate()\n\nax1.plot(output.index.values, output['burn_amount_usd_EMA'], color='blue') \nax1.set_xlabel(str(ema_period)+\" EMA USD LEO Burn amount\", fontsize=10)\nax1.tick_params(axis='y', colors='blue')\n\nax1t = ax1.twinx()\nax1t.plot(output.index.values, output['btc_usd_volume_EMA'], color='red') \nax1t.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:,.0f}'))\nax1t.set_xlabel(str(ema_period)+\" EMA Smoothed USD Normalised BTCUSD volume\", fontsize=10)\nax1t.tick_params(axis='y', colors='red')\n\nax2t = ax1.twinx()\nax2t.plot( output.index.values, output['corr_smoothed'], color='orange')\nax2t.spines[\"right\"].set_position((\"axes\", 1.05))\nax2t.set_xlabel(\"Rolling \"+str(rolling_ema_lookback)+\" day window correlation\", fontsize=10)\nax2t.tick_params(axis='y', colors='orange')\n\n\nh = [\n mpatches.Patch(color='blue', label=str(ema_period)+' EMA USD normalised LEO burn amount'),\n mpatches.Patch(color='red', label=str(ema_period)+' EMA USD normalised BTCUSD volume'),\n mpatches.Patch(color='orange', label='Rolling '+str(rolling_ema_lookback)+' day window correlation'),\n]\n\nax1.legend(handles=h, loc='upper left')\n\n\nfilename = 'output.png'\nplt.savefig(filename, bbox_inches='tight')" }, { "alpha_fraction": 0.7130801677703857, "alphanum_fraction": 0.7436708807945251, "avg_line_length": 44.0476188659668, "blob_id": "d8a091ad2ecc9f878ae095debd747d93ceab0bf2", "content_id": "7cb7481f8d13c4a26b41fc0fee7edfe24e7e1a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 948, "license_type": "no_license", "max_line_length": 108, "num_lines": 21, "path": "/readme.md", "repo_name": "Whalepool/LeoBurnRatio", "src_encoding": "UTF-8", "text": "# Bitfinex LEO Burn to volume Ratio script by @whalepoolbtc - https://whalepool.io \n\nexample run `python calculate.py`\n\n- Imports leo burn data [data/leoburns.data]\n- Resamples to 3h period (as the burn is 1m after the 3h open)\n- Gets the LEO candle data [ohlcv - data/LEOUSD_3H_2018-01-01-present.csv]\n- Merges the 2 data sets, normalises the amount of LEO burned to USD\n- Gets the BTC candle data [ohlcv - data/BTCUSD_3H_2018-01-01-present.csv]\n- Produces normalsied USD volume from BTCUSD['volume'] * open (since the LEO burn happens 1m after the open)\n- Gets an 8 period EMA of (the LEO burn amount in USD and the BTC volume in USD)\n- Calculates the correlation (main number top of the chart)\n- Calculates a rolling 10 period correlation which gets plotted (orange line)\n- Outputs chart\n\n \nFor more info join [@whalepoolbtc](https://t.me/whalepoolbtc) on telegram \n\n## Example output \n\n![Example output](https://i.imgur.com/290RerV.png)\n\n\n" } ]
2
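The `calculate.py` in the record above boils down to one measurement: a rolling-window correlation between two EMA-smoothed series (USD-normalized LEO burns vs. USD-normalized BTCUSD volume). Below is a toy, self-contained version of that computation — synthetic series stand in for the real data, and pandas' `ewm` is used in place of TA-Lib's `EMA` to keep the sketch dependency-light; the 50-period smoothing and 20-bar window match the script's `ema_period` and `rolling_ema_lookback`:

```python
import numpy as np
import pandas as pd

# Synthetic stand-ins for the 3-hourly burn-amount and volume series.
rng = np.random.default_rng(0)
idx = pd.date_range("2019-06-01", periods=200, freq="3H")
burn_usd = pd.Series(rng.lognormal(10, 0.3, len(idx)), index=idx)
btc_usd_volume = burn_usd * 50 + rng.normal(0, 5e4, len(idx))  # loosely related

ema_period, lookback = 50, 20  # same constants as calculate.py

# Smooth both series, then correlate them over a rolling 20-bar window,
# mirroring output['burn_amount_usd_EMA'].rolling(20).corr(...) in the script.
burn_ema = burn_usd.ewm(span=ema_period).mean()
vol_ema = btc_usd_volume.ewm(span=ema_period).mean()
rolling_corr = burn_ema.rolling(lookback).corr(vol_ema)

print(rolling_corr.tail())
```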
rayhanramin/STQA_assignment2
https://github.com/rayhanramin/STQA_assignment2
2bbc5c88968517e8628cd935a249cb7fa691c408
6911f0fc5cb5ff880871b2158b5e5eb61bc72c96
86f0281f1c6fe2da323fed340697a74983de115b
refs/heads/main
2023-03-22T20:34:57.182761
2021-03-05T04:15:08
2021-03-05T04:15:08
344,691,322
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8255033493041992, "alphanum_fraction": 0.8389261960983276, "avg_line_length": 73, "blob_id": "ae4fd2f24c356ead17fa70fe41fcbc439ee07101", "content_id": "e4f5c2310323286679c54df4dee522add1c9b7bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "no_license", "max_line_length": 128, "num_lines": 2, "path": "/README.md", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "# STQA_assignment2\nThis folder includes the source code that implements the requirements of assignment 2 for software testing and quality assurance \n" }, { "alpha_fraction": 0.6019522547721863, "alphanum_fraction": 0.660520613193512, "avg_line_length": 59.20000076293945, "blob_id": "652433ae038e68cbf5691451428a9dc8129720e8", "content_id": "cc1e25afd0096f3c787116430d653baee2e49d41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1844, "license_type": "no_license", "max_line_length": 94, "num_lines": 30, "path": "/final/test_bmi.py", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "from bmi import bmi_cal\r\nimport unittest\r\n\r\nclass test_bmi_val(unittest.TestCase):\r\n def test_bmi_input_height(self):\r\n bm=bmi_cal()\r\n self.assertEqual(bm.calculate_bmi(0,0,0),\"Height can not be zero\")\r\n self.assertEqual(bm.calculate_bmi(-1,0,0),\"Height can not be negative\")\r\n self.assertEqual(bm.calculate_bmi(-1,-1,0),\"Height can not be negative\")\r\n self.assertEqual(bm.calculate_bmi(0,0,1501),\"Height can not be zero\")\r\n self.assertEqual(bm.calculate_bmi(-1,0,1500),\"Height can not be negative\")\r\n self.assertEqual(bm.calculate_bmi(-1,-1,-1),\"Height can not be negative\")\r\n self.assertEqual(bm.calculate_bmi(5,-1,0),\"Height can not be negative\")\r\n self.assertEqual(bm.calculate_bmi(0,12,0),\"Inches value should be in between 0 to 11\")\r\n self.assertEqual(bm.calculate_bmi(6,3,1501),\"Weight can not be greater than 1500lbs\")\r\n self.assertEqual(bm.calculate_bmi(10,0,0),\"Height can not be 10 feet or more\")\r\n self.assertEqual(bm.calculate_bmi(10,0,-1),\"Height can not be 10 feet or more\")\r\n self.assertEqual(bm.calculate_bmi(10,0,140),\"Height can not be 10 feet or more\")\r\n self.assertEqual(bm.calculate_bmi(10,0,1500),\"Height can not be 10 feet or more\")\r\n self.assertEqual(bm.calculate_bmi(10,0,1501),\"Height can not be 10 feet or more\")\r\n\r\n def test_bmi_input_weight(self):\r\n bm = bmi_cal()\r\n self.assertEqual(bm.calculate_bmi(5,10,0),\"Weight can not be zero or negative\")\r\n self.assertEqual(bm.calculate_bmi(5,10,-1),\"Weight can not be zero or negative\")\r\n self.assertEqual(bm.calculate_bmi(6,3,1500),\"Your bmi is 192.0 and you are obese\")\r\n\r\n def test_bmi_valid(self):\r\n bm=bmi_cal()\r\n self.assertEqual(bm.calculate_bmi(5,0,140),\"Your bmi is 28.0 and you are over weight\")\r\n " }, { "alpha_fraction": 0.5424486398696899, "alphanum_fraction": 0.5728328824043274, "avg_line_length": 39.14814758300781, "blob_id": "1d665db9c07839b9d2add6d9ce9fcdc316ee21fe", "content_id": "82f413cb9e1911a6e49b62da3ee941e3c86d39d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/final/retirement.py", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "import math\r\n\r\n\r\nclass ret:\r\n def 
retirement_cal(self,age,salary,percent,target):\r\n self.age=age\r\n self.salary=salary\r\n self.percent=percent\r\n self.target=target\r\n age_limit = 100 \r\n\r\n if self.age <=0 or self.age > 100:\r\n return (\"Age cannot be zero or negative or greater than 100\")\r\n elif self.salary <=0 or self.salary > 500000:\r\n return(\"Salary cannot be zero or negative or greater than 500k\")\r\n elif self.percent <= 0 or self.percent >100:\r\n return(\"Percentage cannot be zero or negative or greater than 100\")\r\n elif self.target <=0:\r\n return(\"Target amount can not be zero or negative\")\r\n else:\r\n saving_per_season = (float(salary)*(self.percent/100))*1.35\r\n years_till_goal = math.ceil(self.target/saving_per_season)\r\n final_age = self.age+years_till_goal\r\n if final_age <= age_limit:\r\n return(\"The goal will be met when the age is {}\".format(final_age))\r\n else:\r\n return(\"The goal will not be met\")\r\n " }, { "alpha_fraction": 0.478730171918869, "alphanum_fraction": 0.50984126329422, "avg_line_length": 41.25, "blob_id": "27d5d6e0521e904bb8032f80d74cdb7819998dad", "content_id": "a280b79bbcdc2e4b1ee577df49dc6d95ee8fab05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1575, "license_type": "no_license", "max_line_length": 96, "num_lines": 36, "path": "/final/bmi.py", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "\r\n\r\n\r\nclass bmi_cal:\r\n\r\n def calculate_bmi(self, feet, inches, weight):\r\n self.feet = feet\r\n self.inches = inches\r\n self.weight = weight\r\n\r\n #refine alittle bit more\r\n if self.feet == 0 and self.inches == 0: \r\n s = \"Height can not be zero\"\r\n return s\r\n elif self.feet < 0 or self.inches < 0: \r\n s = \"Height can not be negative\"\r\n return s\r\n elif self.feet >= 10:\r\n return(\"Height can not be 10 feet or more\")\r\n elif self.inches not in range(0,11):\r\n return (\"Inches value should be in between 0 to 11\")\r\n elif self.weight <= 0:\r\n return (\"Weight can not be zero or negative\")\r\n elif self.weight > 1500:\r\n return (\"Weight can not be greater than 1500lbs\")\r\n else:\r\n leng = float(((self.feet*12)+self.inches)*0.025)\r\n wt = float(self.weight*0.45)\r\n bmi = float(wt/(leng*leng))\r\n if bmi <= 0:\r\n return (\"BMI can not be negative or zero. 
Sorry there must be something wwrong\")\r\n elif bmi < 18.5:\r\n return (\"Your bmi is {} and you are under weight\".format(bmi))\r\n elif bmi >= 18.5 and bmi <=24.9:\r\n return (\"Your bmi is {} and you are normal weight\".format(bmi))\r\n elif bmi >= 25 and bmi <=29.9:\r\n return (\"Your bmi is {} and you are over weight\".format(bmi))\r\n elif bmi >= 30:\r\n return (\"Your bmi is {} and you are obese\".format(bmi))\r\n " }, { "alpha_fraction": 0.48876839876174927, "alphanum_fraction": 0.4934159517288208, "avg_line_length": 38.09375, "blob_id": "0599e26ad93524289076d76efb622418d12b2992", "content_id": "9aa630788747462c096b992ddf181d4eaf315d88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "no_license", "max_line_length": 90, "num_lines": 32, "path": "/final/main_file.py", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "from bmi import bmi_cal\r\nfrom retirement import ret\r\nimport retirement\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n try:\r\n print(\"Press 1 to calculate BMI\")\r\n print(\"Press 2 to calculate Retirement benefit\")\r\n print(\"Press 0 to exit\")\r\n choice = int(input(\"My choice is: \"))\r\n\r\n if choice == 0:\r\n break\r\n elif choice == 1:\r\n bm = bmi_cal()\r\n feet = int(input(\"Height in feet:\"))\r\n feet = float(feet)\r\n inches = float(input(\"Height in inches:\"))\r\n weight = float(input(\"Your weight in pounds:\"))\r\n result = bm.calculate_bmi(feet,inches,weight)\r\n print(result)\r\n elif choice == 2:\r\n age=int(input(\"What is your current age:\"))\r\n salary = int(input(\"What is your annual salary:\"))\r\n percent = float(input(\"What percentage of salary are you going to save:\"))\r\n target = int(input(\"What is your target savings:\"))\r\n rt = ret()\r\n res=rt.retirement_cal(age,salary,percent,target)\r\n print(res)\r\n except ValueError:\r\n print(\"Invalid input. 
Please try again.\")\r\n " }, { "alpha_fraction": 0.5800535082817078, "alphanum_fraction": 0.68742835521698, "avg_line_length": 65.1025619506836, "blob_id": "7669058f22352ab8fb05f8e7c6f22477d3163ce4", "content_id": "aea1d24d13ddd9d917b8ec7a67f1f65b73e1056c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2617, "license_type": "no_license", "max_line_length": 119, "num_lines": 39, "path": "/final/test_retirement.py", "repo_name": "rayhanramin/STQA_assignment2", "src_encoding": "UTF-8", "text": "import unittest\r\nfrom retirement import ret\r\n\r\nclass test_ret_goal(unittest.TestCase):\r\n def test_ret_age(self):\r\n rt=ret()\r\n self.assertEqual(rt.retirement_cal(0,0,0,0),\"Age cannot be zero or negative or greater than 100\")\r\n self.assertEqual(rt.retirement_cal(-1,0,0,0),\"Age cannot be zero or negative or greater than 100\")\r\n self.assertEqual(rt.retirement_cal(101,0,0,0),\"Age cannot be zero or negative or greater than 100\")\r\n self.assertEqual(rt.retirement_cal(101,65000,10,100000),\"Age cannot be zero or negative or greater than 100\")\r\n\r\n def test_ret_salary(self):\r\n rt=ret()\r\n self.assertEqual(rt.retirement_cal(100,0,0,0),\"Salary cannot be zero or negative or greater than 500k\")\r\n self.assertEqual(rt.retirement_cal(30,-1,0,0),\"Salary cannot be zero or negative or greater than 500k\")\r\n self.assertEqual(rt.retirement_cal(30,500001,0,0),\"Salary cannot be zero or negative or greater than 500k\")\r\n\r\n def test_ret_percent(self):\r\n rt=ret()\r\n self.assertEqual(rt.retirement_cal(25,65000,0,0),\"Percentage cannot be zero or negative or greater than 100\")\r\n self.assertEqual(rt.retirement_cal(25,65000,-1,0),\"Percentage cannot be zero or negative or greater than 100\")\r\n self.assertEqual(rt.retirement_cal(25,65000,101,0),\"Percentage cannot be zero or negative or greater than 100\")\r\n\r\n def test_ret_target(self):\r\n rt = ret()\r\n self.assertEqual(rt.retirement_cal(25, 65000, 25, 0), \"Target amount can not be zero or negative\")\r\n self.assertEqual(rt.retirement_cal(25, 65000, 25, -1), \"Target amount can not be zero or negative\")\r\n\r\n def test_ret_valid(self):\r\n rt=ret()\r\n self.assertEqual(rt.retirement_cal(25,85000,15,1500000),\"The goal will not be met\")\r\n self.assertEqual(rt.retirement_cal(1,85000,15,1500000),\"The goal will be met when the age is 89\")\r\n self.assertEqual(rt.retirement_cal(100,85000,15,1500000),\"The goal will not be met\")\r\n self.assertEqual(rt.retirement_cal(25,85000,20,1000000),\"The goal will be met when the age is 69\")\r\n self.assertEqual(rt.retirement_cal(25,1,20,1000000),\"The goal will not be met\")\r\n self.assertEqual(rt.retirement_cal(25,500000,20,1000000),\"The goal will be met when the age is 33\")\r\n self.assertEqual(rt.retirement_cal(25,85000,20,1000000),\"The goal will be met when the age is 69\")\r\n self.assertEqual(rt.retirement_cal(25,85000,1,1000000),\"The goal will not be met\")\r\n self.assertEqual(rt.retirement_cal(25,85000,100,1000000),\"The goal will be met when the age is 34\")\r\n" } ]
6
ArsenalLevel3/PythonLearning
https://github.com/ArsenalLevel3/PythonLearning
620ae7916dd79927c377a7c6425353f1792a24a1
5505ceabdbeaa726d8aec38910d61ddd8bad4775
e2256a4c81738fe7938317dcfca94e0409d069e3
refs/heads/master
2020-12-25T14:58:04.992318
2017-07-14T09:21:17
2017-07-14T09:21:17
67,563,596
0
0
null
2016-09-07T02:14:11
2016-09-08T15:34:14
2017-03-03T09:15:12
Python
[ { "alpha_fraction": 0.645901620388031, "alphanum_fraction": 0.6950819492340088, "avg_line_length": 13.523809432983398, "blob_id": "70687f94ff5ad95a24c27150d4151258d614d6ef", "content_id": "9c73548877f56f59b6a5be9f412f7bdf54fa5953", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/Learning/socket/Client.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\n# socket client in python\n\nimport socket\n#import argparse\n#import time\n\nhost = '127.0.0.1'\nPORT = 9999\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect((host,PORT))\n\ndata = raw_input(\"Enter your words: \")\n\n# 接受新消息\ns.send(data)\nprint s.recv(1024)\ns.send('exit')\ns.close()\n" }, { "alpha_fraction": 0.5422459840774536, "alphanum_fraction": 0.5732620358467102, "avg_line_length": 25.714284896850586, "blob_id": "68b00bacbdf9a732b3f1144a7d5424fd6b9dec81", "content_id": "7111209dd75b80dfd94a4a5ed8d5d9e43aaddfc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/Learning/Sort/insertionSort.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "\ndef insertionSort(alist):\n for index in range(1,len(alist)):\n current_value = alist[index]\n position = index\n while position > 0 and alist[position-1] > current_value:\n alist[position] = alist[position-1]\n position = position-1\n alist[position] = current_value\n\n\ndef insertionSortBinarysearch(alist):\n for index in range(1,len(alist)):\n current_value = alist[index]\n position = index\n low=0\n high=index-1\n while low<=high:\n mid=(low+high)/2\n if alist[mid]>current_value:\n high=mid-1\n else:\n low=mid+1\n \n while position > low:\n alist[position] = alist[position-1]\n position = position - 1\n\n alist[position] = current_value\n\n\nalist = [54,26,93,15,77,44,55,20]\ninsertionSort(alist)\nprint(alist)\ninsertionSortBinarysearch(alist)\nprint(alist)" }, { "alpha_fraction": 0.6819923520088196, "alphanum_fraction": 0.6858237385749817, "avg_line_length": 12.789473533630371, "blob_id": "874bdfd062e03fad7a1a8313e2f31385a1809ed8", "content_id": "ccb0518794dbc23aed5b2de98e5740495f3660e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 31, "num_lines": 19, "path": "/Learning/Module/Example-Module/EX_Module01.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 认识模块\n\n# 导入模块\n# import math;\n# math.pi;\n# print math.pi;\n\n# sys模块\n# import sys\n# # 查看系统版本信息\n# print sys.version\n# # 查看目录地址\n# print sys.executable\n# # 返回Windows操作系统的信息\n# print sys.getwindowsversion()\n# # 查看当先模块已经导入的方法\n# print sys.modules.keys()" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 21, "blob_id": "309c2e44b8fe579852531a6cb6501243c7b896d1", "content_id": "ab6877ee8b642a8fcb5ea69731c910d0174b5cb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/README.md", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# PythonLearning\nSome notes about my 
python\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5285714268684387, "avg_line_length": 7.791666507720947, "blob_id": "56baadee2c1bff3f1fb1eb6af60894a835107400", "content_id": "8b6ce5417e0349331f3024097c91b3bd415cdc7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 20, "num_lines": 24, "path": "/Learning/func/Example-func/EX_Func01.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# example-函数的定义\n\n\n\n# 1.实现去字符串长度\n\n# a=\"hello-Python\";\n# print len(a);\n\n\n# 2.实现字符串的切割\n\n# a=\"student\";\n# b=a.split(\"u\");\n# print b;\n\n# 自定义函数\n\n# def a():\n# print \"hello\";\n# print 777;\n# a();" }, { "alpha_fraction": 0.6549815535545349, "alphanum_fraction": 0.7250922322273254, "avg_line_length": 27.578947067260742, "blob_id": "b09b2094adc0dd37e27aef5439307a14bd05b88a", "content_id": "103423841fa7e521d203a1566c0d18ce8b2a6982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 137, "num_lines": 19, "path": "/Learning/Spiders/Single_thread_crawler/STC/EX_STC03.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 网页爬虫\nimport requests\nimport re\n#下面三行是编码转换的功能,大家现在不用关心。\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n#hea是我们自己构造的一个字典,里面保存了user-agent\nhea = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}\nhtml = requests.get('http://jp.tingroom.com/yuedu/yd300p/',headers = hea)\n\nhtml.encoding = 'utf-8' #这一行是将编码转为utf-8否则中文会显示乱码。\n# 编写正则表达式寻找需要爬取的内容\nchinese = re.findall('color: #039;\">(.*?)</a>',html.text,re.S)\nfor each in chinese:\n print each" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.717391312122345, "avg_line_length": 10.5, "blob_id": "f8f57391aef7f381aefbcfe0e88b1e489b42c4e0", "content_id": "102e383d5547944dda49d50c948e999888a8fc52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 15, "num_lines": 4, "path": "/Learning/Module/Example-Module/EX_Module02.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 产生.pyc文件的方法\nimport zipfile;\n" }, { "alpha_fraction": 0.4735812246799469, "alphanum_fraction": 0.4794520437717438, "avg_line_length": 17.25, "blob_id": "328ca85105671d0cd7a4d13ba16a75df07d64493", "content_id": "de57347eb2d8e2694e2bd65d75b6a7b8d4aed46b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1354, "license_type": "no_license", "max_line_length": 36, "num_lines": 56, "path": "/Learning/Data_Structure_Acquainted/Example_DSA/EX_DSA02.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 栈的实现\nclass Stack():#定义一个栈的类\n def __init__(st,size):\n '''栈的初始化\n\n 两个参数分别代表栈的主体和栈的容量\n 栈的基本形式是列表'''\n st.stack=[]\n st.size=size\n st.top=-1\n\n def push(st,content):\n '''堆栈操作\n\n 参数为栈的主体和要压入栈的数据\n 每压入一个数据栈顶指针st.top自加一\n 通过append方法将数据压入栈'''\n if st.Full():\n print \"Stack is Full\"\n else:\n st.stack.append(content)\n st.top=st.top+1\n\n def out(st):\n '''出栈操作\n\n 通过函数Empty判断'''\n if st.Empty():\n print \"Stack is Empty!\"\n else:\n st.top=st.top-1\n\n def Full(st):\n 
'''判断栈是否已经满了\n\n 通过栈顶指针st.top与栈的容量比较'''\n if st.top==st.size:\n return True\n else:\n return False\n\n def Empty(st):\n '''判断栈是否已经空了\n\n 已经空了就不能进行出栈操作,没有空就能继续进行出战操作\n '''\n if st.top==-1:\n print True\n else:\n print False\n\nq=Stack(7)#调用栈Stack\nprint q.push(\"hello\")#数据的入栈\nprint q.out()#数据的出栈\n" }, { "alpha_fraction": 0.48924732208251953, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 13.384614944458008, "blob_id": "51370e8e73dc64fca538c4a4bf422219ee922276", "content_id": "29067341658f118688190322743b8e7cf7cf0d0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 24, "num_lines": 13, "path": "/Learning/func/Example-func/EX_Func05.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 文档字符串的使用\n\n# def func01(i,j):\n# '''这个函数实现一个乘法运算。\n#\n# 函数会返回一个乘法运算的结果。'''\n# k=i*j;\n# return k;\n# print func01.__doc__;\n# help(func01);\n# print func01(4,7);" }, { "alpha_fraction": 0.6402671933174133, "alphanum_fraction": 0.6650763154029846, "avg_line_length": 33.96666717529297, "blob_id": "429d140ecd7c65c497f77b0747569aacec6894a7", "content_id": "0bbfac149f82a76b47960ba7380fad5f8e0cd4ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 75, "num_lines": 30, "path": "/Learning/Sort/shellSort.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n'''\n类似合并排序和插入排序的结合体,二路合并排序将原来的数组分成左右两部分,希尔排序则将数组按照一定的间隔分成几部分,每部分采用插入排序来排序,\n有意思的是这样做了之后,元素很多情况下就差不多在它应该呆的位置,所以效率不一定比插入排序差。时间复杂度为$[O(n),O(n^2)]$。\n'''\n\ndef shellSort(alist):\n #how many sublists,also how many elements in a sublist\n sublist_count = len(alist) //2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n gap_insertion_sort(alist,start_position,sublist_count)\n print(\"after increments of size\",sublist_count,\"The list is\",alist)\n sublist_count = sublist_count //2\n\ndef gap_insertion_sort(alist,start,gap):\n #start+gap is the second element in this sublist\n for i in range(start + gap,len(alist),gap):\n current_value = alist[1]\n position = i\n while position >= gap and alist[position - gap] > current_value:\n alist[position] = alist[position - gap] #move backward\n position = position -gap\n alist[position] = current_value\n\n\nalist = [54,26,93,17,77,31,44,55,20,88]\nshellSort(alist)\nprint(alist)" }, { "alpha_fraction": 0.6197183132171631, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 7, "blob_id": "1678388bc4984c7e43351372fe155acc9c43cf8d", "content_id": "e20177f0866e706e07e7b1775ecb5a8864688e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 18, "num_lines": 9, "path": "/Learning/Module/Example-Module/EX_Module05.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n\n#自定义模块的使用\nimport example01\n\ni=9\nj=2\nexample01.add(i,j)" }, { "alpha_fraction": 0.7798165082931519, "alphanum_fraction": 0.7844036817550659, "avg_line_length": 17.25, "blob_id": "a71dcf1c9ecf21015661d3d1cac15ebd3b06cdae", "content_id": "361b3ea62924f66308174e6b121cdb25ab0ed8b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 47, 
"num_lines": 12, "path": "/Learning/Module/Example-Module/EX_Module03.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 使用方法-from...import\n# import sys#导入模块\n# from sys import version#导入sys模块的同时导入version方法\n# print version\n\n#使用方法-from...import*\n\nfrom sys import *#导入sys模块的同时导入sys模块对应的多有属性和方法\nprint version\nprint executable" }, { "alpha_fraction": 0.4353741407394409, "alphanum_fraction": 0.5034013390541077, "avg_line_length": 10.760000228881836, "blob_id": "b39044fb4c60d33c01c29a2d3fd0a874e50196f1", "content_id": "85cb14a53529fc43defb25dfe613c88883cde33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 21, "num_lines": 25, "path": "/Learning/func/Example-func/EX_Func04.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8;\n\n# 函数的调用和返回值\n\n# 函数的调用\n\n# 函数的返回值\n\n'''通过return语句调用'''\n# 返回一个值\n# def func01():\n# i=7;\n# return i;\n# print func01();\n\n# 返回多个值\n# def func02(i,j):\n# k=i*j;\n# return (i,j,k);\n# '''返回例子01'''\n# # x=func02(5,7);\n# # print x;\n# '''返回例子01'''\n# y,z,m=func02(4,5);\n# print y;\n" }, { "alpha_fraction": 0.6102941036224365, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 11.454545021057129, "blob_id": "277a45b59dff30b1c0d0b85e2de262ce3c358acf", "content_id": "1140a7c9ddcbf5e3005647c053860a8b18547adb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/Learning/Module/Example-Module/EX_Module06.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n#使用dir()函数\n# import sys#导入模块\n# dir(sys)#列出所有功能\n# print sys.__doc__#使用功能__doc__\n\n#扩展\nd=[]\ndir(d)#列表d的属性\nprint d.__class__" }, { "alpha_fraction": 0.7420290112495422, "alphanum_fraction": 0.7449275255203247, "avg_line_length": 22.066667556762695, "blob_id": "e2aeb6820ab7729bd52f551ee4339863e47b8185", "content_id": "9e16d4c44482174356c0833c7c241662f5363af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/Learning/Data_Structure_Acquainted/Example_DSA/EX_DSA01.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n# 数据结构实例\n\n# Python内置的数据结构有元组、列表、字典\n\n# 现在有三个物品,分别是“apple”“orange”“pear”,需要将三个物品存储起来\n\n# 方式一:这三个物品每个物品按顺序分别存储到一个柜子中,这些物品可以取出来,如下所示\n[\"apple\",\"orange\",\"pear\"]\n\n# 方式二:这三个物品每个物品按顺序分别存储到一个柜子中,这些物品不可以取出来,也不可以放新物品跟其挤在一个柜子,如下所示\n(\"apple\",\"orange\",\"pear\")\n\n# 方式三:这三个物品不仅按顺序分别存储到一个柜子中,而且每个柜子还得有个名字\n{\"sam\":\"apple\",\"Jac\":\"orange\",\"mating\":\"pear\"}" }, { "alpha_fraction": 0.7108843326568604, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 28.350000381469727, "blob_id": "ec589e71da993b84a1fa264a0535196bff740080", "content_id": "f3a9581aabe490eac22a03fbd1981aeb386ba7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 842, "license_type": "no_license", "max_line_length": 136, "num_lines": 20, "path": "/Learning/Spiders/Single_thread_crawler/STC/EX_STC02.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 修改http头获取源代码\n# 网站会对访问它的程序进行检查,通过http头的文件实现\nimport re\nimport requests\n\n# 
下面三行是编码转换功能,先忽略\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"gb18030\")\n# header是我们自己构造的一个字典,里面保存了user-agent,即修改的http头\n# 进入网站查看源代码,进入Network标签,刷新,进入任意一个标签查看Headers--Requests Headers--user-agent,复制即可\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}\n# 调用header标签的方式\nhtml = requests.get('http://jp.tingroom.com/yuedu/yd300p/',headers = header)\n\nhtml.encoding = 'utf-8'#这一行是将编码转为utf-8否则中文会显示乱码。\n\nprint html.text\n\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 38.5, "blob_id": "bee1be59829d59db98f5f500a8b0aede5a55c7bc", "content_id": "d26ad65c2b5e3328e59e13e8dd3fb991b9448d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/Learning/Spiders/Directional_Spider/Examples/novelspider/main.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "\n\nfrom scrapy import cmdline\ncmdline.execute(\"scrapy crawl novelspider\".split())" }, { "alpha_fraction": 0.7184750437736511, "alphanum_fraction": 0.7478005886077881, "avg_line_length": 19.117647171020508, "blob_id": "cdb0ae529bf697e6ee58dbcb732a5bdd30d52615", "content_id": "5e23a455fb0fb4696dba37c21e70174c52bf2b29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/Learning/webservice/webservice.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom suds.client import Client\nfrom suds.transport.https import HttpAuthenticated\n\nurl = 'http://172.20.40.7/Integration/Sync/Service/GetAllVideo.asmx?wsdl'\n\nclient = Client(url)\n\nprint client\n\nprint client.service.AllVideo(name = '网球比赛')" }, { "alpha_fraction": 0.6297709941864014, "alphanum_fraction": 0.6450381875038147, "avg_line_length": 20.58333396911621, "blob_id": "b12b7874a41d87983e2c0f24026cd3582ae02fa9", "content_id": "e0313ae7be69b411204216aa96e673ed64fe413a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 51, "num_lines": 12, "path": "/Learning/Spiders/Directional_Spider/Examples/douban/douban/spiders/spider.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n\n\nfrom scrapy.contrib.spiders import CrawlSpider\n\nclass Douban(CrawlSpider):\n name = \"douban\"\n start_urls = ['http://movie.douban.com/top250']\n\n def parse(self, response):\n print response.body\n print response.url\n\n\n\n" }, { "alpha_fraction": 0.5119904279708862, "alphanum_fraction": 0.5503597259521484, "avg_line_length": 26.33333396911621, "blob_id": "0f402b27fc85721c58b3666103c67d3c7649d48d", "content_id": "64b2214f19ab7b97f0e0bf7e59aad04a418f336f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 77, "num_lines": 30, "path": "/Learning/Sort/shortBubbleSort.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 
-*-\n\n'''\n每个回合都从第一个元素开始和他后面的元素比较,如果比它后面的元素更大的话就交换,一直重复,直到这个元素到了它能到达的位置。\n每次遍历都将剩下的元素中最大的那个放到了序列的“最后”(除去了前面已经排好的那些元素)。\n注意检测是否已经完成了排序,如果已完成就可以退出了。\n时间复杂度O(n^2)。\n\npython支持对两个数字同时进行交换a,b = b,a就可以交换a和b的值了。\n'''\n\ndef short_bubble_sort(alist):\n exchanges = True\n pass_num = len(alist) - 1\n while pass_num > 0 and exchanges:\n exchanges = False\n for i in range(pass_num):\n if alist[i] > alist[i + 1]:\n exchanges = True\n # temp = alist[i]\n # alist[i] = alist[i + 1]\n # alist[i + 1] = temp\n alist[i],alist[i + 1] = alist[i + 1],alist[i] #superise!~_~\n pass_num = pass_num - 1\n\n\nif __name__ == '__main__':\n alist = [20,40,30,90,60,80,70,50,110,100]\n short_bubble_sort(alist)\n print(alist)\n\n \n" }, { "alpha_fraction": 0.42698413133621216, "alphanum_fraction": 0.49365079402923584, "avg_line_length": 12.145833015441895, "blob_id": "bee0d543b3dfcd62213b817da2d87773807c68f8", "content_id": "cb303ae0f05cc92f982491f7df9de060348a728d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 30, "num_lines": 48, "path": "/Learning/func/Example-func/EX_Func02.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8;\n\n# example-函数的形参与实参\n\n\n# 参数的概念\n\n# a=\"asdfg\"\n# print len(a);\n\n# 什么是形参\n\n# def function01(a,b):\n# if a>b:\n# print a;\n# else:\n# print b;\n\n# 什么是实参\n\n# def function02(a,b):\n# if a>b:\n# print a;\n# else:\n# print b;\n# function02(2,5);\n\n# 参数的传递\n\n# def function03(a,b):\n# if a>b:\n# print \"a>b\";\n# else:\n# print \"a<=b\";\n# function03(2,5);\n\n# 关键参数\n\n# def function04(a=1,b=2,c=4):\n# print a;\n# print b;\n# print c;\n# function04(5);\n# function04(b=7,a=8);\n# function04(5,c=2,b=4);\n# function04(b=4,c=3,a=1);\n'''注意,参数不能冲突'''\n# function04(b=2,c=3,6);" }, { "alpha_fraction": 0.6104100942611694, "alphanum_fraction": 0.6230283975601196, "avg_line_length": 19.483871459960938, "blob_id": "d7101f8b1f453c3d2ea2d4af5e00ddc13858cd19", "content_id": "0ee9995129382ffaae1b016db0769775a712c4f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 68, "num_lines": 31, "path": "/Learning/The_basic_regular_expressions/Example_BRE/EX_BRE02.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 导入re库文件\nimport re\n\nold_url='http://www.jikexueyuan.com/course/android/?pageNum=2'\ntotal_page=20\n\nf = open('text.txt','r')\nhtml = f.read()\nf.close()\n\n# 爬取标题\ntitle = re.search('<title>(.*?)</title>',html,re.S).group(1)\nprint title\n\n# 爬取链接\nlinks = re.findall('href=\"(.*?)\"',html,re.S)\nfor each in links:\n print each\n\n# 抓取部分文字,先打再小\ntext_fied = re.findall('<ul>(.*?)</ul>',html,re.S)[0]\nthe_text = re.findall('\">(.*?)</a>',text_fied,re.S)\nfor every_text in the_text:\n print every_text\n\n# sub实现翻页\nfor i in range(2,total_page+1):\n new_link = re.sub('pageNum = \\d+','pageNum = %d'%i,old_url,re.S)\n print new_link" }, { "alpha_fraction": 0.5023364424705505, "alphanum_fraction": 0.5280373692512512, "avg_line_length": 20.424999237060547, "blob_id": "2bfacea8f1c5bf0aae3d7f4da00a8ca94d4d030b", "content_id": "9eb0ef3fb1fd7e6d54565a538fb5573097df01ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 61, "num_lines": 40, "path": "/Learning/socket/Server.py", 
"repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\nimport socket\nimport time\nimport threading\n\n\ndef tcplink(sock,addr):\n print 'accept new connection from %s:%s...' % addr\n\n while True:\n data = sock.recv(1024)\n if data:\n if data == \"s01d\":\n sock.send('happy')\n if data == \"s02d\":\n sock.send('sad')\n if data == \"s03d\":\n sock.send('normal')\n else:\n sock.send('input error')\n break\n \n sock.close()\n print 'Connection from %s:%s closed.' % addr\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n# 监听端口:\ns.bind(('127.0.0.1',9999))\ns.listen(5)\nprint 'wating for connection...'\n\n\nwhile True:\n # 接受一个新的连接\n sock,addr = s.accept()\n # 创建新的线程来处理TCP连接\n t = threading.Thread(target=tcplink,args=(sock,addr))\n t.start()" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.560606062412262, "avg_line_length": 13.84615421295166, "blob_id": "7a2adb2d3b2cb5021ba8778b79043e0829f297cc", "content_id": "871b7c31164f37aa418b2b354f8d8ba201a36b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 32, "num_lines": 13, "path": "/Learning/Module/Example-Module/EX_Module04.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 首先我们分别看一下这个模块在不同场景中的__name__的值\n# print __name__\n\n\n#__name__的使用\n#其次我们看一下__name__属性的常用情况\n\n# if __name__==\"__main__\":\n# print \"It's main\"\n# else:\n# print \"It's not main\"\n\n\n\n\n\n" }, { "alpha_fraction": 0.4530651271343231, "alphanum_fraction": 0.4597701132297516, "avg_line_length": 17, "blob_id": "4553254ea74c5176b3b99efcda3174fee79b60ac", "content_id": "c0ba30b632294a6c4e57368a9ad854dbd7d0e887", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1262, "license_type": "no_license", "max_line_length": 44, "num_lines": 58, "path": "/Learning/Data_Structure_Acquainted/Example_DSA/EX_DSA03.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# 队列的实现\n\n\nclass Queue():\n def __init__(qu, size):\n '''队列的初始化\n\n 两个参数为队列的主体和队列的容量\n 使用列表进行\"[]\"声明\n 对手和对位的定义'''\n qu.queue = []\n qu.size = size\n qu.head = -1\n qu.tail = -1\n\n def Empty(qu):\n '''判断队列是否为空\n\n 队尾指针和对位指针是否相等'''\n if qu.head == qu.tail:\n return True\n else:\n return False\n\n def Full(qu):\n '''判断队列是否已满'''\n if qu.tail - qu.head + 1 == qu.size:\n return True\n else:\n return False\n\n def enQueue(qu, content):\n '''入队方法\n\n 首先判断是否已满\n 通过append方法将数据压入队列'''\n if qu.Full():\n print \"Queue is Full!\"\n else:\n qu.queue.append(content)\n qu.tail = qu.tail + 1\n\n def outQueue(qu):\n '''出队操作\n\n 首先判断队列是否为空'''\n if qu.Empty():\n print \"Queue is Empty!\"\n else:\n qu.head = qu.head + 1\n\nq=Queue(7)\nprint q.Empty()\nprint q.enQueue(\"hello\")\nprint q.Empty()\nprint q.outQueue()\n" }, { "alpha_fraction": 0.5675287246704102, "alphanum_fraction": 0.6005747318267822, "avg_line_length": 22.965517044067383, "blob_id": "ce1966d0719e058f00cdf786c4462d0552fb97bf", "content_id": "319b5d191eb7109362b530cbeec878f88421b314", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 896, "license_type": "no_license", "max_line_length": 51, "num_lines": 29, "path": "/Learning/Sort/quickSort.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 
-*-\n\n'''\n通过一趟排序将要排序的数据分割成独立的两部分,\n其中一部分的所有数据都比另外一部分的所有数据都要小,然后再按此方法对这两部分数据分别进行快速排序,\n整个排序过程可以递归进行,以此达到整个数据变成有序序列。\n'''\n\ndef quick_sort(lists, left, right):\n if left >= right:\n return lists\n key = lists[left]\n low = left\n high = right\n while left < right:\n while left < right and lists[right] >= key:\n right -= 1\n lists[left] = lists[right]\n while left < right and lists[left] <= key:\n left += 1\n lists[left] = lists[left]\n lists[right] = key\n quick_sort(lists, low, left - 1)\n quick_sort(lists, left + 1, high)\n return lists\n\nlists = [54,26,93,17,77,31,44,55,20]\nquick_sort(lists)\nprint(lists)\n\n" }, { "alpha_fraction": 0.5476190447807312, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 14.875, "blob_id": "a74fb8fe89112e34281d8db330e40fbbcb8a776e", "content_id": "0846e92d058717cd3137a66cac3a6377d9a85b79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/Learning/Module/Example-Module/EX_Module05(01).py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\nnum01=0\nnum02=0\ndef func(num01,num02):\n answer = num01 *num02\n return answer\na= func(num01,num02)\nprint a" }, { "alpha_fraction": 0.4166666567325592, "alphanum_fraction": 0.4756944477558136, "avg_line_length": 10.5600004196167, "blob_id": "1b282bb7f3359a938938a800f7a4565acd314e1e", "content_id": "7884b6f8c6ecaa6fb588afa23fac557ef895093a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/Learning/func/Example-func/EX_Func03.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8;\n\n# 作用域\n\n# 局部变量\n\n# def func02(a):\n# i=7;\n# print i;\n# i=9;\n# '''i为函数中的局部变量,i=9不执行'''\n# func02(i);'''i为7'''\n# '''i为全局变量,输出9'''\n# print i;\n\n# 全局变量\n\n# def func03():\n# global i;'''global:全局变量申明的关键字'''\n# i=7;\n# print i;\n# i = 9;\n# func03();\n# i=9;\n# print i;" }, { "alpha_fraction": 0.5859546661376953, "alphanum_fraction": 0.5910753607749939, "avg_line_length": 28.354839324951172, "blob_id": "16f3a7b7b1e01b22108531bc7598e58a1fe47020", "content_id": "05f9abf49b3597905ce7b4518fd39c6032dac67f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3016, "license_type": "no_license", "max_line_length": 89, "num_lines": 93, "path": "/Learning/Spiders/Single_thread_crawler/STC/EX_STC04.py", "repo_name": "ArsenalLevel3/PythonLearning", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\n# import requests\n# import re\n#\n# url = 'http://www.crowdfunder.com/browse/deals'\n# # html = requests.get(url).text\n# # print html\n#\n# data = {\n# 'entities_only':'true',\n# 'page':'2'\n# }\n#\n# html_post = requests.post(url,data=data)\n# title = re.findall('\"card-title\">(.*?)</div>',html_post.text,re.S)\n# for each in title:\n# print each\n\n\n\n# 爬取计科学院课程\n# 目标网站:http://www.jikexueyuan.com/course/\n# 目标内容:课程名称,课程介绍,课程时间,课程等级,学习人数\n# 设计知识:requests获取网页,re.sub换页,正则表达式匹配内容\n\nimport re\nimport requests\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nclass spider(object):\n def __init__(self):\n print u'开始爬取内容。。。'\n\n # getsource用来获取网页源代码\n def getsource(self,url):\n html = requests.get(url)\n return html.text\n\n # changepage用来生产不同页数的链接\n def changepage(self,url,total_page):\n now_page = 
int(re.search('pageNum=(\\d+)',url,re.S).group(1))\n page_group = []\n for i in range (now_page,total_page+1):\n link = re.sub('pageNum=\\d+','pageNum=%s'%i,url,re.S)\n page_group.append(link)\n return page_group\n\n # geteveryclass用来抓取每个课程块的信息\n def geteveryclass(self,source):\n everyclass = re.findall('(<li deg=\"\".*?</li>)',source,re.S)\n return everyclass\n\n # getinfo用来从每个课程块中提取出我们需要的信息\n def getinfo(self,eachclass):\n info = {}\n info['title'] = re.search('target=\"_blank\">(>*?)</a>',eachclass,re.S).group(1)\n info['content'] = re.search('</h2><p>(.*?)</em>',eachclass,re.S).group(1)\n timeandlevel = re.findall('<em>(.*?)</em>',eachclass,re.S)\n info['classtime'] = timeandlevel[0]\n info['classlevel'] = timeandlevel[1]\n info['learning'] = re.search('\"learn-number\">(.*?)</em>',eachclass,re.S).group(1)\n return info\n\n # saveinfo用来保存结果到info.txt文件中\n def saveinfo(self,classinfo):\n f = open('info.txt','a')\n for each in classinfo:\n f.writelines('title:'+ each['title'] + '\\n')\n f.writelines('content:'+ each['content'] + '\\n')\n f.writelines('classtime:'+ each['ticlasstimetle'] + '\\n')\n f.writelines('classlevel:'+ each['classlevel'] + '\\n')\n f.writelines('learning:'+ each['learning'] + '\\n\\n')\n f.close()\n\n\nif __name__=='__main__':\n\n classinfo = []\n url = 'http://www.jikexueyuan.com/course/?pageNum=1'\n jikespider = spider()\n all_links = jikespider.changepage(url,10)\n for link in all_links:\n print u'正在处理页面:' + link\n html = jikespider.getsource(link)\n everyclass = jikespider.geteveryclass(html)\n for each in everyclass:\n info = jikespider.getinfo(each)\n classinfo.append(info)\n\n jikespider.saveinfo(classinfo)\n\n\n\n\n" } ]
29
kkkemp/lessons-dev
https://github.com/kkkemp/lessons-dev
f48e5adc830ab99e7a5120e6aaa62600a8bf727a
f74a60c0bd900e1f772ac721bd0d6df21b4a4c88
870ed0435589e02f70113cc0218370774640bd17
refs/heads/master
2022-12-02T17:16:38.609055
2020-08-04T22:54:47
2020-08-04T22:54:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5638090968132019, "alphanum_fraction": 0.5696393847465515, "avg_line_length": 27.398773193359375, "blob_id": "ec4185325b127eefc72e945d8100bb84f665a566", "content_id": "3b1035cfccd70a51c53afaa072b130c53e493cfe", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4631, "license_type": "permissive", "max_line_length": 138, "num_lines": 163, "path": "/supplementary/hourofci.py", "repo_name": "kkkemp/lessons-dev", "src_encoding": "UTF-8", "text": "import ipywidgets as widgets\nimport requests\n# Retrieve username\nimport getpass\n# Encoding\nimport hashlib\n\n# Execute a notebook\nimport io\nimport nbformat \nfrom IPython import get_ipython\n\n# v7 - Add user_agent parameter, which is from the notebook\ndef SubmitBtn(user_agent,lesson,lesson_level,question,widget):\n \"\"\" Display a submit button\n\n Input:\n lesson (e.g., \"geospatial-data\")\n lesson_level (e.g., \"beginner\")\n question - defined behind instead of in the notebooks\n widget - the widget of which value will be submitted\n \"\"\"\n\n # Submit button\n button = widgets.Button(\n description = 'Submit',\n disabled = False,\n button_style = '',\n icon = 'check'\n )\n \n display(button)\n \n # Output\n output = widgets.Output()\n display(output) \n \n\n # Submit function\n def submit(b):\n\n # Logging\n host = \"check.hourofci.org\"\n port = \"4000\" \n answer = widget.value\n\n # v6 - Retrieve username\n if str(getpass.getuser()).split('-')[0] == \"jupyter\":\n username = str(getpass.getuser()).split('-')[1] # In Jupyterhub, getuser() = Jupyter-username\n else:\n username = str(getpass.getuser())\n # v7 - Encode username\n username_hash = hashlib.md5(username.encode()).hexdigest()\n\n # v7 - Encode user agent\n user_agent_hash = hashlib.md5(user_agent.encode()).hexdigest()\n\n # v6 - Add username \n # v7 - Add user agent\n url = \"https://{}:{}/{}/{}/{}/{}/{}/{}\".format(host, port, username_hash, user_agent_hash, lesson, lesson_level, question, answer)\n # print(url)\n # Send_request\n r = requests.get(url)\n\n # Print widget value\n with output:\n output.clear_output()\n print(widget.value)\n if r.status_code == requests.codes.ok:\n print(\"Submit Successfully!\")\n \n button.on_click(submit)\n\n\n\n# v8 - Add a run button to run a cell and record this action\ndef RunBtn(user_agent,lesson,lesson_level,nbfilename,code_tag):\n \"\"\" Display a run button\n\n Input:\n lesson (e.g., \"geospatial-data\")\n lesson_level (e.g., \"beginner\")\n nbfilename - name of the current jupyter notebook\n code_tag - tags of the code cell we want to run (e.g., \"Q1\")\n \"\"\"\n\n # Run button\n button = widgets.Button(\n description = ' Run',\n disabled = False,\n button_style = '',\n icon = 'step-forward'\n )\n \n display(button)\n \n # Output\n output = widgets.Output()\n display(output) \n \n # Run function\n def run(b):\n output.clear_output()\n\n # v8 - Run a cell\n # TODO: add the path of the file\n nbfile = nbfilename+\".ipynb\"\n execute_notebook_cell(nbfile,code_tag)\n\n # Logging\n host = \"check.hourofci.org\"\n port = \"4000\" \n\n # Retrieve username\n if str(getpass.getuser()).split('-')[0] == \"jupyter\":\n username = str(getpass.getuser())\n else:\n username = str(getpass.getuser()).split('-')[1] # In Jupyterhub, getuser() =jupyter-username\n # Encode username\n username_hash = hashlib.md5(username.encode()).hexdigest()\n\n # Encode user agent\n user_agent_hash = hashlib.md5(user_agent.encode()).hexdigest()\n\n url = 
\"https://{}:{}/{}/{}/{}/{}/{}/{}\".format(host, port, username_hash, user_agent_hash, lesson, lesson_level, code_tag, \"Run\")\n # Send_request\n r = requests.get(url)\n # print(r.status_code)\n\n # Give feedback if run sucessfully\n with output:\n # output.clear_output()\n if r.status_code == requests.codes.ok:\n print(\"\\n\\nRun Successfully!\")\n\n def execute_notebook_cell(nbfile,code_tag):\n \"\"\" Execute the specific cells in a notebook\n \"\"\"\n with io.open(nbfile) as f:\n nb = nbformat.read(f, 4) \n \n ip = get_ipython()\n \n # The results are shown in the output area so they can be cleared\n with output:\n for cell in nb.cells:\n if 'tags' in cell.metadata and code_tag in cell.metadata.tags:\n ip.run_cell(cell.source)\n \n button.on_click(run)\n\n\n# def execute_notebook_cell(nbfile,code_tag):\n# \"\"\" Execute the specific cells in a notebook\n# \"\"\"\n# with io.open(nbfile) as f:\n# nb = nbformat.read(f, 4) \n \n# ip = get_ipython()\n \n# for cell in nb.cells:\n# if 'tags' in cell.metadata and code_tag in cell.metadata.tags:\n# ip.run_cell(cell.source)\n\n\n" } ]
1
gabriele905/Web-Scraper
https://github.com/gabriele905/Web-Scraper
870963396ad4b6ed0705c77ceee8be6b98554fac
c345390cd26569cdee3f36f97c4fa7f89762ad4f
7d71b8b3c4fde7b6b21ddd7022627a27a03e97b4
refs/heads/master
2022-02-22T13:36:59.363591
2019-07-12T11:07:54
2019-07-12T11:07:54
196,565,002
0
0
null
2019-07-12T11:07:14
2019-07-12T11:07:57
2022-12-08T05:19:07
Python
[ { "alpha_fraction": 0.47121649980545044, "alphanum_fraction": 0.48443156480789185, "avg_line_length": 38.043479919433594, "blob_id": "3481318327025e801f83e15d97de3837a59a27cf", "content_id": "0973f5dc63550c0742611bc462640c2bc4cbfd5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5524, "license_type": "no_license", "max_line_length": 117, "num_lines": 138, "path": "/parsedata.py", "repo_name": "gabriele905/Web-Scraper", "src_encoding": "UTF-8", "text": "import sys\r\nimport json\r\nfrom classes.Author import Author\r\nfrom classes.Book import Book\r\nfrom classes.Press import Press\r\n\r\n\r\ndef open_book():\r\n mas = []\r\n\r\n with open('PageData.json', encoding=\"utf8\") as json_data:\r\n json_data = json.load(json_data)\r\n for index, i in enumerate(json_data):\r\n book = Book(json_data[index]['author'], json_data[index]['title'], json_data[index]['press'],\r\n json_data[index]['year'], json_data[index]['pages'], json_data[index]['price'])\r\n mas.append(book)\r\n return mas\r\n\r\n\r\ndef connect(number):\r\n a = \"\"\r\n if number == len(sys.argv) - 1:\r\n a = sys.argv[number]\r\n elif len(sys.argv) - 1 > number:\r\n for x in range(number, len(sys.argv)):\r\n if x < len(sys.argv) - 1:\r\n a = a + sys.argv[x] + \" \"\r\n elif x == len(sys.argv) - 1:\r\n a = a + sys.argv[x]\r\n else:\r\n quit(\"Connection error\")\r\n return a\r\n\r\n\r\nif __name__ == '__main__':\r\n bookmas = open_book()\r\n ats = \"\"\r\n\r\n if sys.argv[1] == \"cat\":\r\n press = Press(connect(3))\r\n if sys.argv[2] == \"countA\": # skaiciuoja kiek kategorija turi autoriu cat countA Fairytale\r\n try:\r\n ats = (press.count_authors(bookmas))\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"countB\": # skaiciuoja kiek kategorija turi knygu cat countB Fairytale\r\n try:\r\n ats = (press.count_books(bookmas))\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"printB\": # isspausdina visas kategorijos knygas(visa info) cat printB Fairytale\r\n try:\r\n ats = press.print_books(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n else:\r\n quit(\"Function is not found\")\r\n\r\n elif sys.argv[1] == \"aut\":\r\n author = Author(connect(3))\r\n if sys.argv[2] == \"countB\": # skaiciuoja kiek autorius turi knygu aut countB Lucy Lee\r\n try:\r\n ats = (author.count_books(bookmas))\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findN\": # randa naujausia autoriaus knyga aut findN Lucy Lee\r\n try:\r\n ats = author.find_newest(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findO\": # randa seniausia autoriaus knyga aut findO Lucy Lee\r\n try:\r\n ats = author.find_oldest(bookmas)\r\n except Exception as e:\r\n quit(e)\r\n elif sys.argv[2] == \"printB\": # isspausdina visas autoriaus knygas(visa info) aut printB Lucy Lee\r\n try:\r\n ats = author.print_books(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n else:\r\n quit(\"Function is not found\")\r\n\r\n elif sys.argv[1] == \"book\":\r\n if sys.argv[2] == \"info\": # isspausdina visa info apie knyga book info Room\r\n try:\r\n book = Book(0, connect(3), 0, 0, 0, 0)\r\n ats = book.print_book_info(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findByY\": # randa ivestu metu knygas book findByY 2017\r\n try:\r\n int(sys.argv[3])\r\n book = Book(0, 0, 0, sys.argv[3], 0, 0)\r\n ats = book.find_by_year(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n 
elif sys.argv[2] == \"findN\": # randa naujausia knyga book findN\r\n try:\r\n book = Book(0, 0, 0, 0, 0, 0)\r\n ats = book.find_newest(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findO\": # randa seniausia knyga book findO\r\n try:\r\n book = Book(0, 0, 0, 0, 0, 0)\r\n ats = book.find_oldest(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findMinPa\": # randa knyga turincia maziausiai puslapiu book findMinPa\r\n try:\r\n book = Book(0, 0, 0, 0, 0, 0)\r\n ats = book.find_least_pages(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findAveragePr\": # randa visu knygu kainu vidurki book findAveragePr\r\n try:\r\n book = Book(0, 0, 0, 0, 0, 0)\r\n ats = book.find_average_price(bookmas)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n elif sys.argv[2] == \"findPr\": # randa kaina uzsakymo imant book findPr 5 Room\r\n try:\r\n book = Book(0, connect(4), 0, 0, 0, 0) # kazkokia knyga kazkoki skaiciu vnt\r\n quantity = sys.argv[3]\r\n ats = book.find_price_by_quantity(bookmas, quantity)\r\n except:\r\n quit(\"Unsuccessful attempt\")\r\n else:\r\n quit(\"Function is not found\")\r\n else:\r\n quit(\"Selection is not found\")\r\n\r\n file = open(\"Result.txt\", \"w\")\r\n file.write(str(ats))\r\n file.close()\r\n print(\"Duomenys issaugoti testfile.txt faile\")\r\n print(ats)" }, { "alpha_fraction": 0.4931185841560364, "alphanum_fraction": 0.5030746459960938, "avg_line_length": 27.69565200805664, "blob_id": "bf654781d0949ccf3897272634598579f30c57ef", "content_id": "1c7d66aba03cbb4aaff2d9ef33945783f61eb17d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3415, "license_type": "no_license", "max_line_length": 76, "num_lines": 115, "path": "/multi.py", "repo_name": "gabriele905/Web-Scraper", "src_encoding": "UTF-8", "text": "import math\r\nimport threading\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\nimport io, time\r\n\r\nats = []\r\nb = []\r\nurl = \"https://www.patogupirkti.lt/\"\r\nresponse = requests.get(url, timeout = 5)\r\nsoup = BeautifulSoup(response.content, \"html.parser\")\r\n\r\nfirstt = soup.find('a', class_='arrow-right pull-right mt5')\r\nif firstt.has_attr('href'):\r\n b.append(firstt.attrs['href']+\"&limit=36\")\r\n b.append(b[0]+\"&p=2\")\r\n\r\nlock = threading.Lock()\r\nfor index,i in enumerate(b):\r\n url = b[index]\r\n response = requests.get(url, timeout = 5)\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n for link in soup.find_all('a') :\r\n k = 0\r\n if link.has_attr('href'):\r\n a = link.attrs['href']\r\n if(a.endswith(\".html\")):\r\n for index, l in enumerate (ats):\r\n try:\r\n lock.acquire(True)\r\n print(l, \" - Lock acquired\")\r\n if ats[index] == a:\r\n k=k+1\r\n finally:\r\n lock.release()\r\n print(l, \" - Lock released\")\r\n if k == 0:\r\n ats.append(a)\r\n\r\ndata = [] # create a list to store the items\r\ndef get_info(start, end):\r\n for link in range(start, end):\r\n url = ats[link]\r\n response = requests.get(url, timeout = 5)\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n\r\n\r\n if soup.find(class_=\"author\").find('h2'):\r\n author = soup.find(class_=\"author\").find('h2').text\r\n else:\r\n author = \"-\"\r\n\r\n if soup.find('h1'):\r\n title = soup.find('h1').text\r\n else:\r\n title = \"-\"\r\n\r\n if soup.find('span', itemprop = \"name\"):\r\n press = soup.find('span', itemprop = \"name\").text\r\n else:\r\n press = \"-\"\r\n\r\n 
if soup.find(itemprop = \"copyrightYear\"):\r\n year = soup.find(itemprop = \"copyrightYear\").text\r\n else:\r\n year = \"0\"\r\n\r\n if soup.find(itemprop = \"numberOfPages\"):\r\n pages = soup.find(itemprop = \"numberOfPages\").text\r\n else:\r\n pages = \"0\"\r\n\r\n if soup.find('span', class_ = \"font-16\"):\r\n pr = soup.find('span', class_ = \"font-16\").text[0:-2]\r\n pr = pr.split(\",\")\r\n price = pr[0]+\".\"+pr[1]\r\n else:\r\n price = \"0\"\r\n\r\n item = {}\r\n item['author'] = author\r\n item['title'] = title\r\n item['press'] = press\r\n item['year'] = year\r\n item['pages'] = pages\r\n item['price'] = price\r\n data.append(item) # add the item to the list\r\n\r\nstart_time = time.time()\r\nthread_count = 16\r\nbook_count = len(ats)-1\r\nthread_list = []\r\n\r\nfor i in range(thread_count):\r\n start = math.floor(i * book_count / thread_count) + 1\r\n end = math.floor((i + 1) * book_count / thread_count) + 1\r\n thread_list.append(threading.Thread(target=get_info, args=(start, end)))\r\n\r\nfor thread in thread_list:\r\n thread.start()\r\n\r\nfor thread in thread_list:\r\n thread.join()\r\n\r\nwith io.open('PageData.json', 'w', encoding='utf8') as json_file:\r\n json_file.write(\r\n '[' +\r\n ',\\n'.join(json.dumps(i, ensure_ascii=False) for i in data) +\r\n ']\\n')\r\n\r\nend_time = time.time()\r\nprint(\"OK\")\r\nprint(\"Time taken: \" + str(end_time - start_time) + \"sec\")\r\n" }, { "alpha_fraction": 0.5179283022880554, "alphanum_fraction": 0.5290836691856384, "avg_line_length": 26.224720001220703, "blob_id": "3e16c7a70ec21e05ad8f51466c920da5d11a4819", "content_id": "16be02b3ab94661eedf4a3066329e94751346387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2510, "license_type": "no_license", "max_line_length": 69, "num_lines": 89, "path": "/webscraper.py", "repo_name": "gabriele905/Web-Scraper", "src_encoding": "UTF-8", "text": "import time\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\nimport io\r\n\r\nats = []\r\nb = []\r\nurl = \"https://www.patogupirkti.lt/\"\r\nresponse = requests.get(url, timeout = 5)\r\nsoup = BeautifulSoup(response.content, \"html.parser\")\r\n\r\nfirstt = soup.find('a', class_='arrow-right pull-right mt5')\r\nif firstt.has_attr('href'):\r\n b.append(firstt.attrs['href']+\"&limit=36\")\r\n b.append(b[0]+\"&p=2\")\r\n\r\nfor index,i in enumerate(b):\r\n url = b[index]\r\n response = requests.get(url, timeout = 5)\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n for link in soup.find_all('a') :\r\n k = 0\r\n if link.has_attr('href'):\r\n a = link.attrs['href']\r\n if(a.endswith(\".html\")):\r\n for index, l in enumerate (ats):\r\n if ats[index] == a:\r\n k=k+1\r\n if k == 0:\r\n ats.append(a)\r\n\r\ndata = [] # create a list to store the items\r\nstart_time = time.time()\r\nfor link in ats:\r\n url = link\r\n response = requests.get(url, timeout = 5)\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n\r\n\r\n if soup.find(class_=\"author\").find('h2'):\r\n author = soup.find(class_=\"author\").find('h2').text\r\n else:\r\n author = \"-\"\r\n\r\n if soup.find('h1'):\r\n title = soup.find('h1').text\r\n else:\r\n title = \"-\"\r\n\r\n if soup.find('span', itemprop = \"name\"):\r\n press = soup.find('span', itemprop = \"name\").text\r\n else:\r\n press = \"-\"\r\n\r\n if soup.find(itemprop = \"copyrightYear\"):\r\n year = soup.find(itemprop = \"copyrightYear\").text\r\n else:\r\n year = \"0\"\r\n\r\n if soup.find(itemprop = 
\"numberOfPages\"):\r\n pages = soup.find(itemprop = \"numberOfPages\").text\r\n else:\r\n pages = \"0\"\r\n\r\n if soup.find('span', class_ = \"font-16\"):\r\n pr = soup.find('span', class_ = \"font-16\").text[0:-2]\r\n pr = pr.split(\",\")\r\n price = pr[0]+\".\"+pr[1]\r\n else:\r\n price = \"0\"\r\n\r\n item = {}\r\n item['author'] = author\r\n item['title'] = title\r\n item['press'] = press\r\n item['year'] = year\r\n item['pages'] = pages\r\n item['price'] = price\r\n data.append(item) # add the item to the list\r\n\r\nwith io.open('PageData.json', 'w', encoding='utf8') as json_file:\r\n json_file.write(\r\n '[' +\r\n ',\\n'.join(json.dumps(i, ensure_ascii=False) for i in data) +\r\n ']\\n')\r\nend_time = time.time()\r\nprint(\"OK\")\r\nprint(\"Time taken: \" + str(end_time - start_time) + \"sec\")" } ]
3
adityam97/user-login-system
https://github.com/adityam97/user-login-system
ec31c993482868cd11d384a9bd8d8b75573be523
099dd92970acc0712fecb8d88df179f2b477edb4
fa3f197378035290f6b6fae6e66ed45fa842d26e
refs/heads/main
2023-03-26T02:17:37.374716
2021-03-22T14:18:05
2021-03-22T14:18:05
348,293,819
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41101694107055664, "alphanum_fraction": 0.5911017060279846, "avg_line_length": 8, "blob_id": "e4f6b297df93e768bc161d3199c24e9cfdaa847d", "content_id": "eb7709ad698c7d8973de943d4ac2de1f99ad6f50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 472, "license_type": "no_license", "max_line_length": 24, "num_lines": 47, "path": "/requirements.txt", "repo_name": "adityam97/user-login-system", "src_encoding": "UTF-8", "text": "Authlib==0.15.3\r\n\r\ncertifi==2020.12.5\r\n\r\ncffi==1.14.5\r\n\r\nchardet==4.0.0\r\n\r\nclick==7.1.2\r\n\r\ncryptography==3.4.6\r\n\r\nFlask==1.1.2\r\n\r\nFlask-MySQLdb==0.2.0\r\n\r\nidna==2.10\r\n\r\nitsdangerous==1.1.0\r\n\r\nJinja2==2.11.3\r\n\r\nMarkupSafe==1.1.1\r\n\r\nmysql==0.0.2\r\n\r\nmysql-connector==2.2.9\r\n\r\nmysqlclient==2.0.3\r\n\r\noauthlib==3.1.0\r\n\r\npycparser==2.20\r\n\r\nrequests==2.25.1\r\n\r\nrequests-oauthlib==1.3.0\r\n\r\nurllib3==1.26.4\r\n\r\nWerkzeug==1.0.1\r\n\r\npandas==1.1.5\r\n\r\nnumpy==1.19.1\r\n\r\ngunicorn==20.0.4\r\n\r\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 36, "blob_id": "2512749d512f2b6cb184bb8b9b4d23c508669746", "content_id": "3b901511ab459d37ebe14880d6fb5598577efc07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 36, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/db.sql", "repo_name": "adityam97/user-login-system", "src_encoding": "UTF-8", "text": "SELECT * FROM new_schema1.new_table;" }, { "alpha_fraction": 0.6129186153411865, "alphanum_fraction": 0.621499240398407, "avg_line_length": 28.3426570892334, "blob_id": "59822ddfc14bbf2147515d75eb7010839225444f", "content_id": "d935859f610a9d92995e6547e158384de7e5a039", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8391, "license_type": "no_license", "max_line_length": 133, "num_lines": 286, "path": "/app.py", "repo_name": "adityam97/user-login-system", "src_encoding": "UTF-8", "text": "import mysql.connector\nimport MySQLdb.cursors\nimport mysql\nimport flask\nimport os\nimport json\nimport requests_oauthlib\nimport pandas as pd\nfrom flask import Flask, render_template, request,session, url_for, redirect, send_file\nfrom flask_mysqldb import MySQL\nfrom authlib.integrations.flask_client import OAuth\nfrom requests_oauthlib.compliance_fixes import facebook_compliance_fix\nimport numpy\nimport csv\n\n#creating Json file and reading it\n# dbJson = open(r'mysql.json')\n# dbData = dbJson.read()\n\n# #parse json\n# dbInfo = json.loads(dbData)\n\n#initializing app\napp = Flask(__name__) \noauth = OAuth(app)\nfb_app = flask.Flask(__name__)\n\n\n\n#data_frame = pd.read_csv(\"data.csv\")\n\n \n\n\napp.secret_key=\"Adi\"\n\n# db config for user login\n# app.config['MYSQL_HOST'] = dbInfo[\"host\"]\n# app.config['MYSQL_USER'] = dbInfo[\"user\"]\n# app.config['MYSQL_PASSWORD'] = dbInfo[\"password\"] \n# app.config['MYSQL_DB'] = dbInfo[\"DB\"]\n# print(app.config)\n\n#config for social login google\napp.config['SECRET_KEY'] = \"THIS SHOULD BE SECRET\"\napp.config['GOOGLE_CLIENT_ID'] = \"44649573811-g4vfucn8as4u39m24pca3v8rppddj7h4.apps.googleusercontent.com\"\napp.config['GOOGLE_CLIENT_SECRET'] = \"lYfaygH0WrU5r_2my_JsEMXa\"\n\n#config for social login fb\n#URL = \"https://679e4c83.ngrok.io\"\nURL = \"https://userlogin-system.herokuapp.com\"\n# URL = \"localhost:8000\"\n\nFB_CLIENT_ID = 
\"870940717019310\"\nFB_CLIENT_SECRET = \"51fec30fd5b076f732c89b1000e8ee0e\"\nFB_AUTHORIZATION_BASE_URL = \"https://www.facebook.com/dialog/oauth\"\nFB_TOKEN_URL = \"https://graph.facebook.com/oauth/access_token\"\nFB_SCOPE = [\"email\"]\n\n# This allows us to use a plain HTTP callback\nos.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\n\ngoogle = oauth.register(\n name = 'google',\n client_id = app.config[\"GOOGLE_CLIENT_ID\"],\n client_secret = app.config[\"GOOGLE_CLIENT_SECRET\"],\n access_token_url = 'https://accounts.google.com/o/oauth2/token',\n access_token_params = None,\n authorize_url = 'https://accounts.google.com/o/oauth2/auth',\n authorize_params = None,\n api_base_url = 'https://www.googleapis.com/oauth2/v1/',\n userinfo_endpoint = 'https://openidconnect.googleapis.com/v1/userinfo', # This is only needed if using openId to fetch user info\n client_kwargs = {'scope': 'openid email profile'},\n)\n\n\n#initialize app\n# mysql=MySQL(app)\n\[email protected]('/', methods = ['POST', 'GET'])\ndef home():\n return render_template(\"base.html\")\n\[email protected](\"/login\", methods = ['POST', 'GET'])\ndef login():\n return render_template('login.html')\n\[email protected](\"/signup\", methods = ['POST', 'GET'])\ndef signup():\n return render_template('signup.html')\n\[email protected]('/success', methods = ['POST'])\ndef success():\n email = set()\n if request.method == 'POST':\n\n # username = request.form['username']\n user = request.form['email']\n #email = request.form['email']\n password = request.form['password']\n #mycursor = mysql.connection.cursor()\n # print(email)\n # print(password)\n #mycursor.execute(\"select * from new_table where email = '\" + user + \"' and password = '\" + password + \"'\")\n \n data = pd.read_csv(\"data.csv\")\n #print(data)\n #mysql.connection.commit()\n # print(username,emailid,password)\n\n \n for i in data[\"emailid\"]:\n email.add(i)\n if user in email:\n pwd = data[\"password\"].loc[data[\"emailid\"]==user].iloc[0]\n \n print(\"password from user\",str(password))\n print(\"password from database\",str(pwd))\n \n if str(pwd) == password:\n username = data['username'].loc[data['emailid'] == user].iloc[0]\n session['user'] = user\n session['username'] = username\n return render_template(\"profile.html\", username = session['username'])\n else:\n login = True\n return render_template(\"login.html\", login=login)\n else:\n lol = True\n return render_template(\"login.html\",lol=lol)\n # return \"success\"\n\n\[email protected]('/signsuccess', methods = ['POST','GET'])\ndef signsuccess():\n if request.method == 'POST':\n\n username = request.form['username']\n email = request.form['email']\n password = request.form['password']\n #mycursor = mysql.connection.cursor()\n #mycursor.execute(\"insert into new_table(email,password) values(%s,%s)\", (email, password))\n #mysql.connection.commit()\n # print(\"success\")\n session['user'] = email\n print(username)\n print(session[\"user\"])\n with open (\"data.csv\", \"a\") as csvfile:\n writer = csv.writer(csvfile)\n email = email\n password = password\n username = username\n writer.writerow([username, email, password])\n \n return redirect(url_for('login'))\n \n #return render_template('success.html')\n else:\n return render_template('login.html')\n\n \n \[email protected](\"/profile\",methods = ['POST', 'GET'])\ndef user():\n if \"user\" in session:\n # print(session[\"user\"])\n user = session['user']\n return render_template('profile.html', content = user)\n else:\n return 
render_template('login.html')\n\[email protected]('/logout')\ndef logout():\n if 'user' in session:\n session.pop('user', None)\n #flash(\"you have been loged out!\",\"info\")\n return redirect(url_for('login')) \n else:\n return '<p>User already logged out</p>'\n\n# def write_json (data, filename = \"details.json\"):\n# with open (filename, \"w\") as f:\n# json.dump(data, f, indent=4)\n\nglobal userDetails \nuserDetails = {}\[email protected](\"/user_details\", methods = ['POST','GET'])\ndef user_details():\n print(session[\"user\"])\n \n if request.method == \"POST\":\n userDetails = request.form\n userDetails = userDetails.to_dict()\n print(\"before username and email\", userDetails)\n userDetails['user'] = session['username']\n print(userDetails[\"user\"])\n\n user = session['user'] \n userDetails['email'] = user\n\n\n \n \n print(userDetails[\"email\"])\n\n print(userDetails)\n\n if not userDetails[\"num2\"]:\n del userDetails[\"num2\"]\n if not userDetails[\"num3\"]:\n del userDetails[\"num3\"]\n\n with open ('details.json','w') as f:\n json.dump(userDetails, f)\n\n return redirect('/download')\n\[email protected](\"/download\")\ndef download():\n obj ='details.json'\n return send_file(obj, as_attachment = True)\n\n#google login \[email protected]('/login/google')\ndef google_login():\n google = oauth.create_client('google')\n redirect_uri = url_for('google_authorize', _external=True)\n return google.authorize_redirect(redirect_uri)\n \n\n# Google authorize route\[email protected]('/login/google/authorize')\ndef google_authorize():\n google = oauth.create_client('google')\n token = google.authorize_access_token()\n resp = google.get('userinfo').json()\n print(f\"\\n{resp}\\n\")\n return \"You are successfully signed in using google\"\n \n\n#creating fb routes \[email protected](\"/fb-login\")\ndef fb_login():\n facebook = requests_oauthlib.OAuth2Session(\n FB_CLIENT_ID, redirect_uri=URL + \"/fb-callback\", scope=FB_SCOPE\n )\n authorization_url, _ = facebook.authorization_url(FB_AUTHORIZATION_BASE_URL)\n\n return flask.redirect(authorization_url)\n\n\[email protected](\"/fb-callback\")\ndef callback():\n facebook = requests_oauthlib.OAuth2Session(\n FB_CLIENT_ID, scope = FB_SCOPE, redirect_uri=URL + \"/fb-callback\"\n )\n\n # we need to apply a fix for Facebook here\n facebook = facebook_compliance_fix(facebook)\n\n facebook.fetch_token(\n FB_TOKEN_URL,\n client_secret=FB_CLIENT_SECRET,\n authorization_response=flask.request.url,\n #return \"You are successfully signed in using facebook\"\n \n )\n facebook_user_data = facebook.get(\n \"https://graph.facebook.com/me?fields=id,name,email,picture{url}\"\n ).json()\n\n email = facebook_user_data[\"email\"]\n name = facebook_user_data[\"name\"]\n picture_url = facebook_user_data.get(\"picture\", {}).get(\"data\", {}).get(\"url\")\n\n return f\"\"\"\n User information: <br>\n Name: {name} <br>\n Email: {email} <br>\n Avatar <img src=\"{picture_url}\"> <br>\n <a href=\"/\">Home</a>\n \"\"\"\n \n\nif __name__ == '__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.6563430428504944, "alphanum_fraction": 0.6789755821228027, "avg_line_length": 32.26530456542969, "blob_id": "62846935f498f9eda5495baff2a55db97d7870ec", "content_id": "503fde988786254f0ee40495efd2a058fb3f2dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1679, "license_type": "no_license", "max_line_length": 133, "num_lines": 49, "path": "/google_app.py", "repo_name": "adityam97/user-login-system", "src_encoding": 
"UTF-8", "text": "from flask import Flask,redirect,render_template,url_for\r\nfrom authlib.integrations.flask_client import OAuth\r\n\r\n\r\napp = Flask(__name__)\r\n\r\noauth = OAuth(app)\r\n\r\napp.config['SECRET_KEY'] = \"THIS SHOULD BE SECRET\"\r\napp.config['GOOGLE_CLIENT_ID'] = \"44649573811-g4vfucn8as4u39m24pca3v8rppddj7h4.apps.googleusercontent.com\"\r\napp.config['GOOGLE_CLIENT_SECRET'] = \"lYfaygH0WrU5r_2my_JsEMXa\"\r\n\r\ngoogle = oauth.register(\r\n name = 'google',\r\n client_id = app.config[\"GOOGLE_CLIENT_ID\"],\r\n client_secret = app.config[\"GOOGLE_CLIENT_SECRET\"],\r\n access_token_url = 'https://accounts.google.com/o/oauth2/token',\r\n access_token_params = None,\r\n authorize_url = 'https://accounts.google.com/o/oauth2/auth',\r\n authorize_params = None,\r\n api_base_url = 'https://www.googleapis.com/oauth2/v1/',\r\n userinfo_endpoint = 'https://openidconnect.googleapis.com/v1/userinfo', # This is only needed if using openId to fetch user info\r\n client_kwargs = {'scope': 'openid email profile'},\r\n)\r\n\r\[email protected]('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n# Google login route\r\[email protected]('/login/google')\r\ndef google_login():\r\n google = oauth.create_client('google')\r\n redirect_uri = url_for('google_authorize', _external=True)\r\n return google.authorize_redirect(redirect_uri)\r\n\r\n\r\n# Google authorize route\r\[email protected]('/login/google/authorize')\r\ndef google_authorize():\r\n google = oauth.create_client('google')\r\n token = google.authorize_access_token()\r\n resp = google.get('userinfo').json()\r\n print(f\"\\n{resp}\\n\")\r\n return \"You are successfully signed in using google\"\r\n\r\nif __name__==\"__main__\":\r\n app.run(\"127.0.0.1\",\"8000\",debug=True)\r\n" } ]
4
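
A hardening sketch prompted by the record above, not part of the scraped data: app.py in adityam97/user-login-system authenticates by string-comparing the plaintext password column of data.csv against user input. Its own requirements.txt already pins Werkzeug, whose security helpers (generate_password_hash / check_password_hash are real Werkzeug APIs) give salted one-way hashes; the password strings below are placeholders of mine.

from werkzeug.security import generate_password_hash, check_password_hash

# At signup: persist the hash, never the raw password, in the CSV/DB column.
stored_hash = generate_password_hash("example-password")  # placeholder secret

# At login: verify against the stored hash instead of comparing plaintext.
assert check_password_hash(stored_hash, "example-password")
assert not check_password_hash(stored_hash, "wrong-guess")
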
alexanderzjs/CommunicationClient
https://github.com/alexanderzjs/CommunicationClient
ad4b3c32afec62555b223963e8acf91e82dad465
94bd1fce9e13d86e3ff63999ddd7bc1f115dba9e
05483e478d2678ab9f9167e0386a66e748992e16
refs/heads/main
2023-04-14T00:11:41.175835
2021-03-26T05:11:31
2021-03-26T05:11:31
351,668,868
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6137565970420837, "alphanum_fraction": 0.6402116417884827, "avg_line_length": 24.772727966308594, "blob_id": "80deba3b35d69ca9bda783583f64143acfda65da", "content_id": "8462ff2864ebdcb7d8152461dc9f60e21fdbe041", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 83, "num_lines": 22, "path": "/rest_server.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\nimport redis\n\n\napp = Flask(__name__)\n\n\[email protected]('/transfer',methods=[\"POST\"])\ndef transfer_data():\n party_number = request.json[\"party_number\"]\n value = request.json[\"value\"]\n try:\n r.rpush(str(party_number), value)\n except Exception as e:\n print(e)\n return jsonify({\"response\": \"200\"})\n\n\nif __name__ == \"__main__\":\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n r = redis.Redis(connection_pool=pool)\n app.run(debug=False, host='0.0.0.0', port=8888)\n" }, { "alpha_fraction": 0.6342925429344177, "alphanum_fraction": 0.6558753252029419, "avg_line_length": 26.799999237060547, "blob_id": "29b0e5ef50ad0f30679a8929be6674bc867bfc1d", "content_id": "d96598ae3f2ac0903fe9f8e33460d5474f1c753a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 109, "num_lines": 30, "path": "/websocket_client.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import time\nimport redis\nimport socketio\n\n\nclass WebSocketClient:\n\n def __init__(self):\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n self.r = redis.Redis(connection_pool=pool)\n self.sio = socketio.Client()\n self.sio.connect('http://localhost:8888')\n\n def send(self, value, party_number):\n data = {\"party_number\": party_number, \"value\": value}\n self.sio.emit(\"transfer\", data)\n\n def disconnect(self):\n self.sio.disconnect()\n\n\nclient = WebSocketClient()\nstart_time = time.time()\nnumber_of_requests = 2000\nfor i in range(0, number_of_requests):\n client.send(i, 0)\n time.sleep(0.001)\nend_time = time.time()\nprint(\"elapsed time for \" + str(number_of_requests) + \" requests is \" + str(end_time - start_time) + \" secs\")\nclient.disconnect()\n" }, { "alpha_fraction": 0.6525335907936096, "alphanum_fraction": 0.6742502450942993, "avg_line_length": 27.47058868408203, "blob_id": "1c32af42777bab29666c79fb7aac56067c2888ff", "content_id": "da3cf5b43df01504cd7c1772d46a53fd3fd3166b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 109, "num_lines": 34, "path": "/grpc_client.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import time\nimport grpc\nimport redis\n\nimport transfer_pb2_grpc\nimport transfer_pb2\n\n\nclass GrpcClient:\n\n def __init__(self):\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n self.r = redis.Redis(connection_pool=pool)\n channel = grpc.insecure_channel(\"localhost:8888\")\n self.stub = transfer_pb2_grpc.TransferStub(channel)\n\n def send(self, value, party_number):\n data = transfer_pb2.Request(party_number=party_number, value=value)\n response = self.stub.transfer(data)\n if response.value != 200:\n raise Exception(\"server has some 
exceptions\")\n\n def disconnect(self):\n pass\n\n\nclient = GrpcClient()\nstart_time = time.time()\nnumber_of_requests = 2000\nfor i in range(0, number_of_requests):\n client.send(i, 0)\nend_time = time.time()\nprint(\"elapsed time for \" + str(number_of_requests) + \" requests is \" + str(end_time - start_time) + \" secs\")\nclient.disconnect()" }, { "alpha_fraction": 0.6417322754859924, "alphanum_fraction": 0.6601049900054932, "avg_line_length": 29.479999542236328, "blob_id": "b21efc19b7fe1657232b7f23dce21991251d02b7", "content_id": "05508939d25a15d72fbdff8ce5520c79b8bb8eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 109, "num_lines": 25, "path": "/rest_client.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import time\nimport requests\nimport redis\n\n\nclass RestClient:\n\n def __init__(self):\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n self.r = redis.Redis(connection_pool=pool)\n\n def send(self, value, party_number):\n data = {\"party_number\": party_number, \"value\": value}\n response = requests.post(\"http://localhost:8888/transfer\", json=data)\n if response.ok is False:\n raise Exception(\"cannot send to server\")\n\n\nclient = RestClient()\nstart_time = time.time()\nnumber_of_requests = 2000\nfor i in range(0, number_of_requests):\n client.send(i, 0)\nend_time = time.time()\nprint(\"elapsed time for \" + str(number_of_requests) + \" requests is \" + str(end_time - start_time) + \" secs\")\n" }, { "alpha_fraction": 0.8380952477455139, "alphanum_fraction": 0.8380952477455139, "avg_line_length": 34, "blob_id": "18ac9425e3aa9e4047bc81815e5faff27b8bdd6d", "content_id": "e63c32b102c55985d27ba0614097db4653b5d047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105, "license_type": "no_license", "max_line_length": 81, "num_lines": 3, "path": "/README.md", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "# CommunicationClient\n\nThis repo contains different examples of web client and web server instantiation.\n" }, { "alpha_fraction": 0.6482617855072021, "alphanum_fraction": 0.6707566380500793, "avg_line_length": 30.580644607543945, "blob_id": "a488ec63dbe6280c2a8777c5169344394356f10a", "content_id": "0c50b6dbd036a68344a7410cb2418e4689320d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "no_license", "max_line_length": 103, "num_lines": 31, "path": "/grpc_server.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "from concurrent import futures\nimport grpc\nimport redis\n\nimport transfer_pb2_grpc\nimport transfer_pb2\n\n\nclass TransferServer(transfer_pb2_grpc.TransferServicer):\n\n def __init__(self, comm_host=\"localhost\", comm_port=8888, redis_host='localhost', redis_port=6379):\n pool = redis.ConnectionPool(host=redis_host, port=redis_port, decode_responses=True)\n self.r = redis.Redis(connection_pool=pool)\n\n def transfer(self, request, context):\n response = transfer_pb2.Response()\n try:\n self.r.rpush(str(request.party_number), request.value)\n except Exception as e:\n print(e)\n finally:\n response.value = 200\n return response\n\n\nif __name__ == '__main__':\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n 
transfer_pb2_grpc.add_TransferServicer_to_server(TransferServer(), server)\n server.add_insecure_port(\"[::]:8888\")\n server.start()\n server.wait_for_termination()" }, { "alpha_fraction": 0.6312399506568909, "alphanum_fraction": 0.6553945541381836, "avg_line_length": 26, "blob_id": "38a0b39ec1fa4c5c05ce7e83de657eb01cc7d1d1", "content_id": "5080923e3d5ed572e86df8800471e55c74fc227c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/websocket_server.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import redis\nfrom flask import Flask\nfrom flask_socketio import send, SocketIO\n\napp = Flask(__name__)\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\n\n\[email protected]('transfer')\ndef transfer(json, methods=['GET', 'POST']):\n party_number = json[\"party_number\"]\n value = json[\"value\"]\n try:\n r.rpush(str(party_number), value)\n except Exception as e:\n print(e)\n return 200\n\n\nif __name__ == \"__main__\":\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n r = redis.Redis(connection_pool=pool)\n socketio.run(host=\"0.0.0.0\", port=8888, debug=True, app=app)\n" }, { "alpha_fraction": 0.49865952134132385, "alphanum_fraction": 0.5201072096824646, "avg_line_length": 30.97142791748047, "blob_id": "3acb985c92a8fe0f65e04fe9fd9d7cd3dc8ec7fc", "content_id": "ccb38ee5e3f21be1c56b202710060da3dd25c679", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "no_license", "max_line_length": 83, "num_lines": 35, "path": "/socket_server.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import socket\nimport redis\nimport json\n\n\ndef start_service(host, port, redis):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((host, port))\n sock.listen(1)\n while True:\n conn, addr = sock.accept()\n while True:\n size = 0\n DATA_SIZE = 55\n accumulated_data = ''\n while size < DATA_SIZE:\n data = bytes.decode(conn.recv(55), encoding='utf-8')\n if len(data) == 0:\n break\n accumulated_data += data\n size += len(data)\n if size < DATA_SIZE:\n break\n json_obj = json.loads(accumulated_data)\n party_number = json_obj[\"party_number\"]\n value = json_obj[\"value\"]\n try:\n redis.rpush(str(int(party_number, 16)), int(value, 16))\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\n r = redis.Redis(connection_pool=pool)\n start_service(\"0.0.0.0\", 8888, r)\n" }, { "alpha_fraction": 0.608798086643219, "alphanum_fraction": 0.633150041103363, "avg_line_length": 33.4054069519043, "blob_id": "e11a4a53bf6f47f3b9f3eb8e65e98cd4d9d8c3c5", "content_id": "7c789c316acb5c2667b6a79166768cc51e104dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1273, "license_type": "no_license", "max_line_length": 112, "num_lines": 37, "path": "/socket_client.py", "repo_name": "alexanderzjs/CommunicationClient", "src_encoding": "UTF-8", "text": "import time\nimport redis\nimport socket\nimport json\n\n\nclass SocketClient:\n\n def __init__(self, comm_host='localhost', comm_port=8888, redis_host='localhost', redis_port=6379):\n pool = redis.ConnectionPool(host=redis_host, port=redis_port, decode_responses=True)\n self.r = 
redis.Redis(connection_pool=pool)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((comm_host, comm_port))\n\n def send(self, value, party_number, l=64):\n value = value % (1 << l)\n data = json.dumps({\"party_number\": '0x{:02X}'.format(party_number), \"value\": '0x{:016X}'.format(value)})\n total_sent = 0\n while total_sent < len(data):\n sent = self.sock.send(bytes(data[total_sent:], encoding='utf-8'))\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n total_sent = total_sent + sent\n\n def disconnect(self):\n self.sock.close()\n\n\nclient = SocketClient()\nstart_time = time.time()\nnumber_of_requests = 2000\nfor i in range(0, number_of_requests):\n client.send(i, 0)\n time.sleep(0.001)\nend_time = time.time()\nprint(\"elapsed time for \" + str(number_of_requests) + \" requests is \" + str(end_time - start_time) + \" secs\")\nclient.disconnect()\n" } ]
9
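
An aside on the raw-socket pair in the record above, offered as a sketch rather than as the repo's design: socket_client.py hex-pads its JSON so every message is a fixed width, and socket_server.py hard-codes DATA_SIZE = 55, so any change to the payload shape silently breaks framing. A common alternative is a 4-byte big-endian length prefix per message; the helper names (send_msg, recv_msg, _recv_exact) are hypothetical, Python 3.

import json
import struct

def send_msg(sock, obj):
    payload = json.dumps(obj).encode("utf-8")
    # 4-byte big-endian length header, then the JSON body.
    sock.sendall(struct.pack(">I", len(payload)) + payload)

def _recv_exact(sock, n):
    # recv() may return fewer bytes than asked; loop until n bytes arrive.
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-message")
        buf += chunk
    return buf

def recv_msg(sock):
    (length,) = struct.unpack(">I", _recv_exact(sock, 4))
    return json.loads(_recv_exact(sock, length).decode("utf-8"))
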
manishbisoi/titanic
https://github.com/manishbisoi/titanic
d27f3d169aa14dd5a2958d6089315a1e7e0f9c79
a96a3e200458cebf1cc19ed4307801017cc1377c
865a7507cd2a0f955f8b07a90be5f85107da6bfd
refs/heads/main
2023-05-05T15:25:13.692541
2021-05-24T10:46:58
2021-05-24T10:46:58
370,317,275
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6442195177078247, "alphanum_fraction": 0.6594005227088928, "avg_line_length": 31.363636016845703, "blob_id": "3ec80256e598a8dcf1c75cf68884075db76569d5", "content_id": "c021f9b589b7a942bea61237c4d61e8378f7ca7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2569, "license_type": "no_license", "max_line_length": 96, "num_lines": 77, "path": "/titanic_naiveBayes.py", "repo_name": "manishbisoi/titanic", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n\r\nsns.color_palette(\"mako\", as_cmap=True)\r\n\"\"\"\r\nList of Columns along with data types\r\nPassengerId int64\r\nSurvived int64\r\nPclass int64\r\nName object\r\nSex object\r\nAge float64\r\nSibSp int64\r\nParch int64\r\nTicket object\r\nFare float64\r\nCabin object\r\nEmbarked object\r\n\"\"\"\r\n\r\ntrain_d = pd.read_csv('./train.csv', sep=',')\r\ntest_d = pd.read_csv('./test.csv', sep=',')\r\n\r\ntrain_d = train_d.dropna(how='any', axis = 0)\r\nfeatures = ['Age', 'SibSp', 'Parch', 'Fare']\r\nX = train_d[features]\r\ny = train_d['Survived']\r\n\r\n\"\"\"\r\nirrelevant_features = ['Name', 'Ticket', 'Cabin','PassengerId','Sex','Embarked']\r\ntrain_d = train_d.drop(irrelevant_features, axis = 1)\r\ntest_d = test_d.drop(irrelevant_features, axis = 1)\r\ntrain_d = train_d.drop(['Survived'], axis=1)\r\n\r\ntrain_d = train_d.dropna(axis = 0, how='any')\r\ntest_d = test_d.dropna(axis=0, how='any')\r\n#train_d.to_csv('TEMP__TrainingData.csv')\r\n#test_d.to_csv('TEMP__TestingData.csv')\r\n\"\"\"\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\r\nreg = GaussianNB()\r\nreg.fit(X_train, y_train)\r\npredictions = []\r\nfor each in reg.predict(X_test):\r\n if each > 0.5 : predictions.append(1)\r\n else : predictions.append(0)\r\n\r\n#print(reg.score(X_train, y_train))\r\n#print(mean_absolute_error( reg.predict(X_test), y_test ))\r\n#print(predictions, y_test)\r\n#print(accuracy_score(reg.predict(X_test), y_test))\r\ncf_matrix = confusion_matrix(y_test, predictions)\r\n\r\n#Seaborn heatmap plot\r\ngroup_names = [\"True Neg\",\"False Pos\",\"False Neg\",\"True Pos\"]\r\ngroup_counts = [\"{0:0.0f}\".format(value) for value in cf_matrix.flatten()]\r\ngroup_percentages = [\"{0:.2%}\".format(value) for value in cf_matrix.flatten()/np.sum(cf_matrix)]\r\nlabels = [f\"{v1}\\n{v2}\\n{v3}\" for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]\r\nlabels = np.asarray(labels).reshape(2,2)\r\nsns.heatmap(cf_matrix, annot=labels, fmt='')\r\n#plt.show()\r\nplt.title(\"Naive Bayes Predictions\")\r\nplt.savefig(\"Naive_bayes_Pred_HeatMap.jpg\")\r\n\r\n#print(type(predictions), type(y_test.tolist()))\r\n\r\n#df = pd.DataFrame({'Predictions':predictions, 'Actuals':y_test})\r\n#df.to_csv(\"Predictions and Actuals.csv\")\r\n" }, { "alpha_fraction": 0.6247357130050659, "alphanum_fraction": 0.6405919790267944, "avg_line_length": 31.785715103149414, "blob_id": "851c4c3dc13865b3b0b80e1aef4111ac859bd0f3", "content_id": "d5cd18ff1f38f4d1f7ab36625d4dab7fb30bb0bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", 
"max_line_length": 79, "num_lines": 28, "path": "/Titanic_Visualize.py", "repo_name": "manishbisoi/titanic", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport matplotlib.pyplot as plt\r\n#from sklearn.linear_model import LinearRegression\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\n\"\"\"\r\nList of Columns along with data types\r\nPassengerId int64\r\nSurvived int64\r\nPclass int64\r\nName object\r\nSex object\r\nAge float64\r\nSibSp int64\r\nParch int64\r\nTicket object\r\nFare float64\r\nCabin object\r\nEmbarked object\r\n\"\"\"\r\ntrain_d = pd.read_csv('./train.csv', sep=',')\r\nx = train_d[train_d['Sex'] == 'male']['Survived'].value_counts().tolist()\r\nmen_survived = train_d[train_d['Sex']=='male' & train_d['Survived'] == true]\r\nmen_not_survived = train_d.shape[0] - men_survived\r\nplt.bar([\"men_survived\", \"men_not_survived\"], [men_survived, men_not_survived])\r\nplt.show()\r\n" } ]
2
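
One clarification on titanic_naiveBayes.py in the record above, as a hedged sketch rather than a change to the data: GaussianNB.predict already returns hard 0/1 class labels, so thresholding its output at 0.5 is a no-op for this target. When a tunable cut-off is actually wanted, predict_proba is the scikit-learn API for that; the helper below is hypothetical and assumes a model already fitted on 0/1 labels.

from sklearn.naive_bayes import GaussianNB

def predict_with_threshold(model, X, threshold=0.5):
    # Column 1 of predict_proba is P(class == 1) when model.classes_ is [0, 1].
    proba = model.predict_proba(X)[:, 1]
    return (proba > threshold).astype(int)
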
jannon/django-queued-storage
https://github.com/jannon/django-queued-storage
0be6c64f56a975fedf05e1e6c71051dc90489217
ec5a0d0c48e6da6a678d7ea5833859c64416053d
a883957384212012253dcc1d9d67b8b7837db1bf
refs/heads/master
2020-12-03T05:21:46.105543
2015-06-25T03:52:21
2015-06-25T03:52:21
25,415,881
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45588234066963196, "alphanum_fraction": 0.5, "avg_line_length": 16, "blob_id": "01b21aeb9e241b434d1dc0b024a43971ddc86f5c", "content_id": "ad8faf71721a09f9f61ceed83ec545731a6b0124", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "permissive", "max_line_length": 31, "num_lines": 4, "path": "/queued_storage/__init__.py", "repo_name": "jannon/django-queued-storage", "src_encoding": "UTF-8", "text": "# flake8: noqa\n\n__version__ = '0.7'\n__author__ = 'Jannis Leidel <[email protected]>'\n" }, { "alpha_fraction": 0.5879629850387573, "alphanum_fraction": 0.6805555820465088, "avg_line_length": 19.5238094329834, "blob_id": "f91808a769c8428dcec9cc36df46484dd85cec2c", "content_id": "106db8f95e295be9f6afe30bcbb7b918cdc4b545", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 432, "license_type": "permissive", "max_line_length": 43, "num_lines": 21, "path": "/tox.ini", "repo_name": "jannon/django-queued-storage", "src_encoding": "UTF-8", "text": "[tox]\ndownloadcache = {distshare}\nargs_are_paths = false\nenvlist =\n {py27,py32,py33,py34}-django-{17,18}\n\n[testenv]\nbasepython =\n py27: python2.7\n py32: python3.2\n py33: python3.3\n py34: python3.4\nusedevelop = true\nsetenv =\n CELERY_CONFIG_MODULE=tests.celeryconfig\ncommands = make test\nwhitelist_externals = make\ndeps =\n django-17: Django>=1.7,<1.8\n django-18: Django>=1.8,<1.9\n -rtests/requirements.txt\n\n" }, { "alpha_fraction": 0.8488371968269348, "alphanum_fraction": 0.8604651093482971, "avg_line_length": 8.55555534362793, "blob_id": "b90266ea6f57f94298c9202bfc2c9cf4c8b8455e", "content_id": "f498ec8454f38c571116357c24e3219049ee9563", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 86, "license_type": "permissive", "max_line_length": 14, "num_lines": 9, "path": "/tests/requirements.txt", "repo_name": "jannon/django-queued-storage", "src_encoding": "UTF-8", "text": "flake8\ncoverage\ncelery\ndjango-appconf\nSQLAlchemy\nanyjson\npytest-django\npytest-cov\nsix\n" } ]
3
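
A short note on the tox.ini in the record above: the generative envlist {py27,py32,py33,py34}-django-{17,18} is the cross product of the interpreter and Django factors, and the django-17:/django-18: prefixes in deps pin the matching Django range per factor. A throwaway illustration of the expansion (tox does this internally; the script is mine, not part of the repo):

from itertools import product

pythons = ["py27", "py32", "py33", "py34"]
djangos = ["django-17", "django-18"]
# One tox environment per combination — eight in total here.
for py, dj in product(pythons, djangos):
    print("{0}-{1}".format(py, dj))
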
swipeapp/DataMining
https://github.com/swipeapp/DataMining
96541c13c24cc28a48a0fca03d1b889e1026a640
b188eae25d2e7603abe13f0f09a51a8368ff4ab1
ae58138bc66d1311e968ffa787461b0eada6b678
refs/heads/master
2016-08-09T04:26:02.767079
2015-07-09T20:50:53
2015-07-09T20:51:59
36,305,254
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8041958212852478, "alphanum_fraction": 0.8041958212852478, "avg_line_length": 14.88888931274414, "blob_id": "197cd51da963525dc2e49845e109c5f12b3801cf", "content_id": "ab19d570e51f16bca74ab72a87c3b945a81dfeb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 143, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/README.md", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "# DataMining\n\n\nAirline Crawlers:\nDelta and Jetblue require Chromedriver to run\n\nSpirit and VirginAir run on Firefox\n\nand rest run on PhantomJS\n" }, { "alpha_fraction": 0.49655863642692566, "alphanum_fraction": 0.5080298185348511, "avg_line_length": 45.2876091003418, "blob_id": "9f30cf7f0e05074b0df008aee9a18c2415da42e6", "content_id": "8b55a2e2b1c6232c93d01f2f8d6207db208eb02c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20924, "license_type": "no_license", "max_line_length": 831, "num_lines": 452, "path": "/Alaska Air/alaskaair.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import re\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nfrom decimal import *\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): # Class for crawling the website\n\n def __init__(self):\n\n #self.srclist = []\n #self.destlist = []\n self.src = ''\n self.dest = ''\n\n month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n day = random.choice(range(1, 29))\n year = datetime.now().year\n self.dptdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n month = random.randint(month, (month + 9))\n day = random.choice(range(1, 29))\n if month > 12:\n year = year + 1\n month = month % 12\n\n self.retdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n # self.p_arrdt = []\n self.p_flghtlist = []\n self.p_src = []\n self.p_dest = []\n self.a_flghtlist = []\n self.a_awards = []\n self.a_src = []\n self.a_dest = []\n self.p_fare = []\n self.d_res = {}\n\n self.browser = webdriver.PhantomJS()#(executable_path='C:\\Users\\parth_000\\Desktop\\phantomjs\\bin\\phantomjs')#Chrome(\"C:\\Users\\parth_000\\Desktop\\Alaska\\chromedriver\")\n\n def fillDtls(self): # Function for filling in details\n\n # user_agent = (\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) \" + \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36\")\n\n # dcap = dict(DesiredCapabilities.PHANTOMJS)\n # dcap[\"phantomjs.page.settings.userAgent\"] = user_agent\n\n # self.browser = webdriver.Firefox()#PhantomJS(desired_capabilities=dcap)\n self.browser.get('http://www.alaskaair.com/') # Get response from the website\n if self.src == '':\n self.src = srclist[random.randint(0, len(srclist) - 1)]\n self.dest = destlist[random.randint(0, len(destlist) - 1)]\n\n # Enter the source value\n WebDriverWait(self.browser, 20).until(\n EC.presence_of_element_located((By.ID, \"fromCity1\"))\n )\n #time.sleep(5)\n\n From 
= self.browser.find_element_by_xpath('//*[@id=\"fromCity1\"]')\n From.clear()\n From.send_keys(self.src)\n From.send_keys(Keys.TAB)\n\n # Enter the destination value\n To = self.browser.find_element_by_xpath('//*[@id=\"toCity1\"]')\n To.send_keys(self.dest)\n To.send_keys(Keys.TAB)\n\n # Enter the Departure Date value\n # deptDt = self.browser.find_elements_by_xpath('.//td[@month=\"'+str(month)+'\"][year=\"'+str(year)+'\"]')\n # for i in deptDt:\n\n departDt = self.browser.find_element_by_xpath('//*[@id=\"departureDate1\"]')\n departDt.clear()\n departDt.send_keys(self.dptdt)\n departDt.send_keys(Keys.TAB)\n\n # Enter the Return Date value\n returnDt = self.browser.find_element_by_xpath('//*[@id=\"returnDate\"]')\n returnDt.clear()\n returnDt.send_keys(self.retdt)\n returnDt.send_keys(Keys.TAB)\n self.browser.save_screenshot('screen.png')\n # self.srchPrice()\n\n def srchPrice(self): # Function for Searching by Price\n print \"Search Criteria: Price\"\n\n # Select the radio button and click submit\n\n self.browser.find_element_by_xpath('//*[@id=\"findFlights\"]').click()\n print \"criteria selected\"\n\n try:\n WebDriverWait(self.browser, 150).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"MatrixTable\"))\n )\n except TimeoutException:\n return\n\n try:\n # Get the flight details from the table and traverse each of the option available\n # Append the flight details in a list p_flghtlist\n\n dtls = self.browser.find_elements_by_xpath('//table[contains(@id, \"MatrixTable\")]/tbody')\n # print dtls\n for i in dtls:\n flights = i.find_elements_by_xpath('.//tr[contains(@id, \"flightInfoRow\")]')\n print \"test1\"\n for j in flights:\n tmp_list = []\n\n self.p_src.append(j.get_attribute(\"orig\"))\n self.p_dest.append(j.get_attribute(\"dest\"))\n img = j.find_elements_by_xpath(\n './/div[contains(@class, \"FlightCarrierImage\")]') # .//td[contains(@class, \"FlightCell\")]/ul/li')\n for k in img:\n # print k.find_element_by_xpath('.//img').get_attribute(\"title\")\n if k.find_element_by_xpath('..//div[contains(@class, \"FlightNumber\")]').text[:4]:\n # print k.find_element_by_xpath('..//div[contains(@class, \"FlightNumber\")]').text[:4]\n tmp_list.append(k.find_element_by_xpath('.//img').get_attribute(\n \"title\") + \" \" + k.find_element_by_xpath(\n '..//div[contains(@class, \"FlightNumber\")]').text[:4])\n\n self.p_flghtlist.append(tmp_list)\n\n tmp_list = []\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"BestDealColumn\")][contains(@id, \"td_Price\")]'):\n tmp_list.append(float(j.find_element_by_xpath(\n './/td[contains(@class, \"BestDealColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text[1:].replace(',','')))\n else:\n tmp_list.append(0)\n\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"FullFlexColumn\")][contains(@id, \"td_Price\")]'):\n tmp_list.append(float(j.find_element_by_xpath(\n './/td[contains(@class, \"FullFlexColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text[1:].replace(',','')))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n\n if j.find_element_by_xpath(\n './/td[contains(@class, \"FirstClassDealColumn\")][contains(@id, \"td_Price\")]'):\n tmp_list.append(float(j.find_element_by_xpath(\n './/td[contains(@class, \"FirstClassDealColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, 
\"PriceCell\")]/label[contains(@class, \"Price\")]').text[1:].replace(',','')))\n else:\n tmp_list.append(0)\n\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"FirstClassColumn\")][contains(@id, \"td_Price\")]'):\n tmp_list.append(float(j.find_element_by_xpath(\n './/td[contains(@class, \"FirstClassColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text[1:].replace(',','')))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n self.p_fare.append(tmp_list)\n\n # print self.p_flghtlist\n # print self.p_fare\n # print \"test\"\n\n # Get the departure time and arrival time of the flights and store them in seperate lists\n\n # n = 1\n # for i in dtls:\n # self.p_dpttime.append(i.find_element_by_xpath('.//td/table/tbody/tr[1]/td[contains(@class, \"tdDepart\")]/div[2]/strong').text)\n # try:\n # self.p_arrtime.append(i.find_element_by_xpath('./td/table/tbody/tr[last()-1 ]/td[contains(@class, \"tdArrive\")]/div[2]/strong').text)\n # self.p_arrdt.append(i.find_element_by_xpath('./td/table/tbody/tr[last()-1]/td[contains(@class, \"tdArrive\")]/div[3]/b').text)\n # except NoSuchElementException:\n # self.p_arrtime.append(i.find_element_by_xpath('./td/table/tbody/tr[last()]/td[contains(@class, \"tdArrive\")]/div[2]/strong').text)\n # self.p_arrdt.append(i.find_element_by_xpath('./td/table/tbody/tr[last()]/td[contains(@class, \"tdArrive\")]/div[3]/b').text)\n\n # self.p_fare.append(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text)\n # print self.p_dpttime\n # print self.p_arrtime\n # print self.p_arrdt\n # print self.p_fare\n # self.srchAwards()\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def srchAwards(self): # Function for searching by Award miles\n\n try:\n print \"Search Criteria: Award Points\"\n\n self.browser.find_element_by_xpath('//*[@id=\"awardReservation\"]').click()\n\n #self.browser.find_element_by_xpath('//*[@id=\"ShoppingForm\"]/div[2]/a[1]').click()\n #self.browser.find_element_by_xpath('//*[@id=\"awardReservation\"]').click()\n self.browser.find_element_by_xpath('//*[@id=\"findFlights\"]').click()\n\n print \"criteria selected\"\n\n try:\n WebDriverWait(self.browser, 50).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"MatrixTable\"))\n )\n except TimeoutException:\n return\n\n dtls = self.browser.find_elements_by_xpath('//table[contains(@id, \"MatrixTable\")]/tbody')\n\n self.browser.save_screenshot('screen.png')\n for i in dtls:\n\n flights = i.find_elements_by_xpath('.//tr[contains(@class, \"Option\")]')\n\n print \"test2\"\n for j in flights:\n tmp_list = []\n self.a_src.append(j.get_attribute(\"orig\"))\n self.a_dest.append(j.get_attribute(\"dest\"))\n img = j.find_elements_by_xpath(\n './/div[contains(@class, \"FlightCarrierImage\")]') # .//td[contains(@class, \"FlightCell\")]/ul/li')\n for k in img:\n if k.find_element_by_xpath('..//div[contains(@class, \"FlightNumber\")]').text[:4]:\n tmp_list.append(k.find_element_by_xpath('.//img').get_attribute(\n \"title\") + \" \" + k.find_element_by_xpath(\n '..//div[contains(@class, \"FlightNumber\")]').text[:4])\n\n self.a_flghtlist.append(tmp_list)\n\n tmp_list = []\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"CoachAwardColumn\")][contains(@id, \"td_Price\")]'):\n price = j.find_element_by_xpath(\n './/td[contains(@class, \"CoachAwardColumn\")][contains(@id, 
\"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text\n\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',price).group(0))*1000))\n else:\n tmp_list.append(0)\n\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"CoachFullFlexColumn\")][contains(@id, \"td_Price\")]'):\n price = j.find_element_by_xpath(\n './/td[contains(@class, \"CoachFullFlexColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',price).group(0))*1000))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n\n if j.find_element_by_xpath(\n './/td[contains(@class, \"FirstAwardColumn\")][contains(@id, \"td_Price\")]'):\n price = j.find_element_by_xpath(\n './/td[contains(@class, \"FirstAwardColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',price).group(0))*1000))\n else:\n tmp_list.append(0)\n\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n if j.find_element_by_xpath(\n './/td[contains(@class, \"FirstFullFlexColumn\")][contains(@id, \"td_Price\")]'):\n price = j.find_element_by_xpath(\n './/td[contains(@class, \"FirstFullFlexColumn\")][contains(@id, \"td_Price\")]/div[contains(@class, \"PriceCell\")]/label[contains(@class, \"Price\")]').text\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',price).group(0))*1000))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n self.a_awards.append(tmp_list)\n\n # print self.a_flghtlist\n # print self.a_awards\n\n # self.d_res[n] = [self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n\n if self.p_src:\n self.src = self.p_src[0]\n self.dest = self.p_dest[0]\n\n for i in range(0, len(self.p_flghtlist)): # len(self.p_flghtlist)+len(self.a_flghtlist)):\n for j in range(0, len(self.a_flghtlist)):\n if self.p_flghtlist[i] == self.a_flghtlist[j] and self.p_src[i] == self.a_src[j]:# and self.p_dest[i] == self.a_dest[j]:\n # print \"success\"\n if self.p_src[i] == self.src:\n self.d_res[n] = [self.p_src[i], self.p_dest[i], self.dptdt, self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n \n elif self.p_src[i] == self.dest:\n self.d_res[n] = [self.p_src[i], self.p_dest[i],self.retdt, self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]] \n\n if self.a_awards[j][0] != 0:\n self.d_res[n].append(self.p_fare[i][0]/self.a_awards[j][0])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][1] != 0:\n self.d_res[n].append(self.p_fare[i][1]/self.a_awards[j][1])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][2] != 0:\n self.d_res[n].append(self.p_fare[i][2]/self.a_awards[j][2])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][3] != 0:\n self.d_res[n].append(self.p_fare[i][3]/self.a_awards[j][3])\n else:\n self.d_res[n].append(0)\n\n n += 1\n\n print self.d_res\n # else:\n # del self.p_flghtlist[i]\n # del self.p_dpttime[i]\n # del self.p_arrtime[i]\n # del self.p_fare[i]\n # i -= 1\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n # self.browser.quit()\n # self.writeCSV()\n\n def writeCSV(self): # Function for writing data into results.csv file from result 
dictionary\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['From','To','From Date','Flight #','Coach Lowest Price','Coach Refundable Price','First Class Lowest','First Class Refundable','Coach Lowest Awards','Coach Refundable Awards','First Class Lowest Awards','First Class Refundable Awards']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # writer.writeheader()\n\n for i in range(1, len(self.d_res.keys())+1):\n # print self.d_res[i][1]\n # print self.d_res[i][1][1]\n writer.writerow({'From': self.d_res[i][3], 'To': self.d_res[i][4], 'From Date': self.d_res[i][5],'Flight #': self.d_res[i][0],\n 'Coach Lowest Price': self.d_res[i][1][0],\n 'Coach Refundable Price': self.d_res[i][1][1],\n 'First Class Lowest': self.d_res[i][1][2],\n 'First Class Refundable': self.d_res[i][1][3],\n 'Coach Lowest Awards': self.d_res[i][2][0],\n 'Coach Refundable Awards': self.d_res[i][2][1],\n 'First Class Lowest Awards': self.d_res[i][2][2],\n 'First Class Refundable Awards': self.d_res[i][2][3]})\n\n print \"results.csv updated\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating Table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n\n cursor.execute('INSERT INTO Swipe.r_alaska ( src, dest, from_dt, flight_no,P_Coach_Lowest,P_Coach_Refundable, P_First_Class_Lowest, P_First_Class_Refundable,A_Coach_Lowest,A_Coach_Refundable,A_First_Class_Lowest,A_First_Class_Refundable,Value_Coach_Lowest,Value_Coach_Refundable,Value_First_Class_Lowest,Value_First_Class_Refundable) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\",\"%s\", \"%s\", \"%s\", \"%s\",\"%s\");'.format(from_dt=datetime.strptime(self.d_res[i][2],'%m/%d/%Y').strftime('%Y-%m-%d %H:%M:%S')),[self.d_res[i][0],self.d_res[i][1],self.d_res[i][3],self.d_res[i][4][0],self.d_res[i][4][1],self.d_res[i][4][2],self.d_res[i][4][3],self.d_res[i][5][0],self.d_res[i][5][1],self.d_res[i][5][2],self.d_res[i][5][3],self.d_res[i][6],self.d_res[i][7],self.d_res[i][8],self.d_res[i][9]])\n cursor.execute('commit;')\n\n print \"Table Updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'alaskaairroutes1.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\ndef Start():\n myClassObject = getFares() # ,argv[3],argv[4])\n myClassObject.fillDtls()\n myClassObject.srchPrice()\n myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.writeDb()\n print time.ctime()\n print \"End\"\n\n\n# myClassObject.readCSV()\n\ndef delay(interval):\n print time.ctime()\n s.enter(1, 1, Start, ())\n time.sleep(interval*60)\n return 1\n\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' % argv[0]\n exit(1)\n\n # myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n # s.enter(1, 1, Start, ())\n # s.run()\n readCSV()\n while delay(int(argv[1])):\n # s.enter(1, 1, Start, ())\n # delay(int(argv[1])*60)\n s.run()\n\n# myClassObject.fillDtls()\n" }, { "alpha_fraction": 0.5423231720924377, "alphanum_fraction": 0.5558812618255615, "avg_line_length": 41.3726692199707, "blob_id": 
"cecf46b784f968ffbc9577713845ca68247d35cb", "content_id": "e0f201cf12d0e38f2337b48163808a478b3cbfe5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13647, "license_type": "no_license", "max_line_length": 617, "num_lines": 322, "path": "/United/United.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import re\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): #Class for crawling the website\t\n\n def __init__(self):\n self.src = ''\n self.dest = ''\n\n month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n day = random.choice(range(1, 29))\n year = datetime.now().year\n self.dptdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n month = random.randint(month, (month + 9))\n day = random.choice(range(1, 29))\n if month > 12:\n year = year + 1\n month = month % 12\n\n self.retdt = datetime(year, month, day).strftime('%m/%d/%Y')\n self.p_flghtlist = []\n #self.p_flghtlist1 = []\n self.a_flghtlist = []\n self.a_awards = []\n self.p_fare = []\n self.d_res = {}\n self.browser = webdriver.PhantomJS()\n\n def fillDtls(self): #Function for filling in details\n\n\n #self.browser = webdriver.Firefox()#PhantomJS(desired_capabilities=dcap)\n self.browser.get('http://www.united.com/web/en-US/default.aspx?root=1') #Get response from the website\n\n if self.src == '' and self.dest == '':\n self.src = srclist[random.randint(0, len(srclist) - 1)]\n self.dest = destlist[random.randint(0, len(destlist) - 1)]\n\n #Enter the source value\n From = self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_Origin_txtOrigin\")]')\n From.send_keys(self.src)\n From.send_keys(Keys.ARROW_DOWN)\n From.send_keys(Keys.TAB)\n\n #Enter the destination value\n To = self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_Destination_txtDestination\")]')\n To.send_keys(self.dest)\n To.send_keys(Keys.ARROW_DOWN)\n To.send_keys(Keys.TAB)\n\n #Enter the Departure Date value\n departDt = self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_DepDateTime_Depdate_txtDptDate\")]')\n departDt.clear()\n departDt.send_keys(self.dptdt)\n\n #Enter the Return Date value\n returnDt = self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_RetDateTime_Retdate_txtRetDate\")]')\n returnDt.clear()\n returnDt.send_keys(self.retdt)\n #self.srchPrice()\n\n self.browser.save_screenshot('screen.png')\n\n def srchPrice(self, option): #Function for Searching by Price\n print \"Search Criteria: Price\"\n\n dropdown = self.browser.find_element_by_xpath('//*[@id=\"ctl00_ContentInfo_Booking1_Cabins_cboCabin\"]')\n for i in range(1,option):\n dropdown.send_keys(Keys.ARROW_DOWN)\n\n #Select the radio button and click submit\n self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_SearchBy_rdosearchby1\")]').click()\n print 
\"criteria selected\"\n\n self.browser.find_element_by_xpath('//input[contains(@id, \"ctl00_ContentInfo_Booking1_btnSearchFlight\")]').click()\n\n try:\n WebDriverWait(self.browser, 50).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"revenueSegments\"))\n )\n except TimeoutException:\n return\n\n try:\n #Get the flight details from the table and traverse each of the option available\n #Append the flight details in a list p_flghtlist\n\n dtls = self.browser.find_elements_by_xpath('//tr[contains(@id, \"trSegBlock\")]')#/tr[1]/td[contains(@class, \"tdSegmentDtl\")]/div[1]/b')\n #p_flghtlist = []\n for i in dtls:\n flights = i.find_elements_by_xpath('.//td/table/tbody/tr/td[contains(@class, \"tdSegmentDtl\")]/div[1]')\n tmp_list = []\n for j in flights:\n tmp_list.append(j.find_element_by_tag_name(\"b\").text)\n\n if option == 1:\n self.p_flghtlist.append(tmp_list)\n\n # Get the fare details\n tmp_list = []\n tmp_list.append(float(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text[1:].replace(',','')))\n self.p_fare.append(tmp_list)\n\n elif option == 2:\n #self.p_flghtlist1.append(tmp_list)\n\n if tmp_list in self.p_flghtlist:\n self.p_fare[self.p_flghtlist.index(tmp_list)].append(float(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text[1:].replace(',','')))\n else:\n self.p_flghtlist.append(tmp_list)\n\n tmp_list = []\n tmp_list.append(0.0)\n tmp_list.append(float(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text[1:].replace(',','')))\n self.p_fare.append(tmp_list)\n\n\n if option == 2:\n for i in self.p_fare:\n if len(i) < 2:\n i.append(0.0)\n #self.p_flghtlist.remove(j)\n # Get the fare details\n # tmp_list = []\n # tmp_list.append(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text)\n #\tself.p_fare.append(tmp_list)\n #print self.p_dpttime\n #print self.p_arrtime\n #print self.p_arrdt\n #print self.p_fare\n #self.srchAwards()\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def srchAwards(self): #Function for searching by Award miles\n try:\n print \"Search Criteria: Award Points\"\n\n self.browser.find_element_by_xpath('//*[@id=\"ctl00_ContentInfo_Booking1_SearchBy_rdosearchby3\"]').click()\n\n print \"criteria selected\"\n self.browser.find_element_by_xpath('//*[@id=\"ctl00_ContentInfo_Booking1_btnSearchFlight\"]').click()\n\n try:\n WebDriverWait(self.browser, 50).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"rewardResults\"))\n )\n except TimeoutException:\n return\n\n dtls = self.browser.find_elements_by_xpath('//table[contains(@class, \"rewardResults\")]/tbody[2]/tr')\n\n for i in dtls:\n\n tmp_list = []\n try:\n if i.find_element_by_xpath('.//td[1]/div[contains(@class, \"divMileage\")]').text == \"\":\n tmp_list.append(0.0)\n else:\n miles = i.find_element_by_xpath('.//td[1]/div[contains(@class, \"divMileage\")]').text\n tmp_list.append(float(re.search('^[0-9]+',miles.replace(',','')).group(0)))\n\n if i.find_element_by_xpath('.//td[2]/div[contains(@class, \"divMileage\")]').text == \"\":\n tmp_list.append(0.0)\n else:\n miles = i.find_element_by_xpath('.//td[2]/div[contains(@class, \"divMileage\")]').text\n tmp_list.append(float(re.search('^[0-9]+',miles.replace(',','')).group(0)))\n\n if i.find_element_by_xpath('.//td[3]/div[contains(@class, \"divMileage\")]').text == \"\":\n tmp_list.append(0.0)\n else:\n miles = i.find_element_by_xpath('.//td[3]/div[contains(@class, \"divMileage\")]').text\n 
tmp_list.append(float(re.search('^[0-9]+',miles.replace(',','')).group(0)))\n\n if i.find_element_by_xpath('.//td[4]/div[contains(@class, \"divMileage\")]').text == \"\":\n tmp_list.append(0.0)\n else:\n miles = i.find_element_by_xpath('.//td[4]/div[contains(@class, \"divMileage\")]').text\n tmp_list.append(float(re.search('^[0-9]+',miles.replace(',','')).group(0)))\n\n self.a_awards.append(tmp_list)\n\n except NoSuchElementException:\n self.a_awards.append([0.0, 0.0, 0.0, 0.0])\n\n flights = i.find_elements_by_xpath('.//td[contains(@class, \"tdSegmentDtl\")]/div[1]')\n\n tmp_list = []\n for j in flights:\n tmp_list.append(j.find_element_by_tag_name(\"b\").text)\n\n self.a_flghtlist.append(tmp_list)\n\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n for i in range(0,len(self.p_flghtlist)):\n for j in range(0,len(self.a_flghtlist)):\n\n if self.p_flghtlist[i] == self.a_flghtlist[j]:\n #print \"success\"\n\n self.d_res[n] = [';'.join(self.p_flghtlist[i]), self.p_fare[i], self.a_awards[j]]\n\n if self.a_awards[j][0] != 0:\n self.d_res[n].append(float(self.p_fare[i][0]/self.a_awards[j][0]))\n else:\n self.d_res[n].append(0.0)\n\n if self.a_awards[j][2] != 0:\n self.d_res[n].append(float(self.p_fare[i][1]/self.a_awards[j][2]))\n else:\n self.d_res[n].append(0.0) \n\n n += 1\n #else:\n #del self.p_flghtlist[i]\n #del self.p_dpttime[i]\n #del self.p_arrtime[i]\n #del self.p_fare[i]\n #i -= 1\n print self.d_res\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n\n def writeCSV(self): # Function for writing data into results.csv file from result dictionary\n with open('results.csv','a') as csvfile:\n fieldnames = ['From','To','Flight #','Lowest Price','First Class Lowest','Coach Lowest Awards','Coach Refundable Awards','First Class Lowest Awards','First Class Refundable Awards']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for i in range(1,len(self.d_res.keys())+1):\n writer.writerow({'From':self.src, 'To':self.dest, 'Flight #':self.d_res[i][0], 'Lowest Price':self.d_res[i][1][0], 'First Class Lowest':self.d_res[i][1][1], 'Coach Lowest Awards':self.d_res[i][2][0] ,'Coach Refundable Awards':self.d_res[i][2][1],'First Class Lowest Awards':self.d_res[i][2][2],'First Class Refundable Awards':self.d_res[i][2][3]})\n\n print \"results.csv updated\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating Table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n cursor.execute('INSERT INTO Swipe.r_united ( src, dest, from_dt, flight_no,P_Coach_Lowest, P_First_Class_Lowest,A_Coach_Lowest,A_Coach_Refundable,A_First_Class_Lowest,A_First_Class_Refundable, Value_Coach_Lowest, Value_First_Class_Lowest) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\",\"%s\",\"%s\",\"%s\",\"%s\");'.format(from_dt=datetime.strptime(self.dptdt,'%m/%d/%Y').strftime('%Y-%m-%d %H:%M:%S')),[self.src,self.dest,self.d_res[i][0],self.d_res[i][1][0],self.d_res[i][1][1],self.d_res[i][2][0],self.d_res[i][2][1],self.d_res[i][2][2],self.d_res[i][2][3], self.d_res[i][3], self.d_res[i][4]])\n cursor.execute('commit;')\n\n print \"Table Updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'unitedroutes.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n 
srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n print \"End\"\n\ndef Start():\n myClassObject = getFares()\n myClassObject.fillDtls()\n myClassObject.srchPrice(1)\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.browser = webdriver.PhantomJS()\n myClassObject.fillDtls()\n myClassObject.srchPrice(2)\n #try:\n # myClassObject.browser.quit()\n #except NoSuchWindowException:\n # None\n #myClassObject.browser = webdriver.PhantomJS()\n myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.writeDb()\n\ndef delay(interval):\n print time.time()\n s.enter(1, 1, Start, ())\n time.sleep(interval*60)\n return 1\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' %argv[0]\n exit(1)\n\n #myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n\n readCSV()\n while delay(int(argv[1])):\n #s.enter(1, 1, Start, ())\n s.run()\n\n" }, { "alpha_fraction": 0.531119167804718, "alphanum_fraction": 0.5448868274688721, "avg_line_length": 40.69230651855469, "blob_id": "fa35e93095805fd0a81fca8dd254fdc2238ee99f", "content_id": "b0868796795ff7fa9c413ff9494e756fc8694f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14093, "license_type": "no_license", "max_line_length": 527, "num_lines": 338, "path": "/Hawaiian/Hawaiin.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import re\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom decimal import *\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): #Class for crawling the website\t\n\n def __init__(self):\n self.src = ''\n self.dest = ''\n\n month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n day = random.choice(range(1, 29))\n year = datetime.now().year\n self.dptdt = datetime(year, month, day).strftime('%Y-%m-%d')\n\n month = random.randint(month, (month + 9))\n day = random.choice(range(1, 29))\n if month > 12:\n year = year + 1\n month = month % 12\n\n self.retdt = datetime(year, month, day).strftime('%Y-%m-%d')\n self.p_flghtlist = []\n self.a_flghtlist = []\n self.a_awards = []\n self.p_fare = []\n self.d_res = {}\n self.browser = webdriver.PhantomJS()\n\n def signIn(self):\n\n self.browser.find_element_by_xpath('/html/body/div[1]/div[1]/nav[2]/div[1]/div/ul[2]/li/div/ul/li[1]/a').click()\n\n time.sleep(10)\n self.browser.find_element_by_xpath('//*[@id=\"login\"]/fieldset/div[1]/div/div/div/input').send_keys('[email protected]')\n self.browser.find_element_by_xpath('//*[@id=\"login\"]/fieldset/div[2]/div/div/div/input').send_keys('swipe352')\n self.browser.find_element_by_xpath('//*[@id=\"login\"]/input').click()\n\n def fillDtls(self): #Function for filling in details\n\n 
#self.browser.set_window_position(10000,10000,self.browser.current_window_handle)\n\n #self.browser = webdriver.Firefox()#PhantomJS(desired_capabilities=dcap)\n self.browser.get('https://beta.hawaiianairlines.com/book/flights') #Get response from the website\n\n try:\n self.signIn()\n\n if self.src == '' and self.dest == '':\n if random.randint(0,100) % 2 == 0:\n self.src = srclist[random.randint(0, len(srclist) - 1)]\n self.dest = destlist[random.randint(0, len(destlist) - 1)]\n else:\n self.dest = srclist[random.randint(0, len(srclist) - 1)]\n self.src = destlist[random.randint(0, len(destlist) - 1)]\n\n try:\n WebDriverWait(self.browser, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, \".ng-pristine.ng-valid.ng-valid-required\"))\n )\n except TimeoutException:\n time.sleep(10)#self.browser.close()\n\n #Enter the source value\n From = self.browser.find_element_by_xpath('//*[@id=\"multiSegment\"]/li/div[1]/div/div[1]/div/div/div/div/div[2]/input[2]') #find_element_by_css_selector(\".ng-pristine.ng-valid.ng-valid-required\")#\n From.send_keys(self.src)\n time.sleep(2)\n From.send_keys(Keys.TAB)\n\n #Enter the destination value\n To = self.browser.find_element_by_xpath('//*[@id=\"multiSegment\"]/li/div[1]/div/div[2]/div/div/div/div/div[2]/input[2]')\n To.send_keys(self.dest)\n time.sleep(2)\n To.send_keys(Keys.ENTER)\n\n #Enter the Departure Date value\n dd = self.dptdt.split(\"-\")\n ad = self.retdt.split(\"-\")\n now = datetime.now()\n amonths = (int(ad[0]) - int(dd[0]))*12 + int(ad[1]) - int(dd[1])\n dmonths = (int(dd[0]) - now.year)*12 + int(dd[1]) - now.month\n\n self.browser.find_element_by_xpath('//*[@id=\"departDate[0]\"]/div/div/div/div[2]/input').click()\n time.sleep(2)\n for x in range(0, dmonths):\n self.browser.find_element_by_css_selector(\".next\").click()\n\n for i in self.browser.find_elements_by_xpath('//*[@id=\"months\"]/li[2]/table/tbody/tr/td/div/span'):\n if i.text and int(i.text) == int(dd[2]):\n i.click()\n break\n\n #Enter the Return Date value\n self.browser.find_element_by_xpath('//*[@id=\"returnDate[0]\"]/div/div/div/div[2]/input').click()\n for x in range(0, amonths):\n self.browser.find_element_by_css_selector(\".next\").click()\n\n for i in self.browser.find_elements_by_xpath('//*[@id=\"months\"]/li[2]/table/tbody/tr/td/div/span'):\n if i.text and int(i.text) == int(ad[2]):\n i.click()\n break\n except NoSuchElementException:\n pass\n # returnDt.send_keys(self.retdt)\n #self.srchPrice()\n\n #self.browser.save_screenshot('screen.png')\n\n def srchPrice(self): #Function for Searching by Price\n print \"Search Criteria: Price\"\n\n print \"criteria selected\"\n time.sleep(1)\n\n try:\n self.browser.find_element_by_css_selector(\".btn-primary.btn-cta-search.ng-scope\").click()\n except WebDriverException:\n self.fillDtls()\n\n try:\n WebDriverWait(self.browser, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, \".flight-results\"))\n )\n except TimeoutException:\n time.sleep(10)#self.browser.close()\n\n try:\n #Get the flight details from the table and traverse each of the option available\n time.sleep(10)\n for j in self.browser.find_elements_by_xpath('.//span[contains(@ng-hide, \"detailsVisible\")]'):\n j.click()\n\n dtls = self.browser.find_elements_by_xpath('//section[contains(@id, \"result\")]')\n #p_flghtlist = []\n for i in dtls:\n flights = i.find_elements_by_xpath('.//div[@class = \"flight-details\"]/div/div/span[contains(@class, \"flight-number\")]')\n tmp_list = []\n for j in flights:\n tmp_list.append(j.text)\n 
self.p_flghtlist.append(tmp_list)\n\n # Get the fare details\n tmp_list = []\n try:\n price = i.find_element_by_xpath('.//div[contains(@class, \"seat-class\")][1]/div/a/span').text[1:]\n if price:\n tmp_list.append(float(Decimal(price.replace(',',''))))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n price = i.find_element_by_xpath('.//div[contains(@class, \"seat-class\")][3]/div/a/span').text[1:]\n if price:\n tmp_list.append(float(Decimal(price.replace(',',''))))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n try:\n price = i.find_element_by_xpath('.//div[contains(@class, \"seat-class\")][2]/div/a/span').text[1:]\n if price:\n tmp_list.append(float(Decimal(price.replace(',',''))))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n \n self.p_fare.append(tmp_list)\n\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def srchAwards(self): #Function for searching by Award miles\n\n print \"Search Criteria: Award Points\"\n\n #self.browser.find_element_by_css_selector(\".radio-label.ng-binding.no-sublabel\").click()\n time.sleep(1) \n\n try:\n self.browser.find_element_by_xpath(\"/html/body/div[1]/div[2]/div/form/div/div/div/div[3]/div[2]/div/div/div[2]/div/div[2]/div/span\").click()\n self.browser.find_element_by_css_selector(\".btn-primary.btn-cta-search.ng-scope\").click()\n\n print \"criteria selected\"\n\n time.sleep(20)\n\n for j in self.browser.find_elements_by_xpath('.//span[contains(@ng-hide, \"detailsVisible\")]'):\n j.click()\n\n dtls = self.browser.find_elements_by_xpath('//section[contains(@id, \"result\")]')\n\n for i in dtls:\n\n flights = i.find_elements_by_xpath('.//div[@class = \"flight-details\"]/div/div/span[contains(@class, \"flight-number\")]')\n tmp_list = []\n for j in flights:\n tmp_list.append(j.text)\n self.a_flghtlist.append(tmp_list)\n\n # Get the award details\n tmp_list = []\n try:\n awards = i.find_element_by_xpath('.//div[contains(@class, \"seat-class\")][1]/div/a/span').text\n if awards:\n tmp_list.append(float(re.search('^[0-9.]+',awards.replace(',','')).group(0)))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n awards = i.find_element_by_xpath('.//div[contains(@class, \"seat-class\")][2]/div/a/span').text\n if awards:\n tmp_list.append(float(re.search('^[0-9.]+',awards.replace(',','')).group(0)))\n else:\n tmp_list.append(0)\n except NoSuchElementException:\n tmp_list.append(0)\n\n self.a_awards.append(tmp_list)\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n #print self.p_fare\n #print self.p_flghtlist\n for i in range(0,len(self.p_flghtlist)):\n for j in range(0,len(self.a_flghtlist)):\n\n if self.p_flghtlist[i] == self.a_flghtlist[j]:\n #print \"success\"\n self.d_res[n] = [self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n\n if self.a_awards[j][0] != 0:\n self.d_res[n].append(self.p_fare[i][0]/self.a_awards[j][0])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][1] != 0:\n self.d_res[n].append(self.p_fare[i][1]/self.a_awards[j][1])\n else:\n self.d_res[n].append(0) \n\n n += 1\n \n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n\n def writeCSV(self): # Function for writing data into results.csv file from result dictionary\n with open('results.csv','a') as csvfile:\n fieldnames = ['From','To','Flight #','Lowest Price','First Class Lowest','Coach Lowest 
Awards','Coach Refundable Awards','First Class Lowest Awards','First Class Refundable Awards']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for i in range(1,len(self.d_res.keys())+1):\n writer.writerow({'From':self.src, 'To':self.dest, 'Flight #':self.d_res[i][0], 'Lowest Price':self.d_res[i][1][0], 'First Class Lowest':self.d_res[i][1][1], 'Coach Lowest Awards':self.d_res[i][2][0] ,'Coach Refundable Awards':self.d_res[i][2][1],'First Class Lowest Awards':self.d_res[i][2][2],'First Class Refundable Awards':self.d_res[i][2][3]})\n\n print \"results.csv updated\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating Table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n cursor.execute('INSERT INTO Swipe.r_hawaiian ( src, dest, from_dt, flight_no,P_Coach_Lowest, P_First_Class_Lowest,A_Coach_Lowest,A_First_Class_Lowest, Value_Coach_Lowest, Value_First_Class_Lowest) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");'.format(from_dt=datetime.strptime(self.dptdt,'%Y-%m-%d').strftime('%Y-%m-%d %H:%M:%S')),[self.src,self.dest,self.d_res[i][0],self.d_res[i][1][0],self.d_res[i][1][1],self.d_res[i][2][0],self.d_res[i][2][1], self.d_res[i][3], self.d_res[i][4]])\n cursor.execute('commit;')\n\n print \"Table Updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'HawaiianFlights.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\ndef Start():\n myClassObject = getFares()\n myClassObject.fillDtls()\n myClassObject.srchPrice()\n #try:\n # myClassObject.browser.close()\n #except NoSuchWindowException:\n # None\n #myClassObject.browser = webdriver.Firefox()\n myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.writeDb()\n\ndef delay(interval):\n print time.time()\n s.enter(1, 1, Start, ())\n time.sleep(interval*60)\n return 1\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' %argv[0]\n exit(1)\n\n #myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n\n readCSV()\n while delay(int(argv[1])):\n #s.enter(1, 1, Start, ())\n s.run()" }, { "alpha_fraction": 0.6982248425483704, "alphanum_fraction": 0.7136094570159912, "avg_line_length": 30.314815521240234, "blob_id": "3c729a78c4deef55478cd1e7e2100066f658d506", "content_id": "48c86c429ae5b67216441d946e8e8f7ffa103e92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1692, "license_type": "no_license", "max_line_length": 77, "num_lines": 54, "path": "/JetBlue/Crawler.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport time\nimport FormPage\nimport Flight\nimport ResultPage\nimport CSVHelper\nimport DatetimeHelper\nfrom random import randint\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions 
import WebDriverException\n\nloop = 3 # intended to run 3 times, but the decrement below is commented out\ntimeInterval = 5*60 # wait 5 minutes between runs\nwhile loop > 0:\n    driver = webdriver.Chrome()\n    #driver = webdriver.PhantomJS()\n    driver.set_window_position(1000,1000,driver.current_window_handle)\n    driver.get(\"http://www.jetblue.com/\")\n\n    destinations = CSVHelper.getDestinations()\n\n    startDate = DatetimeHelper.randomDate(time.strftime(\"%m/%d/%Y\"),5)\n    endDate = DatetimeHelper.randomDate(startDate,1)\n    \n    currentPair = randint(0,len(destinations)-1)\n    data = {\n        'from': destinations[currentPair][0],\n        'to': destinations[currentPair][1],\n        'departure_day': startDate,\n        'arrive_day' : endDate\n    }\n    try:\n        FormPage.FormPage(driver).fill_form(data).submit()\n    except WebDriverException:\n        pass\n\n    try:\n        WebDriverWait(driver, 30).until(\n            EC.presence_of_element_located((By.ID, \"resultsFFBlock1\"))\n        )\n    except TimeoutException:\n        pass\n    try:\n        allFlights = ResultPage.ResultPage(driver).collectSearchResult(data);\n        CSVHelper.writeDb(allFlights)\n    except NoSuchElementException:\n        pass\n    driver.quit()\n    #loop = loop - 1\n    time.sleep(timeInterval)" }, { "alpha_fraction": 0.5449367761611938, "alphanum_fraction": 0.5549041032791138, "avg_line_length": 38.661128997802734, "blob_id": "dcc566b9c86906c3074f8f73f038b4ebef71af71", "content_id": "e4081cb7359808ae8b0afa2b088278ac7a7a73f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11941, "license_type": "no_license", "max_line_length": 522, "num_lines": 301, "path": "/Delta/delta.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nimport httplib\nfrom decimal import *\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): #Class for crawling the website\t\n\n    def __init__(self):\n        self.src = ''\n        self.dest = ''\n\n        month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n        day = random.choice(range(1, 29))\n        year = datetime.now().year\n        self.dptdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n        month = random.randint(month, (month + 9))\n        day = random.choice(range(1, 29))\n        if month > 12:\n            year = year + 1\n            month = month % 12\n\n        self.retdt = datetime(year, month, day).strftime('%m/%d/%Y')\n        self.p_flghtlist = []\n        #self.p_flghtlist1 = []\n        self.a_flghtlist = []\n        self.a_awards = []\n        self.p_fare = []\n        self.d_res = {}\n        self.browser = webdriver.Chrome()\n\n    def fillDtls(self): #Function for filling in details\n\n        self.browser.set_window_position(1000,1000,self.browser.current_window_handle)\n        #self.browser = webdriver.Firefox()#Firefox(desired_capabilities=dcap)\n        self.browser.get('http://www.delta.com/') #Get response from the website\n\n        if self.src == '' and self.dest == '':\n            self.src = srclist[random.randint(0, len(srclist) - 1)]\n            self.dest = destlist[random.randint(0, len(destlist) - 1)]\n\n        try:\n            WebDriverWait(self.browser, 10).until(\n                
EC.presence_of_element_located((By.ID, \"originCity\"))\n )\n except TimeoutException:\n self.browser.quit()\n return\n\n #Enter the source value\n From = self.browser.find_element_by_xpath('//*[@id=\"originCity\"]')\n From.send_keys(self.src)\n From.send_keys(Keys.TAB)\n\n #Enter the destination value\n To = self.browser.find_element_by_xpath('//*[@id=\"destinationCity\"]')\n To.send_keys(self.dest)\n To.send_keys(Keys.TAB)\n\n #Enter the Departure Date value\n departDt = self.browser.find_element_by_name(\"departureDate\")#.find_element_by_xpath('//*[@id=\"departureDate\"]')\n departDt.send_keys(self.dptdt)\n\n #Enter the Return Date value\n returnDt = self.browser.find_element_by_xpath('//*[@id=\"returnDate\"]')\n returnDt.send_keys(self.retdt)\n #self.srchPrice()\n\n #self.browser.save_screenshot('screen.png')\n\n def srchPrice(self): #Function for Searching by Price\n print \"Search Criteria: Price\"\n\n try:\n #Select the radio button and click submit\n self.browser.find_element_by_xpath('//*[@id=\"cashBtn\"]').click()\n print \"criteria selected\"\n #self.browser.save_screenshot('screen.png')\n self.browser.find_element_by_xpath('//*[@id=\"findFlightsSubmit\"]').click()\n\n try:\n WebDriverWait(self.browser, 30).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"fareSectionFlightNumber\"))\n )\n\n except TimeoutException:\n #self.browser.quit()\n return\n\n #Get the flight details from the table and traverse each of the option available\n #Append the flight details in a list p_flghtlist\n\n dtls = self.browser.find_elements_by_xpath('//div[contains(@class, \"floatLeft\")]/div[contains(@class, \"contextRoot\")]')#/tr[1]/td[contains(@class, \"tdSegmentDtl\")]/div[1]/b')\n #p_flghtlist = []\n for i in dtls:\n tmp_list = []\n flights = i.find_elements_by_xpath('.//div[contains(@class, \"fareSectionFlightNumber\")]/span')\n for j in flights:\n tmp_list.append(j.text[:7])\n\n self.p_flghtlist.append(tmp_list)\n\n tmp_list = []\n\n try:\n tmp_list.append(float(Decimal(i.find_element_by_xpath('.//td[2]/div[contains(@class, \"priceHolder\")]/span').text[1:].replace(',',''))))\n except NoSuchElementException:\n tmp_list.append(0)\n except InvalidOperation:\n tmp_list.append(0)\n\n try:\n tmp_list.append(float(Decimal(i.find_element_by_xpath('.//td[3]/div[contains(@class, \"priceHolder\")]/span').text[1:].replace(',',''))))\n except NoSuchElementException:\n tmp_list.append(0)\n except InvalidOperation:\n tmp_list.append(0)\n\n self.p_fare.append(tmp_list)\n print self.p_flghtlist\n # Get the fare details\n # tmp_list = []\n # tmp_list.append(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text)\n #\tself.p_fare.append(tmp_list)\n #print self.p_dpttime\n #print self.p_arrtime\n #print self.p_arrdt\n #print self.p_fare\n #self.srchAwards()\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n except httplib.BadStatusLine:\n pass\n\n def srchAwards(self): #Function for searching by Award miles\n try:\n print \"Search Criteria: Award Points\"\n\n self.browser.find_element_by_xpath('//*[@id=\"milesBtn\"]').click()\n\n print \"criteria selected\"\n self.browser.find_element_by_xpath('//*[@id=\"findFlightsSubmit\"]').click()\n\n try:\n WebDriverWait(self.browser, 30).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"fareSectionFlightNumber\"))\n )\n except TimeoutException: \n return\n\n #self.browser.save_screenshot('screen.png')\n dtls = self.browser.find_elements_by_xpath('//div[contains(@class, \"floatLeft\")]/div')\n\n for i in dtls:\n 
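# (editor note, hedged) each result row is reduced to a list of flight\n                # numbers plus [coach_price, first_class_price]; fares rendered like\n                # \"$1,234.50\" are cleaned with the idiom used below, e.g.\n                #   float(Decimal(\"$1,234.50\"[1:].replace(',', '')))  # -> 1234.5\n                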
tmp_list = []\n flights = i.find_elements_by_xpath('.//div[contains(@class, \"fareSectionFlightNumber\")]/span')\n for j in flights:\n tmp_list.append(j.text[:7])\n\n self.a_flghtlist.append(tmp_list)\n\n tmp_list = []\n try:\n tmp_list.append(float(Decimal(i.find_element_by_xpath('.//td[2]/div[contains(@class, \"priceHolder\")]/span[1]').text.replace(',',''))))\n except NoSuchElementException:\n tmp_list.append(0)\n except:\n tmp_list.append(0)\n\n try:\n tmp_list.append(float(Decimal(i.find_element_by_xpath('.//td[3]/div[contains(@class, \"priceHolder\")]/span[1]').text.replace(',',''))))\n except NoSuchElementException:\n tmp_list.append(0)\n except:\n tmp_list.append(0)\n\n self.a_awards.append(tmp_list)\n\n print self.a_flghtlist\n\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n for i in range(0,len(self.p_flghtlist)):\n for j in range(0,len(self.a_flghtlist)):\n if self.p_flghtlist[i] == self.a_flghtlist[j]:\n #print \"success\"\n print i , j\n self.d_res[n] = [';'.join(self.p_flghtlist[i]), self.p_fare[i], self.a_awards[j]]\n \n if self.a_awards[j][0] != 0:\n self.d_res[n].append(self.p_fare[i][0]/self.a_awards[j][0])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][1] != 0:\n self.d_res[n].append(self.p_fare[i][1]/self.a_awards[j][1])\n else:\n self.d_res[n].append(0) \n\n n += 1\n #else:\n #del self.p_flghtlist[i]\n #del self.p_dpttime[i]\n #del self.p_arrtime[i]\n #del self.p_fare[i]\n #i -= 1\n print self.d_res\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n except httplib.BadStatusLine:\n pass\n\n def writeCSV(self): # Function for writing data into results.csv file from result dictionary\n with open('results.csv','a') as csvfile:\n fieldnames = ['From','To','From Date','Flight #','Lowest Price','First Class Lowest','Coach Lowest Awards','First Class Lowest Awards']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for i in range(1,len(self.d_res.keys())+1):\n writer.writerow({'From':self.src, 'To':self.dest, 'From Date':self.dptdt, 'Flight #':self.d_res[i][0], 'Lowest Price':self.d_res[i][1][0], 'First Class Lowest':self.d_res[i][1][1], 'Coach Lowest Awards':self.d_res[i][2][0] ,'First Class Lowest Awards':self.d_res[i][2][1]})\n\n print \"results.csv updated\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n cursor.execute('INSERT INTO Swipe.r_delta ( src, dest, from_dt, flight_no,P_Coach_Lowest, P_First_Class_Lowest,A_Coach_Lowest,A_First_Class_Lowest, Value_Coach_Lowest, Value_First_Class_Lowest) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");'.format(from_dt=datetime.strptime(self.dptdt,'%m/%d/%Y').strftime('%Y-%m-%d %H:%M:%S')),[self.src,self.dest,self.d_res[i][0],self.d_res[i][1][0],self.d_res[i][1][1],self.d_res[i][2][0],self.d_res[i][2][1],self.d_res[i][3],self.d_res[i][4]])\n cursor.execute('commit;')\n\n print \"Table updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'DeltaAirRoutes.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n print \"End\"\n\ndef 
Start():\n myClassObject = getFares()\n myClassObject.fillDtls()\n myClassObject.srchPrice()\n try:\n myClassObject.browser.quit()\n except WebDriverException:#NoSuchWindowException:\n None\n myClassObject.browser = webdriver.Chrome()\n myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except WebDriverException:#NoSuchWindowException:\n None\n myClassObject.writeDb()\n\ndef delay(interval):\n print time.time()\n s.enter(1, 1, Start, ())\n time.sleep(interval*60)\n return 1\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' %argv[0]\n exit(1)\n\n #myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n\n readCSV()\n while delay(int(argv[1])):\n #s.enter(1, 1, Start, ())\n s.run()\n\n" }, { "alpha_fraction": 0.5184417366981506, "alphanum_fraction": 0.5304399132728577, "avg_line_length": 40.16158676147461, "blob_id": "13294064b5fa0d6ac9f132f70f89e9bebb2280f4", "content_id": "7aba60f2812f5330ffa4e46683cdd0e87fd8762d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13504, "license_type": "no_license", "max_line_length": 506, "num_lines": 328, "path": "/Spirit/Spirit.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import re\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nfrom decimal import *\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): # Class for crawling the website\n\n def __init__(self):\n\n self.src = ''\n self.dest = ''\n\n month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n day = random.choice(range(1, 29))\n year = datetime.now().year\n self.dptdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n month = random.randint(month, (month + 6))\n day = random.choice(range(1, 29))\n if month > 12:\n year = year + 1\n month = month % 12\n\n self.retdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n # self.p_arrdt = []\n self.p_flghtlist = {}\n self.a_flghtlist = {}\n self.a_awards = []\n self.p_fare = []\n self.d_res = {}\n\n self.browser = webdriver.Firefox()\n\n def signIn(self):\n\n self.browser.find_element_by_xpath('//*[@id=\"loginlink\"]').click()\n\n self.browser.find_element_by_xpath('//*[@id=\"emailField\"]').send_keys('[email protected]')\n self.browser.find_element_by_xpath('//*[@id=\"passwordField\"]').send_keys('swipe352')\n self.browser.find_element_by_xpath('//*[@id=\"login\"]/p/button').click()\n\n time.sleep(2)\n\n def fillDtls(self): # Function for filling in details\n self.browser.set_window_size(1120, 1120) \n\n try:\n self.browser.get('https://www.spirit.com/Default.aspx') # Get response from the website\n except NoSuchWindowException:\n self.browser = webdriver.Firefox()\n\n self.signIn()\n\n try:\n WebDriverWait(self.browser, 100).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"flight_booking\"))\n )\n except TimeoutException:\n self.fillDtls()\n\n if self.src == '':\n self.src = 
srclist[random.randint(0, len(srclist) - 1)]\n            self.dest = destlist[random.randint(0, len(destlist) - 1)]    \n\n        time.sleep(10)\n        self.browser.find_element_by_xpath('//*[@id=\"booking-type\"]/div[1]/ul/li[1]/a/span[2]').click()\n        From = self.browser.find_element_by_xpath('//*[@id=\"departCityCodeSelect\"]')\n        #From.clear() \n        try: \n            From.click()\n            From.send_keys(self.src)\n            From.send_keys(Keys.TAB)\n        except Exception: # was 'except ElementNotVisible:', an undefined name; selenium's ElementNotVisibleException is not imported in this module\n            From = self.browser.find_element_by_xpath('//*[@id=\"departCityCodeSelect\"]')\n            From.click()\n            From.send_keys(self.src)\n            From.send_keys(Keys.TAB)\n\n        # Enter the destination value\n        To = self.browser.find_element_by_xpath('//*[@id=\"destCityCodeSelect\"]')\n        #To.clear()\n        To.send_keys(self.dest)\n        To.send_keys(Keys.TAB)\n\n        # Enter the Departure Date value\n        dd = self.dptdt.split(\"/\")\n        ad = self.retdt.split(\"/\")\n        now = datetime.now()\n        amonths = (int(ad[2]) - int(dd[2]))*12 + int(ad[0]) - int(dd[0])\n        dmonths = (int(dd[2]) - now.year)*12 + int(dd[0]) - now.month\n\n        self.browser.find_element_by_xpath('//*[@id=\"departDateDisplay\"]').click()\n        for x in range(0, dmonths):\n            self.browser.find_element_by_xpath('//*[@id=\"ui-datepicker-div\"]/div[2]/div/a').click()\n\n        for i in self.browser.find_elements_by_xpath('//*[@id=\"ui-datepicker-div\"]/div[2]/table/tbody/tr/td'):\n            if i.text and int(i.text) == int(dd[1]):\n                i.click()\n                break\n\n        #Enter the Return Date value\n        self.browser.find_element_by_xpath('//*[@id=\"returnDateDisplay\"]').click()\n        for x in range(0, amonths):\n            self.browser.find_element_by_xpath('//*[@id=\"ui-datepicker-div\"]/div[2]/div/a').click()\n\n        for i in self.browser.find_elements_by_xpath('//*[@id=\"ui-datepicker-div\"]/div[2]/table/tbody/tr/td'):\n            if i.text and int(i.text) == int(ad[1]):\n                i.click()\n                break\n\n    def srchPrice(self): # Function for Searching by Price\n        print \"Search Criteria: Price\"\n\n        # Select the radio button and click submit\n\n        self.browser.find_element_by_xpath('//*[@id=\"book-travel-form\"]/p/button').click()\n        print \"criteria selected\"\n\n        try:\n            WebDriverWait(self.browser, 50).until(\n                EC.presence_of_element_located((By.CLASS_NAME, \"flightOptionsSort\")) #//*[@id=\"js-matrix-departure-lowest\"]\n            )\n        except TimeoutException:\n            self.fillDtls() # was a bare attribute reference that never re-ran the form fill\n\n        try:\n            # Get the flight details from the table and traverse each of the option available\n            # Append the flight details in a list p_flghtlist\n            n = 1\n            rslts = self.browser.find_elements_by_xpath('//table[contains(@class, \"flightOptionsSort\")]')\n            for tbl in rslts:\n\n                tbl_id = tbl.get_attribute(\"id\")\n                if tbl_id == \"1\":\n                    tmp_src = self.dest\n                    tmp_dest = self.src\n                else:\n                    tmp_src = self.src\n                    tmp_dest = self.dest\n\n                dtls = tbl.find_elements_by_xpath('.//tbody[2]/tr')\n                for dtl in dtls:\n                    tmp_list = []\n                    tmp_list.append(tmp_src)\n                    tmp_list.append(tmp_dest)\n                    try:\n                        tmp_list.append(dtl.find_element_by_xpath('.//td[contains(@class, \"depart\")]').text) #//*[@id=\"market1_trip_1\"]/td[2]\n                        tmp_list.append(dtl.find_element_by_xpath('.//td[contains(@class, \"arrive\")]').text.strip('\\n'))\n                        try:\n                            tmp_list.append(float(Decimal(dtl.find_element_by_xpath('//td[contains(@class, \"bareFare\")]/div[contains(@class, \"memberFare\")]/span/label/em').text[1:])))\n                        except NoSuchElementException:\n                            
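# (editor note) a missing fare cell is recorded as 0 rather than raising,\n                            # so downstream code can test for 0 before dividing by it.\n                            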
tmp_list.append(0)\n\n self.p_flghtlist[n] = tmp_list\n n += 1\n except NoSuchElementException:\n pass\n\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def srchAwards(self): # Function for searching by Award miles\n\n try:\n print \"Search Criteria: Award Points\"\n\n self.browser.find_element_by_xpath('//*[@id=\"calendarMarket1\"]/div[2]/div[2]/ul/li[2]/label').click()\n\n print \"criteria selected\"\n n = 1\n rslts = self.browser.find_elements_by_xpath('//table[contains(@class, \"flightOptionsSort\")]')\n for tbl in rslts:\n\n tbl_id = tbl.get_attribute(\"id\")\n if tbl_id == \"1\":\n tmp_src = self.dest\n tmp_dest = self.src\n else:\n tmp_src = self.src\n tmp_dest = self.dest\n\n dtls = tbl.find_elements_by_xpath('.//tbody[2]/tr')\n for dtl in dtls:\n tmp_list = []\n tmp_list.append(tmp_src)\n tmp_list.append(tmp_dest)\n try:\n tmp_list.append(dtl.find_element_by_xpath('.//td[contains(@class, \"depart\")]').text) #//*[@id=\"market1_trip_1\"]/td[2]\n tmp_list.append(dtl.find_element_by_xpath('.//td[contains(@class, \"arrive\")]').text.strip('\\n'))\n try:\n tmp_list.append(int(re.search('^[0-9.]+',dtl.find_element_by_xpath('//td[contains(@class, \"bareFare\")]/div[contains(@class, \"memberFare\")]/span/label').text.replace(',','')).group(0)))\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n tmp_list.append(int(re.search('^[0-9.]+',dtl.find_element_by_xpath('//td[contains(@class, \"bareFare\")]/div[contains(@class, \"standardFare\")]/label').text.replace(',','')).group(0))) #//*[@id=\"market1_trip_1\"]/td[6]/div/label/em\n except NoSuchElementException:\n tmp_list.append(0)\n\n self.a_flghtlist[n] = tmp_list\n n += 1\n except NoSuchElementException:\n pass\n\n # print self.a_flghtlist\n # print self.a_awards\n\n # self.d_res[n] = [self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n\n for i in range(1, len(self.p_flghtlist)+1): # len(self.p_flghtlist)+len(self.a_flghtlist)):\n for j in range(1, len(self.a_flghtlist)+1):\n if self.p_flghtlist[i][2] == self.a_flghtlist[j][2] and self.p_flghtlist[i][3] == self.a_flghtlist[j][3] and self.p_flghtlist[i][0] == self.a_flghtlist[j][0]:# and self.p_dest[i] == self.a_dest[j]:\n \n if self.p_flghtlist[i][0] == self.src:\n self.d_res[n] = self.p_flghtlist[i]\n self.d_res[n].append(self.a_flghtlist[j][4])\n self.d_res[n].append(self.a_flghtlist[j][5])\n self.d_res[n].append(self.dptdt)\n else:\n self.d_res[n] = self.p_flghtlist[i]\n self.d_res[n].append(self.a_flghtlist[j][4])\n self.d_res[n].append(self.a_flghtlist[j][5])\n self.d_res[n].append(self.retdt)\n\n if self.a_flghtlist[j][4] != 0:\n self.d_res[n].append(self.p_flghtlist[i][4]/self.a_flghtlist[j][4])\n else:\n self.d_res[n].append(0)\n\n if self.a_flghtlist[j][5] != 0:\n self.d_res[n].append(self.p_flghtlist[i][5]/self.a_flghtlist[j][5])\n else:\n self.d_res[n].append(0) \n n += 1\n print self.d_res\n\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n cursor.execute('INSERT INTO Swipe.r_spirit ( src, dest, from_dt, flight_no,P_Standard, P_Member,A_Standard,A_Member, Value_Standard, Value_Member) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", 
\"%s\", \"%s\", \"%s\", \"%s\", \"%s\");'.format(from_dt=datetime.strptime(self.d_res[i][8],'%m/%d/%Y').strftime('%Y-%m-%d %H:%M:%S')),[self.d_res[i][0],self.d_res[i][1],[self.d_res[i][2],self.d_res[i][3]],self.d_res[i][4],self.d_res[i][5],self.d_res[i][6],self.d_res[i][7], self.d_res[i][9], self.d_res[i][10]])\n cursor.execute('commit;')\n\n print \"Table updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'SpiritAirRoutes.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\ndef Start():\n myClassObject = getFares() # ,argv[3],argv[4])\n myClassObject.fillDtls()\n myClassObject.srchPrice()\n #try:\n # myClassObject.browser.close()\n #except NoSuchWindowException:\n # None\n #myClassObject.browser = webdriver.Firefox()#executable_path='C:\\Users\\parth_000\\Desktop\\phantomjs\\bin\\phantomjs.exe')\n #myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.writeDb()\n print time.ctime()\n print \"End\"\n\ndef delay(interval):\n print time.ctime()\n s.enter(interval*60, 1, Start, ())\n #time.sleep(interval)\n return 1\n\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' % argv[0]\n exit(1)\n\n # myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n # s.enter(1, 1, Start, ())\n # s.run()\n readCSV()\n while delay(int(argv[1])):\n s.run()\n\n" }, { "alpha_fraction": 0.5049947500228882, "alphanum_fraction": 0.5180044174194336, "avg_line_length": 42.70050811767578, "blob_id": "881a18b9e3a5260c5850bc018c9bbb1abc372183", "content_id": "4ebc0acdce1db5439f36bafa59e544f884e9ca5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17220, "license_type": "no_license", "max_line_length": 753, "num_lines": 394, "path": "/AA/AA.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import re\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchWindowException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport csv\nfrom sys import argv, exit\nfrom datetime import datetime\nimport sched\nimport random\nfrom decimal import *\nimport MySQLdb\n\nsrclist = []\ndestlist = []\n\nclass getFares(): # Class for crawling the website\n\n def __init__(self):\n\n #self.srclist = []\n #self.destlist = []\n self.src = ''\n self.dest = ''\n\n month = random.choice(range(int(datetime.now().month)+1, int(datetime.now().month) + 2))\n day = random.choice(range(1, 29))\n year = datetime.now().year\n self.dptdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n month = random.randint(month, (month + 9))\n day = random.choice(range(1, 29))\n if month > 12:\n year = year + 1\n month = month % 12\n\n self.retdt = datetime(year, month, day).strftime('%m/%d/%Y')\n\n # self.p_arrdt = []\n self.p_flghtlist = []\n self.a_flghtlist = []\n self.a_awards = []\n self.p_fare = []\n self.d_res = {}\n\n self.browser = 
webdriver.PhantomJS()#(executable_path='C:\\Users\\parth_000\\Desktop\\phantomjs\\bin\\phantomjs')#Chrome(\"C:\\Users\\parth_000\\Desktop\\Alaska\\chromedriver\")\n\n def fillDtls(self): # Function for filling in details\n\n try:\n self.browser.get('http://www.aa.com/homePage.do') # Get response from the website\n except NoSuchWindowException:\n self.browser = webdriver.PhantomJS()\n\n if self.src == '':\n self.src = srclist[random.randint(0, len(srclist) - 1)]\n self.dest = destlist[random.randint(0, len(destlist) - 1)]\n\n try:\n WebDriverWait(self.browser, 100).until(\n EC.presence_of_element_located((By.ID, \"reservationFlightSearchForm.originAirport\"))\n )\n except TimeoutException:\n self.fillDtls()\n\n From = self.browser.find_element_by_xpath('//*[@id=\"reservationFlightSearchForm.originAirport\"]')\n From.clear()\n From.send_keys(self.src)\n From.send_keys(Keys.TAB)\n\n # Enter the destination value\n To = self.browser.find_element_by_xpath('//*[@id=\"reservationFlightSearchForm.destinationAirport\"]')\n To.clear()\n To.send_keys(self.dest)\n To.send_keys(Keys.TAB)\n\n # Enter the Departure Date value\n \n departDt = self.browser.find_element_by_xpath('//*[@id=\"aa-leavingOn\"]')\n departDt.clear()\n departDt.send_keys(self.dptdt)\n departDt.send_keys(Keys.TAB)\n\n # Enter the Return Date value\n returnDt = self.browser.find_element_by_xpath('//*[@id=\"aa-returningFrom\"]')\n returnDt.clear()\n returnDt.send_keys(self.retdt)\n returnDt.send_keys(Keys.TAB)\n #self.browser.save_screenshot('screen.png')\n\n def srchPrice(self): # Function for Searching by Price\n print \"Search Criteria: Price\"\n\n # Select the radio button and click submit\n\n self.browser.find_element_by_xpath('//*[@id=\"flightSearchForm.button.reSubmit\"]').click()\n print \"criteria selected\"\n\n try:\n WebDriverWait(self.browser, 150).until(\n EC.presence_of_element_located((By.ID, \"js-matrix-departure-lowest\")) #//*[@id=\"js-matrix-departure-lowest\"]\n )\n except TimeoutException:\n return\n\n try:\n # Get the flight details from the table and traverse each of the option available\n # Append the flight details in a list p_flghtlist\n\n self.browser.find_element_by_xpath('//*[@id=\"resultPerPage-departure-lowest\"]/option[3]').click()\n\n dtls = self.browser.find_elements_by_xpath('//table[contains(@id, \"js-matrix-departure-lowest\")]/tbody/tr')\n print \"test1\"\n\n tmp_list1 = []\n for j in dtls:\n tmp_list = []\n if j.get_attribute(\"id\")[-1:] == \"0\":\n\n try:\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',j.find_element_by_xpath('.//td[contains(@id, \"EconomySuperSaver\")]/label').text[1:]).group(0))))\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',j.find_element_by_xpath('.//td[contains(@id, \"EconomySuperSaverBundle1\")]/label').text[1:]).group(0))))\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',j.find_element_by_xpath('.//td[contains(@id, \"EconomySuperSaverBundle2\")]/label').text[1:]).group(0))))\n except NoSuchElementException:\n tmp_list.append(0)\n\n try:\n tmp_list.append(float(Decimal(re.search('^[0-9.]+',j.find_element_by_xpath('.//td[contains(@id, \"FirstSpecial-lowest-departure\")]/label').text[1:]).group(0))))\n except NoSuchElementException:\n tmp_list.append(0)\n\n self.p_fare.append(tmp_list)\n\n if tmp_list1:\n self.p_flghtlist.append(tmp_list1)\n tmp_list1 = []\n\n try:\n 
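# (editor sketch, hedged) fare cells render like \"$123.45 ...\" here; the idiom\n                        # below drops the \"$\" and keeps the leading number, e.g.\n                        #   float(Decimal(re.search('^[0-9.]+', '$123.45 total'[1:]).group(0)))  # -> 123.45\n                        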
tmp_list1.append(j.find_element_by_xpath('.//td[1]/span[contains(@class, \"aa-flight-number\")]/a').text)\n except NoSuchElementException:\n pass\n\n # Get the departure time and arrival time of the flights and store them in seperate lists\n\n # n = 1\n # for i in dtls:\n # self.p_dpttime.append(i.find_element_by_xpath('.//td/table/tbody/tr[1]/td[contains(@class, \"tdDepart\")]/div[2]/strong').text)\n # try:\n # self.p_arrtime.append(i.find_element_by_xpath('./td/table/tbody/tr[last()-1 ]/td[contains(@class, \"tdArrive\")]/div[2]/strong').text)\n # self.p_arrdt.append(i.find_element_by_xpath('./td/table/tbody/tr[last()-1]/td[contains(@class, \"tdArrive\")]/div[3]/b').text)\n # except NoSuchElementException:\n # self.p_arrtime.append(i.find_element_by_xpath('./td/table/tbody/tr[last()]/td[contains(@class, \"tdArrive\")]/div[2]/strong').text)\n # self.p_arrdt.append(i.find_element_by_xpath('./td/table/tbody/tr[last()]/td[contains(@class, \"tdArrive\")]/div[3]/b').text)\n\n # self.p_fare.append(i.find_element_by_xpath('.//span[contains(@class, \"fResultsPrice\")]').text)\n # print self.p_dpttime\n # print self.p_arrtime\n # print self.p_arrdt\n # print self.p_fare\n # self.srchAwards()\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n def srchAwards(self): # Function for searching by Award miles\n\n try:\n print \"Search Criteria: Award Points\"\n\n self.browser.find_element_by_xpath('//*[@id=\"booking-module-extra-content\"]/div[2]/p/a').click()\n\n #self.browser.find_element_by_xpath('//*[@id=\"ShoppingForm\"]/div[2]/a[1]').click()\n #self.browser.find_element_by_xpath('//*[@id=\"awardReservation\"]').click()\n self.browser.find_element_by_xpath('//*[@id=\"aa-tab-4\"]/a').click()\n time.sleep(3)\n\n self.browser.find_element_by_xpath('//*[@id=\"awardFlightSearchForm.datesFlexible.false\"]').click()\n\n self.browser.find_element_by_xpath('//*[@id=\"awardFlightSearchForm.button.go\"]').click()\n\n print \"criteria selected\"\n\n try:\n WebDriverWait(self.browser, 50).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"aa_selectedFlightsBox\"))\n )\n except TimeoutException:\n return\n\n try:\n self.browser.find_element_by_xpath('//*[@id=\"airlines-1-0\"]').click()\n except NoSuchElementException:\n pass\n\n for i, a in zip(self.browser.find_elements_by_xpath('//*[@id=\"awardList\"]/li'),range(0,4)):\n if not (\"Inactive\" in i.get_attribute(\"class\")):\n i.click()\n try:\n page = len(self.browser.find_elements_by_xpath('//*[@id=\"pgNt\"]/li'))\n for p in range(1,page+1):\n self.browser.find_element_by_xpath('//*[@id=\"pgNt\"]/li['+str(p)+']').click()\n dtls = self.browser.find_elements_by_xpath('//*[@class=\"aa_flightListContainer\"]/div')\n\n #dtls = sum(dtls, [])\n for j in dtls:\n tmp_list = []\n tmp_list1 = {}\n for k in j.find_elements_by_xpath('.//div[contains(@class, \"ca_flightSlice\")]'):\n tmp_list.append(k.find_element_by_xpath('.//div[2]/p[1]/a').text)\n\n #tmp_list1[a] = int(j.find_element_by_xpath('.//div[1]/div[1]/div').text[:-1])\n\n if tmp_list not in self.a_flghtlist:\n self.a_flghtlist.append(tmp_list)\n tmp_list1[a] = float(Decimal(j.find_element_by_xpath('.//div[1]/div[1]/div').text[:-1]))*1000\n self.a_awards.append(tmp_list1)\n else:\n self.a_awards[self.a_flghtlist.index(tmp_list)][a] = float(Decimal(j.find_element_by_xpath('.//div[1]/div[1]/div').text[:-1]))*1000\n #if not self.a_flghtlist:\n # self.a_flghtlist.append(tmp_list)\n #else:\n # if tmp_list in self.a_flghtlist:\n # self.a_flghtlist.append(tmp_list)\n\n except 
NoSuchElementException:\n pass\n # self.d_res[n] = [self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n\n # Compare the details obtained by both search criteria and filter out the common flight options\n # Store the required details in a dictionary\n n = 1\n\n for i in range(0, len(self.p_flghtlist)): # len(self.p_flghtlist)+len(self.a_flghtlist)):\n for j in range(0, len(self.a_flghtlist)):\n if self.p_flghtlist[i] == self.a_flghtlist[j]:# and self.p_dest[i] == self.a_dest[j]:\n # print \"success\"\n try:\n self.a_awards[j][0]\n except KeyError:\n self.a_awards[j][0] = 0\n\n try:\n self.a_awards[j][1]\n except KeyError:\n self.a_awards[j][1] = 0\n\n try:\n self.a_awards[j][2]\n except KeyError:\n self.a_awards[j][2] = 0\n\n try:\n self.a_awards[j][3]\n except KeyError:\n self.a_awards[j][3] = 0\n\n self.d_res[n] = [self.dptdt, self.p_flghtlist[i], self.p_fare[i], self.a_awards[j]]\n\n if self.a_awards[j][0] != 0:\n self.d_res[n].append(self.p_fare[i][0]/self.a_awards[j][0])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][1] != 0:\n self.d_res[n].append(self.p_fare[i][1]/self.a_awards[j][1])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][2] != 0:\n self.d_res[n].append(self.p_fare[i][2]/self.a_awards[j][2])\n else:\n self.d_res[n].append(0)\n\n if self.a_awards[j][3] != 0:\n self.d_res[n].append(self.p_fare[i][3]/self.a_awards[j][3])\n else:\n self.d_res[n].append(0)\n\n n += 1\n\n print self.d_res\n # else:\n # del self.p_flghtlist[i]\n # del self.p_dpttime[i]\n # del self.p_arrtime[i]\n # del self.p_fare[i]\n # i -= 1\n except NoSuchElementException:\n print \"Wrong Parameter Values\"\n\n # self.browser.close()\n # self.writeCSV()\n\n def writeCSV(self): # Function for writing data into results.csv file from result dictionary\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['From','To','From Date','Flight #','Coach Lowest Price','Coach Refundable Price','First Class Lowest','First Class Refundable','Coach Lowest Awards','Coach Refundable Awards','First Class Lowest Awards','First Class Refundable Awards']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # writer.writeheader()\n\n for i in range(1, len(self.d_res.keys())+1):\n # print self.d_res[i][1]\n # print self.d_res[i][1][1]\n writer.writerow({'From': self.d_res[i][3], 'To': self.d_res[i][4], 'From Date': self.d_res[i][5],'Flight #': self.d_res[i][0],\n 'Coach Lowest Price': self.d_res[i][1][0],\n 'Coach Refundable Price': self.d_res[i][1][1],\n 'First Class Lowest': self.d_res[i][1][2],\n 'First Class Refundable': self.d_res[i][1][3],\n 'Coach Lowest Awards': self.d_res[i][2][0],\n 'Coach Refundable Awards': self.d_res[i][2][1],\n 'First Class Lowest Awards': self.d_res[i][2][2],\n 'First Class Refundable Awards': self.d_res[i][2][3]})\n\n print \"results.csv updated\"\n\n def writeDb(self):\n\n mydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n cursor = mydb.cursor()\n print \"Updating Table\"\n for i in range(1, len(self.d_res.keys())+1):\n #if row[1] != \"\"\n cursor.execute('INSERT INTO Swipe.r_AA ( src, dest, from_dt, flight_no,P_Choice,P_Coice_Essn, P_Choice_First, P_First,A_Eco_MileSaver,A_Eco_Anytime,A_First_MileSaver,A_First_Anytime, Value_Choice, Value_Choice_Essn, Value_Choice_First, Value_First) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");'.format(from_dt=datetime.strptime(self.d_res[i][0],'%m/%d/%Y').strftime('%Y-%m-%d 
%H:%M:%S')),[self.src,self.dest,self.d_res[i][1],int(self.d_res[i][2][0]),int(self.d_res[i][2][1]),int(self.d_res[i][2][2]),int(self.d_res[i][2][3]),self.d_res[i][3][0],self.d_res[i][3][1],self.d_res[i][3][2],self.d_res[i][3][3],self.d_res[i][4],self.d_res[i][5],self.d_res[i][6],self.d_res[i][7]])\n cursor.execute('commit;')\n\n print \"Table Updated\"\n cursor.close()\n\ndef readCSV(): # Function for reading the input file\n filename = 'AAAirRoutes.csv'\n print \"Start\"\n with open(filename, 'rU') as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n srclist.append(row[0])\n destlist.append(row[1])\n except csv.Error as e:\n exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\ndef Start():\n myClassObject = getFares() # ,argv[3],argv[4])\n myClassObject.fillDtls()\n myClassObject.srchPrice()\n #try:\n # myClassObject.browser.close()\n #except NoSuchWindowException:\n # None\n #myClassObject.browser = webdriver.Firefox()#executable_path='C:\\Users\\parth_000\\Desktop\\phantomjs\\bin\\phantomjs.exe')\n myClassObject.fillDtls()\n myClassObject.srchAwards()\n try:\n myClassObject.browser.quit()\n except NoSuchWindowException:\n None\n myClassObject.writeDb()\n print time.ctime()\n print \"End\"\n\n\n# myClassObject.readCSV()\n\ndef delay(interval):\n print time.ctime()\n s.enter(interval*60, 1, Start, ())\n #time.sleep(interval)\n return 1\n\n\nif __name__ == \"__main__\":\n if len(argv) != 2:\n print 'usage: %s <Interval>' % argv[0]\n exit(1)\n\n # myClassObject = getFares(argv[1],argv[2],argv[3],argv[4])\n s = sched.scheduler(time.time, time.sleep)\n # s.enter(1, 1, Start, ())\n # s.run()\n readCSV()\n while delay(int(argv[1])):\n s.run()\n\n# myClassObject.fillDtls()\n" }, { "alpha_fraction": 0.5886238813400269, "alphanum_fraction": 0.5955963134765625, "avg_line_length": 43.6721305847168, "blob_id": "5a4e5e1f51b22ecfa0376a126a5a623c84da5e26", "content_id": "ff8d387c928b82a18755017966bfe547fd15165c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2725, "license_type": "no_license", "max_line_length": 119, "num_lines": 61, "path": "/JetBlue/FormPage.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.keys import Keys\nimport datetime\nclass FormPage(object):\n def __init__(self,driver):\n self.driver = driver\n\n def find_by_xpath(self,locator):\n try:\n element = WebDriverWait(self.driver, 30).until(\n EC.presence_of_element_located((By.XPATH, locator))\n )\n return element\n except TimeoutException:\n return\n\n def fill_form(self, data):\n dd = data['departure_day'].split(\"/\")\n ad = data['arrive_day'].split(\"/\")\n now = datetime.datetime.now()\n amonths = (int(ad[2]) - int(dd[2]))*12 + int(ad[0]) - int(dd[0])\n dmonths = (int(dd[2]) - now.year)*12 + int(dd[0]) - now.month\n\n try:\n self.find_by_xpath('//input[@id = \"jbBookerDepart\"]').clear()\n self.find_by_xpath('//input[@id = \"jbBookerDepart\"]').send_keys(data['from'])\n self.find_by_xpath('//input[@id = \"jbBookerDepart\"]').send_keys(Keys.ENTER)\n #print self.driver.find_element_by_css_selector(\".suggestions_wrapper\").get_attribute('innerHTML');\n\n self.find_by_xpath('//input[@id = 
\"jbBookerArrive\"]').send_keys(data['to'])\n #self.find_by_xpath('//input[@id = \"jbBookerDepart\"]').send_keys(Keys.ENTER)\n self.find_by_xpath('//li[@val = \"'+data['to']+'\"]').click()\n self.find_by_xpath('//input[@id = \"jbBookerCalendarDepart\"]').click()\n except AttributeError:\n pass\n except WebDriverException:\n pass\n\n for x in range(0, dmonths):\n self.driver.find_element_by_css_selector(\".cal_right_arrow\").click();\n #self.find_by_xpath('//a[@title = \"Next\"]').click()\n # time.sleep(1);\n self.find_by_xpath('//span[text() = \"'+str(int(dd[1]))+'\"]').click()\n \n self.find_by_xpath('//input[@id = \"jbBookerCalendarReturn\"]').click()\n \n for x in range(0, amonths):\n self.driver.find_elements_by_css_selector(\".cal_right_arrow\")[1].click();\n # time.sleep(1);\n self.find_by_xpath('//div[@class=\"calendar_wrapper cal_active\"]//span[text() = \"'+str(int(ad[1]))+'\"]').click()\n self.find_by_xpath('//label[text() = \"TrueBlue Points\"]').click()\n return self \n\n def submit(self):\n #self.driver.save_screenshot('screen.png')\n self.find_by_xpath('//input[@value = \"Find it\"]').click()\n" }, { "alpha_fraction": 0.4943181872367859, "alphanum_fraction": 0.4943181872367859, "avg_line_length": 28.38888931274414, "blob_id": "a58906624d9256d20dc31615a7252f5ad457533f", "content_id": "02cd13f8159bfed4a648cab3c3c94484226c1596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 530, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/JetBlue/Flight.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "class Flight:\n def __init__(self):\n self.fromAirport = ''\n self.to = ''\n self.date = ''\n self.flightNumber = ''\n #self.departTime = ''\n #self.arriveTime = ''\n self.dollarsBlue = ''\n self.dollarsBluePlus = ''\n self.dollarsBlueFlex = ''\n self.pointsBlue = ''\n self.pointsBluePlus = ''\n self.pointsBlueFlex = ''\n self.valueBlue = ''\n self.valueBluePlus = ''\n self.valueBlueFlex = '' \n #self.extraCharges = ''" }, { "alpha_fraction": 0.4831696152687073, "alphanum_fraction": 0.4940955638885498, "avg_line_length": 50.19208908081055, "blob_id": "adfe31d59fba4512d114398d5cc0c725b7d10425", "content_id": "b307391cdd013494ebe5f3478eeb7aca0e600080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9063, "license_type": "no_license", "max_line_length": 189, "num_lines": 177, "path": "/JetBlue/ResultPage.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport time\nimport datetime\nimport Flight\nfrom BeautifulSoup import BeautifulSoup\nimport re\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import WebDriverException\n\nclass ResultPage(object):\n def __init__(self,driver):\n self.driver = driver\n\n def collectSearchResult(self, data):\n flightsMap = {}\n allFlights = []\n d_res = {}\n #flightRows = self.driver.find_elements_by_css_selector(\".resultsArea\")\n\n time.sleep(30)\n\n if self.driver.find_elements_by_xpath('//table[contains(@class, \"resultWith\")]'):\n flightRows = self.driver.find_elements_by_xpath('//table[contains(@class, \"resultWith\")]/tbody')\n\n for i in flightRows:\n flights = ''\n tmp = i.find_elements_by_xpath('.//tr/td[1]/div/div[3]/span/span[1]')\n\n for j in range(0,len(tmp)):\n 
flights = flights + tmp[j].text + ','\n flight = Flight.Flight()\n flight.fromAirport = data['from']\n flight.to = data['to']\n flight.date = data['departure_day']\n\n if i.find_element_by_xpath('.//tr[1]/td[1]/div/div[2]').text.strip() != data['from']:\n flight.fromAirport = data['to']\n flight.to = data['from']\n flight.date = data['arrive_day']\n\n flight.flightNumber = flights\n #flight.departTime = i.find_element_by_xpath('.//tr/td[1]/div/div[1]')\n #flight.arriveTime = i.find_element_by_xpath('.//tr/td[2]/div[1]')\n flight.lowestPrice = 0\n\n try:\n flight.pointsBlue = float(i.find_element_by_xpath('.//td[4]/div/table/tbody/tr/td[2]/div/span/span[1]').text.replace('pts','').replace(',',''))\n except:\n flight.pointsBlue = 0.0\n try:\n flight.pointsBluePlus = float(i.find_element_by_xpath('.//td[5]/div/table/tbody/tr/td[2]/div/span/span[1]').text.replace('pts','').replace(',',''))\n except:\n flight.pointsBluePlus = 0.0\n try:\n flight.pointsBlueFlex = float(i.find_element_by_xpath('.//td[6]/div/table/tbody/tr/td[2]/div/span/span[1]').text.replace('pts','').replace(',',''))\n except:\n flight.pointsBlueFlex = 0.0\n #flight.extraCharges = i.find_element_by_xpath('.//tr/td[2]/div[1]')\n allFlights.append(flight)\n flightsMap[flight.flightNumber] = flight\n\n try:\n self.driver.find_element_by_xpath('//*[@id=\"filterFlightBot\"]/div[2]/div[2]/ul/li[2]/label').click()\n\n time.sleep(30)\n\n flightRows = self.driver.find_elements_by_xpath('//table[contains(@class, \"resultWith\")]/tbody')\n n = 1\n\n for i in flightRows:\n flights = ''\n tmp = i.find_elements_by_xpath('.//span[contains(@class, \"flightCode\")]')\n\n for j in range(0,len(tmp)):\n flights = flights + tmp[j].text + ','\n\n if flights in flightsMap:\n flight = flightsMap[flights]\n try:\n flight.dollarsBlue = float(i.find_element_by_xpath('.//td[4]/div/table/tbody/tr/td[2]/div/label').text[1:].replace(',',''))\n except:\n flight.dollarsBlue = 0.0\n try:\n flight.dollarsBluePlus = float(i.find_element_by_xpath('.//td[5]/div/table/tbody/tr/td[2]/div/label').text[1:].replace(',',''))\n except:\n flight.dollarsBluePlus = 0.0\n try:\n flight.dollarsBlueFlex = float(i.find_element_by_xpath('.//td[6]/div/table/tbody/tr/td[2]/div/label').text[1:].replace(',',''))\n except:\n flight.dollarsBlueFlex = 0.0\n\n if flight.pointsBlue != 0:\n flight.valueBlue = flight.dollarsBlue/flight.pointsBlue\n if flight.pointsBluePlus != 0:\n flight.valueBluePlus = flight.dollarsBluePlus/flight.pointsBluePlus \n if flight.pointsBlueFlex != 0:\n flight.valueBlueFlex = flight.dollarsBlueFlex/flight.pointsBlueFlex\n d_res[n] = flight\n n += 1\n except WebDriverException:\n pass\n\n elif self.driver.find_elements_by_xpath('//div[contains(@class, \"flight-row\")]'):\n print \"returning\"\n return\n flightRows = self.driver.find_elements_by_xpath('//div[contains(@class, \"flight-row\")]')\n\n for i in range(1,len(flightRows)):\n soup = BeautifulSoup(flightRows[i].get_attribute('innerHTML'))\n #print soup.find(class_=\"clearfix\")\n try:\n if soup:\n flights = ''\n for flightNumber in soup.findAll(\"a\", {\"class\": \"flight-number\"}):\n flights = flights + flightNumber.string.strip() + ','\n\n flight = Flight.Flight()\n flight.fromAirport = data['from']\n flight.to = data['to']\n flight.date = data['departure_day']\n if soup.find(\"span\", {\"class\": \"city\"}).string.strip() != data['from']:\n flight.fromAirport = data['to']\n flight.to = data['from']\n flight.date = data['arrive_day']\n flight.flightNumber = flights\n #flight.departTime = 
soup.find(\"time\").string.strip()\n #flight.arriveTime = soup(\"time\", limit=2)[1].string.strip()\n flight.lowestPrice = 0\n flight.pointsBlue = soup.findAll(\"span\", {\"class\": \"ptsValue\"}, limit=3)[1].text.replace('pts', '').strip() #soup.findAll(\"span\", {\"class\": \"label\"}, limit=3)[1]\n flight.pointsBlue = int(re.search(\"^[0-9.]+\", flight.points.replace(',','')).group(0))\n\n flight.pointsBluePlus = soup.findAll(\"span\", {\"class\": \"ptsValue\"}, limit=3)[2].text.replace('pts', '').strip() #soup.findAll(\"span\", {\"class\": \"label\"}, limit=3)[1]\n flight.pointsBluePlus = int(re.search(\"^[0-9.]+\", flight.points.replace(',','')).group(0))\n\n flight.pointsBlueFlex = soup.findAll(\"span\", {\"class\": \"ptsValue\"}, limit=3)[3].text.replace('pts', '').strip() #soup.findAll(\"span\", {\"class\": \"label\"}, limit=3)[1]\n flight.pointsBlueFlex = int(re.search(\"^[0-9.]+\", flight.points.replace(',','')).group(0))\n #flight.extraCharges = soup.find(\"span\", {\"class\": \"label\"}).text.replace('pts', '').strip()\n allFlights.append(flight)\n flightsMap[flight.flightNumber] = flight\n except TypeError as e:\n print e.message\n except AttributeError:\n pass\n\n print allFlights\n\n self.driver.find_element_by_xpath('//span[text() = \"Dollars\"]').click()\n\n time.sleep(1)\n flightRows1 = self.driver.find_elements_by_css_selector(\".flight-row\");\n n = 1\n for i in range(1,len(flightRows1)):\n #print flightRows1[i].get_attribute('innerHTML')\n #text = flightRows1[i].get_attribute('innerHTML')\n soup = BeautifulSoup(flightRows1[i].get_attribute('innerHTML'))\n try:\n if soup:\n flights = ''\n for flightNumber in soup.findAll(\"a\", {\"class\": \"flight-number\"}):\n flights = flights + flightNumber.string.strip() + ','\n if flights in flightsMap:\n flight = flightsMap[flights]\n flight.lowestPrice = int(soup.find(\"span\", {\"class\": \"label\"}).string.strip()[1:].replace(',',''))\n #flight.lowestPrice = int(re.search(\"^[0-9.]+\", flight.lowestPrice.replace(',',''))[1:].group(0))\n flight.refundablePrice = int(soup.findAll(\"span\", {\"class\": \"label\"}, limit=3)[1].text[1:].replace(',',''))\n d_res[n] = flight\n n += 1\n\n except TypeError:\n pass\n except AttributeError:\n pass\n #self.driver.save_screenshot('screen.png')\n return d_res\n" }, { "alpha_fraction": 0.6814159154891968, "alphanum_fraction": 0.6873156428337097, "avg_line_length": 46.30232620239258, "blob_id": "093171ec3ee7f8eaae4534459b1c2a931d0a4f07", "content_id": "fde1ace9ca6b5c72d91cb9d21912638cd51bffe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2036, "license_type": "no_license", "max_line_length": 714, "num_lines": 43, "path": "/JetBlue/CSVHelper.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import Flight\nimport csv\nfrom datetime import datetime\nimport time\nimport MySQLdb\n\n\ndef getDestinations():\n\tdestinations = []\n\twith open('JetblueFlightList.csv', 'rb') as csvfile:\n\t\tspamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\t\tfor row in spamreader:\n\t\t\tdestinations.append(row)\n\treturn destinations\n\n\ndef writeToCSV(allFlights):\n\tnow = datetime.datetime.now()\n\tfileName = now.strftime(\"%m-%d-%Y_%H_%M\")\n\twith open('flightDetails.csv', 'a') as csvfile:\n\t\tfieldnames = ['date', 'fromTime', 'toTime', 'fromAirport', 'toAirport', 'flightNumber', 'points', 'lowestPrice',\n\t\t\t\t\t 'extraCharges']\n\t\twriter = csv.DictWriter(csvfile, 
fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\tfor flight in allFlights:\n\t\t\twriter.writerow({'date': flight.date, 'fromTime': flight.departTime, 'toTime': flight.arriveTime,\n\t\t\t\t\t\t\t 'fromAirport': flight.fromAirport, 'toAirport': flight.to,\n\t\t\t\t\t\t\t 'flightNumber': flight.flightNumber, 'points': flight.points,\n\t\t\t\t\t\t\t 'lowestPrice': flight.lowestPrice, 'extraCharges': flight.extraCharges})\n\n\ndef writeDb(d_res):\n\tmydb = MySQLdb.connect(host='52.1.22.82', user='swipe', passwd='swipe12345')\n\tcursor = mydb.cursor()\n\tprint \"Updating Table\"\n\tfor i in range(1, len(d_res.keys())+1):\n\t\t# if row[1] != \"\"\n\t\tcursor.execute('INSERT INTO Swipe.r_jetblue ( src, dest, from_dt, flight_no,P_Dollars_Blue,P_Dollars_Blue_Plus,P_Dollars_Blue_Flex,A_Dollars_Blue,A_Dollars_Blue_Plus,A_Dollars_Blue_Flex,Value_Dollars_Blue,Value_Dollars_Blue_Plus,Value_Dollars_Blue_Flex) VALUES (\"%s\", \"%s\", \"{from_dt}\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");'.format(from_dt=datetime.strptime(d_res[i].date, '%m/%d/%Y').strftime('%Y-%m-%d %H:%M:%S')), [d_res[i].fromAirport, d_res[i].to, d_res[i].flightNumber, d_res[i].dollarsBlue, d_res[i].dollarsBluePlus, d_res[i].dollarsBlueFlex, d_res[i].pointsBlue, d_res[i].pointsBluePlus, d_res[i].pointsBlueFlex,d_res[i].valueBlue, d_res[i].valueBluePlus, d_res[i].valueBlueFlex])\n\t\tcursor.execute('commit;')\n\n\n\tprint \"Table Updated\"\n\tcursor.close()\n" }, { "alpha_fraction": 0.6230216026306152, "alphanum_fraction": 0.6388489007949829, "avg_line_length": 26.799999237060547, "blob_id": "afb9889119ee0d63a4044d717e2df2bae34e242d", "content_id": "3870e76e9299b4bd10427edcecf8ff58ee35c61e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 82, "num_lines": 25, "path": "/JetBlue/DatetimeHelper.py", "repo_name": "swipeapp/DataMining", "src_encoding": "UTF-8", "text": "import random\nimport time\nimport datetime\n\ndef strTimeProp(start, end, format, prop):\n stime = time.mktime(time.strptime(start, format))\n etime = time.mktime(time.strptime(end, format))\n\n ptime = stime + prop * (etime - stime)\n\n return time.strftime(format, time.localtime(ptime))\n\ndef add_month(start_time, months): \n format = '%m/%d/%Y'\n ret = time.strptime(start_time, format)\n t = list(ret)\n t[1] += months\n if t[1] > 12:\n t[0] += 1 + int(months / 12)\n t[1] %= 12\n return time.strftime(format,time.localtime(time.mktime(tuple(t))))\n\n\ndef randomDate(start,month):\n return strTimeProp(start, add_month(start,month), '%m/%d/%Y', random.random())\n" } ]
13
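The scraper above turns scraped strings like "7,500pts" and "$1,234" into floats and derives a dollars-per-point ratio. A minimal standalone sketch of that parsing logic; `parse_points`, `parse_dollars`, and `dollars_per_point` are hypothetical helper names, not functions from the repo:

```python
def parse_points(text):
    """Convert a scraped string like '7,500pts' to a float; 0.0 on failure."""
    try:
        return float(text.replace('pts', '').replace(',', '').strip())
    except ValueError:
        return 0.0

def parse_dollars(text):
    """Convert a scraped string like '$1,234' to a float; 0.0 on failure."""
    try:
        return float(text.lstrip('$').replace(',', '').strip())
    except ValueError:
        return 0.0

def dollars_per_point(dollars, points):
    """Value metric used above (dollars/points), guarding division by zero."""
    return dollars / points if points else 0.0

assert parse_points('7,500pts') == 7500.0
assert parse_dollars('$1,234') == 1234.0
assert dollars_per_point(150.0, 7500.0) == 0.02
```

Centralizing the parsing like this avoids repeating the `replace('pts','')`/`replace(',','')` chains inside each try/except in the scraper.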
jess-daniel/cs-module-project-hash-tables
https://github.com/jess-daniel/cs-module-project-hash-tables
8b784c9e802188de1cadcb3b9d5447abef9dd4f8
083448c3e0d16cf258eda75ba9eb506423dc801b
73481b16a20a6ba064afd9b6b29b05063aac6b87
refs/heads/master
2022-12-10T13:26:43.894223
2020-08-06T21:09:46
2020-08-06T21:09:46
284,772,139
0
0
null
2020-08-03T18:06:55
2020-08-06T21:09:49
2020-09-01T15:57:12
Python
[ { "alpha_fraction": 0.6500810980796814, "alphanum_fraction": 0.6571119427680969, "avg_line_length": 25.797101974487305, "blob_id": "81042caf4b6fa29b160d466964fc4a6c29322f08", "content_id": "087ac73362ecc083b3cac08169628e4de03aef68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1849, "license_type": "no_license", "max_line_length": 78, "num_lines": 69, "path": "/applications/markov/markov.py", "repo_name": "jess-daniel/cs-module-project-hash-tables", "src_encoding": "UTF-8", "text": "import random\n\n# Understand:\n\n# Plan:\n# Read input.txt and spit into words\n# Analyze the text, building up the dataset\n# choose a random \"start word\" to begin\n# start word starts with \" or first or second starts with capital letter\n# Make a list of start words\n# loop through, choose a random following word\n# stop at a stop word\n# word ends with a . ? ! ... or second to last character\n\n# why a hashtable? Good way to relate info, associate key/value pair\n# frequent lookups, key: word, value: list of words that can follow that word\n\n# Read in all the words in one go\nwith open(\"input.txt\") as f:\n words = f.read()\n # print(words) -> prints whole text\n # split into words\nsplit_words = words.split()\n\n\n# TODO: analyze which words can follow other words\n# Your code here\ndataset = {}\n\nfor i in range(len(split_words) - 1):\n word = split_words[i]\n next_word = split_words[i + 1]\n\n # if word is not in the dataset, add it in\n if word not in dataset:\n dataset[word] = [next_word]\n # word is in dataset, so add to the list\n else:\n dataset[word].append(next_word)\n\n# Make a list of start words\nstart_words = []\nfor key in dataset.keys():\n if key[0].isupper() or len(key) > 1 and key[1].isupper():\n start_words.append(key)\n\nword = random.choice(start_words)\n\n\n# TODO: construct 5 random sentences\n# Your code here\n# TODO: wrap around another loop to get 5 random sentences\n\nstopped = False\nstop_signs = \"?.!\"\ncount = 0\n\nwhile count < 5:\n count += 1\n stopped = False\n print(count)\n while not stopped:\n print(word, end=\" \")\n # if stop word, stop\n if word[-1] in stop_signs or len(word) > 1 and word[-2] in stop_signs:\n stopped = True\n # choose a randon following word\n following_words = dataset[word]\n word = random.choice(following_words)\n" }, { "alpha_fraction": 0.5967976450920105, "alphanum_fraction": 0.5967976450920105, "avg_line_length": 26.479999542236328, "blob_id": "c6a6df5caedb674f52123437c41b1b5e0b71b49c", "content_id": "bad095d80c00d307ec875374e33c19a680fed0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "no_license", "max_line_length": 85, "num_lines": 25, "path": "/applications/word_count/word_count.py", "repo_name": "jess-daniel/cs-module-project-hash-tables", "src_encoding": "UTF-8", "text": "cache = {}\n\nchars_to_ignore = \":;,.-+=/'\"\n\n\ndef word_count(s):\n # Your code here\n # returns a words with their counts\n words = s.split()\n print(words)\n for character in chars_to_ignore:\n # error: list has no object replace\n new_words = words.replace(character, \"\")\n return new_words\n print(new_words)\n\n\nword_count(\"Hello, my cat. And my cat doesn't say hello back.\")\n\n# if __name__ == \"__main__\":\n# print(word_count(\"\"))\n# print(word_count(\"Hello\"))\n# print(word_count('Hello, my cat. 
And my cat doesn\\'t say \"hello\" back.'))\n# print(word_count(\n# 'This is a test of the emergency broadcast network. This is only a test.'))\n" } ]
2
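The word_count exercise above builds a word-frequency table with a hash table. A compact standalone reference version of the same idea, using a plain dict (this is a sketch, not the student's solution; the constant name is illustrative):

```python
# Standalone word-frequency sketch mirroring the exercise above.
CHARS_TO_IGNORE = ':;,.-+=/\'"'

def word_count(s):
    # Strip punctuation from the string itself (str.replace, not list.replace).
    for ch in CHARS_TO_IGNORE:
        s = s.replace(ch, '')
    counts = {}
    for word in s.lower().split():
        counts[word] = counts.get(word, 0) + 1
    return counts

print(word_count("Hello, my cat. And my cat doesn't say hello back."))
# {'hello': 2, 'my': 2, 'cat': 2, 'and': 1, 'doesnt': 1, 'say': 1, 'back': 1}
```

The dict gives O(1) average lookups per word, which is exactly why the module frames word counting as a hash-table application.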
Bobinoza/Projeto-input
https://github.com/Bobinoza/Projeto-input
d24219244a03f86372b2c12b1a835bbf0510d5f2
f72cefe77d003c5b8fc9b4ac3c6c7fed649d4d9e
08a99d32e8aa3d72eb525c735b0e1ab82da5a590
refs/heads/master
2020-03-20T03:35:07.212137
2018-10-04T23:30:50
2018-10-04T23:30:50
137,149,558
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.547855794429779, "alphanum_fraction": 0.5828757882118225, "avg_line_length": 39.5987663269043, "blob_id": "0212e01e96c0d51f29ee00e0ff5535f74f583af1", "content_id": "c68ff1e1fc7929e223cbb0e03f0f5e99cb48a383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6764, "license_type": "no_license", "max_line_length": 128, "num_lines": 162, "path": "/Projeto Lírios.py", "repo_name": "Bobinoza/Projeto-input", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter.ttk import Notebook\r\n\r\n\r\nclass Main:\r\n def __init__(self, master):\r\n self.master = master\r\n self.abas = Notebook()\r\n self.frame_aba1 = Frame(self.abas) # Aba superior 'Débitos'.\r\n self.frame_aba2 = Frame(self.abas) # Aba 'REALIZAR PEDIDOS'.\r\n self.frame_aba3 = Frame(self.abas) # Aba 'ENTREGAS'\r\n\r\n self.abas.add(self.frame_aba1, text='Débitos') # Nomeando as abas.\r\n self.abas.add(self.frame_aba2, text='Realizar Pedido') # Nomeando as abas.\r\n self.abas.add(self.frame_aba3, text='Entregas') # Nomeando as abas.\r\n self.abas.pack(anchor=W)\r\n\r\n # Início quadrado ADICIONAR DÉBITO.\r\n self.frame = LabelFrame(self.frame_aba1, text='ADICIONAR DÉBITO')\r\n self.frame.grid(row=0, column=0, padx=20, pady=20)\r\n\r\n Label(self.frame, text='Nome :').grid(row=1, column=1)\r\n self.name = Entry(self.frame)\r\n self.name.grid(row=1, column=2, padx=10)\r\n\r\n Label(self.frame, text='Data de Nascimento :').grid(row=2, column=1)\r\n self.datanasci = Entry(self.frame)\r\n self.datanasci.grid(row=2, column=2)\r\n\r\n Label(self.frame, text='CPF :').grid(row=3, column=1)\r\n self.cpf = Entry(self.frame)\r\n self.cpf.grid(row=3, column=2)\r\n\r\n Label(self.frame, text='Data do Débito :').grid(row=4, column=1)\r\n self.datdeb = Entry(self.frame)\r\n self.datdeb.grid(row=4, column=2)\r\n\r\n Label(self.frame, text='Valor do Débito :').grid(row=5, column=1)\r\n self.valordeb = Entry(self.frame)\r\n self.valordeb.grid(row=5, column=2)\r\n\r\n self.btn1 = Button(self.frame, text='ENVIAR', command=self.enviar_nome)\r\n self.btn1.grid(row=6, column=2, pady=10)\r\n\r\n # Início quadrado Buscar/Deletar Débitos.\r\n self.frame2 = LabelFrame(self.frame_aba1, text='BUSCAR/DELETAR DÉBITOS')\r\n self.frame2.grid(row=1, column=0)\r\n\r\n Label(self.frame2, text='Buscar por Nome :').grid(row=1, column=1)\r\n self.busnome = Entry(self.frame2)\r\n self.busnome.grid(row=1, column=2, padx=10)\r\n\r\n Label(self.frame2, text='Buscar por CPF :').grid(row=2, column=1)\r\n self.buscpf = Entry(self.frame2)\r\n self.buscpf.grid(row=2, column=2)\r\n\r\n Label(self.frame2, text='Buscar Pessoa').grid(row=3, column=1)\r\n\r\n Label(self.frame2, text='Deletar Débito').grid(row=3, column=2)\r\n\r\n self.btn2 = Button(self.frame2, text='BUSCAR')\r\n self.btn2.grid(row=4, column=1, pady=10)\r\n\r\n self.btn3 = Button(self.frame2, text='DELETAR', command=self.deletar_nome)\r\n self.btn3.grid(row=4, column=2)\r\n\r\n # Início tabela Treeview.\r\n\r\n self.tree = ttk.Treeview(self.frame_aba1, height=20, columns=('Nº', 'Nome', 'Valor do Débito', 'Data do Débito', 'CPF'))\r\n self.tree.grid(row=0, column=1, rowspan=10)\r\n self.tree.heading('#0', text='Nº', anchor=W)\r\n self.tree.heading('#1', text='Nome', anchor=W)\r\n self.tree.heading('#2', text='Valor do Débito', anchor=W)\r\n self.tree.heading('#3', text='Data do Débito', anchor=W)\r\n self.tree.heading('#4', text='CPF', anchor=W)\r\n self.tree.heading('#5', text='Data Nascimento', 
anchor=W)\r\n self.tree.column('#0', minwidth=50, width=80)\r\n self.tree.column('#1', minwidth=50, width=250)\r\n self.tree.column('#2', minwidth=50, width=150)\r\n self.tree.column('#3', minwidth=50, width=150)\r\n self.tree.column('#4', minwidth=100, width=200)\r\n self.tree.column('#5', minwidth=50, width=150)\r\n self.treeview = self.tree\r\n self.i = 1\r\n\r\n # Scrollbar da tabela Treeview.\r\n self.yscroll = Scrollbar(self.frame_aba1, orient=VERTICAL)\r\n self.tree['yscrollcommand'] = self.yscroll.set\r\n self.yscroll['command'] = self.tree.yview()\r\n self.yscroll.grid(row=0, column=1, rowspan=10, sticky=N+S+E)\r\n\r\n ############################## ABA REALIZAR PEDIDO #########################################\r\n\r\n self.frame3 = LabelFrame(self.frame_aba2, text='INFORMAÇÕES DO PEDIDO')\r\n self.frame3.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n Label(self.frame3, text='Nome :').grid(row=0, column=0)\r\n self.nameframe3 = Entry(self.frame3)\r\n self.nameframe3.grid(row=0, column=1)\r\n\r\n Label(self.frame3, text='Entregar Dia :').grid(row=1, column=0)\r\n self.entdia = Entry(self.frame3)\r\n self.entdia.grid(row=1, column=1)\r\n\r\n Label(self.frame3, text='Endereço :').grid(row=0, column=2)\r\n self.ende = Entry(self.frame3)\r\n self.ende.grid(row=0, column=3)\r\n\r\n Label(self.frame3, text='Bairro :').grid(row=1, column=2)\r\n self.bairro = Entry(self.frame3)\r\n self.bairro.grid(row=1, column=3)\r\n\r\n Label(self.frame3, text='CEP :').grid(row=0, column=4)\r\n self.cep = Entry(self.frame3)\r\n self.cep.grid(row=0, column=5)\r\n\r\n Label(self.frame3, text='Ponto de Referência :').grid(row=1, column=4)\r\n self.pontodr = Entry(self.frame3)\r\n self.pontodr.grid(row=1, column=5)\r\n\r\n Label(self.frame3, text='Fone 1 :').grid(row=0, column=6)\r\n self.fone1 = Entry(self.frame3)\r\n self.fone1.grid(row=0, column=7)\r\n\r\n Label(self.frame3, text='Fone 2 :').grid(row=1, column=6)\r\n self.fone2 = Entry(self.frame3)\r\n self.fone2.grid(row=1, column=7, padx=10, pady=10)\r\n\r\n self.frame4 = LabelFrame(self.frame_aba2, text='INFORME AQUI OS PRODUTOS DO PEDIDO')\r\n self.frame4.grid(row=1, column=0)\r\n\r\n self.entradap = Text(self.frame4)\r\n self.entradap.grid(row=2, column=0, padx=10, pady=10)\r\n\r\n self.btn4 = Button(self.frame4, text='ENVIAR')\r\n self.btn4.grid(row=3, column=0, pady=10)\r\n\r\n # Comandos da aba Débitos\r\n def enviar_nome(self):\r\n self.treeview.insert('', 'end', text='Nº '+str(self.i), values=(self.name.get(), self.valordeb.get(),\r\n self.datdeb.get(), self.cpf.get(),\r\n self.datanasci.get()))\r\n self.name.delete(0, 'end')\r\n self.valordeb.delete(0, 'end')\r\n self.datdeb.delete(0, 'end')\r\n self.cpf.delete(0, 'end')\r\n self.datanasci.delete(0, 'end')\r\n self.i = self.i + 1\r\n\r\n def deletar_nome(self):\r\n self.selected_item = self.tree.selection() # get selected item\r\n self.tree.delete(self.selected_item)\r\n\r\n\r\nins = Main\r\nroot = Tk()\r\nMain(root)\r\nroot.title('Lírios Produtos de Limpeza')\r\nroot.wm_iconbitmap('lirios.ico') # Create Logo\r\nroot.geometry('1366x768+0+0')\r\nroot.mainloop()\r\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 24.5, "blob_id": "c5545257e810f6f62900f377463171502e385777", "content_id": "fe0dd8043983bb13e0b33643b25fa6c1c344d102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", 
"repo_name": "Bobinoza/Projeto-input", "src_encoding": "UTF-8", "text": "# Projeto-input\nPrograma para adicionar devedores.\n" } ]
2
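The Débitos tab above hinges on one pattern: `Treeview.insert` to add a row and `Treeview.delete` on the current selection to remove it. A minimal standalone sketch of just that pattern (widget names and sample values here are illustrative, not taken from the app):

```python
# Minimal ttk.Treeview insert/delete pattern, as used by the Débitos tab above.
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=('nome', 'valor'), show='headings')
tree.heading('nome', text='Nome')
tree.heading('valor', text='Valor do Débito')
tree.pack(fill='both', expand=True)

tree.insert('', 'end', values=('Fulano', 'R$ 10,00'))  # add a row

def deletar_selecionado():
    # tree.selection() returns the item ids of the selected rows
    for item in tree.selection():
        tree.delete(item)

tk.Button(root, text='DELETAR', command=deletar_selecionado).pack()
root.mainloop()
```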
architasurai22/CS50
https://github.com/architasurai22/CS50
b071ce08071fa7d4585f3120b733e3cf332e0f50
f85a56261ded1cfc94a97326eb6a4e91033992c6
7faf8242ef2997014c60582de95f3c45401d9f3d
refs/heads/master
2021-01-25T08:12:42.193485
2017-07-17T16:23:14
2017-07-17T16:23:14
93,726,658
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.37747034430503845, "alphanum_fraction": 0.38735178112983704, "avg_line_length": 15.322580337524414, "blob_id": "13edff828b8b70cf672b4fc979d85877db809e1c", "content_id": "d5c19f8428098f0f849b263075039d4cb7ce843c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 506, "license_type": "no_license", "max_line_length": 36, "num_lines": 31, "path": "/initials.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <ctype.h>\n#include <string.h>\n\nint main(void)\n{\n string s;\n int i;\n char c;\n \n s = get_string();\n c = s[0];\n if(islower(c))\n printf(\"%c\",toupper(c));\n else\n printf(\"%c\",c);\n for(i = 1;i < strlen(s);i++)\n {\n if(s[i] == ' ')\n {\n c = s[i+1];\n if(islower(c))\n printf(\"%c\",toupper(c));\n else\n printf(\"%c\",c);\n i++;\n }\n }\n printf(\"\\n\");\n}\n" }, { "alpha_fraction": 0.41789939999580383, "alphanum_fraction": 0.4741124212741852, "avg_line_length": 20.109375, "blob_id": "301512d366c4300b92181720170110e42ffa51a4", "content_id": "40127c5755f6d0adf50b656bf9ea3b584ecdc4cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 101, "num_lines": 64, "path": "/recover.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdint.h>\n#include <stdlib.h>\n#include <string.h> \n \n \n int main(int argc, char *argv[])\n {\n if(argc!=2)\n {\n fprintf(stderr,\"./recover image\\n\");\n return 1;\n }\n char* infile;\n infile = argv[1];\n \n \n FILE* file1;\n \n char filename[10];\n file1 = fopen(infile, \"r\");\n \n if (file1 == NULL)\n {\n fprintf(stderr,\"Error opening the input file\\n\");\n return 1;\n }\n \n uint8_t buffer[512];\n uint8_t jpeg1[4] = {0xff, 0xd8, 0xff, 0xe0};\n\tuint8_t jpeg2[4] = {0xff, 0xd8, 0xff, 0xe1};\n uint8_t jpeg3[4];\n int count = 0;\n FILE *file2 = NULL;\n \n // Iterate over file contents\n while (fread(buffer,512, 1, file1))\n {\n jpeg3[0] = buffer[0];\n jpeg3[1] = buffer[1];\n jpeg3[2] = buffer[2];\n jpeg3[3] = buffer[3];\n if((memcmp(jpeg1, jpeg3, sizeof(jpeg3)) == 0 ) || (memcmp(jpeg2, jpeg3, sizeof(jpeg3)) == 0))\n {\n if (file2 != NULL)\n fclose(file2);\n \n \n sprintf(filename, \"%03d.jpg\", count);\n count++;\n file2 = fopen(filename, \"w\");\n }\n \n if (file2 != NULL)\n fwrite(buffer,512, 1, file2);\n }\n \n if (file2 != NULL)\n fclose(file2);\n \n fclose(file1);\n \n return 0;\n } \n" }, { "alpha_fraction": 0.3498622477054596, "alphanum_fraction": 0.3677685856819153, "avg_line_length": 17.615385055541992, "blob_id": "54b9887f01c75e30edb331ac5315d01c7a83413f", "content_id": "18d5cbb8dc0b0d3d655f564e6df448d2812fec36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 726, "license_type": "no_license", "max_line_length": 56, "num_lines": 39, "path": "/caesar.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <ctype.h>\n#include <string.h>\n#include <stdlib.h>\n\nint main(int argc, string argv[])\n{\n if(argc!=2)\n {\n printf(\"error\\n\");\n return 1;\n }\n int i;\n printf(\"plaintext: \");\n string s = get_string();\n string k = argv[1];\n int n = atoi(k);\n n = n%26;\n \n for(i = 0;i < strlen(s);i++)\n {\n if(isalpha(s[i]))\n {\n if(isupper(s[i]))\n {\n s[i] = ((((s[i] -'A') + n) % 26) + 'A');\n }\n \n if(islower(s[i]))\n {\n 
s[i] = ((((s[i] -'a') + n) % 26) + 'a');\n }\n }\n }\n\n printf(\"ciphertext: %s\\n\",s);\n return 0;\n}\n" }, { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.5291807055473328, "avg_line_length": 22.447368621826172, "blob_id": "0b3d516ea546977e65967d18ad8919e074bb4ecd", "content_id": "18de53464f71cf2d4eb1e0b47b31f7fad6a6eb43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 72, "num_lines": 76, "path": "/whodunit.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"bmp.h\"\n\nint main(int argc, char* argv[])\n{\n // ensure proper usage\n if (argc != 3)\n {\n fprintf(stderr, \"Usage: ./whodunit infile outfile\\n\");\n return 1;\n }\n\n char* file1 = argv[1];\n char* file2 = argv[2];\n \n FILE* ptr1 = fopen(file1, \"r\");\n if (ptr1 == NULL)\n {\n fprintf(stderr,\"Unable to open input file.\\n\");\n return 1;\n }\n FILE* ptr2 = fopen(file2, \"w\");\n if (ptr2 == NULL)\n {\n fclose(ptr1);\n fprintf(stderr, \"Unable to open output file.\\n\");\n return 1;\n }\n BITMAPFILEHEADER b1;\n fread(&b1, sizeof(BITMAPFILEHEADER), 1, ptr1);\n\n BITMAPINFOHEADER b2;\n fread(&b2, sizeof(BITMAPINFOHEADER), 1, ptr1);\n\n if (b1.bfType != 0x4d42 || b1.bfOffBits != 54 || b2.biSize != 40 || \n b2.biBitCount != 24 || b2.biCompression != 0)\n {\n fclose(ptr1);\n fclose(ptr2);\n fprintf(stderr, \"File format not supported.\\n\");\n return 1;\n }\n\n fwrite(&b1, sizeof(BITMAPFILEHEADER), 1, ptr2);\n\n fwrite(&b2, sizeof(BITMAPINFOHEADER), 1, ptr2);\n \n int height = abs(b2.biHeight);\n int width = abs(b2.biWidth);\n\n int padding = (4 - (b2.biWidth * sizeof(RGBTRIPLE)) % 4) % 4;\n \n RGBTRIPLE triple;\n \n for (int i = 0;i < height; i++)\n {\n for (int j = 0; j < width; j++)\n {\n fread(&triple, sizeof(RGBTRIPLE), 1, ptr1);\n triple.rgbtGreen = 0x000000;\n triple.rgbtBlue = 0x000000;\n \n fwrite(&triple, sizeof(RGBTRIPLE), 1, ptr2);\n }\n }\n fseek(ptr1, padding, SEEK_CUR);\n for (int k = 0; k < padding; k++)\n fputc(0x00, ptr2);\n\n fclose(ptr1);\n fclose(ptr2);\n return 0;\n}\n" }, { "alpha_fraction": 0.37789201736450195, "alphanum_fraction": 0.4061696529388428, "avg_line_length": 16.68181800842285, "blob_id": "2aa363527ea9fcd1168f10bcb6ac692ee00f5b49", "content_id": "3b5f638e58c3108ff8292ed282b688c9b8f21a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 389, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/mario.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include<stdio.h>\n\nint main(void)\n{\n int n,i,j,k;\n \n do\n {\n printf(\"Enter a non-negetive integer number no greater than 23 : \");\n n = get_int();\n }\n while((n>23)||(n<0));\n for(i = 1;i <= n;i++)\n {\n for(j = 1;j <= n-i;j++)\n printf(\" \");\n for(k = 1;k <= i+1;k++)\n printf(\"#\");\n printf(\"\\n\");\n }\n}\n" }, { "alpha_fraction": 0.35049834847450256, "alphanum_fraction": 0.37873753905296326, "avg_line_length": 16.200000762939453, "blob_id": "0341a657dcf9b0402ada90c61c576b530eeefd49", "content_id": "cc803f9379afc47b48d13455cece1cab12d2f6f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 602, "license_type": "no_license", "max_line_length": 31, "num_lines": 35, "path": "/greedy.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", 
"text": "#include <cs50.h>\n#include<stdio.h>\n\nint main(void)\n{\n float change;\n int cents,count=0;\n printf(\"Change : \");\n change = get_float();\n cents = change*100;\n while(cents)\n {\n if(cents>25)\n {\n cents = cents - 25;\n count++;\n }\n else if(cents>10)\n {\n cents = cents - 10;\n count++;\n }\n else if(cents>5)\n {\n cents = cents - 5;\n count++;\n }\n else\n {\n cents = cents - 1;\n count++;\n }\n }\n printf(\"%d\\n\",count);\n}\n" }, { "alpha_fraction": 0.38985738158226013, "alphanum_fraction": 0.4278922379016876, "avg_line_length": 21.535715103149414, "blob_id": "cf6b25f31cb04c3c768d391b1230f02baeffe9ba", "content_id": "3de2afcd29dc78b890421def10e231672f90b782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 48, "num_lines": 28, "path": "/greedy.py", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "import cs50\n\ndef main():\n while True:\n print(\"O hai! How much change is owed?\")\n change = cs50.get_float()\n if(change>=0):\n break\n cents = change * 100\n count = 0\n while cents:\n if(cents>=25):\n cents = cents - 25\n count = count + 1\n elif(cents>=10):\n cents = cents - 10\n count = count + 1\n elif(cents>=5):\n cents = cents - 5\n count = count + 1\n else:\n cents = cents - 1\n count = count + 1\n print(\"{}\".format(count))\n \n \nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.3598901033401489, "alphanum_fraction": 0.3956044018268585, "avg_line_length": 21.75, "blob_id": "7666a429c7ca92a3e4fd5206193a93ee5bce8ff4", "content_id": "ea3cc4d7515e4be1dc25f12a33009c903d6f18f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 34, "num_lines": 16, "path": "/mario.py", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "import cs50\n\ndef main():\n while True:\n print(\"Height: \",end = \"\")\n n = cs50.get_int()\n if(n<=23 and n>=0):\n break\n for i in range (1,n+1):\n for j in range (1,n-i+1):\n print(\" \",end = \"\")\n for k in range (1,i+1):\n print(\"#\",end = \"\")\n print(\"#\")\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.29579833149909973, "avg_line_length": 20.25, "blob_id": "acbf8330816346fcae2ac05f248c83ad06b0a4d1", "content_id": "550ad362baaf13ce28e8a604c6de9a348f2c40c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1190, "license_type": "no_license", "max_line_length": 56, "num_lines": 56, "path": "/vigenere.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n\nint main(int argc, string argv[])\n{\n string k = argv[1];\n int i;\n if(argc!=2)\n {\n printf(\"error\\n\");\n return 1;\n }\n for(i = 0;i < strlen(k);i++)\n if(!isalpha(k[i]))\n {\n printf(\"error\\n\");\n return 1;\n }\n \n \n printf(\"plaintext: \");\n string s = get_string();\n int counter = 0;\n int n = strlen(k),a;\n char c;\n \n for(i = 0;i < strlen(s);i++)\n {\n if(isalpha(s[i]))\n {\n if(isupper(s[i]))\n {\n c = k[counter%n];\n if(isupper(c))\n c = c - 'A';\n else\n c = c - 'a';\n a = c;\n s[i] = ((((s[i] -'A') + a) % 26) + 'A');\n counter++;\n }\n if(islower(s[i]))\n {\n c = k[counter%n];\n if(isupper(c))\n c = c - 'A';\n else\n c = c - 'a';\n a = c;\n s[i] = ((((s[i] -'a') + a) % 26) + 'a');\n counter++;\n }\n }\n }\n printf(\"ciphertext: 
%s\\n\",s);\n return 0;\n}\n" }, { "alpha_fraction": 0.341085284948349, "alphanum_fraction": 0.3736433982849121, "avg_line_length": 21.925926208496094, "blob_id": "072a519e4eb0954c0b9e54404bc9b07b74bef338", "content_id": "944c897401a5d8b38f56f517253d930d05c6ed05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 54, "num_lines": 27, "path": "/caesar.py", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "import cs50\nimport sys\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Error!\")\n exit(1)\n k = int(sys.argv[1])\n t = []\n print(\"plaintext: \",end = \"\")\n s = cs50.get_string()\n k = k%26\n for c in s:\n if(c.isalpha):\n if(c.isupper()):\n a = chr(((ord(c) - 65 + k) % 26) + 65)\n t.append(a)\n elif(c.islower()):\n a = chr(((ord(c) - 97 + k) % 26) + 97)\n t.append(a)\n else:\n t.append(c)\n print(\"ciphertext: \",end = \"\")\n print(\"\".join(t))\n\nif __name__ == \"__main__\":\n main()\n \n \n" }, { "alpha_fraction": 0.557894766330719, "alphanum_fraction": 0.5947368144989014, "avg_line_length": 16.272727966308594, "blob_id": "d42bb13eac522a57a952adcbc59ead1ee3fa8a0b", "content_id": "087a5a731f95b2a33ffc64a4f5312c3ef79ec8a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 190, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/water.c", "repo_name": "architasurai22/CS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include<stdio.h>\n\nint main(void)\n{\n int minutes,bottles;\n printf(\"Minutes : \");\n minutes = get_int();\n bottles = minutes*192/16;\n printf(\"%d\\n\",bottles);\n}\n" } ]
11
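vigenere.c above advances the key index only when the plaintext character is a letter, and preserves non-letters unchanged. A Python sketch of that same shift logic (a re-expression of the C program's algorithm, not code from the repo):

```python
# Python sketch of the Vigenère shift implemented in vigenere.c above;
# only alphabetic characters advance the key position, matching the C logic.
def vigenere(plaintext, key):
    out = []
    j = 0  # key position advances only on letters
    for c in plaintext:
        if c.isalpha():
            shift = ord(key[j % len(key)].lower()) - ord('a')
            base = ord('A') if c.isupper() else ord('a')
            out.append(chr((ord(c) - base + shift) % 26 + base))
            j += 1
        else:
            out.append(c)
    return ''.join(out)

assert vigenere('attack at dawn', 'lemon') == 'lxfopv ef rnhr'
```

Tracking the key index separately from the loop index is what keeps spaces and punctuation from "consuming" key letters, the detail the `counter` variable handles in the C version.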
UTAGNATI/PST_VRWATCH
https://github.com/UTAGNATI/PST_VRWATCH
a4fa120a9a9de31c6f86c7c6e31efe78c5844f8a
f60af5f9bcd1e00e540badffe93d4ed39679d179
11094312e9c02d0ecc356ab390ea42d43d7d222e
refs/heads/master
2020-03-19T09:52:54.084747
2018-06-12T16:02:27
2018-06-12T16:02:27
136,324,306
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 11.666666984558105, "blob_id": "b64817822d1f4c375c4cf541969ad34725df8ff2", "content_id": "d9b1c6869f54797e82c2557a58d3a03864f45b93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 18, "num_lines": 3, "path": "/test2.py", "repo_name": "UTAGNATI/PST_VRWATCH", "src_encoding": "UTF-8", "text": "\n\nimport pantilthat\n\npantilthat.pan(45)\n" }, { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.7019230723381042, "avg_line_length": 13.857142448425293, "blob_id": "68ebee22c61903382cc0e6f4d759d9c45e15de4c", "content_id": "79b5bfb8765b61162eb3c5273fde013b89b549fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/aff.py", "repo_name": "UTAGNATI/PST_VRWATCH", "src_encoding": "UTF-8", "text": "import pantilthat\nimport math\n\nwhile True:\n \n pantilthat.servo_one(0)\n pantilthat.servo_two(0)\n" }, { "alpha_fraction": 0.4183673560619354, "alphanum_fraction": 0.5204081535339355, "avg_line_length": 22.5, "blob_id": "2d993b2eac2e06aeaa82d8ab7745523efac38740", "content_id": "e3bc1acfa69b0baeb1a93293149cf5c8142d07b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 49, "num_lines": 4, "path": "/scriptTradStreamToRasp.py", "repo_name": "UTAGNATI/PST_VRWATCH", "src_encoding": "UTF-8", "text": "\r\ndonnees = input()\r\nif (donnees[15] = 'H'){\r\n print (données[24:30] + \",\" + donnees[37:43])\r\n}" }, { "alpha_fraction": 0.34673747420310974, "alphanum_fraction": 0.41350531578063965, "avg_line_length": 22.122806549072266, "blob_id": "2c7fbf5fe0c69b48457c43ba750ddf85dc0ce7db", "content_id": "141492e8b27b2a3dc62ffe73069f57ade2152db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 53, "num_lines": 57, "path": "/conversion.py", "repo_name": "UTAGNATI/PST_VRWATCH", "src_encoding": "UTF-8", "text": "import pantilthat\nimport math\n\nwhile True:\n poub = raw_input()\n donnees = raw_input()\n \n ##\n \n #print(donnees)\n cd = 0\n\n if donnees[24] == '-':cd+=1\n if donnees[37+cd] == '-':cd+=1\n if donnees[50+cd] == '-':cd+=1\n\n qw = float(donnees[64+cd:70+cd])\n if donnees[64+cd] == '-':cd+=1\n qx = float(donnees[78+cd:84+cd])\n if donnees[78+cd] == '-':cd+=1\n qy = float(donnees[92+cd:98+cd])\n if donnees[92+cd] == '-':cd+=1\n qz = float(donnees[106+cd:112+cd])\n\n\n def quaternion_to_euler_angle(w, x, y, z):\n ysqr = y * y\n \n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + ysqr)\n X = math.degrees(math.atan2(t0, t1))\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n Y = math.degrees(math.asin(t2))\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (ysqr + z * z)\n Z = math.degrees(math.atan2(t3, t4))\n \n return X, Y, Z\n \n x,y,z = quaternion_to_euler_angle(qw, qx, qy, qz)\n \n i=0\n while i<2:\n if -90<x<90:\n pantilthat.tilt(-x)\n \n if -90<y<90:\n pantilthat.pan(y)\n \n i=i+1\n \n\n ##\n" } ]
4
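conversion.py above converts an orientation quaternion (w, x, y, z) to Euler angles in degrees before driving the pan/tilt servos. The same conversion extracted into a standalone, testable function (the formulation is taken directly from the file; only the identity-quaternion check is added for illustration):

```python
# Standalone version of the quaternion-to-Euler conversion used in
# conversion.py above (degrees out; same atan2/asin formulation).
import math

def quaternion_to_euler_angle(w, x, y, z):
    ysqr = y * y
    t0 = +2.0 * (w * x + y * z)
    t1 = +1.0 - 2.0 * (x * x + ysqr)
    X = math.degrees(math.atan2(t0, t1))              # roll
    t2 = max(-1.0, min(+1.0, +2.0 * (w * y - z * x))) # clamp keeps asin defined
    Y = math.degrees(math.asin(t2))                   # pitch
    t3 = +2.0 * (w * z + x * y)
    t4 = +1.0 - 2.0 * (ysqr + z * z)
    Z = math.degrees(math.atan2(t3, t4))              # yaw
    return X, Y, Z

# identity quaternion -> no rotation on any axis
assert quaternion_to_euler_angle(1.0, 0.0, 0.0, 0.0) == (0.0, 0.0, 0.0)
```

Defining the function once at module level (rather than inside the `while True:` loop, as conversion.py does) avoids re-creating it on every frame of the stream.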
gweakliem/openspacesboard-python
https://github.com/gweakliem/openspacesboard-python
85b58fa0083de778a002f5d17abb24adcde333e3
bbea0cceec098ba4fcf6d530cc8c9b715ea1df0f
28b627db44b72801ab6cafeaa0473871c1aab593
refs/heads/master
2020-12-03T09:14:50.602842
2016-03-05T19:05:35
2016-03-05T19:05:35
53,222,084
0
0
null
2016-03-05T20:36:38
2016-03-02T23:09:42
2016-03-05T19:05:48
null
[ { "alpha_fraction": 0.6356275081634521, "alphanum_fraction": 0.6720647811889648, "avg_line_length": 27.22857093811035, "blob_id": "447a152721ca7e2a0fe6bf00fd315ce35a2b84f1", "content_id": "2a532b3ad3c47e2fef38bf018b2c17b98b4b1e56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/migrations/versions/1217a2e3ce7e_.py", "repo_name": "gweakliem/openspacesboard-python", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 1217a2e3ce7e\nRevises: 1402a9a4e97e\nCreate Date: 2016-03-05 11:32:09.364053\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1217a2e3ce7e'\ndown_revision = '1402a9a4e97e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('conference_space',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('space_name', sa.String(length=64), nullable=True),\n sa.Column('location_id', sa.Integer(), nullable=True),\n sa.Column('event_date', sa.Date(), nullable=True),\n sa.Column('start_time', sa.DateTime(), nullable=True),\n sa.Column('end_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('space_name')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('conference_space')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.640303373336792, "alphanum_fraction": 0.6771397590637207, "avg_line_length": 26.147058486938477, "blob_id": "f5ec6ea581bc6f939245611ffa500dfbe77fe9a4", "content_id": "56f9328e1583071bd803d9a1395c997bd06b3685", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/migrations/versions/1402a9a4e97e_.py", "repo_name": "gweakliem/openspacesboard-python", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 1402a9a4e97e\nRevises: None\nCreate Date: 2016-03-05 11:27:42.836345\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1402a9a4e97e'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('conference_session',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=64), nullable=True),\n sa.Column('description', sa.String(length=64), nullable=True),\n sa.Column('convener', sa.String(length=64), nullable=True),\n sa.Column('space_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('title')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('conference_session')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6395061612129211, "alphanum_fraction": 0.6814814805984497, "avg_line_length": 27.928571701049805, "blob_id": "9460ed9ffaaf1a2e0232127e80dd7e4cf1715108", "content_id": "6219cf230ad1cbb6d491b7ee33ffb7ca86009880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/migrations/versions/e0a59d9b65e7_.py", "repo_name": "gweakliem/openspacesboard-python", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: e0a59d9b65e7\nRevises: acc0569b1d62\nCreate Date: 2016-03-05 12:02:34.594048\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'e0a59d9b65e7'\ndown_revision = 'acc0569b1d62'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_foreign_key(None, 'conference_session', 'conference_space', ['space_id'], ['id'])\n op.create_foreign_key(None, 'conference_space', 'conference_location', ['location_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'conference_space', type_='foreignkey')\n op.drop_constraint(None, 'conference_session', type_='foreignkey')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7928870320320129, "alphanum_fraction": 0.7928870320320129, "avg_line_length": 33.14285659790039, "blob_id": "3a354a817cd3f667efbd6143c9c38bc25fe71ab7", "content_id": "5280f4f47ccf9f156a5dc826e6417f021b3fbecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 111, "num_lines": 14, "path": "/osbp_app/mod_utils/db_create.py", "repo_name": "gweakliem/openspacesboard-python", "src_encoding": "UTF-8", "text": "#!flask/bin/python\nimport os\n\nfrom migrate.versioning import api\n\nfrom config import SQLALCHEMY_MIGRATE_REPO, SQLALCHEMY_DATABASE_URI\nfrom osbp_app import db\n\ndb.create_all()\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database_repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))\n" } ]
4
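The migrations above create `conference_session` and `conference_space` tables and wire foreign keys to `conference_location`. A sketch of the SQLAlchemy models those migrations imply; the class names and the contents of `ConferenceLocation` are assumptions (its creating migration, acc0569b1d62, is not shown here), while the column names and types come straight from the upgrade steps:

```python
# Models matching the Alembic migrations above (Flask-SQLAlchemy style,
# since the repo is a Flask app). Class names are assumptions.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class ConferenceLocation(db.Model):
    __tablename__ = 'conference_location'
    id = db.Column(db.Integer, primary_key=True)  # other columns unknown

class ConferenceSpace(db.Model):
    __tablename__ = 'conference_space'
    id = db.Column(db.Integer, primary_key=True)
    space_name = db.Column(db.String(64), unique=True)
    location_id = db.Column(db.Integer, db.ForeignKey('conference_location.id'))
    event_date = db.Column(db.Date)
    start_time = db.Column(db.DateTime)
    end_time = db.Column(db.DateTime)

class ConferenceSession(db.Model):
    __tablename__ = 'conference_session'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(64), unique=True)
    description = db.Column(db.String(64))
    convener = db.Column(db.String(64))
    space_id = db.Column(db.Integer, db.ForeignKey('conference_space.id'))
```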
0962kk/python_tor_web
https://github.com/0962kk/python_tor_web
09bbeef296ba08e9141245b880e145905acf6473
0884708ebacb035d4abfceefb74ab41962eb5864
2cfeb1f26ca4f46313f70b334830004be0f0b37e
refs/heads/master
2021-01-22T21:03:37.257864
2017-03-18T09:38:13
2017-03-18T09:38:13
85,390,402
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 30, "blob_id": "d916e88f550b35a3d43dc2bf7a3f24a895810b5c", "content_id": "000547509ab7db42b5a8d3deb88db20d7f7a7db7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/README.md", "repo_name": "0962kk/python_tor_web", "src_encoding": "UTF-8", "text": "# python_tor_web\nget the content with python urllib &amp; tor \n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7494089603424072, "avg_line_length": 25.4375, "blob_id": "ed52425b72dac2bddf5c9145e212ad361e76543a", "content_id": "b19084b095021d263a463f24d4ca8b9c7e83c6f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 73, "num_lines": 16, "path": "/app.py", "repo_name": "0962kk/python_tor_web", "src_encoding": "UTF-8", "text": "import socks, socket\nfrom urllib.request import urlopen\n\n\nurl = \"http://whoer.net\"\ndef create_connection(address, timeout=None, source_address=None):\n sock = socks.socksocket()\n sock.connect(address)\n return sock\n\nsocks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", 9150, True)\nsocket.socket = socks.socksocket\nsocket.create_connection = create_connection\n\ncontents = urlopen(url).read()\nprint(contents)\n" } ]
2
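app.py above routes urllib through Tor by installing a SOCKS5 default proxy and monkey-patching both `socket.socket` and `socket.create_connection`. A condensed sketch of the same approach; note that port 9150 is the Tor Browser bundle's SOCKS port, while a system tor daemon usually listens on 9050, and the check.torproject.org fetch is an added verification step, not part of the repo:

```python
# Route all urllib traffic through a local Tor SOCKS5 proxy (PySocks),
# using the same monkey-patching technique as app.py above.
import socks
import socket
from urllib.request import urlopen

def create_connection(address, timeout=None, source_address=None):
    # urllib calls socket.create_connection internally; replace it too,
    # otherwise some connections bypass the proxied socket class.
    sock = socks.socksocket()
    sock.connect(address)
    return sock

socks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150, True)
socket.socket = socks.socksocket
socket.create_connection = create_connection

# check.torproject.org reports whether the request really exits via Tor
print(urlopen("https://check.torproject.org").read()[:200])
```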
scottsilverlabs/raspberrystem-ide
https://github.com/scottsilverlabs/raspberrystem-ide
9d630163dd2248e1b8c4af5c070ed1c4de2ee0dc
883483477853db329b302996c861d6dfec407321
177651ef855b5c2447f174d3ca07482ab4b5c505
refs/heads/master
2021-01-13T14:05:08.211369
2015-11-09T09:45:16
2015-11-09T09:45:16
21,174,700
0
0
null
2014-06-24T17:45:02
2014-07-02T21:02:28
2014-09-07T15:18:52
JavaScript
[ { "alpha_fraction": 0.6502569913864136, "alphanum_fraction": 0.6555951237678528, "avg_line_length": 34.61971664428711, "blob_id": "49f0fea63b3c7857d06055c42e091d2ee9aa570d", "content_id": "67635168777d2fdc9806ba142ff35484b11c57f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5058, "license_type": "permissive", "max_line_length": 92, "num_lines": 142, "path": "/setup.py", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n# Copyright (c) 2014, Scott Silver Labs, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport sys\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install as _install\nimport shutil\nimport subprocess\n\n# Utility function to read the README file.\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nTGT_CONFIG_FILE = '/etc/rstem_ide.conf'\nTGT_INSTALL_DIR = '/opt/raspberrystem/ide'\nTGT_PYTHON_DOCS_DIR = '/opt/raspberrystem/python.org'\nTGT_HTML_SYMLINK = '/opt/raspberrystem/pydoc'\nTGT_CONFIG_FILE = '/etc/rstem_ide.conf'\nTGT_BIN_SYMLINK = '/usr/local/bin/rstem_ided'\nTGT_INITD = '/etc/init.d/rstem_ided'\nTGT_OPENBOX_FILE = '/home/pi/.config/openbox/lxde-pi-rc.xml'\nTGT_DESKTOP_FILE = '/home/pi/desktop/openbox/rstem.desktop'\noutputs = [\n TGT_INSTALL_DIR,\n TGT_PYTHON_DOCS_DIR,\n TGT_CONFIG_FILE,\n TGT_BIN_SYMLINK,\n TGT_INITD,\n ]\n\ndef _post_install(dir):\n # import rstem to find its install path\n # NOTE: Require dependency on rstem\n import rstem\n pydoc_path = os.path.join(os.path.dirname(rstem.__file__), 'pydoc', rstem.__name__)\n\n for dir in [TGT_INSTALL_DIR, TGT_PYTHON_DOCS_DIR]:\n print('Removing: ' + dir)\n shutil.rmtree(dir, ignore_errors=True)\n for dir in [TGT_INSTALL_DIR, TGT_PYTHON_DOCS_DIR]:\n print('Installing: ' + dir)\n shutil.copytree(os.path.basename(dir), dir)\n\n print('Creating links...')\n\n # API docs symlink - note: TGT_HTML_SYMLINK not considered an output of the\n # install because if it is, then on pip uninstall, it will not remove the\n # symlink, but instead removes the files linked TO.\n try:\n os.remove(TGT_HTML_SYMLINK)\n except OSError:\n pass\n print(' symlink {} -->\\n {}'.format(TGT_HTML_SYMLINK, pydoc_path))\n os.symlink(pydoc_path, TGT_HTML_SYMLINK)\n\n # server binary symlink\n try:\n os.remove(TGT_BIN_SYMLINK)\n except OSError:\n pass\n dest_bin = os.path.join(TGT_INSTALL_DIR, 'server')\n print(' symlink {} -->\\n {}'.format(TGT_BIN_SYMLINK, dest_bin))\n os.symlink(dest_bin, TGT_BIN_SYMLINK)\n os.chmod(TGT_BIN_SYMLINK, 0o4755)\n\n # Copy config file\n SRC_CONFIG_FILE = '.' + TGT_CONFIG_FILE\n print('Copy config file {} -> {}'.format(SRC_CONFIG_FILE, TGT_CONFIG_FILE))\n shutil.copy(SRC_CONFIG_FILE, TGT_CONFIG_FILE)\n\n # Copy and create link for init script\n SRC_INITD = '.' 
+ TGT_INITD\n print('Copy init.d script {} -> {}'.format(SRC_INITD, TGT_INITD))\n shutil.copy(SRC_INITD, TGT_INITD)\n os.chmod(TGT_INITD, 0o755)\n # symlink is created via postinstall script\n\n # WM rc config file\n try:\n os.makedirs(\"/\".join(TGT_OPENBOX_FILE.split(\"/\")[:-1]) + \"/\")\n except:\n pass\n try:\n print('Backup {} -> {}'.format(TGT_OPENBOX_FILE, TGT_OPENBOX_FILE + '.old'))\n shutil.copy(TGT_OPENBOX_FILE, TGT_OPENBOX_FILE + '.old')\n except:\n pass\n print('Copy {} -> {}'.format(\"./configfiles/lxde-pi-rc.xml\", TGT_OPENBOX_FILE))\n shutil.copy(\"./configfiles/lxde-pi-rc.xml\", TGT_OPENBOX_FILE)\n\n # Desktop link\n print('Copy {} -> {}'.format(\"./configfiles/rstem.desktop\", TGT_DESKTOP_FILE))\n shutil.copy(\"./configfiles/rstem.desktop\", TGT_DESKTOP_FILE)\n\n # Additional post install steps via shell script\n from subprocess import call\n call('bash ./pkg/postinstall %s rstem' % dir, shell=True)\n\n# Post installation task to setup raspberry pi\nclass install(_install):\n # Required to force PiP to know about our additional files.\n def get_outputs(self):\n return super().get_outputs() + outputs\n\n def run(self):\n super().run()\n self.execute(_post_install, (self.install_lib,), msg='Running post install task...')\n\nsetup(\n name = read('NAME').strip(),\n version = read('VERSION').strip(),\n author = 'Brian Silverman',\n author_email = '[email protected]',\n description = ('RaspberrySTEM IDE'),\n license = 'Apache License 2.0',\n keywords = ['raspberrystem', 'raspberrypi', 'stem', 'ide'],\n url = 'http://www.raspberrystem.com',\n long_description = read('README.md'),\n # use https://pypi.python.org/pypi?%3Aaction=list_classifiers as help when editing this\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Topic :: Education',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n ],\n cmdclass={'install': install}, # overload install command\n)\n" }, { "alpha_fraction": 0.5831640958786011, "alphanum_fraction": 0.5978334546089172, "avg_line_length": 27.960784912109375, "blob_id": "2c4678f3dfd9312e94695e64a3c4be1fa5003a35", "content_id": "b66ff1195cecb694ea1a004a7861bb228ecd64da", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4431, "license_type": "permissive", "max_line_length": 108, "num_lines": 153, "path": "/sitescrape/sitescrape.rb", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ruby\nrequire \"open-uri\"\nrequire \"json\"\n@cwd = File.dirname __FILE__\n@repscript = open(\"#{@cwd}/projectspage.js\").read\n@baseurl = \"http://dev.raspberrystem.com/wphidden42/?page_id=5\"\n@localbase = @cwd+\"/website/\"\n@basedomain = @baseurl[/http:\\/\\/.+\\.com\\//]\ncon = open(@baseurl)\nhtml = con.read\njson = html[/posts = \\[.+\\]/][8..-1]\n@posts = JSON.parse json\nif !Dir.exists? 
@cwd+\"/website\"\n\tDir.mkdir @cwd+\"/website\"\nend\n@queue = []\n@scanned = {}\n@urlhashes = {}\n\ndef formatUrl(url)\n\turl = url.gsub \"?p=\", \"?page_id=\"\n\tif url[0, 2] == \"//\"\n\t\turl = \"http:\"+url\n\tend\n\turl = URI.join(@basedomain, URI.unescape(url.gsub(\"&#038;\", \"\"))).to_s.gsub(\"www.\", \"\")\n\treturn url\nend\n\ndef tolocal(url)\n\turl = formatUrl(url)\n\treturn @urlhashes[url] if @urlhashes[url]\n\tnewurl = url.split(\"/\").last.delete(\"/,:=\").gsub(/^\\?/, \"\").gsub(/\\?.+$/, \"\")\n\tif newurl == \"0.jpg\"\n\t\tnewurl = (0...8).map { (65 + rand(26)).chr }.join.hash.to_s(16) + \".jpg\"\n\tend\n\t@urlhashes[url] = newurl\n\treturn newurl\nend\n\ndef crawl(url)\n\t@scanned[url] = true\n\turl = formatUrl(url)\n\tpage = nil\n\tbegin\n\t\tpage = open url\n\trescue\n\t\tputs \"404 \"+url\n\t\treturn\n\tend\n\ttype = page.content_type\n\tbody = page.read\n\n\tif type == \"text/html\"\n\t\treturn if !url.include? @basedomain\n\t\tjqs = body.scan /<script[^>]+jquery[^>]+><\\/script>/\n\t\tfor i in jqs\n\t\t\tbody = body.gsub i, \"\"\n\t\tend\n\t\tif url == formatUrl(@baseurl)\n\t\t\tscript = body[/var diffImage.+psort\\(\"name\"\\);/m]\n\t\t\tdiff = formatUrl(@repscript[/var diffImage.*/][17..-3])\n\t\t\trate = formatUrl(@repscript[/var rateImage.*/][17..-3])\n\t\t\tcellurls = JSON.parse body[/cellurls = \\{[^\\}]+\\}/][11..-1]\n\t\t\tfor i, v in cellurls\n\t\t\t\tcellurls[i] = formatUrl(tolocal(@basedomain+\"wphidden42/\"+v))\n\t\t\t\[email protected] formatUrl(tolocal(@basedomain+\"wphidden42/\"+v))\n\t\t\tend\n\t\t\t@repscript = @repscript.gsub \"cellurls = {}\", \"cellurls = \" + JSON.generate(cellurls)\n\t\t\[email protected] diff, rate\n\t\t\t@repscript = @repscript.gsub diff, tolocal(diff)\n\t\t\t@repscript = @repscript.gsub rate, tolocal(rate)\n\t\t\tlocalposts = []\n\t\t\tfor i in @posts\n\t\t\t\tlocal = tolocal(formatUrl(@basedomain+\"wphidden42/?page_id=\"+i[\"id\"].to_s))\n\t\t\t\tlocalposts.push(i.clone()).last[\"url\"] = local\n\t\t\tend\n\t\t\tfor i in localposts\n\t\t\t\ti[\"cells\"] = i[\"cells\"].gsub(\"\\r\", \"\").strip\n\t\t\t\ti[\"lid\"] = i[\"lid\"].gsub(\"\\r\", \"\").strip\n\t\t\t\ti[\"category\"] = i[\"category\"].gsub(\"\\r\", \"\").strip\n\t\t\t\tif i[\"description\"]\n\t\t\t\t\ti[\"description\"] = i[\"description\"].gsub(\"\\r\", \"\").strip\n\t\t\t\tend\n\t\t\tend\n\t\t\tcellimageurls = {}\n\t\t\tfor i in localposts\n\t\t\t\ts = i[\"cells\"].scan /\\[[^\\]]+\\]/\n\t\t\t\tfor v in s\n\t\t\t\t\tv = v[v.split(\" \")[0].length+1..-2]\n\t\t\t\t\tif not cellimageurls[v]\n\t\t\t\t\t\tcellimageurls[v] = tolocal(formatUrl(@basedomain+\"wphidden42/cellicons/\"+v.downcase().gsub(\" \", \"-\")))\n\t\t\t\t\t\[email protected] formatUrl(@basedomain+\"wphidden42/cellicons/\"+v.downcase().gsub(\" \", \"-\"))\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\t\t@repscript = @repscript.gsub \"cellimageurls = {}\", \"cellimageurls = \"+JSON.generate(cellimageurls)\n\t\t\t@repscript = @repscript.gsub \"posts = []\", \"posts = \"+JSON.generate(localposts)\n\t\t\tbody = body.gsub script, @repscript\n\t\tend\n\t\tyoutubes = body.scan /<iframe [^>]+src=\\\"http:\\/\\/www.youtube\\S+\\\"[^<]+<\\/iframe>/\n\t\tfor i in youtubes\n\t\t\twidth = i[/width=\\\"[0-9]+\\\"/]\n\t\t\theight = i[/height=\\\"[0-9]+\\\"/]\n\t\t\tid = i[/src=\\\"http:\\/\\/www.youtube\\S+\\\"/][34..-1].split(\"?\")[0]\n\t\t\tbody = body.gsub i, \"<img #{width} #{height} src=\\\"http://img.youtube.com/vi/#{id}/0.jpg\\\"></img>\"\n\t\tend\n\t\tlinks = body.scan /href=\"[^ #]+\"/\n\t\tlinks.concat 
body.scan /href='\\S+'/ #\\S because the open sans URL has # in it\n\t\tlinks.concat body.scan /src=\"[^ #]+\"/\n\t\tlinks.concat body.scan /src='[^ #]+'/\n\t\tfor i in links\n\t\t\ttrimmed = i[/\\\".+\\\"/] || i[/\\'.+\\'/]\n\t\t\tif !@scanned[i]\n\t\t\t\[email protected] trimmed[1..-2]\n\t\t\t\t@scanned[i] = true\n\t\t\tend\n\t\t\tbody = body.gsub trimmed, \"\\\"\"+tolocal(trimmed[1..-2])+\"\\\"\"\n\t\tend\n\t\tf = File.open @cwd+\"/website/\"+tolocal(url), \"w\"\n\t\tf.write body\n\telse\n\t\tf = File.open @cwd+\"/website/\"+tolocal(url), \"w\"\n\t\tf.write body\n\tend\nend\[email protected] @baseurl\nfor i in @posts\n\[email protected] @basedomain+\"wphidden42/?page_id=\"+i[\"id\"].to_s\nend\n\nthreads = []\nfor i in 1..8\n\tthreads.push(Thread.new {\n\t\twhile true\n\t\t\tif @queue.size > 0\n\t\t\t\tcrawl @queue.pop\n\t\t\telse\n\t\t\t\tbreak\n\t\t\tend\n\t\tend\n\t}).last.run\n\tsleep(0.1)\nend\n\nalive = true\nwhile alive\n\tfor i in threads\n\t\talive = alive && i.alive?\n\tend\n\tsleep 1\nend\nf = File.open @cwd+\"/website/index.html\", \"w\"\nf.write(open(@cwd+\"/website/\"+@urlhashes[formatUrl(@baseurl)]).read)\n" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 25.66666603088379, "blob_id": "b3b9452b2b8abf6eac8c83dcc209b06aa156b4a8", "content_id": "442fa7f2d31c9c453f0abdb83b68d8d15a27e1c7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 81, "license_type": "permissive", "max_line_length": 37, "num_lines": 3, "path": "/scripts/version.sh", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho raspberrystem_ide > NAME\ngit describe --tags --dirty > VERSION\n\n" }, { "alpha_fraction": 0.6305637955665588, "alphanum_fraction": 0.6342729926109314, "avg_line_length": 23.472726821899414, "blob_id": "2850dc91bfce85054fbd3c5121c8ef0bac018f0d", "content_id": "a10cc79d3f47a579c507681e373e52c6f6a4ffa9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 82, "num_lines": 55, "path": "/pkg/postinstall", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPKG_DIR=$1\nPKG_NAME=$2\n\n#\n# Enable boot to desktop... Taken from http://github.com/asb/raspi-config\n#\n\ndisable_raspi_config_at_boot() {\n if [ -e /etc/profile.d/raspi-config.sh ]; then\n rm -f /etc/profile.d/raspi-config.sh\n sed -i /etc/inittab \\\n -e \"s/^#\\(.*\\)#\\s*RPICFG_TO_ENABLE\\s*/\\1/\" \\\n -e \"/#\\s*RPICFG_TO_DISABLE/d\"\n telinit q\n fi\n}\n\ndisable_boot_to_scratch() {\n if [ -e /etc/profile.d/boottoscratch.sh ]; then\n rm -f /etc/profile.d/boottoscratch.sh\n sed -i /etc/inittab \\\n -e \"s/^#\\(.*\\)#\\s*BTS_TO_ENABLE\\s*/\\1/\" \\\n -e \"/#\\s*BTS_TO_DISABLE/d\"\n telinit q\n fi\n}\n\nenable_boot_to_desktop() {\n update-rc.d lightdm enable 2\n sed /etc/lightdm/lightdm.conf -i -e \"s/^#autologin-user=.*/autologin-user=pi/\"\n disable_boot_to_scratch\n disable_raspi_config_at_boot\n}\n\n\necho \"Running postinstall script...\"\necho \" Enabling boot to desktop\"\n\nenable_boot_to_desktop\n\n# Add server startup to boot\nupdate-rc.d rstem_ided defaults\n\n# Autostart IDE client web browser\necho \" Enabling autostart of IDE client...\"\nIDE_LINE=\"@sh /opt/raspberrystem/ide/start_client.sh\"\nAUTOSTART_CFG=\"/etc/xdg/lxsession/LXDE-pi/autostart\"\nif ! 
grep -q \"$IDE_LINE\" \"$AUTOSTART_CFG\"; then\n echo \"$IDE_LINE\" >> \"$AUTOSTART_CFG\"\n echo \" ... now enabled.\"\nelse\n echo \" ... was already enabled.\"\nfi\n\n\n" }, { "alpha_fraction": 0.6433737874031067, "alphanum_fraction": 0.660287082195282, "avg_line_length": 52.183433532714844, "blob_id": "dccc9a3a27d24b1396e2da5343a957d548a36c43", "content_id": "5c414b98c7e9c45359a738d5ac6eb13a5a4ccd79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8989, "license_type": "permissive", "max_line_length": 262, "num_lines": 169, "path": "/sitescrape/projectspage.js", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "var posts = [];\nvar cellurls = {};\nvar cellimageurls = {};\nvar diffImage = \"https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcSZCilIMSKaiiLs6gE0RwLWlIIBLkYsSKlRXhu1ZbGIprGrdh9BMFK-Bg\";\nvar rateImage = \"http://icongal.com/gallery/image/144117/nintendo_star.png\";\nvar headerStyle = \"width:\"+(100/6)+\"%;display:inline;\";\n\nvar title = document.getElementById(\"content\").firstElementChild.firstElementChild;\ntitle.firstElementChild.innerHTML += \"<span id=\\\"count\\\">(\"+posts.length+\")</span>\";\nvar count = document.getElementById(\"count\");\nvar content = document.getElementById(\"content\").firstElementChild.getElementsByTagName(\"div\")[0];\nvar bar = document.getElementById(\"tablesearchbar\");\ncontent.innerHTML += \"<div id=\\\"projectTable\\\" class=\\\"tableheader\\\"></div>\";\nvar table = document.getElementById(\"projectTable\");\nvar header = \"<style> .entry-title {margin-bottom:0;} \";\nheader += \".descout {height:100%;padding-bottom:1em;padding-top:1em;}\";\nheader += \".descin {height:0px;padding-bottom:0px;padding-top:0px;}\";\nheader += \".down {-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg)}\";\nheader += \".up {-webkit-transform:rotate(0deg);-moz-transform:rotate(0deg);-ms-transform:rotate(0deg)}\";\nheader += \".gray {-webkit-filter:grayscale(100%);-moz-filter:grayscale(100%);-ms-filter:grayscale(100%);-o-filter:grayscale(100%);filter:grayscale(100%);filter:url(GRAYSCALEURL);} </style>\";\nheader += \"<div id=\\\"advsearch\\\" class=\\\"in\\\" style=\\\"width:100%;overflow:hidden;display:inline-flex;display:-webkit-inline-box;\\\"></div>\";\nheader += \"<input id=\\\"tablesearchbar\\\" style=\\\"float:left;width:99%;max-height:1em;overflow-x:auto;overflow-y:hidden;display:inline;\\\" class=\\\"headerbutton\\\" type=\\\"text\\\" value=\\\"\\\" placeholder=\\\"Search\\\" onchange=\\\"search(this.value)\\\"/>\";\nheader += \"<div class=\\\"tableheader\\\" style=\\\"display:inline;width:100%;text-align:center;white-space:nowrap;\\\">\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" value=\\\"Name\\\" onclick=\\\"psort('name')\\\"></input>\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" value=\\\"Difficulty\\\" onclick=\\\"psort('difficulty')\\\"></input>\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" value=\\\"Rating\\\" onclick=\\\"psort('rating')\\\"></input>\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" value=\\\"Category\\\" onclick=\\\"psort('category')\\\"></input>\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" 
value=\\\"Cells\\\" onclick=\\\"psort('cellcount')\\\"></input>\";\nheader += \"<input style=\\\"\"+headerStyle+\"\\\" class=\\\"headerbutton\\\" type=\\\"button\\\" value=\\\"Lid\\\" onclick=\\\"psort('lid')\\\"></input>\";\nheader += \"</div>\";\nheader += \"<div id=\\\"entryTable\\\" class=\\\"tableheader\\\" style=\\\"width:100%;\\\"></div>\";\ntable.innerHTML += header;\nvar etable = document.getElementById(\"entryTable\");\nvar advsearch = document.getElementById(\"advsearch\");\nvar headers = document.getElementsByClassName(\"entry-title\");\nheaders[headers.length-1].style.display = \"inline\"; //In reality there should only be one element in the array\n//Setup posts array and searching arrays\nfor (var i in posts) {\n\tvar ele = posts[i];\n\tif (!ele.rating) {\n\t\tele.rating = 1;\n\t}\n\tvar cstring = ele.cells;\n\tele.cells = {};\n\tvar allsplit = cstring.substring(1, cstring.length-1).split(\"] [\");\n\tfor (var j in allsplit) {\n\t\tnum = allsplit[j].split(\" \")[0];\n\t\tname = allsplit[j].substring(num.length+1);\n\t\tnum = parseInt(num);\n\t\tele.cells[name] = num;\n\t\tele.cellcount += num;\n\t}\n}\n\n\nvar textHolderStyle = \"width:\"+(100/6)+\"%;overflow-x:auto;overflow-y:hidden;max-height:inherit;text-align:inherit;background-color:#fff;\";\nvar circleStyle = \"float:right;width:1em;height:1em;border-radius:100%;background-color:#aaa;vertical-align:middle;\";\nvar grayStyle = \"width:5em;position:relative;z-index:1;max-height:1.2em;overflow:hidden\";\nvar colorStyle = \"position:relative;z-index:2;max-height:1.2em;overflow:hidden;margin-top:-1.2em;white-space:nowrap;\";\nfunction generateEntry(optionsDict) {\n\tvar id = optionsDict.name.replace(/ /g, \"-\");\n\tvar html = \"<div id=\\\"\"+id+\"\\\" class=\\\"tableentry\\\" onclick=\\\"toggleDesc(event, this.id+'Desc', this)\\\" style=\\\"display:inline-flex;display:-webkit-inline-box;width:100%;min-height:1.3em;max-height:3.2em;text-align:left;overflow:hidden;padding-bottom:.1em;\\\">\";\n\thtml += \"<div class=\\\"tabletext pname\\\" style=\\\"\"+textHolderStyle+\"\\\"><a href=\\\"\"+optionsDict.url+\"\\\">\"+optionsDict.name+\"</a></div>\";\n\thtml += \"<div class=\\\"tabletext pdiff\\\" style=\\\"\"+textHolderStyle+\"overflow-x:hidden;\\\">\";\n\thtml += \"<div style=\\\"\"+grayStyle+\"\\\">\"; //Background holder\n\tfor (var i = 1; i <= 5; i++) {\n\t\thtml += \"<img class=\\\"gray\\\" src=\\\"\"+diffImage+\"\\\" style=\\\"box-shadow:0 0px;height:1em;width:1em;display:inline-flex;display:-webkit-inline-box;\\\"></img>\";\n\t}\n\thtml += \"</div>\";\n\thtml += \"<div style=\\\"width:\"+optionsDict.difficulty+\"em;\"+colorStyle+\"\\\">\"; //Forground holder\n\tfor (var i = 1; i <= 5; i++) {\n\t\thtml += \"<img src=\\\"\"+diffImage+\"\\\" style=\\\"box-shadow:0 0px;height:1em;width:1em;position:relative;\\\"></img>\";\n\t}\n\thtml += \"</div>\";\n\thtml += \"</div>\";\n\thtml += \"<div class=\\\"tabletext prate\\\" style=\\\"\"+textHolderStyle+\"overflow-x:hidden;\\\">\";\n\thtml += \"<div style=\\\"\"+grayStyle+\"\\\">\"; //Background holder\n\tfor (var i = 1; i <= 5; i++) {\n\t\thtml += \"<img class=\\\"gray\\\" src=\\\"\"+rateImage+\"\\\" style=\\\"box-shadow:0 0px;height:1em;width:1em;display:inline-flex;display:-webkit-inline-box;\\\"></img>\";\n\t}\n\thtml += \"</div>\";\n\thtml += \"<div id=\\\"\"+optionsDict.id+\"RCover\\\" style=\\\"width:\"+optionsDict.rating+\"em;\"+colorStyle+\"\\\">\"; //Forground holder\n\tfor (var i = 1; i <= 5; i++) {\n\t\thtml += \"<img src=\\\"\"+rateImage+\"\\\" style=\\\"box-shadow:0 
0px;height:1em;width:1em;display:inline-flex;display:-webkit-inline-box;\\\"></img>\";\n\t}\n\thtml += \"</div>\";\n\thtml += \"</div>\";\n\thtml += \"<div class=\\\"tabletext pcategory\\\" style=\\\"\"+textHolderStyle+\"\\\">\"+optionsDict.category+\"</div>\";\n\thtml += \"<div class=\\\"tabletext pcells\\\" style=\\\"\"+textHolderStyle+\";\\\">\";\n\tfor (var i in optionsDict.cells) {\n\t\thtml += \"<a href=\\\"\"+cellurls[i]+\"\\\" style=\\\"display:inline\\\">\";\n\t\thtml += \"<img style=\\\"width:1em;height:1em;\\\" src=\\\"\"+cellimageurls[i]+\"\\\"></img></a>\";\n\t}\n\thtml += \"</div>\";\n\thtml += \"<div class=\\\"tabletext plid\\\" style=\\\"\"+textHolderStyle+\"\\\">\";\n\thtml += \"<a href=\\\"\"+cellurls[optionsDict.lid]+\"\\\" style=\\\"display:inline\\\">\";\n\thtml += \"<img style=\\\"width:1em;height:1em;\\\" src=\\\"\"+cellimageurls[optionsDict.lid]+\"\\\"></img></a>\";\n\thtml += \"<div class=\\\"tablespinner down\\\" style=\\\"\"+circleStyle+\"\\\"><span style=\\\";position:relative;top:-45%;\\\">▲</span></div></div></div>\";\n\thtml += \"<div id=\\\"\"+id+\"Desc\\\" class=\\\"tabledesc descin\\\" onclick=\\\"toggleDesc(this.id)\\\" style=\\\"width:100%;overflow:hidden;max-height:100%;min-height:0px\\\">\";\n\thtml += \"Licensed as ALv2, Copyright Scott Silver Labs, created by \"+optionsDict.author;\n\thtml += \"<div>\";\n\tfor (var i in optionsDict.cells) {\n\t\thtml += \"<a href=\\\"\"+cellurls[i]+\"\\\" style=\\\"padding-right:1em;\\\">\";\n\t\thtml += \"<img style=\\\"width:1em;height:1em;\\\" src=\\\"\"+cellimageurls[i]+\"\\\"></img>\"+i+\"</a>\";\n\t}\n\thtml += \"<a href=\\\"\"+cellurls[optionsDict.lid]+\"\\\" style=\\\"float:right;\\\">\";\n\thtml += \"<img style=\\\"width:1em;height:1em;\\\" src=\\\"\"+cellimageurls[optionsDict.lid]+\"\\\"></img>\"+optionsDict.lid+\"</a>\";\n\thtml += \"</div>\";\n\thtml += \"<br/>\"+optionsDict.description;\n\thtml += \"</div>\";\n\tetable.innerHTML += html;\n}\n\nfunction message(content) {\n\tvar html = \"<div class=\\\"tablenone\\\" style=\\\"text-align:center;display:inline-flex;display:-webkit-inline-box;width:100%;min-height:18px;max-height:50px;\\\">\";\n\thtml += \"<h4>\"+content+\"</h4>\";\n\thtml += \"</div>\";\n\tetable.innerHTML += html;\n}\n\nfunction search(str) {\n\tentryTable.innerHTML = \"\";\n\tvar found = 0;\n\tfor (var i in posts) {\n\t\tvar num = parseInt(str);\n\t\tif (posts[i].name.toLowerCase().indexOf(str.toLowerCase()) != -1) {\n\t\t\tfound++;\n\t\t\tgenerateEntry(posts[i]);\n\t\t}\n\t}\n\tcount.innerHTML = \"(\"+found+\")\";\n\tif (found === 0) {\n\t\tmessage(\"No Projects Found\");\n\t}\n}\n\nvar last;\nvar lastOrig;\nfunction toggleDesc(event, id, orig) {\n\tvar cur = document.getElementById(id);\n\tif (cur && !event.target.href) {\n\t\tif (last != id) {\n\t\t\tcur.classList.remove(\"descin\");\n\t\t\tcur.classList.add(\"descout\");\n\t\t\torig.lastChild.lastChild.classList.remove(\"down\");\n\t\t\torig.lastChild.lastChild.classList.add(\"up\");\n\t\t} else { //Ugly, but this fixing a minor toggling issue.\n\t\t\tcur.classList.remove(\"descout\");\n\t\t\tcur.classList.add(\"descin\");\n\t\t\torig.lastChild.lastChild.classList.remove(\"up\");\n\t\t\torig.lastChild.lastChild.classList.add(\"down\");\n\t\t\tlast = undefined;\n\t\t\treturn;\n\t\t}\n\t\tif (last) {\n\t\t\tvar old = document.getElementById(last);\n\t\t\tif (old) 
{\n\t\t\t\told.classList.remove(\"descout\");\n\t\t\t\told.classList.add(\"descin\");\n\t\t\t\tlastOrig.lastChild.lastChild.classList.remove(\"up\");\n\t\t\t\tlastOrig.lastChild.lastChild.classList.add(\"down\");\n\t\t\t}\n\t\t}\n\t\tlast = id;\n\t\tlastOrig = orig;\n\t}\n}\n\nsearch(\"\");" }, { "alpha_fraction": 0.7490040063858032, "alphanum_fraction": 0.7490040063858032, "avg_line_length": 34.85714340209961, "blob_id": "ff8e642feea6710c1292191a8f5c560091c975d5", "content_id": "a9551253f205485e819c491efd95a1c8417ac538", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 502, "license_type": "permissive", "max_line_length": 77, "num_lines": 14, "path": "/start_client.sh", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# Cache corruption on sudden shutdown (or other cases) can cause missing file\n# or broken image links. So start with a clean cache on every boot.\nrm -rf $HOME/.cache/chromium\n\n# A hard shutdown causes Chromium to report \"didn't shut down correctly\"...\n# Clean files responsible for this (Note: this method is ad-hoc, there's no\n# clearly defined way to do this).\nrm -f $HOME/.config/chromium/Default/Preferences\nrm -f $HOME/.config/chromium/SingletonLock\n\n# Run!\nchromium --kiosk localhost\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 23, "blob_id": "082cf2384880f806da2e69e84b35203ca6e1886b", "content_id": "1df1f0f590ddf71f8e8088eec22905f5341afaa1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 96, "license_type": "permissive", "max_line_length": 58, "num_lines": 4, "path": "/README.md", "repo_name": "scottsilverlabs/raspberrystem-ide", "src_encoding": "UTF-8", "text": "raspberrystem-ide\n=================\n\nraspberrySTEM(TM) IDE (Integrated Development Environment)\n" } ]
7
jebos/hyperion
https://github.com/jebos/hyperion
a08d149cdf6794970fdd578c16bae081bac3558f
bfaa5ce1804c39cf626ba10220801d09f36fb6dc
64dc888a606a056a273c94133eaf02e98cc8b57d
refs/heads/master
2021-01-15T09:43:44.342637
2016-01-19T20:58:35
2016-01-19T20:58:35
48,980,988
0
0
null
2016-01-04T07:18:17
2016-01-04T07:18:19
2016-01-06T21:45:58
C++
[ { "alpha_fraction": 0.5609848499298096, "alphanum_fraction": 0.5973485112190247, "avg_line_length": 26.05640983581543, "blob_id": "509c15d614426f0d3f113141f6dbb6d36fb4755d", "content_id": "73462abfd4110b8fed3d229fb9b2e6fc74fd92e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5280, "license_type": "permissive", "max_line_length": 91, "num_lines": 195, "path": "/effects/snake.py", "repo_name": "jebos/hyperion", "src_encoding": "UTF-8", "text": "import hyperion\nimport colorsys\nimport socket\n \n\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n\n# import required modules\nimport RPi.GPIO as GPIO\nimport time\n\n# define GPIO pin with button\nGPIOFrontLight = 13\nGPIOSwitchAmbianceMode = 11\nGPIODimmer = 15\n\n# main function\ndef main():\n try:\n # use GPIO pin numbering convention\n GPIO.setmode(GPIO.BOARD)\n\n # set up GPIO pin for input\n GPIO.setup(GPIOFrontLight, GPIO.IN)\n GPIO.setup(GPIOSwitchAmbianceMode, GPIO.IN)\n GPIO.setup(GPIODimmer, GPIO.IN)\n\n GPIO.setup(GPIOFrontLight, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n GPIO.setup(GPIOSwitchAmbianceMode, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n GPIO.setup(GPIODimmer, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n\n\n frontLightStatus = 0 # 0 = off , 1 = on\n ambianceLightStatus = 0 # 0 = off, 1 = on\n\n ambianceMode = 0 # 0 = normal ambiance light , 1 = tv ambiance mode\n\n dimmerValue = 10 # 0 to 10\n\n UDP_IP = \"\"\n UDP_PORT = 8888\n\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n sock.bind((UDP_IP, UDP_PORT))\n sock.settimeout(0.3)\n \n ledDataWandOn = bytearray()\n ledDataLampenOn = bytearray()\n ledDataWandOff = bytearray()\n ledDataLampenOff = bytearray()\n hsv = colorsys.rgb_to_hsv(250/255.0, 1.0, 158.0/255.0)\n hsvFront = colorsys.rgb_to_hsv(250.0/255.0, 1.0, 158.0/255.0)\n\n\n for i in range(1,152):\n rgb = colorsys.hsv_to_rgb(hsv[0], hsv[1]*2, 1)\n ledDataWandOn += bytearray((int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255)))\n ledDataWandOff += bytearray((0,0,0))\n\t \n for i in range(1,61):\n rgb = colorsys.hsv_to_rgb(hsvFront[0], hsvFront[1]*2, hsvFront[2])\n ledDataLampenOn += bytearray((int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255)))\n ledDataLampenOff += bytearray((0,0,0))\n\t\n while not hyperion.abort():\n # get GPIO value\n if GPIO.input(GPIOSwitchAmbianceMode) and ambianceMode <> 1:\n ambianceMode = 1\n\ttime.sleep(0.2)\n print(\"Button 2 pressed\")\n\n if not GPIO.input(GPIOSwitchAmbianceMode) and ambianceMode <> 0:\n ambianceMode = 0\n ambianceLightStatus = 0\n\ttime.sleep(0.2)\n print(\"Button 2 released\")\n\n\t\t\n if GPIO.input(GPIOFrontLight) and frontLightStatus <> 1:\n frontLightStatus = 1\n\ttime.sleep(0.2)\n print(\"Button 1 pressed\")\n\n if not GPIO.input(GPIOFrontLight) and frontLightStatus <> 0:\n frontLightStatus = 0\n\ttime.sleep(0.2)\n print(\"Button 1 released\")\n \n \n if GPIO.input(GPIODimmer):\n print(\"DIMMER 1\") \n\n timeCounter = 0\n \n if GPIO.input(GPIODimmer) and ambianceLightStatus == 0:\n print(\"Light on\")\n ambianceLightStatus = 1\n ledData = ledDataWandOn\n\n if frontLightStatus == 1:\n ledData = ledData + ledDataLampenOn\n else:\n ledData = ledData + ledDataLampenOff\n\n hyperion.setColor(ledData)\n\n\n 
time.sleep(0.4)\n\n while GPIO.input(GPIODimmer):\n timeCounter = timeCounter + 1\n print(\"TIME COUNTER\")\n if timeCounter > 10:\n ambianceLightStatus = 0\n\n ledData = ledDataWandOff\n\n if frontLightStatus == 1:\n ledData = ledData + ledDataLampenOn\n else:\n ledData = ledData + ledDataLampenOff\n\n hyperion.setColor(ledData)\n\n # print(\"Light off\")\n \n time.sleep(0.1)\n \n if ambianceLightStatus == 1 and timeCounter <=10:\n print(\"DIMMING\")\n ledDataWandOn = bytearray()\n dimmerValue = dimmerValue - 1\n\n if dimmerValue == -1:\n\t dimmerValue = 10\n\n for i in range(1,152):\n \t rgb = colorsys.hsv_to_rgb(hsv[0], hsv[1]*2, dimmerValue/10.0)\n ledDataWandOn += bytearray((int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255)))\n \n print(\"Dimmdone\")\n\n ledData = bytearray()\n\n sleepTime = 0.1\n\n if ambianceMode == 1:\n srgb = bytearray(152*3)\n \n try:\n number,addr = sock.recvfrom_into(srgb) # buffer size is \n ledDataServer = bytearray()\n for i in range(1, number-3, 3): \n ledDataServer += bytearray((srgb[i+2], srgb[i], srgb[i+1]))\n ledData = ledData + ledDataServer \n sleepTime = 0.01\n except socket.timeout:\n ledData = ledData + ledDataWandOff \n\n else:\n\n if ambianceLightStatus == 1:\n ledData = ledData + ledDataWandOn\n else:\n ledData = ledData + ledDataWandOff\n\n if frontLightStatus == 1:\n\tledData = ledData + ledDataLampenOn\n else:\n ledData = ledData + ledDataLampenOff \n \n hyperion.setColor(ledData)\n\t \n # wait 100ms\n time.sleep(sleepTime)\n\t \n\t\n\t \n # reset GPIO settings if user pressed Ctrl+C\n except KeyboardInterrupt:\n print(\"Execution stopped by user\")\n GPIO.cleanup()\n\nif __name__ == '__main__':\n main()\n\n\n\n\n" } ]
1
cul-it/arxiv-browse
https://github.com/cul-it/arxiv-browse
9b4ae0b97abcf4a007b2ce34a049243a031fd459
d82184d006c1e62bcb147b56d02dacaddc9e165d
2c16624aecb06c7a28733c3bd0f1f07e28a2f229
HEAD
2018-12-27T06:03:38.262308
2018-12-20T14:21:18
2018-12-20T14:21:18
99,135,282
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6118305921554565, "alphanum_fraction": 0.6134493350982666, "avg_line_length": 35.517242431640625, "blob_id": "4a85f8949828b7a0fadef252aa0f10611c9f94f8", "content_id": "3981feeb4fe2912dacecfa2f05ec2a7a6406e541", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14826, "license_type": "permissive", "max_line_length": 79, "num_lines": 406, "path": "/browse/controllers/abs_page/__init__.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"\nHandle requests to support the abs feature.\n\nThe primary entrypoint to this module is :func:`.get_abs_page`, which handles\nGET requests to the abs endpoint.\n\"\"\"\n\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\nfrom datetime import datetime\nfrom dateutil import parser\nfrom dateutil.tz import tzutc\n\nfrom flask import url_for\nfrom flask import request\nfrom werkzeug.exceptions import InternalServerError\n\nfrom arxiv import status, taxonomy\nfrom arxiv.base import logging\nfrom browse.domain.metadata import DocMetadata\nfrom browse.domain.category import Category\nfrom browse.exceptions import AbsNotFound\nfrom browse.services.search.search_authors import queries_for_authors, \\\n split_long_author_list\nfrom browse.services.util.metatags import meta_tag_metadata\nfrom browse.services.util.response_headers import abs_expires_header, \\\n mime_header_date\nfrom browse.services.document import metadata\nfrom browse.services.document.metadata import AbsException,\\\n AbsNotFoundException, AbsVersionNotFoundException, AbsDeletedException\nfrom browse.domain.identifier import Identifier, IdentifierException,\\\n IdentifierIsArchiveException\nfrom browse.services.database import count_trackback_pings,\\\n get_trackback_ping_latest_date, has_sciencewise_ping, \\\n get_dblp_listing_path, get_dblp_authors\nfrom browse.services.util.external_refs_cits import include_inspire_link,\\\n include_dblp_section, get_computed_dblp_listing_path, get_dblp_bibtex_path\nfrom browse.services.document.config.external_refs_cits import DBLP_BASE_URL,\\\n DBLP_BIBTEX_PATH, DBLP_AUTHOR_SEARCH_PATH\n\nlogger = logging.getLogger(__name__)\n\nResponse = Tuple[Dict[str, Any], int, Dict[str, Any]]\n\ntruncate_author_list_size = 100\n\n\ndef get_abs_page(arxiv_id: str) -> Response:\n \"\"\"\n Get abs page data from the document metadata service.\n\n Parameters\n ----------\n arxiv_id : str\n The arXiv identifier as provided in the request.\n download_format_pref: str\n Download format preference.\n\n Returns\n -------\n dict\n Search result response data.\n int\n HTTP status code.\n dict\n Headers to add to the response.\n\n Raises\n ------\n :class:`.InternalServerError`\n Raised when there was an unexpected problem executing the query.\n\n \"\"\"\n response_data: Dict[str, Any] = {}\n response_headers: Dict[str, Any] = {}\n try:\n arxiv_id = _check_legacy_id_params(arxiv_id)\n arxiv_identifier = Identifier(arxiv_id=arxiv_id)\n\n redirect = _check_supplied_identifier(arxiv_identifier)\n if redirect:\n return redirect\n\n abs_meta = metadata.get_abs(arxiv_id)\n response_data['requested_id'] = arxiv_identifier.idv \\\n if arxiv_identifier.has_version else arxiv_identifier.id\n response_data['abs_meta'] = abs_meta\n response_data['meta_tags'] = meta_tag_metadata(abs_meta)\n response_data['author_links'] = \\\n split_long_author_list(queries_for_authors(\n abs_meta.authors.raw), truncate_author_list_size)\n 
response_data['url_for_author_search'] = \\\n lambda author_query: url_for('search_archive',\n searchtype='author',\n archive=abs_meta.primary_archive.id,\n query=author_query)\n\n # Dissemination formats for download links\n download_format_pref = request.cookies.get('xxx-ps-defaults')\n add_sciencewise_ping = _check_sciencewise_ping(abs_meta.arxiv_id_v)\n response_data['formats'] = metadata.get_dissemination_formats(\n abs_meta,\n download_format_pref,\n add_sciencewise_ping)\n\n # Following are less critical and template must display without them\n # try:\n _non_critical_abs_data(abs_meta, arxiv_identifier, response_data)\n # except Exception:\n # logger.warning(\"Error getting non-critical abs page data\",\n # exc_info=app.debug)\n\n except AbsNotFoundException:\n if arxiv_identifier.is_old_id and arxiv_identifier.archive \\\n in taxonomy.ARCHIVES:\n archive_name = taxonomy.ARCHIVES[arxiv_identifier.archive]['name']\n raise AbsNotFound(data={'reason': 'old_id_not_found',\n 'arxiv_id': arxiv_id,\n 'archive_id': arxiv_identifier.archive,\n 'archive_name': archive_name})\n raise AbsNotFound(data={'reason': 'not_found', 'arxiv_id': arxiv_id})\n except AbsVersionNotFoundException:\n raise AbsNotFound(data={'reason': 'version_not_found',\n 'arxiv_id': arxiv_identifier.idv,\n 'arxiv_id_latest': arxiv_identifier.id})\n except AbsDeletedException as e:\n raise AbsNotFound(data={'reason': 'deleted',\n 'arxiv_id_latest': arxiv_identifier.id,\n 'message': e})\n except IdentifierIsArchiveException as e:\n raise AbsNotFound(data={'reason': 'is_archive',\n 'arxiv_id': arxiv_id,\n 'archive_name': e})\n except IdentifierException:\n raise AbsNotFound(data={'arxiv_id': arxiv_id})\n except AbsException as e:\n raise InternalServerError(\n 'There was a problem. If this problem persists, please contact '\n '[email protected].') from e\n\n response_status = status.HTTP_200_OK\n\n not_modified = _check_request_headers(\n abs_meta, response_data, response_headers)\n if not_modified:\n return {}, status.HTTP_304_NOT_MODIFIED, response_headers\n\n return response_data, response_status, response_headers\n\n\ndef _check_supplied_identifier(id: Identifier) -> Optional[Response]:\n \"\"\"\n Provide redirect URL if supplied ID does not match parsed ID.\n\n Parameters\n ----------\n arxiv_identifier : :class:`Identifier`\n\n Returns\n -------\n redirect_url: str\n A `browse.abstract` redirect URL that uses the canonical\n arXiv identifier.\n\n \"\"\"\n if not id or id.ids == id.id or id.ids == id.idv:\n return None\n\n arxiv_id = id.idv if id.has_version else id.id\n redirect_url: str = url_for('browse.abstract',\n arxiv_id=arxiv_id)\n return {},\\\n status.HTTP_301_MOVED_PERMANENTLY,\\\n {'Location': redirect_url}\n\n\ndef _non_critical_abs_data(abs_meta: DocMetadata,\n arxiv_identifier: Identifier,\n response_data: Dict)->None:\n \"\"\"Get additional non-essential data for the abs page.\"\"\"\n # The DBLP listing and trackback counts depend on the DB.\n response_data['dblp'] = _check_dblp(abs_meta)\n response_data['trackback_ping_count'] = count_trackback_pings(\n arxiv_identifier.id)\n if response_data['trackback_ping_count'] > 0:\n response_data['trackback_ping_latest'] = \\\n get_trackback_ping_latest_date(arxiv_identifier.id)\n\n # Include INSPIRE link in references & citations section\n response_data['include_inspire_link'] = include_inspire_link(\n abs_meta)\n\n # Ancillary files\n response_data['ancillary_files'] = \\\n metadata.get_ancillary_files(abs_meta)\n\n # Browse context\n 
_check_context(arxiv_identifier,\n abs_meta.primary_category,\n response_data)\n\n\ndef _check_request_headers(docmeta: DocMetadata,\n response_data: Dict[str, Any],\n headers: Dict[str, Any]) -> bool:\n \"\"\"Check the request headers, update the response headers accordingly.\"\"\"\n last_mod_dt: datetime = docmeta.modified\n\n # Latest trackback ping time depends on the database\n if 'trackback_ping_latest' in response_data \\\n and isinstance(response_data['trackback_ping_latest'], datetime) \\\n and response_data['trackback_ping_latest'] > last_mod_dt:\n # If there is a more recent trackback ping, use that datetime\n last_mod_dt = response_data['trackback_ping_latest']\n\n # Check for request headers If-Modified-Since and If-None-Match and compare\n # them to the last modified time to determine whether we will return a\n # \"not modified\" response\n mod_since_dt = _time_header_parse(headers, 'If-Modified-Since')\n none_match_dt = _time_header_parse(headers, 'If-None-Match')\n not_modified = _not_modified(last_mod_dt, mod_since_dt, none_match_dt)\n\n last_mod_mime = mime_header_date(last_mod_dt)\n headers['Last-Modified'] = last_mod_mime\n headers['ETag'] = last_mod_mime\n headers['Expires'] = abs_expires_header()[1]\n\n return not_modified\n\n\ndef _not_modified(last_mod_dt: datetime,\n mod_since_dt: Optional[datetime],\n none_match_dt: Optional[datetime])->bool:\n if mod_since_dt and none_match_dt:\n not_modified = (mod_since_dt >= last_mod_dt\n and none_match_dt >= last_mod_dt)\n elif mod_since_dt and not none_match_dt:\n not_modified = mod_since_dt >= last_mod_dt\n elif none_match_dt and not mod_since_dt:\n not_modified = none_match_dt >= last_mod_dt\n else:\n not_modified = False\n return not_modified\n\n\ndef _time_header_parse(headers: Dict[str, Any], header: str) \\\n -> Optional[datetime]:\n if (header in request.headers\n and request.headers[header] is not None):\n try:\n dt = parser.parse(request.headers.get(header))\n if not dt.tzinfo:\n dt = dt.replace(tzinfo=tzutc())\n return dt\n except (ValueError, TypeError):\n print(f'Exception parsing the If-None-Match request header')\n return None\n else:\n return None\n\n\ndef _check_legacy_id_params(arxiv_id: str) -> str:\n \"\"\"\n Check for legacy request parameters related to old arXiv identifiers.\n\n Parameters\n ----------\n arxiv_id : str\n\n Returns\n -------\n arxiv_id: str\n A possibly modified version of the input arxiv_id string.\n\n \"\"\"\n if request.args and '/' not in arxiv_id:\n # To support old references to /abs/<archive>?papernum=\\d{7}\n if 'papernum' in request.args:\n return f\"{arxiv_id}/{request.args['papernum']}\"\n\n for param in request.args:\n # singleton case, where the parameter is the value\n # To support old references to /abs/<archive>?\\d{7}\n if not request.args[param] \\\n and re.match(r'^\\d{7}$', param):\n return f'{arxiv_id}/{param}'\n return arxiv_id\n\n\ndef _check_context(arxiv_identifier: Identifier,\n primary_category: Optional[Category],\n response_data: Dict[str, Any]) -> None:\n \"\"\"\n Check context in request parameters and update response accordingly.\n\n Parameters\n ----------\n arxiv_identifier : :class:`Identifier`\n primary_category : :class: `Category`\n\n Returns\n -------\n Dict of values to add to response_data\n\n \"\"\"\n # Set up the context\n context = None\n if ('context' in request.args and (\n request.args['context'] == 'arxiv'\n or request.args['context'] in taxonomy.CATEGORIES\n or request.args['context'] in taxonomy.ARCHIVES)):\n context = 
request.args['context']\n elif primary_category:\n pc = primary_category.canonical or primary_category\n if not arxiv_identifier.is_old_id: # new style IDs\n context = pc.id\n else: # Old style id\n if pc.id in taxonomy.ARCHIVES:\n context = pc.id\n else:\n context = arxiv_identifier.archive\n else:\n context = None\n\n response_data['browse_context'] = context\n\n if arxiv_identifier.is_old_id or context == 'arxiv':\n next_id = metadata.get_next_id(arxiv_identifier)\n # TODO: might have to pass non-arxiv context to url_for becuase\n # of examples like physics/9707012\n if next_id:\n next_url = url_for('browse.abstract',\n arxiv_id=next_id.id,\n context='arxiv' if context == 'arxiv' else None)\n else:\n next_url = None\n\n previous_id = metadata.get_previous_id(arxiv_identifier)\n if previous_id:\n prev_url = url_for('browse.abstract',\n arxiv_id=previous_id.id,\n context='arxiv' if context == 'arxiv' else None)\n else:\n prev_url = None\n\n else:\n # This is the case where the context is not in 'arxiv' or an archive,\n # so just let the prevnext controller figure it out.\n\n # TODO do url_for() here\n next_url = '/prevnext?site=arxiv.org&id=' + \\\n arxiv_identifier.id + '&function=next'\n prev_url = '/prevnext?site=arxiv.org&id=' + \\\n arxiv_identifier.id + '&function=prev'\n if context:\n next_url = next_url + '&context=' + context\n prev_url = prev_url + '&context=' + context\n\n response_data['browse_context_previous_url'] = prev_url\n response_data['browse_context_next_url'] = next_url\n\n\ndef _check_sciencewise_ping(paper_id_v: str) -> bool:\n \"\"\"Check whether paper has a ScienceWISE ping.\"\"\"\n try:\n return has_sciencewise_ping(paper_id_v) # type: ignore\n except IOError:\n return False\n\n\ndef _check_dblp(docmeta: DocMetadata,\n db_override: bool = False) -> Optional[Dict]:\n \"\"\"Check whether paper has DBLP Bibliography entry.\"\"\"\n if not include_dblp_section(docmeta):\n return None\n identifier = docmeta.arxiv_identifier\n listing_path = None\n author_list: List[str] = []\n # fallback check in case DB service is not available\n if db_override:\n listing_path = get_computed_dblp_listing_path(docmeta)\n else:\n try:\n if identifier.id is None:\n return None\n listing_path = get_dblp_listing_path(identifier.id)\n if not listing_path:\n return None\n author_list = get_dblp_authors(identifier.id)\n except IOError:\n # log this\n return None\n if listing_path is not None:\n bibtex_path = get_dblp_bibtex_path(listing_path)\n else:\n return None\n return {\n 'base_url': DBLP_BASE_URL,\n 'author_search_url':\n urljoin(DBLP_BASE_URL, DBLP_AUTHOR_SEARCH_PATH),\n 'bibtex_base_url': urljoin(DBLP_BASE_URL, DBLP_BIBTEX_PATH),\n 'bibtex_path': bibtex_path,\n 'listing_url': urljoin(DBLP_BASE_URL, listing_path),\n 'author_list': author_list\n }\n" }, { "alpha_fraction": 0.5754921436309814, "alphanum_fraction": 0.5960808992385864, "avg_line_length": 32.65957260131836, "blob_id": "7570a7a96b0c58a0f678c811379b54349fd98035", "content_id": "1728fcf1ed53df38a8bfbc11f6bcf5459f2535b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11074, "license_type": "permissive", "max_line_length": 99, "num_lines": 329, "path": "/browse/util/id_patterns.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "r\"\"\"Patterns and functions to detect arXiv ids and Urls in text.\n\nFunctions to detech arXiv ids, URLs and DOI in text.\nFunctions to transform them to <a> tags.\n\nThese were originally jinja filters but 
became a little too big\nfor that so they were split out and made more general so they didn't\nrely on the Flask context.\n\nThese all expect input of Markup or non-markup text and return\nMarkup objects. This is because the <a> tags that get added need to avoid\ndouble escaping.\n\nThere are several classes of patterns we want to match but there is\nsome overlap in these patterns. To avoid looking for and parsing HTML in each\njinja filter, detecting these patterns is combined.\n\nSo far we are looking for:\nDOIs DOI: 10.1145/0001234.1234567\narXiv IDs: 1234.12345 1234.12345v1 hep-ph1307.1843\nHTTP URLs: http://something.org/myPaper/1234.12345\nFTP URLs: ftp://example.com/files/1234.12345\n\nJust matching for arXiv ids with \\d{4}\\.\\d{4,5} will match several of\nthese. To deal with this we are prioritizing the matches and\ninterrupting once one is found.\n\nWe should probably match DOIs first because they are the source of a\nlot of false positives for arxiv matches.\n\"\"\"\nfrom typing import Optional, List, Pattern, Match, Tuple, Callable\nimport re\nfrom dataclasses import dataclass\n\nfrom urllib.parse import quote\nfrom jinja2 import Markup, escape\n\nfrom arxiv import taxonomy\n\n\n@dataclass\nclass Matchable:\n    \"\"\"Class for patterns.\"\"\"\n\n    examples: List[str]\n    pattern: Pattern\n\n\ndef _identity(x: str)->str:\n    \"\"\"Identity function for default in some places.\"\"\"\n    return x\n\n\ndoi_patterns = [\n    Matchable(['10.1145/0001234.1234567'],\n              re.compile(r'(?P<doi>10.\\d{4,9}/[-._;()/:A-Z0-9]+)', re.I))\n]\n\"\"\"List of Matchable for DOIs in text.\n\nWe should probably match DOIs first because they are the source of a\nlot of false positives for arxiv matches.\n\nOnly using the most general expression from\nhttps://www.crossref.org/blog/dois-and-matching-regular-expressions/\n\"\"\"\n\n_archive = '|'.join([re.escape(key) for key in taxonomy.ARCHIVES.keys()])\n\"\"\"string for use in Regex for all arXiv archives\"\"\"\n\n_category = '|'.join([re.escape(key) for key in taxonomy.CATEGORIES.keys()])\n\n_arxiv_id_prefix = r'(?P<arxiv_prefix>ar[xX]iv:)?'\n\"\"\"Attempt to catch the arxiv prefix in front of arxiv ids so it can be\nincluded in the <a> tag anchor. 
ARXIVNG-1284\"\"\"\n\nbasic_arxiv_id_patterns = [\n Matchable(['math/0501233', 'hep-ph/0611734', 'gr-qc/0112123'],\n re.compile(_arxiv_id_prefix + r'(?P<arxiv_id>(%s)\\/\\d{2}[01]\\d{4}(v\\d*)?)'\n % _archive, re.I)),\n Matchable(['1609.05068', '1207.1234v1', '1207.1234', '1807.12345',\n '1807.12345v1', '1807.12345v12'],\n re.compile(r'(?<![\\d=])' + _arxiv_id_prefix + r'(?P<arxiv_id>\\d{4}\\.\\d{4,5}(v\\d*)?)',\n re.I)),\n Matchable(['math.GR/0601136v3', 'math.GR/0601136'],\n re.compile(_arxiv_id_prefix + r'(?P<arxiv_id>(%s)\\/\\d{2}[01]\\d{4}(v\\d*)?)'\n % _category, re.I))\n]\n\nOKCHARS = r'([a-z0-9,_.\\-+~:]|%[a-f0-9]*)'\n\"\"\"Chacters that are acceptable during PATH, QUERY and ANCHOR parts\"\"\"\n\nHOST_NAME = r'(?:[a-z0-9][a-z0-9\\-.:]+[a-z0-9])'\n\"\"\"Regex used to match host names in arXiv urlize.\n\nThis is not a perfect regex for a host name, It accepts only a sub-set\nof hostnames to meet the needs of arxiv.\n\nHOST_NAME must end with a simplified character to avoid capturing a\nperiod.\n\"\"\"\n\nPATH = rf'(?P<PATH>(/{OKCHARS}*)+)?'\n\"\"\"Regex for path part of URLs for use in urlize\"\"\"\n\nQUERY = rf'(?P<QUERY>\\?(&?({OKCHARS}*(={OKCHARS}*)?))*)?'\n\"\"\"Regex for query part of URLs for use in urlize\"\"\"\n\nANCHOR = rf'(?P<ANCHOR>#({OKCHARS}|/)*)?'\n\"\"\"Regex for anchor part of URLs for use in urlize\"\"\"\n\nURLINTEXT_PAT = re.compile(r'(?P<url>(?:https?://)'\n f'{HOST_NAME}{PATH}{QUERY}{ANCHOR})',\n re.I)\n\"\"\"Regex to match URLs in text.\"\"\"\n\nFTP_PAT = re.compile(rf'(?P<url>(?:ftp://)({OKCHARS}|(@))*{PATH})', re.I)\n\"\"\"Regex to match FTP URLs in text.\"\"\"\n\nbasic_url_patterns = [\n Matchable(['http://something.com/bla'], URLINTEXT_PAT),\n Matchable(['ftp://something.com/bla'], FTP_PAT)\n]\n\"\"\"List of Matchable to use when finding URLs in text\"\"\"\n\nbad_arxiv_id_patterns = [\n re.compile('vixra', re.I), # don't need to link to vixra\n]\n\"\"\"List of Regex patterns that will cause matching to be skipped for\nthe token.\"\"\"\n\ndois_ids_and_urls = basic_url_patterns + doi_patterns + basic_arxiv_id_patterns\n\"\"\"List of Matchable to use when finding DOIs, arXiv IDs, and URLs.\n\nURLs are first because some URLs contain DOIs or arXiv IDS.\n\nDOI are before arXiv ids because many DOIs are falsely matched by the\narxiv_id patterns.\n\"\"\"\n\n\n_bad_endings = ['.', ',', ':', ';', '&', '(', '[', '{']\n\"\"\"These should not appear at the end of URLs because they are likely\npart of the surrounding text\"\"\"\n\n\ndef _find_match(patterns: List[Matchable], token: str) \\\n -> Optional[Tuple[Match, Matchable]]:\n \"\"\"Find first in patterns that is found in txt.\"\"\"\n for chgMtch in patterns:\n if chgMtch.pattern.flags:\n fnd = re.search(chgMtch.pattern, token)\n else:\n fnd = re.search(chgMtch.pattern, token, re.I)\n if fnd is not None:\n return (fnd, chgMtch)\n return None\n\n\ndef _transform_token(patterns: List[Matchable],\n bad_patterns: List[Pattern],\n id_to_url: Callable[[str], str],\n doi_to_url: Callable[[str], str],\n url_to_url: Callable[[str], str],\n token: str) -> str:\n \"\"\"\n Transform a token from text to one of the Matchables.\n\n This only transforms against the first of Matchable matched.\n Matching on this token will be skipped if any of the bad_patterns\n match the token (that is re.search).\n \"\"\"\n id_to_url = id_to_url or (lambda x: x)\n doi_to_url = doi_to_url or (lambda x: x)\n url_to_url = url_to_url or (lambda x: x)\n\n for pattern in bad_patterns:\n if re.search(pattern, token):\n return token\n\n mtch = 
_find_match(patterns, token)\n if mtch is None:\n return token\n\n (match, _) = mtch\n keys = match.groupdict().keys()\n if 'arxiv_id' in keys:\n (front, back) = _arxiv_id_sub(match, id_to_url)\n elif 'doi' in keys:\n (front, back) = _doi_sub(match, doi_to_url)\n elif 'url' in keys:\n (front, back) = _url_sub(match, url_to_url)\n else:\n # unclear how to substitute in for this match\n return token\n\n if back:\n t_back = _transform_token(patterns, bad_patterns,\n id_to_url, doi_to_url, url_to_url, back)\n return front + Markup(t_back)\n else:\n return front\n\n\ndef _arxiv_id_sub(match: Match, id_to_url: Callable[[str], str]) \\\n -> Tuple[Markup, str]:\n \"\"\"Return match.string transformed for a arxiv id match.\"\"\"\n aid = match.group('arxiv_id')\n prefix = 'arXiv:' if match.group('arxiv_prefix') else ''\n \n if aid[-1] in _bad_endings:\n arxiv_url = id_to_url(aid)[:-1]\n anchor = aid[:-1]\n back = aid[-1] + match.string[match.end():]\n else:\n arxiv_url = id_to_url(aid)\n anchor = prefix + aid\n back = match.string[match.end():]\n\n front = match.string[0:match.start()]\n return (Markup(f'{front}<a href=\"{arxiv_url}\">{anchor}</a>'), back)\n\n\ndef _doi_sub(match: Match, doi_to_url: Callable[[str], str]) \\\n ->Tuple[Markup, str]:\n \"\"\"Return match.string transformed for a DOI match.\"\"\"\n doi = match.group('doi')\n if(doi[-1] in _bad_endings):\n back = match.string[match.end():] + doi[-1]\n doi = doi[:-1]\n else:\n back = match.string[match.end():]\n\n quoted_doi = quote(doi, safe='/')\n doi_url = f'https://dx.doi.org/{quoted_doi}'\n doi_url = doi_to_url(doi_url)\n\n anchor = escape(doi)\n front = match.string[0:match.start()]\n return (Markup(f'{front}<a href=\"{doi_url}\">{anchor}</a>'), back)\n\n\ndef _url_sub(match: Match, url_to_url: Callable[[str], str]) \\\n ->Tuple[Markup, str]:\n \"\"\"Return match.string transformed for a URL match.\"\"\"\n url = match.group('url')\n if url.startswith('https'):\n anchor = 'this https URL'\n elif url.startswith('http'):\n anchor = 'this http URL'\n elif url.startswith('ftp'):\n anchor = 'this ftp URL'\n else:\n anchor = 'this URL'\n\n front = match.string[0:match.start()]\n if url[-1] in _bad_endings:\n back = url[-1] + match.string[match.end():]\n url = url[:-1]\n else:\n back = match.string[match.end():]\n\n url = url_to_url(url)\n return (Markup(f'{front}<a href=\"{url}\">{anchor}</a>'), back)\n\n\n_word_split_re = re.compile(r'(\\s+)')\n\"\"\"Regex to split to tokens during _to_tags.\n\nCapturing group causes the splitting spaces to be included\nin the returned list.\n\"\"\"\n\n\ndef _to_tags(patterns: List[Matchable],\n bad_patterns: List[Pattern],\n id_to_url: Callable[[str], str],\n doi_to_url: Callable[[str], str],\n url_to_url: Callable[[str], str],\n text: str)-> str:\n \"\"\"Split text to tokens, do _transform_token for each, return results.\"\"\"\n def transform_token(tkn: str)-> str:\n return _transform_token(patterns, bad_patterns,\n id_to_url, doi_to_url, url_to_url,\n tkn)\n\n if not hasattr(text, '__html__'):\n text = Markup(escape(text))\n\n words = _word_split_re.split(text)\n for i, token in enumerate(words):\n token_2 = transform_token(token)\n if token_2 != token:\n words[i] = token_2\n result = u''.join(words)\n return Markup(result)\n\n\ndef do_id_to_tags(id_to_url: Callable[[str], str],\n text: str)-> str:\n \"\"\"Transform arxiv ids in text to <a> tags.\"\"\"\n return _to_tags(basic_arxiv_id_patterns,\n bad_arxiv_id_patterns,\n id_to_url, _identity, _identity,\n text)\n\n\ndef 
do_dois_id_urls_to_tags(id_to_url: Callable[[str], str],\n doi_to_url: Callable[[str], str],\n text: str)-> str:\n \"\"\"Transform DOIs, arxiv ids and URLs in text to <a> tags.\"\"\"\n return _to_tags(dois_ids_and_urls,\n bad_arxiv_id_patterns,\n id_to_url, doi_to_url, _identity,\n text)\n\n\ndef do_dois_to_tags(doi_to_url: Callable[[str], str], text: str)->str:\n \"\"\"Transform DOIs in text to <a> tags.\"\"\"\n return _to_tags(doi_patterns, [], _identity, doi_to_url, _identity, text)\n\n\ndef do_dois_arxiv_ids_to_tags(id_to_url: Callable[[str], str],\n doi_to_url: Callable[[str], str],\n text: str)->str:\n \"\"\"Transform DOIs and arXiv IDs to <a> tags.\"\"\"\n return _to_tags(doi_patterns + basic_arxiv_id_patterns,\n bad_arxiv_id_patterns,\n id_to_url, doi_to_url, _identity,\n text)\n" }, { "alpha_fraction": 0.5476190447807312, "alphanum_fraction": 0.575630247592926, "avg_line_length": 15.60465145111084, "blob_id": "e82b9d4ad306f1aa878210bcd03a42c174eb4ae2", "content_id": "fe643e73c9b7b782de59ccf9ea5d907bbeb786aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 714, "license_type": "permissive", "max_line_length": 38, "num_lines": 43, "path": "/Pipfile", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\narxiv-base = \">=0.9.1\"\ndataclasses = \"*\"\npython-dateutil = \"*\"\nflask = \"==0.12.*\"\nFlask-API = \"*\"\nFlask-SQLAlchemy = \"*\"\n\"jinja2\" = \"==2.10\"\nMarkupSafe = \"*\"\nSQLAlchemy = \"*\"\npytz = \"*\"\nmysqlclient = \"*\"\n\"mmh3\" = \"*\"\naiohttp = \"*\"\n\n[dev-packages]\npylama = \"*\"\nmypy = \">=0.630\"\nmypy-extensions = \"*\"\n\"nose2\" = \"*\"\nsqlacodegen = \"*\"\nFlask-Testing = \"*\"\npycodestyle = \"*\"\npydocstyle = \"*\"\npylint = \"*\"\npytest = \"*\"\npytest-html = \"*\"\n\"ansi2html\" = \"*\"\nhypothesis = \"*\"\n\"beautifulsoup4\" = \"*\"\npyhamcrest = \"*\"\npytest-easyread = \"*\"\nweighted-levenshtein = \"*\"\ncoverage = \"*\"\ncoveralls = \"*\"\n\n[requires]\npython_version = \"3.6\"\n" }, { "alpha_fraction": 0.7503604292869568, "alphanum_fraction": 0.754352867603302, "avg_line_length": 35.212852478027344, "blob_id": "4724075c7a498ac003872ad016b164dd6e5b456a", "content_id": "d133d1caf3aa4f35759905e5968d198d08d700e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9019, "license_type": "permissive", "max_line_length": 81, "num_lines": 249, "path": "/browse/config.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"\nFlask configuration.\n\nDocstrings are from the `Flask configuration documentation\n<http://flask.pocoo.org/docs/0.12/config/>`_.\n\"\"\"\nimport os\n\nVERSION = '0.1.1'\n\"\"\"The application version \"\"\"\n\nON = 'yes'\nOFF = 'no'\n\nDEBUG = os.environ.get('DEBUG') == ON\n\"\"\"enable/disable debug mode\"\"\"\n\nTESTING = os.environ.get('TESTING') == ON\n\"\"\"enable/disable testing mode\"\"\"\n\nPROPAGATE_EXCEPTIONS = \\\n True if os.environ.get('PROPAGATE_EXCEPTIONS') == ON else None\n\"\"\"\nexplicitly enable or disable the propagation of exceptions. 
If not set or\nexplicitly set to None this is implicitly true if either TESTING or DEBUG is\ntrue.\n\"\"\"\n\nPRESERVE_CONTEXT_ON_EXCEPTION = \\\n True if os.environ.get('PRESERVE_CONTEXT_ON_EXCEPTION') == ON else None\n\"\"\"\nBy default if the application is in debug mode the request context is not\npopped on exceptions to enable debuggers to introspect the data. This can be\ndisabled by this key. You can also use this setting to force-enable it for non\ndebug execution which might be useful to debug production applications (but\nalso very risky).\n\"\"\"\n\nSECRET_KEY = os.environ.get('SECRET_KEY', 'qwert2345')\n\"\"\"\nthe secret key\n\"\"\"\n\nSESSION_COOKIE_NAME = os.environ.get('SESSION_COOKIE_NAME', 'arxiv_browse')\n\"\"\"\nthe name of the session cookie\n\"\"\"\n\nSESSION_COOKIE_DOMAIN = os.environ.get('SESSION_COOKIE_DOMAIN', None)\n\"\"\"\nthe domain for the session cookie. If this is not set, the cookie will be valid\nfor all subdomains of SERVER_NAME.\n\"\"\"\n\nSESSION_COOKIE_PATH = os.environ.get('SESSION_COOKIE_PATH', None)\n\"\"\"\nthe path for the session cookie. If this is not set the cookie will be valid\nfor all of APPLICATION_ROOT or if that is not set for '/'.\n\"\"\"\n\nSESSION_COOKIE_HTTPONLY = os.environ.get('SESSION_COOKIE_HTTPONLY') != OFF\n\"\"\"\ncontrols if the cookie should be set with the httponly flag. Defaults to True.\n\"\"\"\n\nSESSION_COOKIE_SECURE = os.environ.get('SESSION_COOKIE_SECURE') == ON\n\"\"\"\ncontrols if the cookie should be set with the secure flag. Defaults to False.\n\"\"\"\n\nPERMANENT_SESSION_LIFETIME = \\\n int(os.environ.get('PERMANENT_SESSION_LIFETIME', '3600'))\n\"\"\"\nthe lifetime of a permanent session as datetime.timedelta object. Starting with\nFlask 0.8 this can also be an integer representing seconds.\n\"\"\"\n\nSESSION_REFRESH_EACH_REQUEST = \\\n os.environ.get('SESSION_REFRESH_EACH_REQUEST') != OFF\n\"\"\"\nthis flag controls how permanent sessions are refreshed. If set to True (which\nis the default) then the cookie is refreshed each request which automatically\nbumps the lifetime. If set to False a set-cookie header is only sent if the\nsession is modified. Non permanent sessions are not affected by this.\n\"\"\"\n\nUSE_X_SENDFILE = os.environ.get('USE_X_SENDFILE') == ON\n\"\"\"\nenable/disable x-sendfile\n\"\"\"\n\nLOGGER_NAME = os.environ.get('LOGGER_NAME', 'browse')\n\"\"\"\nthe name of the logger\n\"\"\"\n\nLOGGER_HANDLER_POLICY = os.environ.get('LOGGER_HANDLER_POLICY', 'always')\n\"\"\"\nthe policy of the default logging handler. The default is 'always' which means\nthat the default logging handler is always active. 'debug' will only activate\nlogging in debug mode, 'production' will only log in production and 'never'\ndisables it entirely.\n\"\"\"\n\nSERVER_NAME = os.environ.get('BROWSE_SERVER_NAME', None)\n\"\"\"\nthe name and port number of the server. Required for subdomain support (e.g.:\n'myapp.dev:5000') Note that localhost does not support subdomains so setting\nthis to \"localhost\" does not help. Setting a SERVER_NAME also by default\nenables URL generation without a request context but with an application\ncontext.\n\"\"\"\n\nAPPLICATION_ROOT = os.environ.get('APPLICATION_ROOT', None)\n\"\"\"\nIf the application does not occupy a whole domain or subdomain this can be set\nto the path where the application is configured to live. This is for session\ncookie as path value. 
If domains are used, this should be None.\n\"\"\"\n\nMAX_CONTENT_LENGTH = os.environ.get('MAX_CONTENT_LENGTH', None)\n\"\"\"\nIf set to a value in bytes, Flask will reject incoming requests with a content\nlength greater than this by returning a 413 status code.\n\"\"\"\n\nSEND_FILE_MAX_AGE_DEFAULT = int(\n os.environ.get('SEND_FILE_MAX_AGE_DEFAULT', 43200))\n\"\"\"\nDefault cache control max age to use with send_static_file() (the default\nstatic file handler) and send_file(), as datetime.timedelta or as seconds.\nOverride this value on a per-file basis using the get_send_file_max_age() hook\non Flask or Blueprint, respectively. Defaults to 43200 (12 hours).\n\"\"\"\n\nTRAP_HTTP_EXCEPTIONS = os.environ.get('TRAP_HTTP_EXCEPTIONS') == ON\n\"\"\"\nIf this is set to True Flask will not execute the error handlers of HTTP\nexceptions but instead treat the exception like any other and bubble it through\nthe exception stack. This is helpful for hairy debugging situations where you\nhave to find out where an HTTP exception is coming from.\n\"\"\"\n\nTRAP_BAD_REQUEST_ERRORS = os.environ.get('TRAP_BAD_REQUEST_ERRORS') == ON\n\"\"\"\nWerkzeug's internal data structures that deal with request specific data will\nraise special key errors that are also bad request exceptions. Likewise many\noperations can implicitly fail with a BadRequest exception for consistency.\nSince it’s nice for debugging to know why exactly it failed this flag can be\nused to debug those situations. If this config is set to True you will get a\nregular traceback instead.\n\"\"\"\n\nPREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http')\n\"\"\"\nThe URL scheme that should be used for URL generation if no URL scheme is\navailable. This defaults to http.\n\"\"\"\n\nJSON_AS_ASCII = os.environ.get('JSON_AS_ASCII') == ON\n\"\"\"\nBy default Flask serialize object to ascii-encoded JSON. If this is set to\nFalse Flask will not encode to ASCII and output strings as-is and return\nunicode strings. jsonify will automatically encode it in utf-8 then for\ntransport for instance.\n\"\"\"\n\nJSON_SORT_KEYS = os.environ.get('JSON_AS_ASCII') != OFF\n\"\"\"\nBy default Flask will serialize JSON objects in a way that the keys are\nordered. This is done in order to ensure that independent of the hash seed of\nthe dictionary the return value will be consistent to not trash external HTTP\ncaches. You can override the default behavior by changing this variable.\nThis is not recommended but might give you a performance improvement on the\ncost of cacheability.\n\"\"\"\n\nJSONIFY_PRETTYPRINT_REGULAR = os.environ.get('JSON_AS_ASCII') != OFF\n\"\"\"\nIf this is set to True (the default) jsonify responses will be pretty printed\nif they are not requested by an XMLHttpRequest object (controlled by the\nX-Requested-With header).\n\"\"\"\n\nJSONIFY_MIMETYPE = os.environ.get('JSONIFY_MIMETYPE', 'application/json')\n\"\"\"\nMIME type used for jsonify responses.\n\"\"\"\n\nTEMPLATES_AUTO_RELOAD = os.environ.get('TEMPLATES_AUTO_RELOAD') == ON\n\"\"\"\nWhether to check for modifications of the template source and reload it\nautomatically. By default the value is None which means that Flask checks\noriginal file only in debug mode.\n\"\"\"\n\nEXPLAIN_TEMPLATE_LOADING = os.environ.get('EXPLAIN_TEMPLATE_LOADING') == OFF\n\"\"\"\nIf this is enabled then every attempt to load a template will write an info\nmessage to the logger explaining the attempts to locate the template. 
This can\nbe useful to figure out why templates cannot be found or wrong templates appear\nto be loaded.\n\"\"\"\n\n# SQLAlchemy configuration\n# For mysql: 'mysql://user:pass@localhost/dbname'\nSQLALCHEMY_DATABASE_URI = os.environ.get(\n 'BROWSE_SQLALCHEMY_DATABASE_URI', 'sqlite:///../tests/data/browse.db')\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nSQLALCHEMY_ECHO = False\nSQLALCHEMY_RECORD_QUERIES = False\n# SQLALCHEMY_POOL_SIZE and SQLALCHEMY_MAX_OVERFLOW are set without defaults\n# because they will not work with sqlite\n# SQLALCHEMY_POOL_SIZE = int(os.environ.get('BROWSE_SQLALCHEMY_POOL_SIZE'))\n# SQLALCHEMY_MAX_OVERFLOW = int(os.environ.get('BROWSE_SQLALCHEMY_MAX_OVERFLOW'))\n\n# Disable DB queries even if other SQLAlchemy config are defined\n# This, for example, could be used in conjunction with the `no-write` runlevel\n# in the legacy infrastructure, which is a case where we know the DB is\n# unavailable and thus intentionally bypass any DB access.\nBROWSE_DISABLE_DATABASE = os.environ.get('BROWSE_DISABLE_DATABASE', False)\n\n# Enable/disable Piwik (Matomo) web analytics\nBROWSE_PIWIK_ENABLED = os.environ.get('BROWSE_PIWIK_ENABLED', False)\n\n# Enable/disable user banner\nBROWSE_USER_BANNER_ENABLED = os.environ.get(\n 'BROWSE_USER_BANNER_ENABLED', False)\n\n# Paths to .abs and source files\nDOCUMENT_LATEST_VERSIONS_PATH = os.environ.get(\n 'DOCUMENT_LATEST_VERSIONS_PATH', 'tests/data/abs_files/ftp')\n\nDOCUMENT_ORIGNAL_VERSIONS_PATH = os.environ.get(\n 'DOCUMENT_ORIGNAL_VERSIONS_PATH', 'tests/data/abs_files/orig')\n\n# Path to cache directory\nDOCUMENT_CACHE_PATH = os.environ.get(\n 'DOCUMENT_CACHE_PATH', 'tests/data/cache'\n)\n\n# Used in linking to /show-email\nSHOW_EMAIL_SECRET = os.environ.get('SHOW_EMAIL_SECRET', 'foo')\n\n# Used in linking to /ct\nCLICKTHROUGH_SECRET = os.environ.get('CLICKTHROUGH_SECRET', 'bar')\n\n# arXiv Labs options\nLABS_BIBEXPLORER_ENABLED = os.environ.get('LABS_BIBEXPLORER_ENABLED', True)\n" }, { "alpha_fraction": 0.6623188257217407, "alphanum_fraction": 0.6623188257217407, "avg_line_length": 34.844154357910156, "blob_id": "607ee20401c0c9343a5ce23d332c74fa44574a04", "content_id": "189f64d3789ed84aafb49934b63dc4f2b13d6af4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2760, "license_type": "permissive", "max_line_length": 78, "num_lines": 77, "path": "/browse/factory.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Application factory for browse service components.\"\"\"\nfrom functools import partial\nfrom typing import Any\nfrom flask import Flask, url_for\nfrom browse.domain.identifier import canonical_url\nfrom browse.util.clickthrough import create_ct_url\nfrom browse.util.id_patterns import do_dois_id_urls_to_tags, do_id_to_tags, \\\n do_dois_arxiv_ids_to_tags\nfrom browse.routes import ui\nfrom browse.services.database import models\nfrom browse.services.util.email import generate_show_email_hash\nfrom browse.filters import line_feed_to_br, tex_to_utf, entity_to_utf, \\\n single_doi_url\n\nfrom arxiv.base.config import BASE_SERVER\nfrom arxiv.base import Base\n\n\ndef create_web_app() -> Flask:\n \"\"\"Initialize an instance of the browse web application.\"\"\"\n app = Flask('browse', static_folder='static', template_folder='templates')\n app.config.from_pyfile('config.py')\n\n # TODO Only needed until this route is added to arxiv-base\n if 'URLS' not in app.config:\n app.config['URLS'] = []\n app.config['URLS'].append(\n ('search_archive', 
'/search/<archive>', BASE_SERVER))\n\n models.init_app(app)\n\n Base(app)\n app.register_blueprint(ui.blueprint)\n\n ct_url_for = partial(create_ct_url, app.config.get(\n 'CLICKTHROUGH_SECRET'), url_for)\n\n if not app.jinja_env.globals:\n app.jinja_env.globals = {}\n\n app.jinja_env.globals['canonical_url'] = canonical_url\n\n def ct_single_doi_filter(doi: str)->str:\n return single_doi_url(ct_url_for, doi)\n\n def _id_to_url(id: str)->Any:\n return url_for('browse.abstract', arxiv_id=id)\n\n def contextualized_id_filter(text: str)->str:\n return do_id_to_tags(_id_to_url, text)\n\n def contextualized_doi_id_url_filter(text: str)->str:\n return do_dois_id_urls_to_tags(_id_to_url, ct_url_for, text)\n\n def ct_doi_filter(text: str)->str:\n return do_dois_arxiv_ids_to_tags(_id_to_url,\n ct_url_for,\n text)\n\n if not app.jinja_env.filters:\n app.jinja_env.filters = {}\n\n app.jinja_env.filters['line_feed_to_br'] = line_feed_to_br\n app.jinja_env.filters['tex_to_utf'] = tex_to_utf\n app.jinja_env.filters['entity_to_utf'] = entity_to_utf\n\n app.jinja_env.filters['clickthrough_url_for'] = ct_url_for\n app.jinja_env.filters['show_email_hash'] = \\\n partial(generate_show_email_hash,\n secret=app.config.get('SHOW_EMAIL_SECRET'))\n\n app.jinja_env.filters['single_doi_url'] = ct_single_doi_filter\n app.jinja_env.filters['arxiv_id_urls'] = contextualized_id_filter\n app.jinja_env.filters['arxiv_urlize'] = contextualized_doi_id_url_filter\n app.jinja_env.filters['arxiv_id_doi_filter'] = ct_doi_filter\n\n return app\n" }, { "alpha_fraction": 0.7462919354438782, "alphanum_fraction": 0.756440281867981, "avg_line_length": 25.14285659790039, "blob_id": "6c50c922189acbc177c565d27216805e203cb0b6", "content_id": "5255fe66fb5eae352a9effd34b0c0341e4cf1b53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2562, "license_type": "permissive", "max_line_length": 153, "num_lines": 98, "path": "/README.md", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "# arxiv-browse\n\n### Flask development server\n\nYou can run the browse app directly. Using pipenv:\n\n```bash\npipenv install\nFLASK_APP=app.py FLASK_DEBUG=1 pipenv run flask run\n```\n\n\nThis will monitor for any changes to the Python code and restart the server.\nUnfortunately static files and templates are not monitored, so you'll have to\nmanually restart to see those changes take effect.\n\nIf all goes well, http://127.0.0.1:5000/abs/0906.5132 should render the basic\nabs page.\n\nBy default, the application will use the directory trees in\n`tests/data/abs_files` and `tests/data/cache` and when looking for the document metadata and cache files, respectively. 
These paths can be overridden via\nenvironment variables (see `browse/config.py`).\n\n### Rebuilding the test database\n\nThe default app configuration uses a test SQLite database in\n``tests/data/browse.db``; it has been pre-populated with a small set of test\ndata.\n\nTo rebuild the test database, run the following script:\n\n```bash\nFLASK_APP=app.py pipenv run python populate_test_database.py --drop_and_create\n```\n\n### Configuration Parameters\n\nConfiguration parameters (and defaults) are defined in `browse/config.py`.\nAny of these can be overridden on the command line when testing the application.\n\nBelow are some examples of some application-specific parameters:\n\nDatabase URI:\n* `SQLALCHEMY_DATABASE_URI``\n\nPaths to .abs and source files:\n* `DOCUMENT_LATEST_VERSIONS_PATH`\n* `DOCUMENT_ORIGNAL_VERSIONS_PATH`\n\nPath to cache directory:\n* `DOCUMENT_CACHE_PATH`\n\narXiv Labs options:\n* `LABS_BIBEXPLORER_ENABLED`\n\n### Test suite\n\nBefore running the test suite, install the dev packages:\n\n```bash\npipenv install --dev\n```\n\nRun the main test suite with the following command:\n\n```bash\npipenv run nose2 --with-coverage\n```\n\n### Static checking\nGoal: zero errors/warnings.\n\nUse `# type: ignore` to disable mypy messages that do not reveal actual\nprogramming errors, and that are impractical to fix. If ignoring without\nverifying, insert a `# TODO: recheck`.\n\nIf there is an active `mypy` GitHub issue (i.e. it's a bug/limitation in mypy)\nrelevant to missed check, link that for later follow-up.\n\n```bash\npipenv run mypy -p browse | grep -v \"test.*\" | grep -v \"defined here\"\n```\n\nNote that we filter out messages about test modules, and messages about a known\nlimitation of mypy related to ``dataclasses`` support.\n\n### Documentation style\nGoal: zero errors/warnings.\n\n```bash\npipenv run pydocstyle --convention=numpy --add-ignore=D401 browse\n```\n\n### Linting\nGoal: 9/10 or better.\n\n```bash\npipenv run pylint browse\n" }, { "alpha_fraction": 0.6144164800643921, "alphanum_fraction": 0.6279704570770264, "avg_line_length": 37.12751770019531, "blob_id": "3f9bb26ab2d8b39c5d28dc85a2da2b1bf6c69939", "content_id": "89acb9d42f4df7cec81266927a2e2bbd6dc8fc2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11362, "license_type": "permissive", "max_line_length": 79, "num_lines": 298, "path": "/browse/services/database/models.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"arXiv browse database models.\"\"\"\n\nfrom typing import Optional\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import BigInteger, Column, DateTime, Enum, ForeignKey, Index, \\\n Integer, SmallInteger, String, text, Text\nfrom sqlalchemy.orm import relationship\nfrom werkzeug.local import LocalProxy\n\ndb: SQLAlchemy = SQLAlchemy()\n\n\nclass Document(db.Model):\n \"\"\"Model for documents stored as part of the arXiv repository.\"\"\"\n\n __tablename__ = 'arXiv_documents'\n\n document_id = Column(Integer, primary_key=True)\n paper_id = Column(String(20), nullable=False,\n unique=True, server_default=text(\"''\"))\n title = Column(String(255), nullable=False,\n index=True, server_default=text(\"''\"))\n authors = Column(Text)\n submitter_email = Column(String(64), nullable=False,\n index=True, server_default=text(\"''\"))\n submitter_id = Column(ForeignKey('tapir_users.user_id'), index=True)\n dated = Column(Integer, nullable=False, index=True,\n server_default=text(\"'0'\"))\n 
primary_subject_class = Column(String(16))\n created = Column(DateTime)\n submitter = relationship('User')\n\n\nclass License(db.Model):\n \"\"\"Model for arXiv licenses.\"\"\"\n\n __tablename__ = 'arXiv_licenses'\n\n name = Column(String(255), primary_key=True)\n label = Column(String(255))\n active = Column(Integer, server_default=text(\"'1'\"))\n note = Column(String(255))\n sequence = Column(Integer)\n\n\nclass Metadata(db.Model):\n \"\"\"Model for arXiv document metadata.\"\"\"\n\n __tablename__ = 'arXiv_metadata'\n __table_args__ = (\n Index('pidv', 'paper_id', 'version', unique=True),\n )\n\n metadata_id = Column(Integer, primary_key=True)\n document_id = Column(\n ForeignKey('arXiv_documents.document_id',\n ondelete='CASCADE',\n onupdate='CASCADE'),\n nullable=False,\n index=True,\n server_default=text(\"'0'\")\n )\n paper_id = Column(String(64), nullable=False)\n created = Column(DateTime)\n updated = Column(DateTime)\n submitter_id = Column(ForeignKey('tapir_users.user_id'), index=True)\n submitter_name = Column(String(64), nullable=False)\n submitter_email = Column(String(64), nullable=False)\n source_size = Column(Integer)\n source_format = Column(String(12))\n source_flags = Column(String(12))\n title = Column(Text)\n authors = Column(Text)\n abs_categories = Column(String(255))\n comments = Column(Text)\n proxy = Column(String(255))\n report_num = Column(Text)\n msc_class = Column(String(255))\n acm_class = Column(String(255))\n journal_ref = Column(Text)\n doi = Column(String(255))\n abstract = Column(Text)\n license = Column(ForeignKey('arXiv_licenses.name'), index=True)\n version = Column(Integer, nullable=False, server_default=text(\"'1'\"))\n modtime = Column(Integer)\n is_current = Column(Integer, server_default=text(\"'1'\"))\n is_withdrawn = Column(Integer, nullable=False, server_default=text(\"'0'\"))\n\n document = relationship('Document')\n arXiv_license = relationship('License')\n submitter = relationship('User')\n\n\nclass MemberInstitution(db.Model):\n \"\"\"Primary model for arXiv member insitution data.\"\"\"\n\n __tablename__ = 'Subscription_UniversalInstitution'\n\n resolver_URL = Column(String(255))\n name = Column(String(255), nullable=False, index=True)\n label = Column(String(255))\n id = Column(Integer, primary_key=True)\n alt_text = Column(String(255))\n link_icon = Column(String(255))\n note = Column(String(255))\n\n\nclass MemberInstitutionContact(db.Model):\n \"\"\"Model for arXiv member institution contact information.\"\"\"\n\n __tablename__ = 'Subscription_UniversalInstitutionContact'\n\n email = Column(String(255))\n sid = Column(ForeignKey('Subscription_UniversalInstitution.id',\n ondelete='CASCADE'), nullable=False, index=True)\n active = Column(Integer, server_default=text(\"'0'\"))\n contact_name = Column(String(255))\n id = Column(Integer, primary_key=True)\n phone = Column(String(255))\n note = Column(String(2048))\n\n Subscription_UniversalInstitution = relationship('MemberInstitution')\n\n\nclass MemberInstitutionIP(db.Model):\n \"\"\"Model for arXiv member insitution IP address ranges and exclusions.\"\"\"\n\n __tablename__ = 'Subscription_UniversalInstitutionIP'\n __table_args__ = (\n Index('ip', 'start', 'end'),\n )\n\n sid = Column(ForeignKey('Subscription_UniversalInstitution.id',\n ondelete='CASCADE'), nullable=False, index=True)\n id = Column(Integer, primary_key=True)\n exclude = Column(Integer, server_default=text(\"'0'\"))\n end = Column(BigInteger, nullable=False, index=True)\n start = Column(BigInteger, nullable=False, 
index=True)\n\n Subscription_UniversalInstitution = relationship('MemberInstitution')\n\n\nclass SciencewisePing(db.Model):\n \"\"\"Model for ScienceWISE (trackback) pings.\"\"\"\n\n __tablename__ = 'arXiv_sciencewise_pings'\n\n paper_id_v = Column(String(32), primary_key=True)\n updated = Column(DateTime)\n\n\nclass User(db.Model):\n \"\"\"Model for legacy user data.\"\"\"\n\n __tablename__ = 'tapir_users'\n\n user_id = Column(Integer, primary_key=True)\n first_name = Column(String(50), index=True)\n last_name = Column(String(50), index=True)\n suffix_name = Column(String(50))\n share_first_name = Column(\n Integer, nullable=False, server_default=text(\"'1'\"))\n share_last_name = Column(Integer, nullable=False,\n server_default=text(\"'1'\"))\n email = Column(String(255), nullable=False,\n unique=True, server_default=text(\"''\"))\n share_email = Column(Integer, nullable=False,\n server_default=text(\"'8'\"))\n email_bouncing = Column(Integer, nullable=False,\n server_default=text(\"'0'\"))\n policy_class = Column(ForeignKey('tapir_policy_classes.class_id'),\n nullable=False, index=True,\n server_default=text(\"'0'\"))\n joined_date = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n joined_ip_num = Column(String(16), index=True)\n joined_remote_host = Column(\n String(255), nullable=False, server_default=text(\"''\"))\n flag_internal = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n flag_edit_users = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n flag_edit_system = Column(\n Integer, nullable=False, server_default=text(\"'0'\"))\n flag_email_verified = Column(\n Integer, nullable=False, server_default=text(\"'0'\"))\n flag_approved = Column(Integer, nullable=False,\n index=True, server_default=text(\"'1'\"))\n flag_deleted = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n flag_banned = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n flag_wants_email = Column(\n Integer, nullable=False, server_default=text(\"'0'\"))\n flag_html_email = Column(Integer, nullable=False,\n server_default=text(\"'0'\"))\n tracking_cookie = Column(String(255), nullable=False,\n index=True, server_default=text(\"''\"))\n flag_allow_tex_produced = Column(\n Integer, nullable=False, server_default=text(\"'0'\"))\n\n tapir_policy_class = relationship('UserPolicyClass')\n\n\nclass UserPolicyClass(db.Model):\n \"\"\"Model for the legacy user policy class.\"\"\"\n\n __tablename__ = 'tapir_policy_classes'\n\n class_id = Column(SmallInteger, primary_key=True)\n name = Column(String(64), nullable=False, server_default=text(\"''\"))\n description = Column(Text, nullable=False)\n password_storage = Column(\n Integer, nullable=False, server_default=text(\"'0'\"))\n recovery_policy = Column(Integer, nullable=False,\n server_default=text(\"'0'\"))\n permanent_login = Column(Integer, nullable=False,\n server_default=text(\"'0'\"))\n\n\nclass TrackbackPing(db.Model):\n \"\"\"Primary model for arXiv trackback data.\"\"\"\n\n __tablename__ = 'arXiv_trackback_pings'\n\n trackback_id = Column(Integer, primary_key=True)\n document_id = Column(Integer, index=True)\n title = Column(String(255), nullable=False, server_default=text(\"''\"))\n excerpt = Column(String(255), nullable=False, server_default=text(\"''\"))\n url = Column(String(255), nullable=False,\n index=True, server_default=text(\"''\"))\n blog_name = Column(String(255), nullable=False, server_default=text(\"''\"))\n 
remote_host = Column(String(255), nullable=False,\n server_default=text(\"''\"))\n remote_addr = Column(String(16), nullable=False, server_default=text(\"''\"))\n posted_date = Column(Integer, nullable=False,\n index=True, server_default=text(\"'0'\"))\n is_stale = Column(Integer, nullable=False, server_default=text(\"'0'\"))\n approved_by_user = Column(Integer, nullable=False,\n server_default=text(\"'0'\"))\n approved_time = Column(Integer, nullable=False, server_default=text(\"'0'\"))\n status = Column(Enum('pending', 'pending2', 'accepted',\n 'rejected', 'spam'),\n nullable=False, index=True,\n server_default=text(\"'pending'\"))\n site_id = Column(Integer)\n\n\nclass TrackbackSite(db.Model):\n \"\"\"Model for sites that submit trackbacks to arXiv.\"\"\"\n\n __tablename__ = 'arXiv_trackback_sites'\n\n pattern = Column(String(255), nullable=False,\n index=True, server_default=text(\"''\"))\n site_id = Column(Integer, primary_key=True)\n action = Column(Enum('neutral', 'accept', 'reject', 'spam'),\n nullable=False, server_default=text(\"'neutral'\"))\n\n\nclass DBLP(db.Model):\n \"\"\"Primary model for the DBLP Computer Science Bibliography data.\"\"\"\n\n __tablename__ = 'arXiv_dblp'\n\n document_id = Column(ForeignKey('arXiv_documents.document_id'),\n primary_key=True, server_default=text(\"'0'\"))\n url = Column(String(80))\n\n\nclass DBLPAuthor(db.Model):\n \"\"\"Model for DBLP author name.\"\"\"\n\n __tablename__ = 'arXiv_dblp_authors'\n\n author_id = Column(Integer, primary_key=True, unique=True)\n name = Column(String(40), unique=True)\n\n\nclass DBLPDocumentAuthor(db.Model):\n \"\"\"Model for the DBLP document to author mapping with ordering.\"\"\"\n\n __tablename__ = 'arXiv_dblp_document_authors'\n\n document_id = Column(ForeignKey('arXiv_documents.document_id'),\n primary_key=True, nullable=False, index=True)\n author_id = Column(ForeignKey('arXiv_dblp_authors.author_id'),\n primary_key=True, nullable=False, index=True,\n server_default=text(\"'0'\"))\n position = Column(Integer, nullable=False, server_default=text(\"'0'\"))\n\n author = relationship('DBLPAuthor')\n document = relationship('Document')\n\n\ndef init_app(app: Optional[LocalProxy]) -> None:\n \"\"\"Set configuration defaults and attach session to the application.\"\"\"\n db.init_app(app)\n" }, { "alpha_fraction": 0.65450119972229, "alphanum_fraction": 0.6565867066383362, "avg_line_length": 31.325841903686523, "blob_id": "0cb793d2e3bdcdcaa16fa915894e3cc1bfe0f71f", "content_id": "8b30acecb9d9b9fc3dae2fe7a8381620c44dfde9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2877, "license_type": "permissive", "max_line_length": 74, "num_lines": 89, "path": "/browse/filters.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Browse jinja filters.\"\"\"\nimport re\nfrom urllib import parse\nfrom typing import Callable, Union\n\nfrom jinja2 import Markup, escape\nimport html\n\nfrom browse.services.util.tex2utf import tex2utf\n\nJinjaFilterInput = Union[Markup, str]\n\"\"\"\n Jinja filters will receive their text input as either\n a Markup object or a str. 
It is critical for proper escaping to\n    ensure that str is correctly HTML escaped.\n\n    Markup is decoded from str so this type is redundant but\n    the hope is to make it clear what is going on to arXiv developers.\n\"\"\"\n\n\ndef single_doi_url(clickthrough_url_for: Callable[[str], str],\n                   doi: JinjaFilterInput) -> Markup:\n    \"\"\"\n    DOI is made into a link.\n\n    This expects a DOI ONLY. It should not be used on general text.\n\n    This link is not through clickthrough. Use an additional filter in\n    the template to get that.\n\n    How does this ensure escaping? It expects just a DOI. The result\n    is created as a properly escaped Markup.\n    \"\"\"\n    doi_url = f'https://dx.doi.org/{parse.quote_plus(doi)}'\n    ct_url = clickthrough_url_for(doi_url)\n    return Markup(f'<a href=\"{ct_url}\">{escape(doi)}</a>')\n\n\ndef line_feed_to_br(text: JinjaFilterInput) -> Markup:\n    \"\"\"Lines that start with two spaces should be broken.\"\"\"\n    if hasattr(text, '__html__'):\n        etxt = text\n    else:\n        etxt = Markup(escape(text))\n\n    # if line starts with spaces, replace the white space with <br\\>\n    br = re.sub(r'((?<!^)\\n +)', '\\n<br />', etxt)\n    dedup = re.sub(r'\\n\\n', '\\n', br) # skip if blank\n    return Markup(dedup)\n\n\ndef entity_to_utf(text: str) -> str:\n    \"\"\"\n    Convert HTML entities to unicode.\n\n    For example '&amp;' becomes '&'.\n\n    Must be first filter in list because it does not do anything to a\n    Markup. On a Markup object it will do nothing and just return the\n    input Markup.\n\n    DANGEROUS because this is basically an unescape.\n    It tries to avoid junk like <script> but it is a bad idea.\n    This MUST NEVER BE USED ON USER PROVIDED INPUT. Submission titles etc.\n    \"\"\"\n    # TODO it would be good to move this out of a jinja filter\n    # and to the controller, it is only used for things coming from DBLP\n    if hasattr(text, '__html__'):\n        return text\n\n    without_lt = re.sub('<', 'XXX_LESS_THAN_XXX', text)\n    without_lt_gt = re.sub('>', 'XXX_GREATER_THAN_XXX', without_lt)\n\n    unes = html.unescape(without_lt_gt)\n\n    with_lt = re.sub('XXX_LESS_THAN_XXX', '&lt;', unes)\n    with_lt_gt = re.sub('XXX_GREATER_THAN_XXX', '&gt;', with_lt)\n\n    return Markup(with_lt_gt)\n\n\ndef tex_to_utf(text: JinjaFilterInput) -> Markup:\n    \"\"\"Wrap tex2utf as a filter.\"\"\"\n    if hasattr(text, '__html__'):\n        # Need to unescape so nothing that is tex is escaped\n        return Markup(escape(tex2utf(text.unescape()))) # type: ignore\n    else:\n        return Markup(escape(tex2utf(text)))\n" }, { "alpha_fraction": 0.7835820913314819, "alphanum_fraction": 0.7835820913314819, "avg_line_length": 22.33333396911621, "blob_id": "4ab3c52a72ae2585a2a9c283fb3d996a38b967b0", "content_id": "c309f5128c0f89b9e44b7024484dd822d6a285e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "permissive", "max_line_length": 77, "num_lines": 6, "path": "/browse/controllers/__init__.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"\nHouses controllers for browse.\n\nEach controller corresponds to a distinct browse feature with its own request\nhandling logic.\n\"\"\"\n" }, { "alpha_fraction": 0.6534518003463745, "alphanum_fraction": 0.6541353464126587, "avg_line_length": 36.27388381958008, "blob_id": "14855171e38ba4fcdde3553b9cb936ee2afd52bc", "content_id": "8ef7347ac057be95979da502cd3021aabe3fe6ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5852, "license_type": "permissive", 
"max_line_length": 77, "num_lines": 157, "path": "/browse/services/database/__init__.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Import db instance and define utility functions.\"\"\"\n\nimport ipaddress\nfrom datetime import datetime\nfrom dateutil.tz import tzutc, gettz\nfrom typing import List, Optional, Any, Callable\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.exc import OperationalError, DBAPIError\nfrom arxiv.base.globals import get_application_config\n\nfrom browse.services.database.models import db, Document, \\\n MemberInstitution, MemberInstitutionIP, TrackbackPing, SciencewisePing, \\\n DBLP, DBLPAuthor, DBLPDocumentAuthor\nfrom arxiv.base import logging\nfrom logging import Logger\n\nlogger = logging.getLogger(__name__)\napp_config = get_application_config()\n\n\ndef db_handle_error(logger: Logger, default_return_val: Any) \\\n -> Any:\n \"\"\"Handle operational database errors via decorator.\"\"\"\n def decorator(func: Callable) -> Any:\n def wrapper(*args, **kwargs): # type: ignore\n # Bypass attempt to perform query and just return default value\n is_db_disabled: bool = app_config.get(\n 'BROWSE_DISABLE_DATABASE') or False\n if is_db_disabled:\n if logger:\n logger.info(\n 'Database is disabled per BROWSE_DISABLE_DATABASE')\n return default_return_val\n try:\n return func(*args, **kwargs)\n except NoResultFound:\n return default_return_val\n except (OperationalError, DBAPIError) as ex:\n if logger:\n logger.warning(\n f'Error executing query in {func.__name__}: {ex}')\n return default_return_val\n except Exception as ex:\n if logger:\n logger.warning(\n f'Unknown exception in {func.__name__}: {ex}')\n raise\n return wrapper\n return decorator\n\n\ndef __all_trackbacks_query() -> Query:\n return db.session.query(TrackbackPing)\n\n\ndef __paper_trackbacks_query(paper_id: str) -> Query:\n return __all_trackbacks_query() \\\n .filter(TrackbackPing.document_id == Document.document_id) \\\n .filter(Document.paper_id == paper_id) \\\n .filter(TrackbackPing.status == 'accepted')\n\n\n@db_handle_error(logger=logger, default_return_val=None)\ndef get_institution(ip: str) -> Optional[str]:\n \"\"\"Get institution label from IP address.\"\"\"\n decimal_ip = int(ipaddress.ip_address(ip))\n\n stmt = (\n db.session.query(\n MemberInstitution.label,\n func.sum(MemberInstitutionIP.exclude).label(\"exclusions\")\n ).\n join(MemberInstitutionIP).\n filter(\n MemberInstitutionIP.start <= decimal_ip,\n MemberInstitutionIP.end >= decimal_ip\n ).\n group_by(MemberInstitution.label).\n subquery()\n )\n institution_row = db.session.query(stmt.c.label).\\\n filter(stmt.c.exclusions == 0).first()\n institution_name = None\n if institution_row:\n institution_name = institution_row.label\n assert isinstance(institution_name, str)\n return institution_name\n\n\n@db_handle_error(logger=logger, default_return_val=[])\ndef get_all_trackback_pings() -> List[TrackbackPing]:\n \"\"\"Get all trackback pings in database.\"\"\"\n return list(__all_trackbacks_query().all())\n\n\n@db_handle_error(logger=logger, default_return_val=[])\ndef get_trackback_pings(paper_id: str) -> List[TrackbackPing]:\n \"\"\"Get trackback pings for a particular document (paper_id).\"\"\"\n return list(__paper_trackbacks_query(paper_id).all())\n\n\n@db_handle_error(logger=logger, default_return_val=None)\ndef get_trackback_ping_latest_date(paper_id: str) -> Optional[datetime]:\n \"\"\"Get the most recent accepted 
trackback datetime for a paper_id.\"\"\"\n timestamp: int = db.session.query(\n func.max(TrackbackPing.approved_time)\n ).filter(TrackbackPing.document_id == Document.document_id) \\\n .filter(Document.paper_id == paper_id) \\\n .filter(TrackbackPing.status == 'accepted').scalar()\n dt = datetime.fromtimestamp(timestamp, tz=gettz('US/Eastern'))\n dt = dt.astimezone(tz=tzutc())\n return dt\n\n\n@db_handle_error(logger=logger, default_return_val=0)\ndef count_trackback_pings(paper_id: str) -> int:\n \"\"\"Count trackback pings for a particular document (paper_id).\"\"\"\n count: int = __paper_trackbacks_query(paper_id) \\\n .group_by(TrackbackPing.url).count()\n return count\n\n\n@db_handle_error(logger=logger, default_return_val=0)\ndef count_all_trackback_pings() -> int:\n \"\"\"Count trackback pings for all documents, without DISTINCT(URL).\"\"\"\n c = __all_trackbacks_query().count()\n assert isinstance(c, int)\n return c\n\n\n@db_handle_error(logger=logger, default_return_val=False)\ndef has_sciencewise_ping(paper_id_v: str) -> bool:\n \"\"\"Determine whether versioned document has a ScienceWISE ping.\"\"\"\n has_ping: bool = db.session.query(SciencewisePing) \\\n .filter(SciencewisePing.paper_id_v == paper_id_v).count() > 0\n return has_ping\n\n\n@db_handle_error(logger=logger, default_return_val=None)\ndef get_dblp_listing_path(paper_id: str) -> Optional[str]:\n \"\"\"Get the DBLP Bibliography URL for a given document (paper_id).\"\"\"\n url: str = db.session.query(DBLP.url).join(Document).filter(\n Document.paper_id == paper_id).one().url\n return url\n\n\n@db_handle_error(logger=logger, default_return_val=[])\ndef get_dblp_authors(paper_id: str) -> List[str]:\n \"\"\"Get sorted list of DBLP authors for a given document (paper_id).\"\"\"\n authors_t = db.session.query(DBLPAuthor.name).\\\n join(DBLPDocumentAuthor).\\\n join(Document).filter(Document.paper_id == paper_id).\\\n order_by(DBLPDocumentAuthor.position).all()\n authors = [a for (a,) in authors_t]\n return authors\n" }, { "alpha_fraction": 0.6352705359458923, "alphanum_fraction": 0.6359385251998901, "avg_line_length": 35.96296310424805, "blob_id": "e95bfba2d18a30d16c92c668bc8240f69cc9d7ce", "content_id": "70ad6d6d078f3c0a42de7cdd677810daac123705", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2994, "license_type": "permissive", "max_line_length": 77, "num_lines": 81, "path": "/browse/domain/category.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Class that represents a single category.\"\"\"\n\nfrom typing import Union, List\nfrom dataclasses import dataclass, field\n\nfrom arxiv import taxonomy\n\n\n@dataclass(eq=True, order=True)\nclass Category:\n \"\"\"Represents an arXiv category.\n\n arXiv categories are arranged in a hierarchy where there are archives\n (astro-ph, cs, math, etc.) that contain subject classes (astro-ph has\n subject classes CO, GA, etc.). We now use the term category to refer\n to any archive or archive.subject_class that one can submit to (so\n hep-th and math.IT are both categories). No subject class can be in\n more than one archive. However, our scientific advisors identify some\n categories that should appear in more than one archive because they\n bridge major subject areas. Examples include math.MP == math-ph and\n stat.TH = math.ST. 
These are called category aliases and the idea is\n    that any article classified in one of the alias categories also appears\n    in the other (canonical), but that most of the arXiv code for display,\n    search, etc. does not need to understand the break with hierarchy.\n    \"\"\"\n\n    id: str = field(compare=True)\n    \"\"\"The category identifier (e.g. cs.DL).\"\"\"\n\n    name: str = field(init=False, compare=False)\n    \"\"\"The name of the category (e.g. Digital Libraries).\"\"\"\n\n    # TODO should probably be changed to get_canonical to avoid confusion\n    canonical: Union['Category', None] = field(init=False, compare=False)\n\n    def __hash__(self) -> int:\n        \"\"\"Hash.\"\"\"\n        return self.id.__hash__()\n\n    def __post_init__(self) -> None:\n        \"\"\"Get the full category name.\"\"\"\n        if self.id in taxonomy.CATEGORIES:\n            self.name = taxonomy.CATEGORIES[self.id]['name']\n\n        if self.id in taxonomy.ARCHIVES_SUBSUMED:\n            self.canonical = Category(id=taxonomy.ARCHIVES_SUBSUMED[self.id])\n        else:\n            self.canonical = None\n\n    def unalias(self) -> 'Category':\n        \"\"\"Follow any EQUIV or SUBSUMED to get the current category.\"\"\"\n        if self.id in taxonomy.CATEGORY_ALIASES:\n            return Category(taxonomy.CATEGORY_ALIASES[self.id])\n        if self.id in taxonomy.ARCHIVES_SUBSUMED:\n            return Category(taxonomy.ARCHIVES_SUBSUMED[self.id])\n        return self\n\n    def display_str(self) -> str:\n        \"\"\"String to use in display of a category.\n\n        Ex:\n        Earth and Planetary Astrophysics (astro-ph.EP)\n        \"\"\"\n        if self.id in taxonomy.CATEGORIES:\n            catname = taxonomy.CATEGORIES[self.id]['name']\n            return f'{catname} ({self.id})'\n        sp = _split_cat_str(self.id)\n        hassub = len(sp) == 2\n        if hassub:\n            (arc, _) = sp\n            if arc in taxonomy.ARCHIVES:\n                arcname = taxonomy.ARCHIVES[arc]['name']\n                return f'{arcname} ({self.id})'\n            else:\n                return self.id\n        else:\n            return self.id\n\n\ndef _split_cat_str(cat: str) -> List[str]:\n    return cat.split('.', 2)\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 30, "blob_id": "d29316531653d4d3a271a69794c9d46f4aa9225e", "content_id": "a7a09d0fae8c295c27b927eec17b2ca6ee6420fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "permissive", "max_line_length": 30, "num_lines": 1, "path": "/browse/util/__init__.py", "repo_name": "cul-it/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Utility code for browse.\"\"\"\n" } ]
12
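The `db_handle_error` decorator in `browse/services/database/__init__.py` above is the repository's central error-handling pattern: every query helper degrades to a safe default value instead of raising when the database misbehaves. A minimal self-contained sketch of that pattern follows; it collapses the original's separate `NoResultFound`/`OperationalError` cases into one handler, and the `fetch_label` function with its in-memory lookup is a hypothetical stand-in for a real SQLAlchemy query, not part of the repository.

```python
import logging
from typing import Any, Callable

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


def db_handle_error(logger: logging.Logger, default_return_val: Any) -> Any:
    """Swallow errors from the wrapped call and return a default instead."""
    def decorator(func: Callable) -> Any:
        def wrapper(*args, **kwargs):  # type: ignore
            try:
                return func(*args, **kwargs)
            except Exception as ex:
                logger.warning(f'Error executing query in {func.__name__}: {ex}')
                return default_return_val
        return wrapper
    return decorator


@db_handle_error(logger=logger, default_return_val=None)
def fetch_label(key: str) -> str:
    # Hypothetical stand-in for a real database query.
    return {'cornell': 'Cornell University'}[key]


print(fetch_label('cornell'))  # -> Cornell University
print(fetch_label('missing'))  # -> None; the KeyError is logged and swallowed
```

The payoff is visible in the helpers above: callers like `get_trackback_pings` never need their own try/except, and a single switch (`BROWSE_DISABLE_DATABASE` in the original) can short-circuit every query from one place.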
alberand/pyb
https://github.com/alberand/pyb
df9231ce7d4a7dcff0e74c945a5b9c6ac32357ff
062e55765a66ad431a7fd8d8e5ebe60e22557a52
f02d5ee433dbc054a2500169232897b23c5cc6ac
refs/heads/master
2020-04-15T16:57:26.466917
2019-02-11T13:15:52
2019-02-11T13:15:52
164,854,933
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5387400388717651, "alphanum_fraction": 0.5626357793807983, "avg_line_length": 16.705127716064453, "blob_id": "425c0b791ba21d7db31348c8e5f7cf42abcbd1de", "content_id": "73d18879f39b6d875f1b37f07127165d2a80d56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 79, "num_lines": 78, "path": "/pyb.py", "repo_name": "alberand/pyb", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n'''\nSmall script to display numbers in HEX, decimal and binary formats. I tired of\ngoogling for \"hex to dec\", \"dec to hex\" etc. This can be done by python or bash\nbut it's easier and doesn't force you to remember commands.\n\nDoesn't work with negative numbers.\n\nUsage:\n $ pyb 0b01010101\n $ pyb 0xFF\n $ pyb 123\n\nTODO:\n * Negative numbers\n * Octal number representation\n * ASCII\n\nAuthor:\n Andrey Albershteyn <[email protected]>\n'''\n\nimport sys\n\ndef is_bin(num):\n return num[0:2] == '0b'\n\ndef is_hex(num):\n return num[0:2] == '0x'\n\ndef is_dec(num):\n return num.isdigit()\n\ndef from_bin(num):\n return int(num, 2)\n\ndef from_hex(num):\n return int(num, 16)\n\ndef from_dec(num):\n return int(num)\n\ndef convert(num):\n d = 0\n if is_bin(num):\n d = from_bin(num)\n if is_hex(num):\n d = from_hex(num)\n if is_dec(num):\n d = from_dec(num)\n\n return d\n\ndef int_size(num):\n size = 0\n while True:\n if num < pow(2, size):\n return size\n size = size + 4\n\n\ndef output(d):\n size = int_size(d)\n\n tmpl = '\\tBIN: {{bin:#0{}b}}\\n\\tHEX: {{hex:0{}x}}\\n\\tDEC: {{dec}}'.format(\n size + 2, size/4)\n\n print(tmpl.format(bin=d, hex=d, dec=d))\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(__doc__)\n sys.exit(0)\n\n number = sys.argv[1]\n \n output(convert(number))\n" }, { "alpha_fraction": 0.669211208820343, "alphanum_fraction": 0.7022900581359863, "avg_line_length": 22.058822631835938, "blob_id": "49a283d58827092f6e9682fbfc3bad7864f63194", "content_id": "f74c78a64530dcf1f7e78d7ddd7d0755b337604a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 393, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/README.md", "repo_name": "alberand/pyb", "src_encoding": "UTF-8", "text": "\nSmall script to display numbers in HEX, decimal and binary formats. I tired of\ngoogling for \"hex to dec\", \"dec to hex\" etc. This can be done by python or bash\nbut it's easier and doesn't force you to remember commands.\n\nDoesn't work with negative numbers.\n\nUsage:\n\n $ pyb 0b01010101\n $ pyb 0xFF\n $ pyb 123\n\nTODO:\n\n * Negative numbers\n * Octal number representation\n * ASCII\n" } ]
2
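The width logic in `pyb.py` above is the whole trick: `int_size` rounds the bit width up to the next multiple of four, and `output` derives both the padded binary field (`size + 2`, to leave room for the `0b` prefix) and the hex digit count (`size // 4`) from that single value. A quick standalone check of that logic, with illustrative sample values and the loop restated in condensed form:

```python
def int_size(num):
    # Smallest multiple of 4 bits wide enough to represent num
    # (condensed restatement of the loop in pyb.py).
    size = 0
    while num >= pow(2, size):
        size += 4
    return size


for n in (5, 255, 4096):
    size = int_size(n)
    tmpl = '\tBIN: {{bin:#0{}b}}\n\tHEX: {{hex:0{}x}}\n\tDEC: {{dec}}'.format(
        size + 2, size // 4)
    print(tmpl.format(bin=n, hex=n, dec=n))
```

For 255 this prints `0b11111111`, `ff`, and `255`; the binary and hex columns always stay in lockstep because both widths come from the same `size`.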
stavco9/tuneApp
https://github.com/stavco9/tuneApp
233eb5d4b36e5f6b8161474bc0eb532df6b3ecb0
ff16f93e44bbc4742b7b232a4af6025dac7abfe3
666994c75fa57cb78ec9bf2f445e3131da3c0965
refs/heads/master
2022-12-11T05:58:37.614334
2019-10-02T18:28:40
2019-10-02T18:28:40
163,518,754
0
0
null
2018-12-29T14:50:17
2019-10-02T18:28:43
2022-12-09T23:25:42
JavaScript
[ { "alpha_fraction": 0.6743383407592773, "alphanum_fraction": 0.6846950650215149, "avg_line_length": 25.33333396911621, "blob_id": "ccad1065c1c7b1a63a3a3cc5ea9360f7ff3f3390", "content_id": "422e8c95cf0a96f41bca33c5ccd56d5075aad06a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 869, "license_type": "no_license", "max_line_length": 68, "num_lines": 33, "path": "/server/api/routes/albumsRoutes.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "'use strict';\n\nconst session = require('express-session');\nconst express = require('express');\nconst request = require('request'); // \"Request\" library\nvar cookieParser = require('cookie-parser');\nconst albumsController = require('../controllers/albumsController');\n\nvar app = express();\n\nmodule.exports = function(app) {\n\n app.use(session({\n secret: 'keyboard cat',\n resave: false,\n saveUninitialized: true,\n cookie:{ maxAge: 2*60*60*1000} // two hours\n }))\n\n app.use(cookieParser());\n\n // artistsController Routes\n app.route('/albums')\n .get(async function(req, res){\n await albumsController.getAllAlbums(req, res);\n })\n .post(albumsController.AddNewAlbum);\n\n app.route('/albums/:albumId')\n .get(albumsController.GetAlbumById)\n .put(albumsController.UpdateAlbumById)\n .delete(albumsController.DeleteAlbumById);\n};\n" }, { "alpha_fraction": 0.5967742204666138, "alphanum_fraction": 0.599078357219696, "avg_line_length": 27.899999618530273, "blob_id": "c9c8ff48117ff42959ffa70e6b5a85b5b41c58d7", "content_id": "ec3a7be30b8c25b21e0ad5a692d3705466e9cb20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 868, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/Components/Player/PlayerControls.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {PureComponent} from 'react';\nimport {Platform, StyleSheet, View} from 'react-native';\nimport TrackPlayer from 'react-native-track-player';\nimport {connect} from 'react-redux';\n\nimport TouchableIcon from './TouchableIcon';\n//import { getTrackStructure } from './Utils';\nimport playerReducer from \"../../redux/reducers/player-reducer\";\n\nconst STATE_PLAYING = Platform.OS === 'android' ? 3 : 'STATE_PLAYING';\nconst STATE_PAUSED = Platform.OS === 'android' ? 
2 : 'STATE_PAUSED';\n\nclass PlayerControls extends PureComponent<Props> {\n\n render() {\n const {playerState, track} = this.props;\n\n return (\n <View\n style={[\n styles.playerContainer,\n {backgroundColor: \"white\", justifyContent: 'space-around'},\n ]}\n >\n\n\n </View>\n );\n }\n}\n\n" }, { "alpha_fraction": 0.7362318634986877, "alphanum_fraction": 0.7362318634986877, "avg_line_length": 33.599998474121094, "blob_id": "07d95be8026573f67f956b7cec28f1f37f806b78", "content_id": "6cfd805dd8c662313bf9a528d610746488bce272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 345, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/redux/actions/player-actions.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "export const CHANGE_CURRENT_TRACK = 'CHANGE_CURRENT_TRACK';\nexport const PLAY_SONGS = 'PLAY_SONGS';\n\nexport const changeCurrentTrack = currentTrackId => {\n return { type: CHANGE_CURRENT_TRACK, newTrackId: currentTrackId}\n};\n\nexport const playSongs = (songsList, requestedSong) => {\n return { type: PLAY_SONGS, songsList, requestedSong}\n};" }, { "alpha_fraction": 0.5615054368972778, "alphanum_fraction": 0.5628891587257385, "avg_line_length": 24.184669494628906, "blob_id": "b77d6806b742ff376174c0f412c3c2b272662ea7", "content_id": "bd38d961086324ae320b754151e94b08795a5852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7227, "license_type": "no_license", "max_line_length": 102, "num_lines": 287, "path": "/server/MachineLearning/ml.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "const {PyMachine} = require('./PyMachine/pymachine');\n\n//let RecommendationsMachine_knn = new PyMachine(__dirname + '/pythonScripts/Recommendations_KNN.py');\n//let RecommendationsMachine_id3 = new PyMachine(__dirname + '/pythonScripts/Recommendations_ID3.py');\n//let RecommendationsMachine_nn = new PyMachine(__dirname + '/pythonScripts/Recommendations_NN.py');\nlet similarTracksMachine_knn = new PyMachine(__dirname + '/pythonScripts/similarTracks_KNN.py');\n\n// Function names are as follows:\n// [Function goal and returned value type]_[Algorithm used]\n\n// ===== in: =====\n// familliarTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n// testedTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n//\n// ===== out: =====\n// [\n// {\n// id (of track)\n// ...\n// }\n// ]\n//\n// recommendedTracks returned as track objects\n// as they are presented as the testedTracks array\nasync function classifyForRecommendedTracks_knn(familliarTracks, testedTracks) {\n let familliarTracksFeatures = familliarTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n let testedTracksFeatures = testedTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n\n let recommendations = [];\n testedTracks.forEach(async (t, i) => {\n let isRecommended = await RecommendationsMachine_knn.run({\n 'y': testedTracksFeatures[i],\n\t\t 'X': familliarTracksFeatures\n });\n if(isRecommended) {\n recommendations.push(t);\n }\n });\n\n return recommendations;\n}\n\n// ===== in: =====\n// familliarTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n// testedTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n//\n// ===== out: =====\n// [\n// {\n// id (of track)\n// ...\n// }\n// ]\n//\n// 
recommendedTracks returned as track objects\n// as they are presented as the testedTracks array\nasync function classifyForRecommendedTracks_id3(familliarTracks, testedTracks) {\n let familliarTracksFeatures = familliarTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n let testedTracksFeatures = testedTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n\n let recommendations = [];\n testedTracks.forEach(async (t, i) => {\n let isRecommended = await RecommendationsMachine_id3.run({\n 'y': testedTracksFeatures[i],\n\t\t 'X': familliarTracksFeatures\n });\n if(isRecommended) {\n recommendations.push(t);\n }\n });\n\n return recommendations;\n}\n\n// ===== in: =====\n// neuralNetwork: [\n// [], [], [], ...\n// ]\n// testedTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n//\n// ===== out: =====\n// [\n// {\n// id (of track)\n// ...\n// }\n// ]\n//\n// recommendedTracks returned as the track objects\n// as they are presented as the testedTracks array\nasync function classifyForRecommendedTracks_neuralnetwork(neuralNetwork, testedTracks) {\n neuralNetwork = ReformatNeuralNetwork(neuralNetwork);\n let testedTracksFeatures = testedTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n\n let recommendations = [];\n testedTracks.forEach(async (t, i) => {\n let isRecommended = await RecommendationsMachine_nn.run({\n 't': testedTracksFeatures[i],\n\t\t 'nn': neuralNetwork\n });\n if(isRecommended) {\n recommendations.push(t);\n }\n });\n\n return recommendations;\n}\n\n// ===== in: =====\n// neuralNetwork: [\n// [], [], [], ...\n// ]\n// familliarTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n// testedTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n//\n// ===== out: =====\n// [\n// {\n// id (of track)\n// ...\n// }\n// ]\n//\nasync function classifyForRecommendedTracks_all(neuralNetwork, familliarTracks, testedTracks)\n{\n let results = await Promise.all([\n classifyForRecommendedTracks_knn(familliarTracks, testedTracks),\n classifyForRecommendedTracks_id3(familliarTracks, testedTracks),\n classifyForRecommendedTracks_neuralnetwork(neuralNetwork, testedTracks)\n ]);\n\n results.forEach((arrOfRecommendedTracks) => {\n arrOfRecommendedTracks.forEach((r) => {\n let track = testedTracks.find((t) => t.id === r.id)\n if(track.recommendationsCounter === undefined) {\n track.recommendationsCounter = 0;\n }\n track.recommendationsCounter++;\n });\n });\n \n return testedTracks.filter((a) => a.recommendationsCounter !== undefined)\n .sort((a, b) => b.recommendationsCounter - a.recommendationsCounter);\n}\n\n// ===== in: =====\n// baseTrack: {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// testedTracks: [\n// {\n// id (of track)\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\n//\n// ===== out: =====\n// [\n// {\n// id (of track)\n// ...\n// }\n// ]\n//\n// similarTracks returned as track objects\n// as they are presented as the testedTracks array\nasync function SearchForSimilarTracks_knn(baseTrack, testedTracks) {\n let baseTrackFeatures = ReformatAudioFeatures(baseTrack);\n let testedTracksFeatures = testedTracks.map((t) => {\n return ReformatAudioFeatures(t);\n });\n\n let similarIndexes = await similarTracksMachine_knn.run({\n 'y': baseTrackFeatures,\n 'X': testedTracksFeatures\n });\n return (similarIndexes.map((i) => {\n return testedTracks[i];\n }));\n}\n\nfunction ReformatAudioFeatures(track) {\n let featuresObject = 
track.AudioFeatures;\n    let features = [];\n    features.push(featuresObject.danceability);\n    features.push(featuresObject.energy);\n    features.push(featuresObject.key / 10);\n    features.push(featuresObject.loudness);\n    features.push(featuresObject.mode);\n    features.push(featuresObject.speechiness);\n    features.push(featuresObject.acousticness);\n    features.push(featuresObject.instrumentalness);\n    features.push(featuresObject.liveness);\n    features.push(featuresObject.valence);\n    features.push(featuresObject.tempo);\n    features.push(featuresObject.time_signature);\n    return features;\n}\n\nfunction ReformatNeuralNetwork(nn) {\n    return nn;\n}\n\nmodule.exports = {\n    Recommendations: {\n        classifyMultipleByKNN: classifyForRecommendedTracks_knn,\n        classifyMultipleByID3: classifyForRecommendedTracks_id3,\n        classifyMultipleByNN: classifyForRecommendedTracks_neuralnetwork,\n        classifyMultiple: classifyForRecommendedTracks_all\n    },\n    SimilarTracks: {\n        search: SearchForSimilarTracks_knn\n    }\n};" }, { "alpha_fraction": 0.6721215844154358, "alphanum_fraction": 0.6773816347122192, "avg_line_length": 24.92424201965332, "blob_id": "395a13704d5a63aa1ab8067af3c652c6f6922701", "content_id": "c2c8398d65a5fc59c469f9752c6125e7384b1857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1711, "license_type": "no_license", "max_line_length": 68, "num_lines": 66, "path": "/server/api/routes/tracksRoutes.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "'use strict';\n\nconst session = require('express-session');\nconst express = require('express');\nconst request = require('request'); // \"Request\" library\nvar cookieParser = require('cookie-parser');\nconst tracksController = require('../controllers/tracksController');\n\nvar app = express();\n\nmodule.exports = function(app) {\n\n    app.use(session({\n      secret: 'keyboard cat',\n      resave: false,\n      saveUninitialized: true,\n      cookie:{ maxAge: 2*60*60*1000} // two hours\n    }))\n\n    app.use(cookieParser());\n\n    // Parse URL-encoded bodies (as sent by HTML forms)\n    app.use(express.urlencoded());\n\n    // Parse JSON bodies (as sent by API clients)\n    app.use(express.json());\n\n    // tracksController Routes\n    app.route('/tracks')\n    .get(async function(req, res){\n        await tracksController.getAllTracks(req, res);\n    })\n    .post(tracksController.AddNewTrack);\n\n    // tracksController Routes\n    app.route('/tracks/features')\n    .get(async function(req, res){\n        await tracksController.getAllAudioFeatures(req, res);\n    });\n    //.post(tracksController.AddNewAudioFeature);\n\n    app.route('/tracks/:trackId')\n    .get(tracksController.GetTrackById)\n    .put(tracksController.UpdateTrackById)\n    .delete(tracksController.DeleteTrackById);\n\n    // Body format for POST request\n    //{\n    //    \"trackId\": \"dsgkkld\"\n    //}\n    app.route('/tracks/like/')\n    .post(tracksController.LikeTrackById);\n\n    // Body format for POST request\n    //{\n    //    \"trackId\": \"dsgkkld\"\n    //}\n    app.route('/tracks/unlike/')\n    .post(tracksController.UnlikeTrackById);\n\n    app.route('/tracks/similar/:trackId')\n    .get(tracksController.GetSimilarTracksById);\n\n    app.route('/tracks/top/:limit')\n    .get(tracksController.GetTopTracks);\n};\n" }, { "alpha_fraction": 0.6352705359458923, "alphanum_fraction": 0.6359385251998901, "avg_line_length": 35.96296310424805, "blob_id": "e95bfba2d18a30d16c92c668bc8240f69cc9d7ce", "content_id": "70ad6d6d078f3c0a42de7cdd677810daac123705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", 
"length_bytes": 381, "license_type": "no_license", "max_line_length": 77, "num_lines": 12, "path": "/Components/LoginPage/LoginIcon.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport {Image, TouchableNativeFeedback, View} from \"react-native\";\n\nfunction LoginIcon(imageUrl) {\n return <TouchableNativeFeedback>\n <View>\n <Image style={{width: 50, height: 50}}\n source={require('../../assets/sign-in-with-google.png')}/>\n </View>\n </TouchableNativeFeedback>\n}\nexport default LoginIcon;" }, { "alpha_fraction": 0.511860191822052, "alphanum_fraction": 0.5156055092811584, "avg_line_length": 31.079999923706055, "blob_id": "39bf454451f9b282cd70df3fdf502153f4cc7ec8", "content_id": "e7590076e8c7bcbe5445142d1ad46b6305b0cde2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 801, "license_type": "no_license", "max_line_length": 96, "num_lines": 25, "path": "/Components/CompactSongList/CompactSong.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {ListItem, Thumbnail, Left, Right, Body, Text, Button, Icon} from \"native-base\";\nimport React from \"react\";\n\nconst CompactSong = props => {\n const {song} = props;\n\n return (\n <ListItem thumbnail button onPress={() => props.expandSong()}>\n <Left>\n <Thumbnail square source={{ uri: song.album.images[0].url }} />\n </Left>\n <Body>\n <Text>{song.name}</Text>\n <Text note numberOfLines={1}>{song.artists[0].name}</Text>\n </Body>\n <Right>\n <Button small rounded onPress={() => props.onPlay(song, props.playlistContext)}>\n <Icon active name=\"play\"/>\n </Button>\n </Right>\n </ListItem>\n );\n};\n\nexport default CompactSong;" }, { "alpha_fraction": 0.6073931455612183, "alphanum_fraction": 0.6126489043235779, "avg_line_length": 26.713592529296875, "blob_id": "3de425c937ff6a3038fd75c2c5821fcca2376e3e", "content_id": "1a636acb4d5cf0885cd41e9e5c551f1f98b71d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5708, "license_type": "no_license", "max_line_length": 132, "num_lines": 206, "path": "/server/index.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst queryString = require('query-string');\nconst passport = require('passport');\nconst session = require('express-session');\nconst request = require('request');\nconst cookieParser = require('cookie-parser');\n\nrequire('dotenv').config({path: __dirname+'/tuneApp.env'});\n\nconst http = require('http');\n\nconst googleAuthentication = require('./google-authentication');\nconst spotifyAuthentication = require('./spotify-authentication');\nconst mongoConnection = require('./mongo-connection');\n\nconst hostname = process.env.HOSTNAME;\nconst port = process.env.PORT;\n\nconst spotifyStateKey = 'spotify_auth_state';\n\nconst app = express();\n\n// Registering the artist routes\nrequire('./api/routes/artistsRoutes')(app);\n\n// Registering the track routes\nrequire('./api/routes/tracksRoutes')(app);\n\n// Registering the album routes\nrequire('./api/routes/albumsRoutes')(app);\n\n// Registering the user routes\nrequire('./api/routes/usersRoutes')(app);\n\n// Registering the user routes\nconst playlistRoutes = require('./api/routes/playlistRoute');\nplaylistRoutes(app);\n\napp.use(session({\n secret: 'keyboard cat',\n resave: false,\n saveUninitialized: true,\n cookie:{ maxAge: 2*60*60*1000} // two hours\n 
}));\n\napp.use(cookieParser());\n\napp.get('/', (req, res) => {\n    const googleUrl = googleAuthentication.urlGoogle();\n    const spotifyUrl = spotifyAuthentication.getSpofityUrl();\n    if (req.session.token){\n        return res.redirect('/home');\n    }\n    else{\n        return res.redirect('/login');\n    }\n});\n\napp.get('/home', async(req, res) => {\n    res.statusCode = 200;\n\n    //const url = googleAuthentication.urlGoogle();\n\n    //res.redirect(url)\n\n    if (!req.session.token){\n        return res.redirect('/login');\n    }\n    else{\n        //if (!req.session.spotify_access_token){\n        //    return res.redirect('/get-spotify-token');\n        //}\n\n        res.setHeader('Content-Type', 'application/json');\n\n        try{\n            if (!req.session.userDetailsFromDB){\n                req.session.userDetailsFromDB = await mongoConnection.queryFromMongoDB('users', {'email': req.session.token.email});\n                //req.session.userDetailsFromDB = mongoConnection.queryFromMongoDB('users', {});\n            }\n            if (req.session.userDetailsFromDB.length < 1) {\n                await mongoConnection.addToMongoDB('users', req.session.token);\n            } else {\n                if (req.session.token.hasOwnProperty('google_id')) {\n                    if (!req.session.userDetailsFromDB[0].hasOwnProperty('google_id')) {\n                        await mongoConnection.updateMongoDB('users', {'email': req.session.token.email}, req.session.token);\n                    }\n                } else if (req.session.token.hasOwnProperty('spotify_id')) {\n                    if (!req.session.userDetailsFromDB[0].hasOwnProperty('spotify_id')) {\n                        await mongoConnection.updateMongoDB('users', {'email': req.session.token.email}, req.session.token);\n                    }\n                }\n            }\n        }\n        catch(error){\n            console.log(error);\n        }\n\n        //artistsController.getAllArtists(req, res);\n\n        //tracksController.tracksPolling();\n        res.send(req.session.token);\n    }\n});\n\napp.get('/login/google', (req, res) => {\n    res.statusCode = 200;\n\n    var googleUrl = googleAuthentication.urlGoogle();\n\n    if (req.session.token){\n        return res.redirect('/home');\n    }\n    else{\n        return res.redirect(googleUrl);\n    }\n});\n\napp.get('/login/spotify', (req, res) => {\n    res.statusCode = 200;\n\n    var spotifyUrl = spotifyAuthentication.getSpofityUrl();\n\n    res.cookie(spotifyStateKey, queryString.parseUrl(spotifyUrl).query.state);\n\n    if (req.session.token){\n        return res.redirect('/home');\n    }\n    else{\n        return res.redirect(spotifyUrl);\n    }\n});\n\n\napp.get('/login', (req, res) => {\n\n    res.statusCode = 200;\n\n    var googleUrl = googleAuthentication.urlGoogle();\n\n    var spotifyUrl = spotifyAuthentication.getSpofityUrl();\n\n    res.cookie(spotifyStateKey, queryString.parseUrl(spotifyUrl).query.state);\n\n    if (req.session.token){\n        return res.redirect('/home');\n    }\n    else{\n        res.setHeader('Content-Type', 'text/html');\n        res.end('<a href=\\'' + googleUrl + '\\'>Login with Google</a></br><a href=\\'' + spotifyUrl + '\\'>Login with Spotify</a>\\n');\n    }\n\n    //res.redirect('/');\n});\n\napp.get('/logout', (req, res) => {\n\n    if (req.session.token){\n        req.session.token = null;\n        req.session.userDetailsFromDB = null;\n    }\n\n    res.redirect('/');\n});\n\napp.get('/remove-account', async (req, res) => {\n\n    if (!req.session.token){\n        return res.redirect('/login');\n    }\n    else{\n        var email = req.session.token.email;\n\n        req.session.token = null;\n        req.session.userDetailsFromDB = null;\n\n        await mongoConnection.deleteFromMongoDB('users', {'email': email});\n    }\n\n    res.status(204).send(\"User \" + email + \" removed successfully\");\n});\n\napp.get('/google-auth', async(req, res, next) => {\n    const code = queryString.parseUrl(req.url).query.code;\n\n    const token = await googleAuthentication.getGoogleAccountFromCode(code);\n\n    
req.session.token = token;\n\n res.statusCode = 200;\n\n res.redirect('/home');\n });\n\napp.get('/spotify-auth', (req, res, next) => {\n spotifyAuthentication.getAccessToken(req, res, next);\n});\n\napp.get('/get-spotify-token', (req, res) =>{\n spotifyAuthentication.getAccessTokenForAPI(req, res);\n});\n\n\napp.listen(port, hostname, () => {\n console.log(`Server running at http://${hostname}:${port}/`);\n});" }, { "alpha_fraction": 0.5892608165740967, "alphanum_fraction": 0.6018131375312805, "avg_line_length": 30.19565200805664, "blob_id": "7c135992f0c1e2fec63d41826482431e12af30aa", "content_id": "b9f31ee9585f0178628afb0eea9b849fc612149f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1434, "license_type": "no_license", "max_line_length": 101, "num_lines": 46, "path": "/Components/UserPlaylist.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import axios from \"axios\";\nimport React, {useEffect, useState} from 'react'\nimport {SetUserPlaylist} from \"../redux/actions/songs-actions\";\nimport {connect} from \"react-redux\";\nimport {List, Spinner} from \"native-base\";\nimport SongItem from \"./common/SongItem\";\nimport CompactSong from \"./CompactSongList/CompactSong\";\n\nconst UserPlaylist = props => {\n const [isLoading, setIsLoading] = useState(true);\n useEffect(() => {\n axios.get('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/playlist/build')\n .then(({data: fetchedSongs}) => {\n props.setUserPlaylist([...fetchedSongs]);\n setIsLoading(false);\n });\n }, []);\n\n return (\n <List style={{width: '100%'}}>\n {isLoading ? <Spinner color={'#3c50b5'}/> :\n props.songReducer.userPlaylist.map(song => {\n const SongCardItem = SongItem(CompactSong, song, props.songReducer.userPlaylist);\n return <SongCardItem/>\n })\n }\n </List>\n );\n};\n\nconst mapStateToProps = state => {\n console.log(state);\n return {\n songReducer: state.songReducer\n }\n};\n\nconst mapDispatchToProps = dispatch => {\n return {\n setUserPlaylist: userPlaylist => {\n dispatch(SetUserPlaylist(userPlaylist));\n }\n }\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(UserPlaylist);" }, { "alpha_fraction": 0.5094130635261536, "alphanum_fraction": 0.5193798542022705, "avg_line_length": 26.363636016845703, "blob_id": "01b2f7a9f4743fa3d694eb0e9df05853fa0db54b", "content_id": "07103cb5e5f48bf72ffbb93aa59a28115ad5dd1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 903, "license_type": "no_license", "max_line_length": 64, "num_lines": 33, "path": "/Components/Player/ProgressBar.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, { Fragment } from 'react';\nimport {Text, ProgressBar} from 'native-base'\nimport { ProgressComponent } from 'react-native-track-player';\nimport { StyleSheet } from 'react-native';\n\nimport { leftPad } from './Utils';\n\nclass ProgressBarA extends ProgressComponent<> {\n render() {\n const { theme } = this.props;\n const { duration, position } = this.state;\n\n return (\n <Fragment>\n <Text style={{fontSize:14}}>\n 0:{leftPad(Math.floor(position), 2)}\n <Text style={{ color: \"gray\",fontSize:14 }}>\n {' '}\n / 0:{leftPad(Math.round(duration), 2)}\n </Text>\n </Text>\n </Fragment>\n );\n }\n}\n\nconst styles = StyleSheet.create({\n progressBar: {\n paddingVertical: 0,\n },\n});\n\nexport default ProgressBarA;\n" }, { "alpha_fraction": 0.605674684047699, "alphanum_fraction": 
0.6134942173957825, "avg_line_length": 26.807453155517578, "blob_id": "0046b0b1a64df70644e20c164483136e1bd6e2a3", "content_id": "0368261f1c6124c08d71133e429d4fa5bccb778c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4476, "license_type": "no_license", "max_line_length": 152, "num_lines": 161, "path": "/server/api/controllers/usersController.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "const mongoConnection = require('../../mongo-connection');\n\nfunction GetUserIdFromReq(req) {\n return req && req.session && req.session.token && req.session.token.email;\n}\n\n// Get the last listening of the user, sorted from the newest to oldest\n// each returned object should look like that:\n// {\n// trackId\n// score\n// }\nasync function GetLastActivitiesByUserId(userId, numOfActivities=300) {\n return (await mongoConnection.queryFromMongoDBSortedMax('ListeningAndSuggestions', {'email': userId}, {'_id': -1}, numOfActivities));\n}\n\nasync function GetUserInfo(userId){\n return (await mongoConnection.queryFromMongoDB('users', {'email': userId}))[0];\n}\n\n// returns\n// [\n// {\n// id (of track)\n// name\n// AudioFeatures\n// ...\n// scoreForUser\n// }\n// ]\nasync function GetFamilliarTracksByUserId(userId, numOfActivities=300) {\n let preferredTracks = {};\n let lastActivities = (await GetLastActivitiesByUserId(userId, numOfActivities)).reverse();\n let likes = [];\n let unlikes = [];\n let userInfo = await GetUserInfo(userId);\n\n if (userInfo.hasOwnProperty('likedTracks')){\n likes = userInfo.likedTracks;\n }\n if (userInfo.hasOwnProperty('unlikedTracks')){\n unlikes = userInfo.unlikedTracks;\n }\n\n let scale = 1;\n lastActivities.forEach((act) => {\n scale += 0.01;\n if(preferredTracks[act.trackId] === undefined) {\n preferredTracks[act.trackId] = 0;\n }\n preferredTracks[act.trackId] += act.score * scale;\n });\n\n likes.forEach((t) => {\n if(preferredTracks[t] === undefined) {\n preferredTracks[t] = 0;\n }\n preferredTracks[t] += 2;\n });\n\n unlikes.forEach((t) => {\n if(preferredTracks[t] === undefined) {\n preferredTracks[t] = 0;\n }\n preferredTracks[t] -= 2;\n });\n\n\n let trackObjects = await mongoConnection.queryFromMongoDBJoin('Tracks', 'AudioFeatures', 'id', 'id',\n {'id': {\n $in: Object.keys(preferredTracks)\n }\n });\n\n Object.keys(preferredTracks).forEach((k) => {\n trackObjects.find((o) => o.id == k).scoreForUser = preferredTracks[k];\n });\n trackObjects.sort(function(a, b) {\n return b.scoreForUser - a.scoreForUser;\n });\n\n return trackObjects;\n}\n\n// ===== out: =====\n// [\n// {\n// id (of track)\n// name\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\nasync function GetPreferredTracksByUserId(userId, numOfActivities=300) {\n return ((await GetFamilliarTracksByUserId(userId, numOfActivities))\n .filter(t => t.scoreForUser > 0));\n}\n\n// ===== out: =====\n// [\n// {\n// id (of track)\n// name\n// AudioFeatures: {\n// ...\n// }\n// ...\n// }\n// ]\nasync function GetUnfamilliarPopularTracksByUserId(userId, numOfActivities=1000) {\n let familliar = await GetFamilliarTracksByUserId(userId, numOfActivities);\n let popular = await mongoConnection.queryFromMongoDBJoinSort('Tracks', 'AudioFeatures', 'id', 'id', {}, familliar.length + 200, {'popularity': -1});\n\n return popular.filter((p) => !familliar.some((f) => p.id == f.id));\n}\n\nasync function getMyDetails(req, res){\n\n if (!req.session.token){\n res.status(401).send(\"You are unauthorized !! 
Please login\");\n }\n else{\n var userDetails = await mongoConnection.queryFromMongoDB(\"users\", {\"email\": req.session.token.email});\n\n res.status(200).send(userDetails);\n }\n}\n\nasync function GetPreferencesNN(userId) {\n return ((await GetUserInfo(userId)).neuralnetwork);\n}\n\nfunction DoesUserExist(req, response) {\n const user = req.body.user;\n mongoConnection.checkIfUserExists(user).then(answer => {\n if (answer) {\n req.session.token.email = user.email;\n }\n response.send(answer)\n });\n}\n\nfunction RegisterUser(req, res) {\n const user = req.body.user;\n mongoConnection.addNewUser(user).then(newUser => {\n res(user);\n });\n}\n\nmodule.exports = {\n getMyDetails: getMyDetails,\n GetFamilliarTracksByUserId: GetFamilliarTracksByUserId,\n GetUnfamilliarPopularTracksByUserId: GetUnfamilliarPopularTracksByUserId,\n GetPreferredTracksByUserId: GetPreferredTracksByUserId,\n GetUserIdFromReq: GetUserIdFromReq,\n GetPreferencesNN: GetPreferencesNN,\n DoesUserExist: DoesUserExist,\n RegisterUser: RegisterUser\n};" }, { "alpha_fraction": 0.6573543548583984, "alphanum_fraction": 0.666985034942627, "avg_line_length": 30.972009658813477, "blob_id": "945c461369d1802a0262bcdb4dab38d476e40121", "content_id": "5af2b47cedc975a08363413724fb200995072753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 12564, "license_type": "no_license", "max_line_length": 129, "num_lines": 393, "path": "/server/api/controllers/tracksController.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "//'use strict';\nvar spotifyAuthentication = require('../../spotify-authentication');\nconst mongoConnection = require('../../mongo-connection');\nvar asyncPolling = require('async-polling');\nconst request = require('request'); // \"Request\" library\nconst reqPromise = require('request-promise');\nconst usersController = require('./usersController');\nconst searchKeys = [ 'a', 'e', 'i', 'o', 'u', 'er', 'ar', 'or', 'de', 'do' ];\nconst { SimilarTracks } = require('../../MachineLearning/ml');\n\nconst spotifyBaseUrl = \"https://api.spotify.com/v1/\";\n\n//let similarTracksMachine = new PyMachine(__dirname + '/../../MachineLearning/pythonScripts/similarTracks_KNN.py');\n\nvar polling = asyncPolling(function(req, res){\n\n\tvar result = {};\n\n\t// get Spotify access token for authentication\n\tvar spotify_access_token_promise = spotifyAuthentication.getAccessTokenForPolling();\n\n\t// random key search letter from a constant array\n\t//keyLetter = searchKeys[Math.floor(Math.random()*searchKeys.length)];\n\tkeyLetter = 'a';\n\n\t// When the access token is given\n\tspotify_access_token_promise.then(async function(spotify_access_token){\n\n\t\t// parameters of HTTP get request to query random tracks from Spotify\n\t\tvar getSearchTrackParameters = {\n\t\t\turl: (spotifyBaseUrl + \"search?q=*\" + keyLetter + \"*&type=track&limit=50\"),\n\t\t\theaders: {\n\t\t\t\t'Authorization': 'Bearer ' + spotify_access_token\n\t\t\t},\n\t\t\tjson: true\n\t\t};\n\t\tvar test = 42;\n\n\t\twhile (test !== undefined || test !== null) {\n\n\t\t\tawait sleep(10000);\n\t\t\t// Invoke the web request\n\t\t\trequest.get(getSearchTrackParameters, async function (error, response, body) {\n\n\t\t\t\t// if the result is OK\n\t\t\t\tif (!error && response.statusCode === 200) {\n\n\t\t\t\t\t//console.log(body)\n\t\t\t\t\tgetSearchTrackParameters.url = body.tracks.next;\n\t\t\t\t\ttest = body.tracks.next;\n\n\t\t\t\t\t// foreach track in the tracks 
array\n\t\t\t\t\tbody.tracks.items.forEach(async item => {\n\n\t\t\t\t\t\tvar trackResult = await mongoConnection.queryFromMongoDB('Tracks', {'id': item.id});\n\n\t\t\t\t\t\tif (trackResult.length < 1) {\n\t\t\t\t\t\t\tawait mongoConnection.addToMongoDB(\"Tracks\", item);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\n\t\t\t\t\tvar tracks = body.tracks.items;\n\n\t\t\t\t\ttracks.forEach(async track => {\n\n\t\t\t\t\t\tvar getAudioParameters = {\n\t\t\t\t\t\t\turl: (spotifyBaseUrl + \"audio-features/\" + track.id),\n\t\t\t\t\t\t\theaders: {\n\t\t\t\t\t\t\t\t'Authorization': 'Bearer ' + spotify_access_token\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tjson: true\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\treqPromise(getAudioParameters)\n\t\t\t\t\t\t\t.then(async function (featuresBody) {\n\t\t\t\t\t\t\t\t//res.statusCode = 200;\n\t\t\t\t\t\t\t\tconsole.log(featuresBody)\n\n\t\t\t\t\t\t\t\tvar featuresResult = await mongoConnection.queryFromMongoDB('AudioFeatures', {'id': featuresBody.id});\n\n\t\t\t\t\t\t\t\tif (featuresResult.length < 1) {\n\t\t\t\t\t\t\t\t\tawait mongoConnection.addToMongoDB(\"AudioFeatures\", featuresBody);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t.catch(function (err) {\n\t\t\t\t\t\t\t\tconsole.log(err);\n\t\t\t\t\t\t\t});\n\n\t\t\t\t\t\tvar albumResult = await mongoConnection.queryFromMongoDB('Albums', {'id': track.album.id});\n\n\t\t\t\t\t\tif (albumResult.length < 1) {\n\t\t\t\t\t\t\tawait mongoConnection.addToMongoDB(\"Albums\", track.album);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttrack.artists.forEach(async artist => {\n\t\t\t\t\t\t\tvar artistResult = await mongoConnection.queryFromMongoDB('Artists', {'id': artist.id});\n\n\t\t\t\t\t\t\tif (artistResult.length < 1) {\n\t\t\t\t\t\t\t\tawait mongoConnection.addToMongoDB(\"Artists\", artist);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t})\n\t\t\t\t\t});\n\n\t\t\t\t\tend(null, result);\n\n\t\t\t\t\tres.json({\"Added all items\": \"true\"});\n\t\t\t\t} else {\n\t\t\t\t\tend(error);\n\t\t\t\t\tconsole.log(\"invalid_token\");\n\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}, function(err){\n\t\tconsole.log(err);\n\t})\n\n}, 25000);\n\nfunction sleep(ms) {\n\treturn new Promise(resolve => {\n\t\tsetTimeout(resolve, ms)\n\t})\n}\n\npolling.on('result', function (tracks) {\n    tracks.forEach(async track => {\n\n\t\tvar albumResult = await mongoConnection.queryFromMongoDB('Albums', {'id': track.album.id});\n\n\t\tif (albumResult.length < 1){\n\t\t\tawait mongoConnection.addToMongoDB(\"Albums\", track.album);\n\t\t}\n\n\t\ttracks.artists.forEach(async artist =>{\n\t\t\tvar artistResult = await mongoConnection.queryFromMongoDB('Artists', {'id': artist.id});\n\n\t\t\tif (artistResult.length < 1){\n\t\t\t\tawait mongoConnection.addToMongoDB(\"Artists\", artist);\n\t\t\t}\n\t\t})\n\t});\n});\n\n// Get all tracks from DB\nasync function getAllTracks(req, res) {\n\tvar Alltracks = await mongoConnection.queryFromMongoDB('Tracks', {}, 1000);\n\n\tres.json(Alltracks);\n}\n\nasync function getAllAudioFeatures(req, res){\n\tvar AllAudioFeatures = await mongoConnection.queryFromMongoDB('AudioFeatures', {}, 1000);\n\n\tres.json(AllAudioFeatures);\n}\n\n// Saves a new audio feature, intended for POST /tracks/features\nfunction AddNewAudioFeature(req, res) {\n\t// Add a new audio feature by req.body\n}\n\n// Saves a new track, exposed at POST /tracks\nfunction AddNewTrack(req, res) {\n// Add a new track by req.body\n}\n\n// Gets a single track by ID, exposed at GET /tracks/:trackId\nfunction GetTrackById(req, res) {\n\tif (req.params.trackId === 'top') {\n\t\tGetTopTracks(req, res);\n\t}\n\telse {\n\t\tvar 
trackResult = mongoConnection.queryFromMongoDBJoin('Tracks', 'AudioFeatures', 'id', 'id', {'id': req.params.trackId});\n\n\t\ttrackResult.then(function (result) {\n\t\t\tif (result.length < 1) {\n\t\t\t\tres.status(404).send('The track with the ID ' + req.params.trackId + \" was not found!\");\n\t\t\t} else {\n\t\t\t\tres.json(result);\n\t\t\t}\n\t\t});\n\t}\n\n}\n\n// Updates a single artist by ID, exposed at PUT /artists/artistID\nfunction UpdateTrackById(req, res) {\n// Updates the artist by ID, get ID by req.params.artistId\n}\n\n// Deletes a single artist by ID, exposed at DELETE /artists/artistID\nfunction DeleteTrackById(req, res) {\n\t// Deletes the artist by ID, get ID by req.params.artistId\n}\n\nvar arrayUnique = function (arr) {\n\treturn arr.filter(function(item, index){\n\t\treturn arr.indexOf(item) >= index;\n\t});\n};\n\n// Saves a new artist, exposed at POST /artists\nfunction LikeTrackById(req, res) {\n\tvar trackResult = mongoConnection.queryFromMongoDB('Tracks', {'id': req.body.trackId});\n\ttrackResult.then(async function (result) {\n\t\tif (result.length < 1) {\n\t\t\tres.status(404).send('The track with the ID ' + req.body.trackId + \" was not found!\");\n\t\t}\n\t\telse {\n\t\t\t//if (req.session.token == null){\n\t\t\t//\tres.status(401).send('You are unauthorized! Please login!');\n\t\t\t//}\n\t\t\t//else{\n\t\t\t\t//var email = req.session.token.email;\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\tvar user = await mongoConnection.queryFromMongoDB('users', {'email': '[email protected]'});\n\t\t\t\t//var user = await mongoConnection.queryFromMongoDB('users', {'email': email});\n\n\t\t\t\tif (user.length < 1) {\n\t\t\t\t\tres.status(401).send('You are unauthorized! 
\n\t\t\t\t}\n\n\t\t\t\tvar likedTracks = user[0].likedTracks;\n\t\t\t\tif (likedTracks === undefined) {\n\t\t\t\t\tlikedTracks = [];\n\t\t\t\t}\n\n\t\t\t\tvar unlikedTracks = user[0].unlikedTracks;\n\t\t\t\tif (unlikedTracks === undefined) {\n\t\t\t\t\tunlikedTracks = [];\n\t\t\t\t}\n\n\t\t\t\tvar length = likedTracks.length;\n\t\t\t\tlikedTracks.push(req.body.trackId);\n\t\t\t\tlikedTracks = arrayUnique(likedTracks);\n\t\t\t\t\n\t\t\t\t// Add the track to the liked tracks\n\t\t\t\tif (length !== likedTracks.length) {\n\t\t\t\t\tif (result[0].likes === undefined) { result[0].likes = 0; }\n\n\t\t\t\t\tresult[0].likes++;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Tracks', {'id': req.body.trackId}, {likes: result[0].likes});\n\n\t\t\t\t\tawait mongoConnection.updateMongoDB('AudioFeatures', {'id': req.body.trackId}, {likes: result[0].likes});\n\t\t\t\t}\n\n\t\t\t\t// Remove the liked track from the unliked tracks if it exists\n\t\t\t\tvar index = unlikedTracks.indexOf(req.body.trackId);\n\t\t\t\tif (index > -1) {\n\t\t\t\t\tunlikedTracks.splice(index, 1);\n\t\t\t\t\tif (result[0].unlikes === undefined) { result[0].unlikes = 1; }\n\t\t\t\t\tresult[0].unlikes--;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Tracks', {'id': req.body.trackId}, {unlikes: result[0].unlikes});\n\n\t\t\t\t\tawait mongoConnection.updateMongoDB('AudioFeatures', {'id': req.body.trackId}, {unlikes: result[0].unlikes});\n\t\t\t\t}\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\t//await mongoConnection.updateMongoDB('users', {'email': email}, {likedTracks: likedTracks});\n\t\t\t\tawait mongoConnection.updateMongoDB('users', {'email': \"[email protected]\"}, {likedTracks: likedTracks, unlikedTracks: unlikedTracks});\n\n\t\t\t\tres.status(200).send('Liked track ' + req.body.trackId);\n\t\t\t//}\n\t\t}\n\t});\n}\n\n// Unlikes a single track by ID\nfunction UnlikeTrackById(req, res) {\n\tvar trackResult = mongoConnection.queryFromMongoDB('Tracks', {'id': req.body.trackId});\n\ttrackResult.then(async function (result) {\n\t\tif (result.length < 1) {\n\t\t\tres.status(404).send('The track with the ID ' + req.body.trackId + \" was not found!\");\n\t\t}\n\t\telse {\n\t\t\t//if (req.session.token == null){\n\t\t\t//\tres.status(401).send('You are unauthorized! Please login!');\n\t\t\t//}\n\t\t\t//else{\n\t\t\t\t//var email = req.session.token.email;\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\tvar user = await mongoConnection.queryFromMongoDB('users', {'email': '[email protected]'});\n\t\t\t\t//var user = await mongoConnection.queryFromMongoDB('users', {'email': email});\n\n\t\t\t\tif (user.length < 1) {\n\t\t\t\t\tres.status(401).send('You are unauthorized! Please login!');\n\t\t\t\t}\n\n\t\t\t\tvar unlikedTracks = user[0].unlikedTracks;\n\t\t\t\tif (unlikedTracks === undefined) {\n\t\t\t\t\tunlikedTracks = [];\n\t\t\t\t}\n\n\t\t\t\tvar likedTracks = user[0].likedTracks;\n\t\t\t\tif (likedTracks === undefined) {\n\t\t\t\t\tlikedTracks = [];\n\t\t\t\t}\n\n\t\t\t\tvar length = unlikedTracks.length;\n\t\t\t\tunlikedTracks.push(req.body.trackId);\n\t\t\t\tunlikedTracks = arrayUnique(unlikedTracks);\n\n\t\t\t\t// Add the track to the unliked tracks\n\t\t\t\tif (length !== unlikedTracks.length) {\n\t\t\t\t\tif (result[0].unlikes === undefined) { result[0].unlikes = 0; }\n\n\t\t\t\t\tresult[0].unlikes++;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Tracks', {'id': req.body.trackId}, {unlikes: result[0].unlikes});\n\n\t\t\t\t\tawait mongoConnection.updateMongoDB('AudioFeatures', {'id': req.body.trackId}, {unlikes: result[0].unlikes});\n\t\t\t\t}\n\n\t\t\t\t// Remove the unliked track from the likes list if it exists\n\t\t\t\tvar index = likedTracks.indexOf(req.body.trackId);\n\t\t\t\tif (index > -1) {\n\t\t\t\t\tlikedTracks.splice(index, 1);\n\t\t\t\t\tif (result[0].likes === undefined) { result[0].likes = 1; }\n\t\t\t\t\tresult[0].likes--;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Tracks', {'id': req.body.trackId}, {likes: result[0].likes});\n\n\t\t\t\t\tawait mongoConnection.updateMongoDB('AudioFeatures', {'id': req.body.trackId}, {likes: result[0].likes});\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\tawait mongoConnection.updateMongoDB('users', {'email': '[email protected]'}, {likedTracks: likedTracks, unlikedTracks: unlikedTracks});\n\t\t\t\t//await mongoConnection.updateMongoDB('users', {'email': email}, {likedTracks: likedTracks});\n\n\t\t\t\tres.status(200).send('Unliked track ' + req.body.trackId);\n\t\t\t//}\n\t\t}\n\t});\n}\n\n// MACHINE LEARNING !!!\n// LIOR CURRENTLY WORKING ON IT @@@@@@@@@@@@@@@@@@@@@@@@\n// DO NOT CHANGE IT @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n// in: trackId\n// out: [ { trackId, trackName, artistName } ]\nasync function GetSimilarTracksById(req, res) {\n\tlet trackId = req.params.trackId;\n\n    let baseTrack = await mongoConnection.queryFromMongoDBJoin(\"Tracks\", \"AudioFeatures\", \"id\", \"id\", {\"id\": trackId});\n\n\tif (baseTrack.length < 1){\n\t\tres.status(404).send(\"Track \" + trackId + \" was not found\");\n\t}\n\telse{\n\t\tif (req.session.token == null){\n\t\t\tres.status(401).send('You are unauthorized! Please login!');
\n\t\t}\n\t\telse{\n\t\t\tlet userId = req.session.token.email;\n\n\t\t\tlet preferredTracks = await usersController.GetPreferredTracksByUserId(userId, 1000);\n\t\t\tlet unfamilliarTracks = await usersController.GetUnfamilliarPopularTracksByUserId(userId, 1000);\n\t\t\tlet allTestedTracks = [...preferredTracks, ...unfamilliarTracks];\n\t\t\n\t\t\tres.status(200).send(await SimilarTracks.search(baseTrack[0], allTestedTracks));\n\t\t}\n\t}\n}\n\n// tracks/top/trackId\nasync function GetTopTracks(req, res) {\n\tlet limit = 10;\n\tif (req.params.limit !== undefined) {\n\t\tlimit = parseInt(req.params.limit);\n\t}\n\tvar trackResult = mongoConnection.queryFromMongoDBSortedMax('Tracks', {}, {likes: -1}, limit);\n\ttrackResult.then(function (result) {\n\t\tres.json(result);\n\t});\n}\n\nmodule.exports = {\n\t//tracksPolling: polling.run(),\n\tgetAllTracks: getAllTracks,\n\tAddNewTrack: AddNewTrack,\n\tGetTrackById: GetTrackById,\n\tUpdateTrackById: UpdateTrackById,\n\tDeleteTrackById: DeleteTrackById,\n\tgetAllAudioFeatures: getAllAudioFeatures,\n\tAddNewAudioFeature: AddNewAudioFeature,\n\tLikeTrackById: LikeTrackById,\n\tUnlikeTrackById: UnlikeTrackById,\n    GetSimilarTracksById: GetSimilarTracksById,\n\tGetTopTracks: GetTopTracks\n};" }, { "alpha_fraction": 0.6513075828552246, "alphanum_fraction": 0.6625155806541443, "avg_line_length": 24.90322494506836, "blob_id": "0ff1c36827a4d6cb0a88ec2cd97a8313c3c8574f", "content_id": "c90ecf48e4c09dedafa43d2d183fa6d627d4675e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 803, "license_type": "no_license", "max_line_length": 66, "num_lines": 31, "path": "/server/api/routes/usersRoutes.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "'use strict';\n\nconst session = require('express-session');\nconst express = require('express');\nconst request = require('request'); // \"Request\" library\nvar cookieParser = require('cookie-parser');\nconst usersController = require('../controllers/usersController');\n\nmodule.exports = function(app) {\n\n    app.use(session({\n        secret: 'keyboard cat',\n        resave: false,\n        saveUninitialized: true,\n        cookie:{ maxAge: 2*60*60*1000} // two hours\n    }));\n\n    app.use(cookieParser());\n\n    // usersController Routes\n    app.route('/users/my')\n        .get(async function(req, res){\n            await usersController.getMyDetails(req, res);\n        });\n\n    app.route('/users/userexist')\n        .post(usersController.DoesUserExist);\n\n    app.route('/users/register')\n        .post(usersController.RegisterUser);\n};\n" }, { "alpha_fraction": 0.5624123215675354, "alphanum_fraction": 0.5624123215675354, "avg_line_length": 33, "blob_id": "24a1e0b898832f78f1868a0557d26f96a8979b1d", "content_id": "3df3ee8dfabfc783c0f47a86b404bd3502b96148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 713, "license_type": "no_license", "max_line_length": 69, "num_lines": 21, "path": "/App.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {Component} from 'react';\nimport {Provider} from 'react-redux'\nimport store from \"./redux/store\";\nimport LoginPage from \"./Components/LoginPage/LoginPage\";\nimport Shell from \"./Components/Shell/Shell\";\nimport {NativeRouter, Route, BackButton} from \"react-router-native\";\n\nexport default class App extends Component<Props> {\n    render() {\n        return (\n            <NativeRouter>\n                <BackButton>\n                    <Provider store={store}>\n                        <Route exact path=\"/\" component={LoginPage}/>\n                        <Route path=\"/home\" component={Shell}/>\n                    </Provider>\n                </BackButton>\n            </NativeRouter>\n        );\n    }\n}" }, { "alpha_fraction": 0.6969371438026428, "alphanum_fraction": 0.6990864872932434, "avg_line_length": 28.09375, "blob_id": "555298c29afe35d0d37a80100b73f10ac49b68f9", "content_id": "3fd238f3920883a10bd19ba39793062dcd7073ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1861, "license_type": "no_license", "max_line_length": 92, "num_lines": 64, "path": "/server/api/controllers/albumsController.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "//'use strict';\n\nvar spotifyAuthentication = require('../../spotify-authentication');\nconst mongoConnection = require('../../mongo-connection');\nvar asyncPolling = require('async-polling');\nconst request = require('request'); // \"Request\" library\nconst reqPromise = require('request-promise');\nvar {PythonShell} = require('python-shell');\n//const usersController = require('usersController');\nconst searchKeys = [ 'a', 'e', 'i', 'o', 'u', 'er', 'ar', 'or', 'de', 'do' ];\n\n// Get all Albums from DB\nasync function getAllAlbums(req, res) {\n\tvar Allalbums = await mongoConnection.queryFromMongoDB('Albums', {});\n\n\tres.json(Allalbums);\n}\n\n// Saves a new album\nfunction AddNewAlbum(req, res) {\n// Add a new album by req.body\n}\n\n// Gets a single album by ID, exposed at GET /albums/albumId\nfunction GetAlbumById(req, res) {\n\tif (req.params.albumId === 'top') {\n\t\tGetTopAlbums(req, res);\n\t}\n\telse {\n\t\tvar albumResult = mongoConnection.queryFromMongoDB('Albums', {'id': req.params.albumId});\n\t\talbumResult.then(function (result) {\n\t\t\tif (result.length < 1) {\n\t\t\t\tres.status(404).send('The album with the ID ' + req.params.albumId + \" was not found!\");\n\t\t\t} else {\n\t\t\t\tres.json(result);\n\t\t\t}\n\t\t});\n\t}\n\n}\n\n// Updates a single album by ID\nfunction UpdateAlbumById(req, res) {\n// Updates the album by ID, get ID by req.params.albumId\n}\n\n// Deletes a single album by ID\nfunction DeleteAlbumById(req, res) {\n\t// Deletes the album by ID, get ID by req.params.albumId\n}\n\nvar arrayUnique = function (arr) {\n\treturn arr.filter(function(item, index){\n\t\treturn arr.indexOf(item) >= index;\n\t});\n};\n\nmodule.exports = {\n\tgetAllAlbums: getAllAlbums,\n\tAddNewAlbum: AddNewAlbum,\n\tGetAlbumById: GetAlbumById,\n\tUpdateAlbumById: UpdateAlbumById,\n\tDeleteAlbumById: DeleteAlbumById\n};" }, { "alpha_fraction": 0.4884873330593109, "alphanum_fraction": 0.48965317010879517, "avg_line_length": 32.63725662231445, "blob_id": "f24cda5f83df4fd3e7780b6357248a4da5c37e0f", "content_id": "d3bce51776ee53de62eddac858b9cbaa4bbfb6d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3431, "license_type": "no_license", "max_line_length": 118, "num_lines": 102, "path": "/redux/reducers/song-reducer.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {\n    SET_TOP_SONGS,\n    SET_SEARCHED_SONGS,\n    TOGGLE_LIKE_SONG,\n    TOGGLE_UNLIKE_SONG,\n    SET_USER_PLAYLIST\n} from \"../actions/songs-actions\";\n\nconst initState = {\n    topSongs: [],\n    searchedSongs: [],\n    userPlaylist: []\n};\n\nexport const songReducer = (state = initState, action) => {\n    const getSongByIdFromTop = songId => {\n        return state.topSongs.findIndex(songItem => {\n            return songItem.id === songId
\n        });\n    };\n\n    const getSongByIdFromSearched = songId => {\n        return state.searchedSongs.findIndex(songItem => {\n            return songItem.id === songId\n        });\n    };\n\n    switch (action.type) {\n        case SET_TOP_SONGS: {\n            state = {\n                ...state,\n                topSongs: action.topSongs\n            };\n            break;\n        }\n\n        case SET_SEARCHED_SONGS: {\n            state = {\n                ...state,\n                searchedSongs: action.searchedSongs\n            };\n            break;\n        }\n\n        case TOGGLE_LIKE_SONG: {\n            const topSongIndex = getSongByIdFromTop(action.songId);\n            const searchedSongIndex = getSongByIdFromSearched(action.songId);\n            const updatedTopSongs = [...state.topSongs];\n            const updatedSearchedSongs = [...state.searchedSongs];\n\n            if (topSongIndex !== -1) {\n                updatedTopSongs[topSongIndex].liked = !state.topSongs[topSongIndex].liked;\n                updatedTopSongs[topSongIndex].unliked = false;\n            }\n\n            if (searchedSongIndex !== -1) {\n                updatedSearchedSongs[searchedSongIndex].liked = !state.searchedSongs[searchedSongIndex].liked;\n                updatedSearchedSongs[searchedSongIndex].unliked = false;\n            }\n\n            state = {\n                ...state,\n                topSongs: updatedTopSongs,\n                searchedSongs: updatedSearchedSongs\n            };\n            break;\n        }\n        case TOGGLE_UNLIKE_SONG: {\n            const topSongIndex = getSongByIdFromTop(action.songId);\n            const searchedSongIndex = getSongByIdFromSearched(action.songId);\n            const updatedTopSongs = [...state.topSongs];\n            const updatedSearchedSongs = [...state.searchedSongs];\n\n            if (topSongIndex !== -1) {\n                updatedTopSongs[topSongIndex].unliked = !state.topSongs[topSongIndex].unliked;\n                updatedTopSongs[topSongIndex].liked = false;\n            }\n\n            if (searchedSongIndex !== -1) {\n                updatedSearchedSongs[searchedSongIndex].unliked = !state.searchedSongs[searchedSongIndex].unliked;\n                updatedSearchedSongs[searchedSongIndex].liked = false;\n            }\n\n            state = {\n                ...state,\n                topSongs: updatedTopSongs,\n                searchedSongs: updatedSearchedSongs\n            };\n            break;\n        }\n        case SET_USER_PLAYLIST:\n            state = {\n                ...state,\n                userPlaylist: action.userPlaylist\n            };\n            break;\n        default:\n            return state;\n    }\n    return state;\n    }\n;\n" }, { "alpha_fraction": 0.49035245180130005, "alphanum_fraction": 0.5086184740066528, "avg_line_length": 30.609756469726562, "blob_id": "e5f93616d1893cb83a6cbe5bedd3e8e261e165de", "content_id": "5b24fb6cec28e3c859692765d2e9ab064efe64c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3887, "license_type": "no_license", "max_line_length": 123, "num_lines": 123, "path": "/Components/LoginPage/LoginPage.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {Fragment} from 'react';\nimport styled from 'styled-components';\nimport {connect} from 'react-redux'\nimport {Image, Text, TouchableNativeFeedback, View} from 'react-native';\nimport {GoogleSignin} from 'react-native-google-signin';\nimport {Login} from \"../../redux/actions/login-actions\";\nimport {Redirect} from 'react-router-native';\nimport axios from 'axios';\n\nGoogleSignin.configure({\n    webClientId: \"331158363292-hd64i4i48r4oaij2op4l5nahpf6u0rfo.apps.googleusercontent.com\",\n    offlineAccess: true\n});\n\n\nconst StyledLoginPage = styled(View)`\n    display: flex;\n    justify-content: space-evenly;\n    align-items: center;\n    flex-flow: column;\n    width: 100%;\n    height: 100%;\n    background-color: #3F51B5;\n`;\n\nconst StyledLoginButtonView = styled(View)`\n    display: flex;\n    width: 200px;\n    justify-content: space-evenly;\n    align-items: center;\n    flex-flow: row;\n`;\n\nconst StyledAppHeader = styled(Text)`\n    width: 100%;\n    font-size: 26px;\n    color: white;\n    text-align: center;\n    font-family: \"Roboto-Thin\";\n    text-align: center;\n`;\n\nconst LoginPage = props => {\n    if (props.user || props.user.email) {\n        GoogleSignin.signInSilently().then(({user}) => {\n            if (user) {\n                loginToServer(user);\n            }\n        }).catch(err => {\n            console.log(err);\n        });\n    }\n\n    const loginToServer = user => {\n        axios.post('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/users/userexist', {\n            user: {\n                email: user.email\n            }\n        }).then(() => props.login(user));\n    };\n\n    const signIn = async () => {\n        try {\n            await GoogleSignin.hasPlayServices();\n            const {user} = await GoogleSignin.signIn();\n            if (user) {\n                axios.post('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/users/Register', {\n                    user: {\n                        email: user.email\n                    }\n                }).then(response => {\n                    console.log(response);\n                    loginToServer(user);\n                });\n            }\n        } catch (error) {\n            console.log(error);\n        }\n    };\n    return (\n        <Fragment>\n            {props.user.email ? <Redirect to=\"/home/topSongs\"/> :\n                (\n                    < StyledLoginPage>\n                        < StyledAppHeader> Welcome to TuneApp!!!</StyledAppHeader>\n                        <StyledLoginButtonView>\n                            <TouchableNativeFeedback onPress={signIn}\n                                                     background={TouchableNativeFeedback.Ripple('ThemeAttrAndroid', true)}>\n                                <View>\n                                    <Image style={{width: 50, height: 50}}\n                                           source={require('../../assets/sign-in-with-google.png')}/>\n                                </View>\n                            </TouchableNativeFeedback>\n                            <TouchableNativeFeedback onPress={signIn}\n                                                     background={TouchableNativeFeedback.Ripple('ThemeAttrAndroid', true)}>\n                                <Image style={{width: 50, height: 50}}\n                                       source={require('../../assets/sign-in-with-spotify.png')}/>\n                            </TouchableNativeFeedback>\n                        </StyledLoginButtonView>\n                    </StyledLoginPage>\n                )\n            }\n        </Fragment>\n    )\n    ;\n};\n\nconst mapStateToProps = state => {\n    return {\n        user: state.login\n    }\n};\n\nconst mapDispatchToProps = dispatch => {\n    return {\n        login: user => {\n            dispatch(Login(user));\n        },\n    }\n};\n\n\nexport default connect(mapStateToProps, mapDispatchToProps)(LoginPage);" }, { "alpha_fraction": 0.6385269165039062, "alphanum_fraction": 0.6447591781616211, "avg_line_length": 35.79166793823242, "blob_id": "783c417dddb7c4fcccf5b412d21ecc3aafe16a49", "content_id": "0ddf1f6e416c7be7a058843225c15963ecbb7b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 153, "num_lines": 48, "path": "/Components/Shell/AppContent/AppContent.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {useEffect, useState} from \"react\";\nimport styled from \"styled-components\";\nimport {Content} from 'native-base';\nimport TopSongs from \"../../TopSongs\";\nimport {Route} from \"react-router-native\";\nimport axios from \"axios\";\nimport {connect} from \"react-redux\";\nimport {SetInitialStats} from \"../../../redux/actions/like-actions\";\nimport ExpandedSongCard from \"../../ExpandedSongCard\";\nimport Search from \"../../Search\";\nimport UserPlaylist from '../../UserPlaylist';\n\nconst StyledContent = styled(Content)`\n    display: flex;\n    background-color: gray;\n`;\n\nconst AppContent = props => {\n    const [isUserLikedTracksLoaded, setIsUserLikedTracksLoaded] = useState(false);\n    useEffect(() => {\n        axios.get('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/users/my/')\n            .then(({data}) => {\n                props.setInitialStats(data);\n                setIsUserLikedTracksLoaded(true);\n            });\n    }, []);\n\n    return (\n        <StyledContent style={{backgroundColor:'white'}}>\n            <Route exact path={\"/home/topSongs\"} render={(routeProps) => <TopSongs {...routeProps} isUserLikedTracksLoaded={isUserLikedTracksLoaded}/>}/>
\n            <Route exact path={\"/home/userPlaylist\"} render={(routeProps) =>\n                <UserPlaylist {...routeProps} isUserLikedTracksLoaded={isUserLikedTracksLoaded}/>}/>\n            <Route exact path={\"/home/search\"} component={Search}/>\n            <Route exact path={\"/home/selectedSong\"} component={ExpandedSongCard}/>\n        </StyledContent>\n    );\n};\n\nconst mapDispatchToProps = dispatch => {\n    return {\n        setInitialStats: (stats) => {\n            dispatch(SetInitialStats(stats));\n        },\n    }\n};\n\n\nexport default connect(null, mapDispatchToProps)(AppContent);" }, { "alpha_fraction": 0.6687611937522888, "alphanum_fraction": 0.6775882840156555, "avg_line_length": 33.10714340209961, "blob_id": "ec8d0454cf623081b94082b95e1b88c56b8a2c70", "content_id": "ec5104dc8fb0e73a4b46a90b1118278b174c6a1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6684, "license_type": "no_license", "max_line_length": 133, "num_lines": 196, "path": "/server/api/controllers/artistsController.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "//'use strict';\n\nvar spotifyAuthentication = require('../../spotify-authentication');\nconst mongoConnection = require('../../mongo-connection');\nconst request = require('request'); // \"Request\" library\n\n// Get all artists from DB\nasync function getAllArtists(req, res) {\n\tvar Allartists = await mongoConnection.queryFromMongoDB('Artists', {});\n\n\tres.json(Allartists);\n}\n\n// Saves a new artist, exposed at POST /artists\nfunction AddNewArtist(req, res) {\n// Add a new artist by req.body\n}\n\n// Gets a single artist by ID, exposed at GET /artists/artistID\nfunction GetArtistById(req, res) {\n\tif (req.params.artistId === 'top') {\n\t\tGetTopArtists(req, res);\n\t}\n\telse {\n\t\tvar artistResult = mongoConnection.queryFromMongoDB('Artists', {'id': req.params.artistId});\n\t\tartistResult.then(function (result) {\n\t\t\tif (result.length < 1) {\n\t\t\t\tres.status(404).send('The artist with the ID ' + req.params.artistId + \" was not found!\");\n\t\t\t} else {\n\t\t\t\tres.json(result);\n\t\t\t}\n\t\t});\n\t}\n}\n\n// Updates a single artist by ID, exposed at PUT /artists/artistID\nfunction UpdateArtistById(req, res) {\n// Updates the artist by ID, get ID by req.params.artistId\n}\n\n// Deletes a single artist by ID, exposed at DELETE /artists/artistID\nfunction DeleteArtistById(req, res) {\n\t// Deletes the artist by ID, get ID by req.params.artistId\n}\n\nvar arrayUnique = function (arr) {\n\treturn arr.filter(function(item, index){\n\t\treturn arr.indexOf(item) >= index;\n\t});\n};\n\n// Likes a single artist by ID\nfunction LikeArtistById(req, res) {\n\tvar artistResult = mongoConnection.queryFromMongoDB('Artists', {'id': req.body.artistId});\n\tartistResult.then(async function (result) {\n\t\tif (result.length < 1) {\n\t\t\tres.status(404).send('The artist with the ID ' + req.body.artistId + \" was not found!\");\n\t\t}\n\t\telse {\n\t\t\t//if (req.session.token == null){\n\t\t\t//\tres.status(401).send('You are unauthorized! Please login!');\n\t\t\t//}\n\t\t\t//else{\n\t\t\t\t//var email = req.session.token.email;\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\tvar user = await mongoConnection.queryFromMongoDB('users', {'email': '[email protected]'});\n\t\t\t\t//var user = await mongoConnection.queryFromMongoDB('users', {'email': email});\n\n\t\t\t\tvar likedArtists = user[0].likedArtists;\n\t\t\t\tif (likedArtists === undefined) {\n\t\t\t\t\tlikedArtists = [];\n\t\t\t\t}\n\n\t\t\t\tvar unlikedArtists = user[0].unlikedArtists;\n\t\t\t\tif (unlikedArtists === undefined) {\n\t\t\t\t\tunlikedArtists = [];\n\t\t\t\t}\n\n\t\t\t\tvar length = likedArtists.length;\n\t\t\t\tlikedArtists.push(req.body.artistId);\n\t\t\t\tlikedArtists = arrayUnique(likedArtists);\n\t\t\t\t\n\t\t\t\t// Add the artist to the liked artists\n\t\t\t\tif (length !== likedArtists.length) {\n\t\t\t\t\tif (result[0].likes === undefined) { result[0].likes = 0; }\n\n\t\t\t\t\tresult[0].likes++;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Artists', {'id': req.body.artistId}, {likes: result[0].likes});\n\t\t\t\t}\n\n\t\t\t\t// Remove the liked artist from the unliked artists if it exists\n\t\t\t\tvar index = unlikedArtists.indexOf(req.body.artistId);\n\t\t\t\tif (index > -1) {\n\t\t\t\t\tunlikedArtists.splice(index, 1);\n\t\t\t\t\tif (result[0].unlikes === undefined) { result[0].unlikes = 1; }\n\t\t\t\t\tresult[0].unlikes--;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Artists', {'id': req.body.artistId}, {unlikes: result[0].unlikes});\n\t\t\t\t}\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\t//await mongoConnection.updateMongoDB('users', {'email': email}, {likedArtists: likedArtists});\n\t\t\t\tawait mongoConnection.updateMongoDB('users', {'email': \"[email protected]\"}, {likedArtists: likedArtists, unlikedArtists: unlikedArtists});\n\n\t\t\t\tres.status(200).send('Liked artist ' + req.body.artistId);\n\t\t\t//}\n\t\t}\n\t});\n}\n\n// Unlikes a single artist by ID\nfunction UnlikeArtistById(req, res) {\n\tvar artistResult = mongoConnection.queryFromMongoDB('Artists', {'id': req.body.artistId});\n\tartistResult.then(async function (result) {\n\t\tif (result.length < 1) {\n\t\t\tres.status(404).send('The artist with the ID ' + req.body.artistId + \" was not found!\");\n\t\t}\n\t\telse {\n\t\t\t//if (req.session.token == null){\n\t\t\t//\tres.status(401).send('You are unauthorized! Please login!');\n\t\t\t//}\n\t\t\t//else{\n\t\t\t\t//var email = req.session.token.email;\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\tvar user = await mongoConnection.queryFromMongoDB('users', {'email': '[email protected]'});\n\t\t\t\t//var user = await mongoConnection.queryFromMongoDB('users', {'email': email});\n\n\t\t\t\tif (user.length < 1) {\n\t\t\t\t\tres.status(401).send('You are unauthorized! Please login!');
\n\t\t\t\t}\n\n\t\t\t\tvar unlikedArtists = user[0].unlikedArtists;\n\t\t\t\tif (unlikedArtists === undefined) {\n\t\t\t\t\tunlikedArtists = [];\n\t\t\t\t}\n\n\t\t\t\tvar likedArtists = user[0].likedArtists;\n\t\t\t\tif (likedArtists === undefined) {\n\t\t\t\t\tlikedArtists = [];\n\t\t\t\t}\n\n\t\t\t\tvar length = unlikedArtists.length;\n\t\t\t\tunlikedArtists.push(req.body.artistId);\n\t\t\t\tunlikedArtists = arrayUnique(unlikedArtists);\n\t\t\t\t\n\t\t\t\t// Add the artist to the unliked artists\n\t\t\t\tif (length !== unlikedArtists.length) {\n\t\t\t\t\tif (result[0].unlikes === undefined) { result[0].unlikes = 0; }\n\n\t\t\t\t\tresult[0].unlikes++;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Artists', {'id': req.body.artistId}, {unlikes: result[0].unlikes});\n\t\t\t\t}\n\n\t\t\t\t// Remove the unliked artist from the liked artists if it exists\n\t\t\t\tvar index = likedArtists.indexOf(req.body.artistId);\n\t\t\t\tif (index > -1) {\n\t\t\t\t\tlikedArtists.splice(index, 1);\n\t\t\t\t\tif (result[0].likes === undefined) { result[0].likes = 1; }\n\t\t\t\t\tresult[0].likes--;\n\t\t\t\t\tawait mongoConnection.updateMongoDB('Artists', {'id': req.body.artistId}, {likes: result[0].likes});\n\t\t\t\t}\n\n\t\t\t\t// REPLACE THE EMAIL WITH req.session.token.email IT SHOULD WORK IF YOU'RE USING A REAL WEB APP!\n\t\t\t\t//await mongoConnection.updateMongoDB('users', {'email': email}, {unlikedArtists: unlikedArtists});\n\t\t\t\tawait mongoConnection.updateMongoDB('users', {'email': \"[email protected]\"}, {unlikedArtists: unlikedArtists, likedArtists: likedArtists});\n\n\t\t\t\tres.status(200).send('unliked artist ' + req.body.artistId);\n\t\t\t//}\n\t\t}\n\t});\n}\n\n// artists/top/artistId\nasync function GetTopArtists(req, res) {\n\tlet limit = 10;\n\tif (req.params.limit !== undefined) {\n\t\tlimit = parseInt(req.params.limit);\n\t}\n\tvar artistResult = mongoConnection.queryFromMongoDBSortedMax('Artists', {}, {likes: -1}, limit);\n\tartistResult.then(function (result) {\n\t\tres.json(result);\n\t});\n}\n\n\nmodule.exports = {\n\tgetAllArtists: getAllArtists,\n\tAddNewArtist: AddNewArtist,\n\tGetArtistById: GetArtistById,\n\tUpdateArtistById: UpdateArtistById,\n\tDeleteArtistById: DeleteArtistById,\n\tLikeArtistById: LikeArtistById,\n\tUnlikeArtistById: UnlikeArtistById,\n\tGetTopArtists: GetTopArtists\n};" }, { "alpha_fraction": 0.5711195468902588, "alphanum_fraction": 0.5835586786270142, "avg_line_length": 34.25925827026367, "blob_id": "95b9843afac601e9bed22c548436b744c5a2bcda", "content_id": "d0a93c85bd2218f624b58f76c55e590376c7e115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1849, "license_type": "no_license", "max_line_length": 91, "num_lines": 54, "path": "/Components/Shell/SideBar/SideBar.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {Component} from \"react\";\nimport {Container, ListItem, Text, List} from \"native-base\";\nimport {Link} from \"react-router-native\";\nimport styled from \"styled-components\";\n\nconst StyledSideBarContainer = styled(Container)`\n    display: flex;\n    background-color: #3F51B5;\n    flex-flow: row wrap;\n    justify-content:space-evenly;\n    align-items:center;\n`;\nconst StyledSideBarLink = styled(Link)`\n    display: flex;\n    flex-flow: row wrap;\n    justify-content:space-evenly;\n    align-items:center;\n    width:100%;\n    \n`;\n\nconst StyledSideBarText = styled(Text)`\n    font-size: 20px;\n`;\n\nconst StyledSideBarListItem = styled(ListItem)`\n    background-color: #1719b5;\n`;\n\nexport default class SideBar extends Component<Props> {\n    render() {\n        return (\n            <StyledSideBarContainer>\n                <List>\n                    <StyledSideBarListItem itemDivider>\n                        <StyledSideBarLink to=\"/home/topSongs\" underlayColor=\"#f0f4f7\">\n                            <StyledSideBarText>Top Songs</StyledSideBarText>\n                        </StyledSideBarLink>\n                    </StyledSideBarListItem>\n                    <StyledSideBarListItem itemDivider>\n                        <StyledSideBarLink to=\"/home/userPlaylist\" underlayColor=\"#f0f4f7\">\n                            <StyledSideBarText>User Playlist</StyledSideBarText>\n                        </StyledSideBarLink>\n                    </StyledSideBarListItem>\n                    <StyledSideBarListItem itemDivider>\n                        <StyledSideBarLink to=\"/home/search\" underlayColor=\"#f0f4f7\">\n                            <StyledSideBarText>Search</StyledSideBarText>\n                        </StyledSideBarLink>\n                    </StyledSideBarListItem>\n                </List>\n            </StyledSideBarContainer>\n        );\n    }\n}" }, { "alpha_fraction": 0.7023319602012634, "alphanum_fraction": 0.7023319602012634, "avg_line_length": 28.239999771118164, "blob_id": "3966b5fe7ed38719c2f9915ccf1c3748c850eb0b", "content_id": "6838553419740094682878de3248bf5c743eaee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 729, "license_type": "no_license", "max_line_length": 97, "num_lines": 25, "path": "/redux/actions/like-actions.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "export const LIKE = 'LIKE';\nexport const UNLIKE = 'UNLIKE';\nexport const REMOVE_LIKE = 'REMOVE_LIKE';\nexport const REMOVE_UNLIKE = 'REMOVE_UNLIKE';\nexport const SET_INITIAL_STATS = \"SET_INITIAL_STATS\";\n\nexport const LikeSong = songId => {\n    return { type: LIKE, songId}\n};\n\nexport const UnlikeSong = songId => {\n    return { type: UNLIKE, songId}\n};\n\nexport const RemoveLikeSong = songId => {\n    return { type: REMOVE_LIKE, songId}\n};\n\nexport const RemoveUnlikeSong = songId => {\n    return { type: REMOVE_UNLIKE, songId}\n};\n\nexport const SetInitialStats = ({likedTracks, likedArtists, unlikedTracks, unlikedArtists}) => {\n    return { type: SET_INITIAL_STATS, likedTracks, likedArtists, unlikedArtists, unlikedTracks}\n};" }, { "alpha_fraction": 0.6031836271286011, "alphanum_fraction": 0.6139852404594421, "avg_line_length": 29.34482765197754, "blob_id": "31724f0e98d21d63d69105ede44bd6b9d5f82ef6", "content_id": "1b90f484ba006a01e0013d16bad22da48930faba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1759, "license_type": "no_license", "max_line_length": 119, "num_lines": 58, "path": "/Components/TopSongs.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import React, {useEffect} from \"react\";\nimport styled from 'styled-components';\nimport {View} from 'native-base';\nimport axios from 'axios';\nimport {connect} from \"react-redux\";\nimport {SetTopSongs} from \"../redux/actions/songs-actions\";\nimport SongCard from './SongCardList/SongCard';\nimport SongItem from \"./common/SongItem\";\n\nconst StyledTopSongsContainer = styled(View)`\n    display: flex;\n    background-color: #C5CAE9;\n    flex-flow: row wrap;\n    flex: 1;\n    width: 100%;\n    justify-content: space-evenly;\n    align-items: center;\n`;\n\nconst TopSongs = props => {\n    useEffect(() => {\n        if (props.isUserLikedTracksLoaded && !props.songReducer.topSongs.length) {\n            axios.get('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/tracks/top/30')\n                .then(({data: fetchedSongs}) => {\n                    props.setTopSongs([...fetchedSongs]);\n                });\n        }\n    }, [props.isUserLikedTracksLoaded]);\n\n    return (\n        <StyledTopSongsContainer>\n            {
\n                props.songReducer.topSongs.map(song => {\n                    const playlistContext = props.songReducer.topSongs.slice(props.songReducer.topSongs.indexOf(song));\n                    const SongCardItem = SongItem(SongCard, song, playlistContext);\n                    return <SongCardItem key={song.id}/>\n                })\n            }\n        </StyledTopSongsContainer>\n    )\n};\n\nconst mapStateToProps = state => {\n    console.log(state);\n    return {\n        songReducer: state.songReducer\n    }\n};\n\nconst mapDispatchToProps = dispatch => {\n    return {\n        setTopSongs: topSongs => {\n            dispatch(SetTopSongs(topSongs));\n        }\n    }\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(TopSongs);" }, { "alpha_fraction": 0.43405675888061523, "alphanum_fraction": 0.44407346844673157, "avg_line_length": 36.45833206176758, "blob_id": "2dd0d0497cbfb6ee043681ae008fadcf09cf6577", "content_id": "dc84acc7c03c45dd54955564e2f29a33bd718049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1797, "license_type": "no_license", "max_line_length": 121, "num_lines": 48, "path": "/Components/SongCardList/SongCard.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {Image, TouchableOpacity} from \"react-native\";\nimport {Body, Button, Card, CardItem, Icon, Right, Text} from \"native-base\";\nimport React from \"react\";\nimport styled from \"styled-components\";\n\nconst StyledSongCard = styled(Card)`\n    width: 96%;\n    height: 230px;\n`;\n\nconst SongCard = props => {\n    const {song} = props;\n\n    return (\n        <StyledSongCard key={song.id}>\n            <TouchableOpacity onPress={() => props.onPlay(song, props.playlistContext)} style={{width: '100%', flex: 1}}>\n                <Image\n                    style={{width: '100%', flex: 1}}\n                    source={{uri: song.album.images[0].url}}\n                />\n            </TouchableOpacity>\n            <CardItem footer button onPress={() => props.expandSong()}>\n                <Body>\n                    <Text>{song.name}</Text>\n                    <Text note>{song.artists[0].name}</Text>\n                </Body>\n                <Right>\n                    <CardItem style={{paddingLeft: '30%', paddingRight: 0}}>\n                        <Body>\n                            <Button small rounded bordered={!props.isLiked}\n                                    onPress={() => props.likeSong(song)}>\n                                <Icon active name=\"thumbs-up\"/>\n                            </Button>\n                        </Body>\n                        <Right>\n                            <Button small rounded bordered={!props.isUnliked}\n                                    onPress={() => props.unlikeSong(song)}>\n                                <Icon active name=\"thumbs-down\"/>\n                            </Button>\n                        </Right>\n                    </CardItem>\n                </Right>\n            </CardItem>\n        </StyledSongCard>\n    )\n};\n\nexport default SongCard;" }, { "alpha_fraction": 0.5550572872161865, "alphanum_fraction": 0.572496235370636, "avg_line_length": 34.684932708740234, "blob_id": "0900dce9dda1957da76ac0f38ddd46dbf12c5d77", "content_id": "89af32274d4344757fc422ce1046cc2ba8d63888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2007, "license_type": "no_license", "max_line_length": 113, "num_lines": 56, "path": "/Components/ExpandedSongCard.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import SongCard from \"./SongCardList/SongCard\";\nimport React, {useEffect, useState} from \"react\";\nimport {View, Text, List, ListItem, Spinner} from 'native-base'\nimport axios from \"axios\";\nimport CompactSong from \"./CompactSongList/CompactSong\";\nimport styled from \"styled-components\";\nimport SongItem from \"./common/SongItem\";\nimport {Image, ScrollView} from \"react-native\";\n\nconst StyledSongContainer = styled(View)`\n    display: flex;\n    flex-flow: column;\n    flex: 1;\n    width: 100%;\n    justify-content: space-evenly;\n    align-items: center;\n    background-color: #ECEFF1;\n`;\n\nconst ExpandedSongCard = props => {\n    const [similarSongs, setSimilarSongs] = useState([]);\n    const song = props.song ? props.song : props.location.state.song;\n\n    useEffect(() => {\n        setSimilarSongs([]);\n        axios.get('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/tracks/similar/'.concat(song.id))\n            .then(({data: similarSongs}) => {\n                setSimilarSongs([...similarSongs]);\n            });\n    }, [song]);\n\n    const SongCardItem = SongItem(SongCard, song);\n    return (\n        <View>\n            <StyledSongContainer>\n                <Image\n                    style={{width: '100%', height: 250, flex: 1}}\n                    source={{uri: song.album.images[0].url}}\n                />\n                <ListItem style={{width: '100%'}} itemDivider>\n                    <Text>{song.artists[0].name} - {song.name}</Text>\n                </ListItem>\n            </StyledSongContainer>\n            <ScrollView style={{height: 378, backgroundColor: 'white', display: 'flex'}}>\n                {!similarSongs.length ? <Spinner color={'#3c50b5'}/> :\n                    similarSongs.map(similarSong => {\n                        const SongCardItem = SongItem(CompactSong, similarSong, similarSongs);\n                        return <SongCardItem/>\n                    })\n                }\n            </ScrollView>\n        </View>\n    )\n};\n\nexport default ExpandedSongCard;" }, { "alpha_fraction": 0.687747061252594, "alphanum_fraction": 0.6916996240615845, "avg_line_length": 30.6875, "blob_id": "c633ee8cb88944fad8d78ca2197a2b5c88da2f72", "content_id": "64eb7fdeaf817816dfb58663f71ac61359a6a5da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 506, "license_type": "no_license", "max_line_length": 77, "num_lines": 16, "path": "/Components/Player/TouchableIcon.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import * as React from 'react';\nimport {TouchableNativeFeedback} from 'react-native';\nimport {Icon} from 'native-base';\n\nconst TouchableIcon = props => (\n    <TouchableNativeFeedback\n        borderless={props.borderless}\n        disabled={props.disabled}\n        onPress={props.onPress}\n        background={TouchableNativeFeedback.Ripple('ThemeAttrAndroid', true)}\n    >\n        <Icon name={props.name} style={{color:'gray', fontSize: 20}}/>\n    </TouchableNativeFeedback>\n);\n\nexport default TouchableIcon;" }, { "alpha_fraction": 0.6669796705245972, "alphanum_fraction": 0.6807511448860168, "avg_line_length": 27.316238403320312, "blob_id": "ca057665239c8c2fac268edbace2d56b872bfecb", "content_id": "263ae3972807ca81b24857ca21bffd8cb1565326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3195, "license_type": "no_license", "max_line_length": 142, "num_lines": 117, "path": "/server/api/controllers/playlistController.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "//'use strict';\nvar spotifyAuthentication = require('../../spotify-authentication');\nconst mongoConnection = require('../../mongo-connection');\nvar usersController = require('./usersController');\nvar asyncPolling = require('async-polling');\nconst request = require('request'); // \"Request\" library\nconst reqPromise = require('request-promise');\nvar moment = require('moment');\nconst { Recommendations } = require('../../MachineLearning/ml');\n\nfunction getDate(){\n\t\t\n\tvar today = new Date();\n\n\tvar month = today.getMonth() + 1;\n\n\tvar day = today.getDate();\n\n\tvar hours = today.getHours();\n\n\tvar minutes = today.getMinutes();\n\n\tvar seconds = today.getSeconds();\n\n\tvar date = today.getFullYear()+'-' + (month < 10 ? '0' + month: month) + '-' + (day < 10 ? '0' + day: day);\n\n\tvar time = (hours < 10 ? '0' + hours: hours) + '-' + (minutes < 10 ? '0' + minutes: minutes) + '-' + (seconds < 10 ? '0' + seconds: seconds);
\n\n\treturn (date + \" \" + time)\n}\n\nasync function buildPlaylist(req, res){\n\n\t// In production, unmark all the comments of user details\n\t//if (!req.session.token){\n\t//\tres.status(401).send('Unauthorized !! Please login');\n\t//}\n\t//else{\n\n\t\tuserId = \"[email protected]\";\n\n\t\tlet [familliarTracks, unfamilliarTracks, userPreferencesNN] = await Promise.all([\n\t\t\tusersController.GetFamilliarTracksByUserId(userId),\n\t\t\tusersController.GetUnfamilliarPopularTracksByUserId(userId),\n\t\t\tusersController.GetPreferencesNN(userId)\n\t\t]);\n\n\t\treturn Recommendations.classifyMultiple(userPreferencesNN, familliarTracks, unfamilliarTracks);\n\t//}\n}\n\nasync function listenPlaylist(req, res){\n\t\n\t// In production, unmark all the comments of user details\n\t//if (!req.session.token){\n\t//\tres.status(401).send('Unauthorized !! Please login');\n\t//}\n\t//else{\n\n\t\tvar currentTime = getDate();\n\n\t\tvar startListeningTime = moment(req.body.startListeningTime, 'YYYY-MM-DD HH:mm:ss');\n\n\t\tvar trackId = req.body.trackId;\n\n\t\tvar durationOfListening = moment(currentTime, 'YYYY-MM-DD HH:mm:ss').diff(startListeningTime, 'milliseconds');\n\n\t\tvar trackFromDB = await mongoConnection.queryFromMongoDB('Tracks', {'id': trackId});\n\n\t\tif(trackFromDB.length < 1){\n\t\t\tres.status(404).send(\"Track \" + trackId + \" not found\");\n\t\t}\n\t\telse{\n\t\t\tvar listeningPercent = (durationOfListening * 100 / trackFromDB[0].duration_ms);\n\n\t\t\tvar score = (listeningPercent - 50) / 10;\n\n\t\t\tif (req.body.isSelectedByUser == \"true\"){\n\t\t\t\tscore += 1;\n\t\t\t}\n\n\t\t\tvar listeningData = {\n\t\t\t\ttrackId: req.body.trackId,\n\t\t\t\temail: \"[email protected]\",\n\t\t\t\t//email: req.session.token.email,\n\t\t\t\tdateTime: currentTime,\n\t\t\t\tduration: durationOfListening,\n\t\t\t\tlisteningPercent: listeningPercent,\n\t\t\t\tscore: score,\n\t\t\t\tisListened: req.body.isListened,\n\t\t\t\tisSelectedByUser: req.body.isSelectedByUser\n\t\t\t};\n\t\n\t\t\ttry{\n\t\t\t\tawait mongoConnection.addToMongoDB('ListeningAndSuggestions', listeningData);\n\n\t\t\t\t//res.status(200).send(listeningData);\n\t\t\t}\n\t\t\tcatch{\n\t\t\t\tres.status(500).send(\"Error while processing this request\");\t\n\t\t\t}\n\t\n\t\t\tres.status(200).send(\"success\");\n\t\t}\n\t//}\n}\n\nvar arrayUnique = function (arr) {\n\treturn arr.filter(function(item, index){\n\t\treturn arr.indexOf(item) >= index;\n\t});\n};\n\nmodule.exports = {\n\tbuildPlaylist: buildPlaylist,\n\tlistenPlaylist: listenPlaylist\n};" }, { "alpha_fraction": 0.5187817215919495, "alphanum_fraction": 0.5187817215919495, "avg_line_length": 23.024391174316406, "blob_id": "3e2f6906975db8faad46a56ae9b190275c3824eb", "content_id": "2edd13efbd62ca17dc5f8c06749694471d53d672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 985, "license_type": "no_license", "max_line_length": 75, "num_lines": 41, "path": "/redux/reducers/player-reducer.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {CHANGE_CURRENT_TRACK, PLAY_SONGS} from \"../actions/player-actions\";\n\nexport const PLAYBACK_STATE = 'PLAYBACK_STATE';\n\nconst initialState = {\n    playerState: null,\n    newTrackId: undefined\n};\n\nexport default function reducer(state = initialState, action) {\n    switch (action.type) {\n        case PLAYBACK_STATE: {\n            state = {...state, ...{playerState: action.payload}};\n            break;\n        }\n        case CHANGE_CURRENT_TRACK : {\n            state = {\n                ...state,\n                newTrackId: action.newTrackId\n            };\n            break;\n        }\n        case PLAY_SONGS : {\n            state = {\n                ...state,\n                currentPlaylist: action.songsList,\n                requestedSong: action.requestedSong\n            };\n            break;\n        }\n        default:\n            return state;\n    }\n\n    return state;\n}\n\nexport const playbackState = (payload: string) => ({\n    type: PLAYBACK_STATE,\n    payload,\n});\n" }, { "alpha_fraction": 0.6626016497612, "alphanum_fraction": 0.6644493937492371, "avg_line_length": 28.107526779174805, "blob_id": "371d6d106d916db1baa465677c5ed2fb53edd7f9", "content_id": "075c28919e526e71a17bc134c7ac38bc5292ace2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2706, "license_type": "no_license", "max_line_length": 120, "num_lines": 93, "path": "/server/google-authentication.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "const {google} = require('googleapis');\nrequire('dotenv').config({path: __dirname+'/tuneApp.env'});\nconst express = require('express');\nconst session = require('express-session');\nconst app = require('./index.js');\n\nconst googleConfig = {\n    clientId: process.env.GOOGLE_CLIENT_ID, // e.g. asdfghjkljhgfdsghjk.apps.googleusercontent.com\n    clientSecret: process.env.GOOGLE_CLIENT_SECRET, // e.g. _ASDFA%DFASDFASDFASD#FAD-\n    redirect: process.env.GOOGLE_REDIRECT_URI // this must match your google api settings\n};\n\n/**\n * This scope tells google what information we want to request.\n */\nconst defaultScope = [\n    'https://www.googleapis.com/auth/plus.me',\n    'https://www.googleapis.com/auth/userinfo.email',\n    'https://www.googleapis.com/auth/userinfo.profile'\n];\n\n/*************/\n/** HELPERS **/\n/*************/\n\n/**\n * Create the google auth object which gives us access to talk to google's apis.\n */\n\nfunction createConnection() {\n    return new google.auth.OAuth2(\n        googleConfig.clientId,\n        googleConfig.clientSecret,\n        googleConfig.redirect\n    );\n}\n\nfunction getConnectionUrl(auth) {\n    return auth.generateAuthUrl({\n        access_type: 'offline',\n        prompt: 'consent',\n        scope: defaultScope\n    });\n}\n\nfunction getGooglePlusApi(auth) {\n    return google.plus({ version: 'v1', auth });\n}\n\n/**********/\n/** MAIN **/\n\nmodule.exports = {\n    /**\n     * Part 1: Create a Google URL and send to the client to log in the user.\n     */\n    urlGoogle: function(){\n        const auth = createConnection();\n        const url = getConnectionUrl(auth);\n        return url;\n    },\n    /**\n     * Part 2: Take the \"code\" parameter which Google gives us once when the user logs in, then get the user's email and id.\n     */\n    getGoogleAccountFromCode: async function(code){\n        const auth = createConnection();\n        const data = await auth.getToken(code);\n        //const data = await auth.getToken(code);\n        const tokens = data.tokens;\n\n        auth.setCredentials(tokens);\n        const plus = getGooglePlusApi(auth);\n        const me = await plus.people.get({ userId: 'me' });\n        const userGoogleId = me.data.id;\n        const userGoogleEmail = me.data.emails && me.data.emails.length && me.data.emails[0].value;\n        const userGoogleName = me.data.displayName;\n        const userGoogleFname = me.data.name.givenName;\n        const userGoogleLname = me.data.name.familyName;\n        const userGoogleImage = me.data.image.url;\n        const userGoogleGender = me.data.gender;\n\n        return {\n            google_id: userGoogleId,\n            email: userGoogleEmail,\n            name: userGoogleName,\n            first_name: userGoogleFname,\n            last_name: userGoogleLname,\n            gender: userGoogleGender,\n            google_image_url: userGoogleImage\n        };\n    }\n\n};" }, { "alpha_fraction": 0.7116402387619019, "alphanum_fraction": 0.7116402387619019, "avg_line_length": 29.239999771118164, "blob_id": "1ea7e7aa4e9b6b7c388ae8a77f5a145d8143be91", "content_id": "e569e1bc07c1c4e91bdf2404d9a5e29a02095463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 756, "license_type": "no_license", "max_line_length": 55, "num_lines": 25, "path": "/redux/actions/songs-actions.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "export const SET_TOP_SONGS = 'SET_TOP_SONGS';\nexport const SET_SEARCHED_SONGS = 'SET_SEARCHED_SONGS';\nexport const TOGGLE_LIKE_SONG = 'TOGGLE_LIKE_SONG';\nexport const TOGGLE_UNLIKE_SONG = 'TOGGLE_UNLIKE_SONG';\nexport const SET_USER_PLAYLIST = 'SET_USER_PLAYLIST';\n\nexport const SetTopSongs = topSongs => {\n    return { type: SET_TOP_SONGS, topSongs}\n};\n\nexport const ToggleLikeSong = songId => {\n    return { type: TOGGLE_LIKE_SONG, songId}\n};\n\nexport const ToggleUnlikeSong = songId => {\n    return { type: TOGGLE_UNLIKE_SONG, songId}\n};\n\nexport const SetSearchedSongs = searchedSongs => {\n    return { type: SET_SEARCHED_SONGS, searchedSongs}\n};\n\nexport const SetUserPlaylist = userPlaylist => {\n    return {type: SET_USER_PLAYLIST, userPlaylist}\n};\n" }, { "alpha_fraction": 0.46348315477371216, "alphanum_fraction": 0.46348315477371216, "avg_line_length": 19.941177368164062, "blob_id": "d23052f1f202d3b31e8f64df4323b55f3677d192", "content_id": "c42b7ebc0a91846195196305adf4c5d995c3dc20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 356, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/redux/reducers/login-reducer.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {LOGIN} from \"../actions/login-actions\";\n\nconst initState = {\n};\n\nexport const loginReducer = (state = initState, action) => {\n    switch (action.type) {\n        case LOGIN: {\n            state = action.user;\n            break;\n        }\n        default:\n            return state;\n    }\n    return state;\n    }\n;\n" }, { "alpha_fraction": 0.6487119197845459, "alphanum_fraction": 0.6510538458824158, "avg_line_length": 31.923076629638672, "blob_id": "b979dbf002e1fa1ed3e0ecfc7f9d5d883e99d166", "content_id": "e84a059317b9ed815ad89b9ba0da66bc60765671", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 82, "num_lines": 13, "path": "/server/MachineLearning/pythonScripts/similarTracks_KNN.py", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import sys\nimport json\nfrom sklearn.neighbors import NearestNeighbors\nimport numpy as np\n\nwhile True:\n    j = json.loads(sys.stdin.readline())\n    X = np.array(j['X'])\n    y = np.array(j['y'])\n    nbrs = NearestNeighbors(n_neighbors=4, algorithm='ball_tree').fit(X)\n    result = nbrs.kneighbors([y],return_distance=False)\n    print((','.join((''.join(str(e) for e in result)).split())).replace('[,','['))\n    sys.stdout.flush()" }, { "alpha_fraction": 0.5274595022201538, "alphanum_fraction": 0.5377321243286133, "avg_line_length": 33.684932708740234, "blob_id": "1bb1c182452a3330b1a9d43aba4261e94d554b65", "content_id": "9a4cb288b604b692672caab1c81b643f2b268329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 114, "num_lines": 73, "path": "/Components/Search.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {connect} from \"react-redux\";\nimport {SetSearchedSongs} from \"../redux/actions/songs-actions\";\nimport React, {useState} from \"react\";\nimport {View, Text, Header, Item, Icon, Input, Button, List, Spinner} from 'native-base'\nimport styled from \"styled-components\";\nimport axios from \"axios\";\nimport SongCard from \"./SongCardList/SongCard\";\nimport SongItem from \"./common/SongItem\";\nimport CompactSong from \"./CompactSongList/CompactSong\";\nimport {ScrollView} from \"react-native\";\n\nconst StyledSearchContainer = styled(View)`\n    background-color: #9FA8DA;\n    width: 100%;\n`;\n\nconst Search = props => {\n    const [searchInput, setSearchInput] = useState('');\n\n    const search = () => {\n        axios.get('http://tuneapp-server-1969202483.us-east-1.elb.amazonaws.com/tracks/find/'.concat(searchInput))\n            .then(({data: fetchedSongs}) => {\n                props.setSearchedSongs(fetchedSongs.slice(0, 20));\n            });\n    };\n\n    return (\n        <View>\n            <StyledSearchContainer>\n                <Header searchBar rounded>\n                    <Item>\n                        <Icon name=\"search\"/>\n                        <Input placeholder=\"Search\"\n                               onChangeText={(text) => {\n                                   setSearchInput(text);\n                               }}\n                               value={searchInput}\n                               onSubmitEditing={() => search()}/>\n                    </Item>\n                    <Button transparent onPress={() => search()}>\n                        <Text>Search</Text>\n                    </Button>\n                </Header>\n            </StyledSearchContainer>\n            <List style={{width: '100%'}}>\n                {!props.songReducer.searchedSongs.length ? <Spinner color={'#3c50b5'}/> :\n                    props.songReducer.searchedSongs.map(song => {\n                        const SongCardItem = SongItem(CompactSong, song, props.songReducer.searchedSongs);\n                        return <SongCardItem key={song.id}/>\n                    })\n                }\n            </List>\n        </View>\n    );\n};\n\nconst mapStateToProps = state => {\n    console.log(state);\n    return {\n        likeReducer: state.likeReducer,\n        songReducer: state.songReducer\n    }\n};\n\nconst mapDispatchToProps = dispatch => {\n    return {\n        setSearchedSongs: searchedSongs => {\n            dispatch(SetSearchedSongs(searchedSongs));\n        },\n    }\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Search);" }, { "alpha_fraction": 0.7844611406326294, "alphanum_fraction": 0.7844611406326294, "avg_line_length": 43.44444274902344, "blob_id": "7b94d69ed647bdbd8bfbf3f99ff9f88aa5fb223e", "content_id": "eeeecfbfc1ffba06f637918ac26677516e0da827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 399, "license_type": "no_license", "max_line_length": 107, "num_lines": 9, "path": "/redux/store.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "import {combineReducers, createStore} from \"redux\";\nimport {loginReducer} from \"./reducers/login-reducer\";\nimport {likeReducer} from \"./reducers/like-reducer\";\nimport playerReducer from \"./reducers/player-reducer\";\nimport {songReducer} from \"./reducers/song-reducer\";\n\nconst store = createStore(combineReducers({login: loginReducer, likeReducer, songReducer, playerReducer}));\n\nexport default store;" }, { "alpha_fraction": 0.5518118143081665, "alphanum_fraction": 0.5537189841270447, "avg_line_length": 29.25, "blob_id": "48a668e04a263e9b206ae4bd89f23541769458e2", "content_id": "cf27f99638936efa456ec7e16fb284915c5ae792", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1573, "license_type": "no_license", "max_line_length": 70, "num_lines": 52, "path": "/Components/Player/PlayerHandler.js", "repo_name": "stavco9/tuneApp", "src_encoding": "UTF-8", "text": "/* @flow */\n\nimport { Alert } from 'react-native';\nimport TrackPlayer from 'react-native-track-player';\nimport { playbackState } from '../../redux/reducers/player-reducer';\nimport {changeCurrentTrack} from \"../../redux/actions/player-actions\";\n\nconst playerHandler = async (dispatch, data) =>{\n    switch (data.type) {\n        // Forward remote events to the player\n        case 'remote-play':\n            TrackPlayer.play();\n            break;\n        case 'remote-pause':\n            TrackPlayer.pause();\n            break;\n        case 'remote-stop':\n            TrackPlayer.stop();\n            break;\n        case 'remote-next':\n            TrackPlayer.skipToNext();\n            break;\n        case 'remote-previous':\n            TrackPlayer.skipToPrevious();\n            break;\n        case 'remote-seek':\n            TrackPlayer.seekTo(data.position);\n            break;\n        // You can make ducking smoother by adding a fade in/out\n        case 'remote-duck':\n            TrackPlayer.setVolume(data.ducking ? 0.5 : 1);\n            break;\n        // Playback updates\n        case 'playback-state': {\n            dispatch(playbackState(data.state));\n            break;\n        }\n        case 'playback-track-changed':\n            console.log(data);\n            dispatch(changeCurrentTrack(data.nextTrack));\n            break;\n        case 'playback-error':\n            Alert.alert('An error occurred', data.error);\n            break;\n        default:\n            break;\n    }\n};\n\nexport default function(dispatch) {\n    return playerHandler.bind(null, dispatch);\n}\n" } ]
34
srajender/task
https://github.com/srajender/task
140c76503eeea01543ffaf9786bea489d41a96da
9f2f98af4a3647f2a3ce7270b1682ba928011194
aec969ccf1e05309477796ecc5aba05e8bd404e5
refs/heads/master
2020-03-07T17:49:07.181229
2018-04-01T11:22:27
2018-04-01T11:22:27
127,621,641
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5325474143028259, "alphanum_fraction": 0.5489492416381836, "avg_line_length": 39.64583206176758, "blob_id": "24054a1d4a1bc0dbac3cc77b7092e9a24c00eb6a", "content_id": "db1cf1548c6528db518c93f8fd882e0456ca7801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1951, "license_type": "no_license", "max_line_length": 125, "num_lines": 48, "path": "/task/myapp/migrations/0012_auto_20180328_2319.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0011_auto_20180328_2318'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('account_number', models.IntegerField(unique=True)),\n ('balance', models.DecimalField(decimal_places=2, max_digits=20)),\n ('date_of_open', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('customer_name', models.CharField(max_length=100)),\n ('mail_id', models.EmailField(null=True, max_length=100, blank=True)),\n ('mobile_number', models.CharField(null=True, max_length=10, blank=True)),\n ('address', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Transactions',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('transaction_type', models.CharField(default='C', max_length=1, choices=[('C', 'credit'), ('D', 'debit')])),\n ('transaction_date', models.DateField(auto_now_add=True)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=20)),\n ('account', models.ForeignKey(to='myapp.Account')),\n ],\n ),\n migrations.AddField(\n model_name='account',\n name='customer',\n field=models.ForeignKey(to='myapp.Customer'),\n ),\n ]\n" }, { "alpha_fraction": 0.7819767594337463, "alphanum_fraction": 0.7819767594337463, "avg_line_length": 30.363636016845703, "blob_id": "926400c38323b312d0787b867e674cbbc4460e82", "content_id": "7260a16fd64893bd945aa453f20346bcf05d0791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 83, "num_lines": 11, "path": "/task/myapp/admin.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Transaction\n\nclass TransactionAdmin(admin.ModelAdmin):\n\tlist_display=('customer','transaction_type','transaction_date','amount','balance')\n\n\tdef balance(self, obj):\n\t\treturn '%s'%(obj.customer.balance)\n\tbalance.short_description = 'Balance'\n\nadmin.site.register(Transaction,TransactionAdmin)" }, { "alpha_fraction": 0.733938992023468, "alphanum_fraction": 0.7488643527030945, "avg_line_length": 36.60975646972656, "blob_id": "c67e60e56c4375faffce8696c3f5570ffe837c9a", "content_id": "a7a7f20e3496dc2d3f4e825a2278a6e1f47e8db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1541, "license_type": "no_license", "max_line_length": 125, "num_lines": 41, "path": "/task/myapp/models.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom 
django import forms\nfrom django.db.models import Sum,Avg,Count,Max,Min\n\n# Create your models here.\n\ntype=(('C','credit'),('D','debit'),)\n\nclass Customer(models.Model):\n\taccount_number\t\t=\tmodels.CharField(max_length=10,unique=True)\n\tcustomer_name\t\t=\tmodels.CharField(max_length=100)\n\tdate_of_open\t\t=\tmodels.DateField(auto_now_add=True)\n\tmail_id \t\t\t=\tmodels.EmailField(max_length=100,null=True,blank=True)\n\tmobile_number\t\t=\tmodels.CharField(max_length=10,null=True,blank=True)\n\taddress\t\t\t\t=\tmodels.TextField()\n\tbalance \t\t\t=\tmodels.DecimalField(max_digits=20,decimal_places=2,default=0.00)\n\n\tdef __str__(self):\n\t\treturn self.account_number\n\t\t\nclass TransactionManager(models.Manager):\n\tdef average(self,accno):\n\t\treturn super(TransactionManager,self).get_queryset().filter(customer__account_number__exact=accno).aggregate(Avg(\"amount\"))\n\n\tdef average_of_all(self):\n\t\treturn super(TransactionManager,self).get_queryset().aggregate(Avg(\"amount\"))\n\nclass Transaction(models.Model):\n\ttransaction_type =\tmodels.CharField(max_length=1,choices=type,default='C',help_text='select transaction type')\n\ttransaction_date\t= \tmodels.DateField(auto_now_add=True)\n\tamount\t\t\t\t=\tmodels.DecimalField(max_digits=20,decimal_places=2,default=0.00,help_text='enter amount')\n\tcustomer \t\t\t=\tmodels.ForeignKey(Customer,on_delete=models.CASCADE)\n\tmanager \t\t\t=\tTransactionManager()\n\n\tdef __str__(self):\n\t\treturn self.customer.account_number\n\t\t\nclass TransactionForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel=Transaction\n\t\tfields='__all__'" }, { "alpha_fraction": 0.546798050403595, "alphanum_fraction": 0.5556650161743164, "avg_line_length": 31.74193572998047, "blob_id": "8272d473cf6f60c7a989e77d33a8df830d8ef6a4", "content_id": "78365471ceefaf963da6e0b8b2cf5568988f81ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 123, "num_lines": 31, "path": "/task/myapp/migrations/0004_auto_20180328_2048.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0003_customer_transctions'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Transactions',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n ('tranction_type', models.CharField(choices=[('C', 'credit'), ('D', 'debit')], default='C', max_length=1)),\n ('tranction_date', models.DateField(auto_now_add=True)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=20)),\n ('customer', models.ManyToManyField(to='myapp.Customer')),\n ],\n ),\n migrations.RemoveField(\n model_name='transctions',\n name='customer',\n ),\n migrations.DeleteModel(\n name='Transctions',\n ),\n ]\n" }, { "alpha_fraction": 0.5337423086166382, "alphanum_fraction": 0.5501022338867188, "avg_line_length": 39.75, "blob_id": "c6261e7dbe3803089b8024f9053a9d737512131a", "content_id": "306684c568d7012251f107da53510982d1ddcd0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1956, "license_type": "no_license", "max_line_length": 125, "num_lines": 48, "path": "/task/myapp/migrations/0010_auto_20180328_2312.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- 
coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0009_auto_20180328_2311'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('account_number', models.IntegerField(unique=True)),\n ('balance', models.DecimalField(max_digits=20, decimal_places=2)),\n ('date_of_open', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('customer_name', models.CharField(max_length=100)),\n ('mail_id', models.EmailField(max_length=100, blank=True, null=True)),\n ('mobile_number', models.CharField(max_length=10, blank=True, null=True)),\n ('address', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Transactions',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('transaction_type', models.CharField(max_length=1, default='C', choices=[('C', 'credit'), ('D', 'debit')])),\n ('transaction_date', models.DateField(auto_now_add=True)),\n ('amount', models.DecimalField(max_digits=20, decimal_places=2)),\n ('account', models.ManyToManyField(to='myapp.Account')),\n ],\n ),\n migrations.AddField(\n model_name='account',\n name='customer',\n field=models.ForeignKey(to='myapp.Customer'),\n ),\n ]\n" }, { "alpha_fraction": 0.5452352166175842, "alphanum_fraction": 0.5536791086196899, "avg_line_length": 36.681819915771484, "blob_id": "6a85a84430fe2096d53ab478b1c827983de2aea2", "content_id": "4bb3ed908d906bd244a5960a3bc8f5223aadff8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1658, "license_type": "no_license", "max_line_length": 114, "num_lines": 44, "path": "/task/myapp/migrations/0001_initial.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BankAccount',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('account_number', models.CharField(max_length=11, unique=True)),\n ('opening_date', models.DateTimeField(auto_now_add=True)),\n ('balance', models.DecimalField(max_digits=20, decimal_places=2)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('customer_name', models.CharField(max_length=100)),\n ('mail_id', models.EmailField(max_length=100, null=True, blank=True)),\n ('mobile_number', models.CharField(max_length=10, null=True, blank=True)),\n ('address', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Transctions',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('bankaccount', models.ManyToManyField(to='myapp.BankAccount')),\n ],\n ),\n migrations.AddField(\n model_name='bankaccount',\n name='customer',\n field=models.ForeignKey(to='myapp.Customer'),\n ),\n ]\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.6969696879386902, 
"avg_line_length": 15.5, "blob_id": "3fad1b6b778407eefb15a9b36508395ecbe5a5c0", "content_id": "85987c5f34352ca8711cfec8c1ffb528ef13f984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/README.md", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# task\nThis is a for do the task\n" }, { "alpha_fraction": 0.5168700814247131, "alphanum_fraction": 0.5312275886535645, "avg_line_length": 28.02083396911621, "blob_id": "fb349e9f1a78fe1a18187b57d202e2f49a7e203e", "content_id": "a09e39323d1f403e606493cd31ac870b66726d1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 114, "num_lines": 48, "path": "/task/myapp/migrations/0005_auto_20180328_2102.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0004_auto_20180328_2048'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('account_number', models.IntegerField(unique=True)),\n ('balance', models.DecimalField(max_digits=20, decimal_places=2)),\n ],\n ),\n migrations.RemoveField(\n model_name='customer',\n name='account_number',\n ),\n migrations.RemoveField(\n model_name='customer',\n name='balance',\n ),\n migrations.RemoveField(\n model_name='customer',\n name='opening_date',\n ),\n migrations.RemoveField(\n model_name='transactions',\n name='customer',\n ),\n migrations.AddField(\n model_name='account',\n name='customer',\n field=models.ForeignKey(to='myapp.Customer'),\n ),\n migrations.AddField(\n model_name='transactions',\n name='account',\n field=models.ManyToManyField(to='myapp.Account'),\n ),\n ]\n" }, { "alpha_fraction": 0.69701087474823, "alphanum_fraction": 0.69701087474823, "avg_line_length": 42.29411697387695, "blob_id": "5e7d755038d75843d8f8e06f8e13219711bfb0d6", "content_id": "e7865b9b67289b1b123a3f04d9a3f1ceedde9aa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "no_license", "max_line_length": 142, "num_lines": 17, "path": "/task/task/urls.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom myapp.views import transaction_view,all_customer_transaction_average,customer_details,individual_customer_transactions,calculate_std,home\nurlpatterns = [\n # Examples:\n # url(r'^$', 'task.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'home/$',home),\n url(r'^transaction/$',transaction_view),\n # url(r'^all_transactions/$',all_customer_transaction),\n url(r'^average/$',all_customer_transaction_average),\n url(r'^customer_details/$',customer_details),\n url(r'^individual_customer/$',individual_customer_transactions),\n url(r'^st_dev/$',calculate_std),\n]\n" }, { "alpha_fraction": 0.5408867001533508, "alphanum_fraction": 0.5724138021469116, "avg_line_length": 36.592594146728516, "blob_id": "2008db29854fd66fccee5ce6ff946d2295f1d19d", "content_id": "7a8f0d7abaf1678d455af1e062197a48913308fe", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/task/myapp/migrations/0014_customer.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0013_auto_20180328_2336'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('account_number', models.CharField(max_length=10, unique=True)),\n ('customer_name', models.CharField(max_length=100)),\n ('date_of_open', models.DateField(auto_now_add=True)),\n ('mail_id', models.EmailField(max_length=100, null=True, blank=True)),\n ('mobile_number', models.CharField(max_length=10, null=True, blank=True)),\n ('address', models.TextField()),\n ('balance', models.DecimalField(decimal_places=2, default=0.0, max_digits=20)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6858699917793274, "alphanum_fraction": 0.692098081111908, "avg_line_length": 23.711538314819336, "blob_id": "7e10a744e3b3ded8d3eb80cab1800c0658f64c13", "content_id": "52488379510b5d6f8551773826f87309461a2e65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2569, "license_type": "no_license", "max_line_length": 73, "num_lines": 104, "path": "/task/myapp/views.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import Http404\nfrom myapp.forms import CustomerForm\nfrom . 
models import Customer,Transaction,TransactionForm\nimport numpy as np\n# Create your views here.\ndef home(request):\n\treturn render(request,'home_page.html')\ndef transaction_view(request):\n\tt=TransactionForm(request.POST)\n\tcontext={\n\t'form':t\n\t}\n\tif request.method=='POST':\n\t\tif t.is_valid():\n\t\t\tq=t.cleaned_data\n\t\t\tamount=q['amount']\n\t\t\tacnumber=q['customer']\n\t\t\ttype\t=q['transaction_type']\n\t\t\tobj=Customer.objects.get(account_number=acnumber)\n\t\t\tt=Transaction(transaction_type=type,amount=amount,customer=obj)\n\t\t\tt.save()\n\t\t\tif type=='C':\n\t\t\t\tobj.balance=obj.balance+amount\n\t\t\t\tobj.save()\n\t\t\telse:\n\t\t\t\tif obj.balance>amount:\n\t\t\t\t\tobj.balance=obj.balance-amount\n\t\t\t\t\tobj.save()\n\t\t\t\telse:\n\t\t\t\t\traise Exception(\"Your balance is low\")\n\treturn render(request,'transaction.html',context)\n\ndef all_customer_transaction_average(request):\n\tobj=Transaction.manager.average_of_all()\n\tcontext={\n\t'form':int(obj['amount__avg'])\n\t}\n\treturn render(request,'average.html',context)\n\ndef customer_details(request):\n\tcust=CustomerForm(request.POST)\n\tcontext={\n\t'form':cust\n\t}\n\tif request.method=='POST':\n\t\tif cust.is_valid():\n\t\t\tc=cust.cleaned_data\n\t\t\taccnum=c['account_number']\n\t\t\ttry:\n\t\t\t\tobj=Customer.objects.get(account_number=accnum)\n\t\t\texcept Customer.DoesNotExist:\n\t\t\t\traise Http404(\"Customer account number is invalid\")\n\t\treturn \trender(request,'cust1.html',{'form':obj})\n\treturn render(request,'cust.html',context)\n\n\ndef individual_customer_transactions(request):\n\tcust=CustomerForm(request.POST)\n\tcontext={\n\t'form':cust\n\t}\n\tif request.method=='POST':\n\t\tif cust.is_valid():\n\t\t\tc=cust.cleaned_data\n\t\t\taccnum=c['account_number']\n\t\t\ttry:\n\t\t\t\tobj=Transaction.manager.all().filter(customer__account_number=accnum)\n\t\t\t\tl=list()\n\t\t\t\tl1=list()\n\t\t\t\tfor a in obj:\n\t\t\t\t\tl.append(a.amount)\n\t\t\t\tx=np.array(l)\n\t\t\t\tstd = int(x.std())\n\t\t\t\tfor b in obj:\n\t\t\t\t\tif std<=b.amount:\n\t\t\t\t\t\tl1.append(b.amount)\n\t\t\t\tcont={\n\t\t\t\t\t'form':obj,\n\t\t\t\t\t'list':l1,\n\t\t\t\t\t'std_dev':std\n\t\t\t\t\t}\n\t\t\texcept Customer.DoesNotExist:\n\t\t\t\traise Http404(\"Customer account number is invalid\")\n\t\treturn \trender(request,'standard_deviation.html',cont)\n\treturn render(request,'cust.html',context)\n\ndef calculate_std(request):\n\tobj=Transaction.manager.all()\n\tl=list()\n\tl1=list()\n\tfor a in obj:\n\t\tl.append(a.amount)\n\tx=np.array(l)\n\tstd = int(x.std())\n\tfor b in obj:\n\t\tif std<=b.amount:\n\t\t\tl1.append(b.amount)\n\tcontext={\n\t'form':obj,\n\t'list':l1,\n\t'std_dev':std\n\t}\n\treturn render(request,'standard_deviation.html',context)" }, { "alpha_fraction": 0.8041958212852478, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 47, "blob_id": "142ae176dcd8609d4f2d94317e4ac73947285af2", "content_id": "83359d672b593ed1377a23d7edbfc6b93697cf45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/task/myapp/forms.py", "repo_name": "srajender/task", "src_encoding": "UTF-8", "text": "from django import forms\nclass CustomerForm(forms.Form):\n\taccount_number=forms.CharField(max_length=10,help_text='please enter account number')" } ]
12
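The standard-deviation filter in myapp/views.py above (used by both calculate_std and individual_customer_transactions) pulls the amounts out of the queryset, truncates the numpy standard deviation to an int, and keeps every amount at or above it. A minimal standalone sketch of that same logic, assuming a plain list of amounts in place of the Transaction queryset; the sample values are made up for illustration:

import numpy as np

# Stand-in for [t.amount for t in Transaction.manager.all()]
amounts = [120.0, 45.5, 980.0, 60.0, 75.25]

std = int(np.array(amounts).std())           # same int() truncation as the view
large = [a for a in amounts if a >= std]     # view keeps amounts with std <= amount

print(std, large)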
libertyluna/challenge-me-ui-mockup
https://github.com/libertyluna/challenge-me-ui-mockup
54262c2fd83f09a3b5f2019260f76db43b93692b
a3d1b220c77d7084f1aefed6865aa27b9f737b6a
441d5f5f368ed90e89cc169d478450067077d673
refs/heads/master
2022-12-14T23:19:22.618121
2018-01-23T18:45:21
2018-01-23T18:45:21
118,252,494
0
0
null
2018-01-20T14:58:37
2019-04-14T09:41:35
2022-12-08T00:52:40
Python
[ { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 20, "blob_id": "6b80d7c42ed9b53d9da2706129a91c63f461ac05", "content_id": "63c800815672145331bff5555bd5c6bbebabcacf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/README.md", "repo_name": "libertyluna/challenge-me-ui-mockup", "src_encoding": "UTF-8", "text": "# challenge-me\nChallenge / goal portfolio\n" }, { "alpha_fraction": 0.6649214625358582, "alphanum_fraction": 0.717277467250824, "avg_line_length": 20.33333396911621, "blob_id": "947d4371eda2cf804e04099a204455de9aaf8864", "content_id": "836e7396ac59195e26750bd1242a5ac2782cfce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/test.py", "repo_name": "libertyluna/challenge-me-ui-mockup", "src_encoding": "UTF-8", "text": "import MySQLdb\n\n# Connect to the local MySQL server: host, user, password, database, port\ndb = MySQLdb.connect(\"127.0.0.1\", \"root\", \"root\", \"challenge_me\", 8889)\n\ncursor = db.cursor()\ncursor.execute(\"select * from test_table\")\n\nresults = cursor.fetchall()\nprint(results)\n\n# Close the cursor and connection once the results have been read\ncursor.close()\ndb.close()\n" } ]
2
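test.py above passes its five connection parameters positionally, which hides what the 8889 means. A small sketch of the same call written with keyword arguments and explicit cleanup; the credentials and port are simply the ones hard-coded in the repo, and a reachable MySQL server at that address is an assumption:

import MySQLdb

# Keyword form of the positional connect() call in test.py
db = MySQLdb.connect(host="127.0.0.1", user="root", passwd="root",
                     db="challenge_me", port=8889)
try:
    cursor = db.cursor()
    cursor.execute("select * from test_table")
    print(cursor.fetchall())
finally:
    db.close()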
drkwlfff/port-scanner
https://github.com/drkwlfff/port-scanner
8b41214d982270666e480bfd197303215469c2e9
82f09f3b027d242f53056af5a9b5ddb32c80a10d
00f4f2bef76d6dfb9d997526c834c0d1dba13810
refs/heads/main
2023-01-01T04:44:23.423866
2020-10-28T15:40:03
2020-10-28T15:40:03
308,062,366
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7900000214576721, "avg_line_length": 48, "blob_id": "797d887ce014b5316c46592d67538e7e5f81d2b3", "content_id": "f12eb07c44303b9f6075a2dec2eca05cb63246f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "no_license", "max_line_length": 82, "num_lines": 2, "path": "/README.md", "repo_name": "drkwlfff/port-scanner", "src_encoding": "UTF-8", "text": "# port-scanner\na very basic functional port scanner built in Python 3 using the sys and socket modules\n" }, { "alpha_fraction": 0.6437292098999023, "alphanum_fraction": 0.6614872217178345, "avg_line_length": 26.33333396911621, "blob_id": "cbc813270d0031c22d33046e791cd816ad2d3244", "content_id": "6926aa0195c6b7c732cb0e5830c997601c78ae83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "no_license", "max_line_length": 73, "num_lines": 33, "path": "/scanner.py", "repo_name": "drkwlfff/port-scanner", "src_encoding": "UTF-8", "text": "#!/bin/python\nimport sys\nimport socket\nfrom datetime import datetime\n\n# define our target\nif len(sys.argv) == 2:\n    target = socket.gethostbyname(sys.argv[1]) # translate hostname to IPv4\nelse:\n    print(\"Invalid argument\")\n    print(\"Syntax: python3 scanner.py <ip>\")\n    sys.exit(1) # bail out so target is never used while undefined\n\nprint(\"_\" *50)\nprint(\"Scanning target: \" + target)\nprint(\"Time started: \" + str(datetime.now()))\nprint(\"_\"*50)\ntry:\n    for port in range(1,65536): # cover the full port range 1-65535\n        s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n        socket.setdefaulttimeout(1)\n        result = s.connect_ex((target,port)) # returns 0 when the port is open\n        if result==0:\n            print(\"Port {} is open\".format(port))\n        s.close()\nexcept KeyboardInterrupt:\n    print(\"\\nExiting program.\")\n    sys.exit()\nexcept socket.gaierror:\n    print(\"Hostname could not be resolved.\")\n    sys.exit()\nexcept socket.error:\n    print(\"Could not connect to the server.\")\n    sys.exit()\n" } ]
2
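scanner.py above probes ports one at a time with a one-second timeout, so a full 1-65535 sweep can take hours. A hedged sketch of the same connect_ex check fanned out over a thread pool; the worker count and the smaller default port range are arbitrary choices for illustration, not part of the original script:

import socket
from concurrent.futures import ThreadPoolExecutor

def check_port(target, port, timeout=1.0):
    # connect_ex returns 0 when the TCP handshake succeeds, i.e. the port is open
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)
        return port if s.connect_ex((target, port)) == 0 else None

def scan(target, ports=range(1, 1025), workers=100):
    with ThreadPoolExecutor(max_workers=workers) as pool:
        hits = pool.map(lambda p: check_port(target, p), ports)
        return [p for p in hits if p is not None]

if __name__ == '__main__':
    print(scan(socket.gethostbyname('localhost')))

Using a per-socket settimeout also avoids the global socket.setdefaulttimeout(1) call in the original, which silently changes behaviour for every socket in the process.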
laniahasib/Skin_Cancer_Detection
https://github.com/laniahasib/Skin_Cancer_Detection
9d32cd5a84baec095466fb3fac8a276c98ed95e0
b99afb17ea315a8755ae24ee8b557ce68a258bb1
2d5c24b2ccde055906a0f6f35770ce49fac188fa
refs/heads/master
2020-12-19T06:10:44.150459
2020-01-27T06:02:45
2020-01-27T06:02:45
235,642,283
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6184267997741699, "alphanum_fraction": 0.6362451314926147, "avg_line_length": 28.88311767578125, "blob_id": "941584dca09f3c0bfdb1fbf5a8aac2224be6aa68", "content_id": "48c073f27d5ec2da9faa7c8b0c7a36f8033fbfcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2301, "license_type": "no_license", "max_line_length": 96, "num_lines": 77, "path": "/src/app.py", "repo_name": "laniahasib/Skin_Cancer_Detection", "src_encoding": "UTF-8", "text": "from __future__ import division, print_function\nimport sys\nimport os\nimport glob\nimport re\nfrom pathlib import Path\nfrom io import BytesIO\nimport base64\nimport requests\n\n# Import fast.ai Library\nfrom fastai import *\nfrom fastai.vision import *\n\n# Flask utils\nfrom flask import Flask, request, jsonify\n\n\n# Define a flask app\napp = Flask(__name__)\nNAME_OF_FILE = 'model_best' # Name of the model pth file\nPATH_TO_MODELS_DIR = Path('') # by default just use /models in root dir\nclasses = ['Actinic keratoses', 'Basal cell carcinoma', 'Benign keratosis',\n 'Dermatofibroma', 'Melanocytic nevi', 'Melanoma', 'Vascular lesions']\n\ndef setup_model_pth(path_to_pth_file, learner_name_to_load, classes):\n data = ImageDataBunch.single_from_classes(\n path_to_pth_file, classes, ds_tfms=get_transforms(), size=224).normalize(imagenet_stats)\n learn = cnn_learner(data, models.densenet169, model_dir='models')\n learn.load(learner_name_to_load, device=torch.device('cpu'))\n return learn\n\nlearn = setup_model_pth(PATH_TO_MODELS_DIR, NAME_OF_FILE, classes)\n\ndef decode(img_b64):\n img = base64.b64decode(img_b64)\n return img\n\ndef model_predict(img):\n img = open_image(BytesIO(img))\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n formatted_outputs = [\"{:.1f}\".format(value) for value in [\n x * 100 for x in torch.nn.functional.softmax(outputs, dim=0)]]\n pred_probs = sorted(\n zip(learn.data.classes, map(str, formatted_outputs)),\n key=lambda p: p[1],\n reverse=True\n )\n pred_dict = {i[0]: i[1] for i in pred_probs}\n #for k, v in pred_dict.items():\n # print(k, v)\n\n message = {\n 'status': 200,\n 'message': 'OK',\n 'predictions': pred_dict, \n }\n return jsonify(message)\n\[email protected]('/predict', methods=[\"POST\", \"GET\"])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n img_b64 = request.form.get(\"image\")\n if img_b64 != None:\n # Make prediction\n img = decode(img_b64)\n preds = model_predict(img)\n return preds\n \n\nif __name__ == '__main__':\n port = os.environ.get('PORT', 8008)\n\n if \"prepare\" not in sys.argv:\n app.run(debug=False, host='0.0.0.0', port=port)\n" }, { "alpha_fraction": 0.7071682810783386, "alphanum_fraction": 0.7727503776550293, "avg_line_length": 102.47368621826172, "blob_id": "6a56ee45d0936281e2e497fa59bfc2fe041f3734", "content_id": "44dd5f8636d9922d7ad7b42768dbfacdfae2cfd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1967, "license_type": "no_license", "max_line_length": 916, "num_lines": 19, "path": "/README.md", "repo_name": "laniahasib/Skin_Cancer_Detection", "src_encoding": "UTF-8", "text": "## Overview\n\nThis is the source code for a deep learning based skin cancer detection android app. The model has been built using **fastai** deep learning library which is a high level api for pytorch. A **flask API** has also been implemented for **cloud-based inference**. 
The classifier has been trained and validated on the [Kaggle MNIST HAM10000 dataset](https://www.kaggle.com/kmader/skin-cancer-mnist-ham10000), which contains 10015 images of seven categories of pigmented skin lesions. As a preprocessing step, I have applied random undersampling to the data to alleviate the class-imbalance problem. The classifier has been trained using transfer learning with a pretrained **Densenet169** model. The final classifier achieved an accuracy of **91.2%** and an F1-score of **91.7%** on validation data. You can check out the accompanying Jupyter notebook to follow all the steps taken to build the model.\n\n## Screenshots\n<img src=\"https://user-images.githubusercontent.com/34622266/57833782-1a521280-77d0-11e9-917f-56db245998a4.jpg\" width=\"480\" height=\"854\"> <img src=\"https://user-images.githubusercontent.com/34622266/57833790-1d4d0300-77d0-11e9-95e7-5524e760c9dd.jpg\" width=\"480\" height=\"854\">\n\n\n## Dependencies\n\n- Python 3.6 <br/>\n- Fastai 1.0.52 <br/>\n- Flask <br/>\n- Gunicorn <br/>\n- [SquareCamera](https://github.com/boxme/SquareCamera) <br/>\n- [Volley](https://github.com/google/volley)\n\n## Instructions\nTo set up the Flask API, first run `sudo pip install -r requirements.txt` to install the required dependencies. Then launch the app by running `python app.py`. When you take a photo of a skin lesion using the Android app, a base64 encoding of the image will be sent to the API at http://localhost:8008. Once you set up the API, download [Android Studio](https://developer.android.com/studio) and then import the Android app. Since I've placed the dependencies in the build.gradle file, they should be downloaded automatically.\n" } ]
2
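app.py above accepts a POST to /predict with a form field named image holding the base64-encoded photo, and replies with JSON whose predictions key maps class names to percentages. A minimal client sketch using requests; the image file name and localhost host are placeholders, while the field name, route, and port 8008 come from the code above:

import base64
import requests

# Placeholder file name; any JPEG/PNG of a lesion would do
with open('lesion.jpg', 'rb') as f:
    img_b64 = base64.b64encode(f.read()).decode('ascii')

resp = requests.post('http://localhost:8008/predict', data={'image': img_b64})
print(resp.json()['predictions'])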
econsnapshot/election-2016
https://github.com/econsnapshot/election-2016
93f243beebab218664b7533d09f35065c0a8d448
561f29fd7b5cd3557555b79c95951d2361cd5e5a
0578f5f6024637dbcd8ec24ec6d14b4f03816646
refs/heads/master
2020-06-14T05:53:52.564525
2016-12-16T18:42:41
2016-12-16T18:42:41
75,224,713
4
0
null
2016-11-30T20:32:41
2016-11-30T20:45:11
2016-12-01T20:54:33
Python
[ { "alpha_fraction": 0.2939632534980774, "alphanum_fraction": 0.2939632534980774, "avg_line_length": 52.42856979370117, "blob_id": "bc2fb10ee84542d095a8aae00954d603dbd3a0d5", "content_id": "7e0981ed853f87f9dd94c26f7a8a84cc527db75e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 112, "num_lines": 7, "path": "/scripts/__init__.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n" }, { "alpha_fraction": 0.5212103128433228, "alphanum_fraction": 0.5223969221115112, "avg_line_length": 49.86153793334961, "blob_id": "f99c4b76f38bc66b1593a2bc3714d225a17ac7b2", "content_id": "12db67115d927912550c7cf98f34c28ab1c7f80e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3371, "license_type": "no_license", "max_line_length": 135, "num_lines": 65, "path": "/scripts/merge.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport pandas as pd\r\nimport os\r\n\r\ndef merge_data(series, merge_opts, output, temp, merge_var, year = None):\r\n\r\n print series\r\n print year\r\n\r\n if os.path.isdir(temp) is False:\r\n os.mkdir(temp)\r\n\r\n data_temp = pd.read_csv(series, delimiter = ',', header = 0)\r\n if merge_opts['pivot_column'] == None:\r\n data_temp[merge_var] = pd.to_numeric(data_temp[merge_var], errors = 'coerce')\r\n data_temp[merge_var] = data_temp[merge_var].astype(float)\r\n if os.path.isfile(output):\r\n data = pd.read_csv(output, delimiter = ',', header = 0)\r\n data_final = pd.merge(data, data_temp, on = merge_var, how = 'outer')\r\n data_final.to_csv(output, sep = ',', index = False, mode = 'w')\r\n else:\r\n data_temp.to_csv(output, sep = ',', index = False, mode = 'w')\r\n\r\n else:\r\n for pv_vals in merge_opts['pivot_values']:\r\n # try:\r\n data_temp[pv_vals] = data_temp[pv_vals].str.replace(\"%\",\"\")\r\n data_temp[pv_vals] = pd.to_numeric(data_temp[pv_vals], errors = 'coerce')\r\n # except:\r\n # pass\r\n # print pv_vals\r\n # print data_temp[pv_vals]\r\n \r\n data_temp[merge_opts['pivot_column']] = data_temp[merge_opts['pivot_column']].str.replace(\"Dem\",\"D\")\r\n data_temp[merge_opts['pivot_column']] = data_temp[merge_opts['pivot_column']].str.replace(\"Democrat\",\"D\")\r\n data_temp[merge_opts['pivot_column']] = data_temp[merge_opts['pivot_column']].str.replace(\"GOP\",\"R\")\r\n data_temp[merge_opts['pivot_column']] = data_temp[merge_opts['pivot_column']].str.replace(\"Rep\",\"R\")\r\n data_temp[merge_opts['pivot_column']] = data_temp[merge_opts['pivot_column']].str.replace(\"Republican\",\"R\")\r\n 
data_temp.loc[~(data_temp[merge_opts['pivot_column']].isin(['D','R'])),merge_opts['pivot_column']] = \"I\"\r\n data_temp = data_temp.pivot_table(index = merge_var, columns = merge_opts['pivot_column'], values = merge_opts['pivot_values'])\r\n data_temp.columns = ['_'.join(col).strip() + \"_\" + year for col in data_temp.columns.values]\r\n data_temp[merge_var] = data_temp.index\r\n data_temp[merge_var] = data_temp[merge_var].astype(float)\r\n # for i in data_temp.iterrows():\r\n # if data_temp.index.ix[i] == merge_var:\r\n # data_temp[:].ix[i] = \r\n # except:\r\n # pass\r\n if os.path.isfile(output):\r\n data = pd.read_csv(output, delimiter = ',', header = 0)\r\n data_final = pd.merge(data, data_temp, on = merge_var, how = 'outer')\r\n # data_temp.to_csv(temp + '/temp.csv', sep = ',', index = False, mode = 'w')\r\n # data = pd.read_csv(temp + '/temp.csv', delimiter = ',', header = 0)\r\n data_final.to_csv(output, sep = ',', index = False, mode = 'w')\r\n else:\r\n data_temp.to_csv(output, sep = ',', index = False, mode = 'w')\r\n" }, { "alpha_fraction": 0.5798733234405518, "alphanum_fraction": 0.5883181095123291, "avg_line_length": 38.5428581237793, "blob_id": "b94fe801c49c408f5f0af2e2130903898a13586a", "content_id": "634efa62b2a3df268ffb9491d8e50ab9634c403a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2842, "license_type": "no_license", "max_line_length": 297, "num_lines": 70, "path": "/scripts/extras_cleaner.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "\r\n################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport time\r\nmatplotlib.use('Agg')\r\nimport urllib2\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport us\r\nimport wget\r\n# from pyvirtualdisplay import Display\r\n\r\n# display = Display(visible=0, size=(1024, 768))\r\n# display.start()\r\n\r\n# binary = FirefoxBinary(r'C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe')\r\n# driver = webdriver.Firefox(firefox_binary=binary)\r\n\r\ndef clean_extras(series, output_path, replace_files):\r\n\r\n states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\",\"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n main_folder = os.path.dirname(os.path.abspath('__file__'))\r\n\r\n af = addfips.AddFIPS()\r\n\r\n data_path = main_folder + '/data/extras/'\r\n\r\n if os.path.isdir(data_path) is False:\r\n os.mkdir(data_path)\r\n\r\n voting_machines_data = pd.read_csv(data_path + \"/voting_machines.csv\", header = 0, names=['FIPS code', 'State', 'Jurisdiction','Division','Precincts','Total Registration','Make','Model','Equipment 
Type','VVPAT','Accessible Use','Early Voting','Absentee Ballots','Polling Place','County_FIPS'])\r\n\r\n paper = []\r\n electronic = []\r\n\r\n fips_temp = []\r\n\r\n for i in voting_machines_data.iterrows():\r\n fips_temp.append(int(voting_machines_data['FIPS code'].ix[i]/100000))\r\n if voting_machines_data['Equipment Type'].ix[i] == \"DRE-Touchscreen\" or voting_machines_data['Equipment Type'].ix[i] == \"DRE-Push Button\" or voting_machines_data['Equipment Type'].ix[i] == \"DRE-Dial\":\r\n paper.append(0)\r\n electronic.append(1)\r\n else:\r\n paper.append(1)\r\n electronic.append(0)\r\n\r\n voting_machines_data['County_FIPS'] = fips_temp\r\n\r\n voting_machines_data['Paper'] = paper\r\n voting_machines_data['Electronic'] = electronic\r\n \r\n voting_machines_data = voting_machines_data.pivot_table(columns = 'County_FIPS', values = ['Paper','Electronic']).transpose()\r\n\r\n voting_machines_data.to_csv(main_folder + '/data/covariates/voting_machines.csv', index = True, sep = \",\", encoding='utf-8', mode = 'w')\r\n\r\n" }, { "alpha_fraction": 0.4383498430252075, "alphanum_fraction": 0.4476567804813385, "avg_line_length": 59.585365295410156, "blob_id": "9cbf0b9fc2218c5757af7c296e41b79743d1a467", "content_id": "305edee3f24cce1b7c65fa8b8673a9fb0b538683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15150, "license_type": "no_license", "max_line_length": 296, "num_lines": 246, "path": "/scripts/scraper_2004.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nimport re\r\nimport us\r\n\r\n\r\n# states = ['al']\r\ndef scraper_2004(output_path, scrape_all, replace):\r\n af = addfips.AddFIPS()\r\n\r\n states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\", \"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n website_president = 'http://www.cnn.com/ELECTION/2004/pages/results/states/'\r\n\r\n website_house = 'http://www.cnn.com/ELECTION/2004/pages/results/states/'\r\n\r\n website_senate = 'http://www.cnn.com/ELECTION/2004/pages/results/states/'\r\n\r\n website_governor = 'http://www.cnn.com/ELECTION/2004/pages/results/states/'\r\n\r\n data_pres_path = output_path + '/president/'\r\n data_senate_path = output_path + '/senate/'\r\n data_house_path = output_path + '/house/'\r\n data_governor_path = output_path + '/governor/'\r\n\r\n if os.path.isdir(data_pres_path) is False:\r\n os.mkdir(data_pres_path)\r\n\r\n file_nat = open(data_pres_path + \"national_2004.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n file_nat = 
open(data_pres_path + \"national_2004.csv\", \"a\")\r\n file_state = open(data_pres_path + state + \"_2004.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n i = 0\r\n try:\r\n while i < 100:\r\n url = website_president + state.upper() + '/P/00/county' + '.00' + str(i) + '.html'\r\n page = urllib2.urlopen(url)\r\n soup = BeautifulSoup(page.read())\r\n for res_row in soup.find_all('tr', align='center'):\r\n if res_row.find('td', class_=\"dataTableRace\"):\r\n county_name = res_row.find('td', class_=\"dataTableRace\").find('b').get_text()\r\n tds_name = res_row.find('a', href=re.compile('(javascript.*)')).get_text()\r\n if res_row.find('img', class_=\"dataIcon\")['alt'] == 'Republican':\r\n tds_party = \"R\"\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Democrat':\r\n tds_party = 'D'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Independent':\r\n tds_party = 'I'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] != '':\r\n tds_party = str(res_row.find('img', class_=\"dataIcon\")['alt']).replace(\"Democratic\",\"D\")\r\n else:\r\n tds_party = 'I'\r\n for res_col in res_row.find_all('td'):\r\n if len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 1 and res_col.string != None:\r\n tds_tot = res_col.get_text()\r\n elif len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 2 and res_col.string != None:\r\n tds_pct = res_col.get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n i = i + 1\r\n except:\r\n pass\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n\r\n if scrape_all == True:\r\n\r\n if os.path.isdir(data_senate_path) is False:\r\n os.mkdir(data_senate_path)\r\n\r\n if os.path.isdir(data_house_path) is False:\r\n os.mkdir(data_house_path)\r\n\r\n if os.path.isdir(data_governor_path) is False:\r\n os.mkdir(data_governor_path)\r\n\r\n file_nat = open(data_senate_path + \"national_2004.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n file_nat = open(data_senate_path + \"national_2004.csv\", \"a\")\r\n file_state = open(data_senate_path + state + \"_2004.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n i = 0\r\n try:\r\n while i < 100:\r\n url = website_senate + state.upper() + '/S/01/county' + '.00' + str(i) + '.html'\r\n page = urllib2.urlopen(url)\r\n soup = BeautifulSoup(page.read())\r\n for res_row in soup.find_all('tr', align='center'):\r\n if res_row.find('td', class_=\"dataTableRace\"):\r\n county_name = res_row.find('td', class_=\"dataTableRace\").find('b').get_text()\r\n tds_name = res_row.find('a', href=re.compile('(javascript.*)')).get_text()\r\n 
if res_row.find('img', class_=\"dataIcon\")['alt'] == 'Republican':\r\n tds_party = \"R\"\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Democrat':\r\n tds_party = 'D'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Independent':\r\n tds_party = 'I'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] != '':\r\n tds_party = str(res_row.find('img', class_=\"dataIcon\")['alt']).replace(\"Democratic\",\"D\")\r\n else:\r\n tds_party = 'I'\r\n for res_col in res_row.find_all('td'):\r\n if len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 1 and res_col.string != None:\r\n tds_tot = res_col.get_text()\r\n elif len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 2 and res_col.string != None:\r\n tds_pct = res_col.get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n i = i + 1\r\n except:\r\n pass\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n\r\n file_nat = open(data_house_path + \"national_2004.csv\", \"w\")\r\n file_nat.write(\"State,District,Party,St_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n file_nat = open(data_house_path + \"national_2004.csv\", \"a\")\r\n file_state = open(data_house_path + state + \"_2004.csv\", \"w\")\r\n file_state.write(\"District,Party,Candidate,Percentage,Votes\\n\")\r\n i = 1\r\n try:\r\n while i < 100:\r\n if i < 10:\r\n url = website_house + state.upper() + '/H/0' + str(i) + '/index.html'\r\n else:\r\n url = website_house + state.upper() + '/H/' + str(i) + '/index.html'\r\n # print url\r\n page = urllib2.urlopen(url)\r\n soup = BeautifulSoup(page.read())\r\n for res_row in soup.find_all('tr', align='center'):\r\n if res_row.find('td', class_=\"dataTableRace\"):\r\n county_name = res_row.find('td', class_=\"dataTableRace\").find('b').get_text()\r\n tds_name = res_row.find('a', href=re.compile('(javascript.*)')).get_text()\r\n # print tds_name\r\n if res_row.find('img', class_=\"dataIcon\")['alt'] == 'Republican':\r\n tds_party = \"R\"\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Democrat':\r\n tds_party = 'D'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Independent':\r\n tds_party = 'I'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] != '':\r\n tds_party = str(res_row.find('img', class_=\"dataIcon\")['alt']).replace(\"Democratic\",\"D\")\r\n else:\r\n tds_party = 'I'\r\n if 'unopposed' in res_row.get_text():\r\n tds_tot = 'unopposed'\r\n tds_pct = 'unopposed'\r\n else:\r\n for res_col in res_row.find_all('td'):\r\n if len(res_col.attrs) == 1 and len(list(res_col.descendants)) == 1 and res_col.string != None:\r\n tds_tot = res_col.get_text()\r\n elif len(res_col.attrs) == 1 and len(list(res_col.descendants)) == 2 and res_col.string != None:\r\n 
tds_pct = res_col.get_text()\r\n try:\r\n file_nat.write(state.upper() + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state.upper() + \",\" + str(us.states.lookup(state)) + \" \" + str(i) + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(str(us.states.lookup(state)) + \" \" + str(i) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n i = i + 1\r\n except:\r\n pass\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n\r\n file_nat = open(data_governor_path + \"national_2004.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n file_nat = open(data_governor_path + \"national_2004.csv\", \"a\")\r\n file_state = open(data_governor_path + state + \"_2004.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n i = 0\r\n try:\r\n while i < 100:\r\n url = website_governor + state.upper() + '/G/00/county' + '.00' + str(i) + '.html'\r\n page = urllib2.urlopen(url)\r\n soup = BeautifulSoup(page.read())\r\n for res_row in soup.find_all('tr', align='center'):\r\n if res_row.find('td', class_=\"dataTableRace\"):\r\n county_name = res_row.find('td', class_=\"dataTableRace\").find('b').get_text()\r\n tds_name = res_row.find('a', href=re.compile('(javascript.*)')).get_text()\r\n if res_row.find('img', class_=\"dataIcon\")['alt'] == 'Republican':\r\n tds_party = \"R\"\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Democrat':\r\n tds_party = 'D'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] == 'Independent':\r\n tds_party = 'I'\r\n elif res_row.find('img', class_=\"dataIcon\")['alt'] != '':\r\n tds_party = str(res_row.find('img', class_=\"dataIcon\")['alt']).replace(\"Democratic\",\"D\")\r\n else:\r\n tds_party = 'I'\r\n for res_col in res_row.find_all('td'):\r\n if len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 1 and res_col.string != None:\r\n tds_tot = res_col.get_text()\r\n elif len(res_col.attrs) == 0 and len(list(res_col.descendants)) == 2 and res_col.string != None:\r\n tds_pct = res_col.get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n i = i + 1\r\n except:\r\n pass\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n" }, { "alpha_fraction": 
0.7260273694992065, "alphanum_fraction": 0.7580946683883667, "avg_line_length": 49.984127044677734, "blob_id": "16305a85d7f14dbe9a17d0a70cd3b5b6b776c184", "content_id": "fd899f334755f63022461b17e0d6eed41dd2ecbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3212, "license_type": "no_license", "max_line_length": 452, "num_lines": 63, "path": "/README.md", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "#Election 2016 Analysis for Econsnapshot.com\n\nAuthor: Ben Griffy\n\nInstitution: University of California, Santa Barbara\n\nemail: [email protected]\n\nwebsite: www.bengriffy.com\n\nDate: 11/28/2016\n\nTopic: 2016 election\n\nThis is a series of files written in Python to download, merge, and perform analysis on data related to the 2016 election. This code was used to generate the blog posts on the election at www.econsnapshot.com, run by Thomas Cooley, Ben Griffy, and Peter Rupert. We are releasing the data and codes so that others can explore the results that drove the election, as well as gain more experience with scraping and performing statistical analysis in Python (us included!).\n\n1: Software\n\nThe easiest way to get all of these modules to work on a PC is to install Anaconda, as a number of the modules are at least partially Unix-based, which makes them difficult to install on a PC. A Mac or a Linux computer should not have these problems. The only non-standard program is phantomjs, which is required to make the web-scraper run silently. All others should be available via pip.\n\n2: Options\n\nAt this time, the software can scrape data from 4 different websites for the 2004, 2008, 2012, and 2016 elections, including down-ballot elections. Options are given in the file \"Main.py,\" as follows (note: for the time being, the 2004 scraper doesn't work; we don't use any of this data in our analysis):\n\ndownload_options = {'years': ['2004','2008','2012','2016'], 'replace': ['2004','2008','2012','2016'],\n 'down_ballot':True, 'covariates': ['income','unemployment','industry','demographics'],\n 'extra_datasets': True, 'merge_datasets': True,\n}\n\ndownload_options: check model_wrapper.py for default options.\nyears: select years for download. currently supports 2004, 2008, 2012, 2016.\nreplace: select either individual years to replace, or write True to replace all. False will download files not\nfound.\ndown_ballot: download down-ballot (senate, house, local, etc.) results at the county level when available.\ncovariates: select covariates to download and merge. currently supports BEA income, BLS unemployment,\nBLS industry, and ACS demographics. \nextra_datasets: download and merge extra sets of covariates that we used for additional analysis. this includes\ndata on the Voting Rights Act (RIP), manufacturing employment, and voting machines. currently not merging, but can be manually merged.\nmerge_datasets: combine all datasets on county_fips\n\nSome of these options are currently unavailable, but will be updated shortly.\n\ngraphics_options = {'graph_maps': True, 'ggplot': True, 'interactive': False,\n 'diff_plots': True\n}\n\ngraph_maps: plot data on maps.\ninteractive: plot data to interactive maps.\ndiff_plots: plot difference between 2012 and 2016 percent vote\n\nAt the moment, we are using a Python module to create the maps, but may use ggplot in the future. 
Our current setup allows for interactive plotting.\n\nTo run the program, go to the commandline or your favorite IDE and run \"Main.py\" with your options set.\n\nBasic Outline:\n\n1: Scrape data from news websites. \n\n2: Clean data and merge\n\n3: Map data at the county level\n\n4: Import into Stata for statistical analysis\n" }, { "alpha_fraction": 0.5373450517654419, "alphanum_fraction": 0.5627456903457642, "avg_line_length": 45.57746505737305, "blob_id": "ad8eb180b887190bebc26b8efb6ec778d6066199", "content_id": "0c0e8f923c63619a0e61a80dd2d31e7a470673a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 112, "num_lines": 71, "path": "/Main.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "###########################################Author###############################################################\n#Author: Ben Griffy\n#Institution: University of California, Santa Barbara\n#email: [email protected]\n#website: https://sites.google.com/site/bengriffy/home\n#Date: Nov. 2016\n#Created for: econsnapshot.com\n################################################################################################################\n\nfrom __future__ import division\nimport os\nfrom Model_Wrapper import ModelWrapper\n\n###########################################Modules##############################################################\n#required programs: python, R, Stata (for statistical analysis)\n#python modules:\n#R modules:\n################################################################################################################\n\npath = os.path.dirname(os.path.abspath(__file__))\n\n# data_path = pd.read_csv(path + \"/Data/Smoothed_Profiles.csv\", delimiter=\",\")\n# script_path = pd.read_csv(path + \"/Data/Weights.csv\", delimiter=\",\")\ndata_path = path + '/data/'\nscripts_path = path + '/scripts/'\ngraphics_path = path + '/graphics/'\n\n############################################Options#############################################################\n#download_options: check model_wrapper.py for default options.\n#years: select years for download. currently supports 2004, 2008, 2012, 2016. The 2004 scraper isn't working\n#completely, so for the time being, only use 2008-2016.\n#replace: select either individual years to replace, or write True to replace all. False will download files not\n#found\n#down_ballot: download down-ballot (senate, house, local, etc.) at the county-level when available.\n#covariates: select covariates to download and merge. currently supports BEA income, BLS unemployment,\n#BLS industry, and ACS demographics. \n#extra_datasets: download and merge extra sets of covariates that we used for additional analysis. this includes\n#data on the Voting Rights Act (RIP), manufacturing employment, and voting machines.\n#merge_datasets: combine all datasets on county_fips\n#graphics_options:\n#graph_maps: plot data on maps. 
\n#interactive: plot data to interactive maps.\n#diff_plots: plot the difference between the 2012 and 2016 percent vote.\n################################################################################################################\n\n# download_options = {'years': ['2004','2008','2012','2016'], 'replace': ['2004','2008','2012','2016'],\n#                     'down_ballot':True, 'covariates': ['income','unemployment','industry','demographics'],\n#                     'extra_datasets': True, 'merge_datasets': True,\n#                     }\n\ndownload_options = {'years': ['2008','2012','2016'], 'replace': False,\n                    'down_ballot': True, 'covariates': ['income','unemp','demographics','education','industry'],\n                    'extra_datasets': False, 'merge_datasets': False,\n}\n\ngraphics_options = {'graph_maps': True, 'interactive': True,\n                    'diff_plots': False,\n}\n\nopts = {\n    'download_opts':download_options,\n    'graphics_opts':graphics_options,\n    'data_loc': data_path,\n    'scripts_loc': scripts_path,\n    'graphics_loc': graphics_path,\n}\n\n\nmodel = ModelWrapper(opts = opts)\n\nmodel.Run_Model()\n" }, { "alpha_fraction": 0.5925359725952148, "alphanum_fraction": 0.6168553233146667, "avg_line_length": 45.36879348754883, "blob_id": "95ae4813953f904cf13cae7d1cf9975876b3b357", "content_id": "a702b82a08246d8cc3dfa1f532a173cdb977218d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6538, "license_type": "no_license", "max_line_length": 315, "num_lines": 141, "path": "/scripts/extras_scraper.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport time\r\nmatplotlib.use('Agg')\r\nimport urllib2\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport us\r\nimport wget\r\n# from pyvirtualdisplay import Display\r\n\r\n# display = Display(visible=0, size=(1024, 768))\r\n# display.start()\r\n\r\n# binary = FirefoxBinary(r'C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe')\r\n# driver = webdriver.Firefox(firefox_binary=binary)\r\n\r\nstates = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\",\"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\ndata_types = ['']\r\n\r\nmain_folder = os.path.join(os.path.dirname(os.path.abspath('__file__')), '..')\r\n\r\n# states = ['al']\r\n\r\naf = addfips.AddFIPS()\r\n\r\nmachine_data_url_1 = 'https://www.verifiedvoting.org/api?advanced&state_fips='\r\nmachine_data_url_2 = '&equip_type=&make=&model=&year=2016&download=csv'
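# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the query the download
# loops below assemble -- one CSV of voting-machine records per state FIPS
# code. Alabama (state FIPS '01') is only an illustration.
example_machine_url = machine_data_url_1 + af.get_state_fips('al') + machine_data_url_2
# -> 'https://www.verifiedvoting.org/api?advanced&state_fips=01&equip_type=&make=&model=&year=2016&download=csv'
# ---------------------------------------------------------------------------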
\r\n\r\ndata_path_2016 = main_folder + '/voting_machines/2016'\r\n\r\ndata_path_2014 = main_folder + '/voting_machines/2014'\r\n\r\ndata_path_2012 = main_folder + '/voting_machines/2012'\r\n\r\nif os.path.isdir(data_path_2016) is False:\r\n    os.mkdir(data_path_2016)\r\n\r\nif os.path.isdir(data_path_2014) is False:\r\n    os.mkdir(data_path_2014)\r\n\r\nif os.path.isdir(data_path_2012) is False:\r\n    os.mkdir(data_path_2012)\r\n\r\ncrosswalk = us.states.mapping('name','abbr')\r\n\r\n# for state in states:\r\n#     st = af.get_state_fips(state)\r\n#     urllib.urlretrieve(machine_data_url_1 + str(st) + machine_data_url_2, data_path_2016 + '/' + str(state) + '.csv')\r\n\r\n# machine_data_url_1 = 'https://www.verifiedvoting.org/api?advanced&state_fips='\r\n# machine_data_url_2 = '&equip_type=&make=&model=&year=2014&download=csv'\r\n\r\n# for state in states:\r\n#     st = af.get_state_fips(state)\r\n#     urllib.urlretrieve(machine_data_url_1 + str(st) + machine_data_url_2, data_path_2014 + '/' + str(state) + '.csv')\r\n\r\n# machine_data_url_1 = 'https://www.verifiedvoting.org/api?advanced&state_fips='\r\n# machine_data_url_2 = '&equip_type=&make=&model=&year=2012&download=csv'\r\n\r\n# for state in states:\r\n#     st = af.get_state_fips(state)\r\n#     urllib.urlretrieve(machine_data_url_1 + str(st) + machine_data_url_2, data_path_2012 + '/' + str(state) + '.csv')\r\n\r\nfile_nat = open(data_path_2016 + \"/national_2016.csv\", \"w\")  # '/' added so the header and the appends below hit the same file\r\nfile_nat.write(\"FIPS code,State,Jurisdiction,Division,Precincts,Total Registration,Make,Model,Equipment Type,VVPAT,Accessible Use,Early Voting,Absentee Ballots,Polling Place\\n\")\r\nfile_nat.close()\r\n\r\nfor state in states:\r\n    print state\r\n    try:\r\n        data = pd.read_csv(data_path_2016 + '/' + str(state) + '.csv', delimiter = \",\", header = 1, names = ['FIPS code','State','Jurisdiction','Division','Precincts','Total Registration','Make','Model','Equipment Type','VVPAT','Accessible Use','Early Voting','Absentee Ballots','Polling Place'], index_col = False)\r\n        county_fips = []\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.replace(\" County\",\"\")\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.strip()\r\n        for i, row in data.iterrows():  # iterrows yields (index, row) pairs; indexing with the pair raised and sent every state to the except branch\r\n            county_fips.append(str(af.get_county_fips(str(row['Jurisdiction']), str(row['State']))))\r\n        data['county_fips'] = county_fips\r\n        data['county_fips'] = data['county_fips'].apply(str)\r\n        with open(data_path_2016 + '/' + 'national_2016.csv', 'a') as f:\r\n            data.to_csv(f, header=False, index = False)\r\n    except:\r\n        print \"problem \" + str(state)\r\n        pass\r\n
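# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the addfips lookups the
# loops in this file depend on. addfips returns zero-padded FIPS strings;
# 'Autauga County' is only an example.
print af.get_state_fips('al')                     # expected: '01'
print af.get_county_fips('Autauga County', 'al')  # expected: '01001'
# ---------------------------------------------------------------------------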
\r\nfile_nat = open(data_path_2014 + \"/national_2014.csv\", \"w\")  # '/' added so the header and the appends below hit the same file\r\nfile_nat.write(\"FIPS code,State,Jurisdiction,Division,Precincts,Total Registration,Make,Model,Equipment Type,VVPAT,Accessible Use,Early Voting,Absentee Ballots,Polling Place\\n\")\r\nfile_nat.close()\r\n\r\nfor state in states:\r\n    print state\r\n    try:\r\n        data = pd.read_csv(data_path_2014 + '/' + str(state) + '.csv', delimiter = \",\", header = 1, names = ['FIPS code','State','Jurisdiction','Division','Precincts','Total Registration','Make','Model','Equipment Type','VVPAT','Accessible Use','Early Voting','Absentee Ballots','Polling Place'], index_col = False)\r\n        county_fips = []\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.replace(\" County\",\"\")\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.strip()\r\n        for i, row in data.iterrows():\r\n            county_fips.append(str(af.get_county_fips(str(row['Jurisdiction']), str(row['State']))))\r\n        data['county_fips'] = county_fips\r\n        with open(data_path_2014 + '/' + 'national_2014.csv', 'a') as f:\r\n            data.to_csv(f, header=False, index = False)\r\n    except:\r\n        print \"problem \" + str(state)\r\n        pass\r\n\r\nfile_nat = open(data_path_2012 + \"/national_2012.csv\", \"w\")  # '/' added, as above\r\nfile_nat.write(\"FIPS code,State,Jurisdiction,Division,Precincts,Total Registration,Make,Model,Equipment Type,VVPAT,Accessible Use,Early Voting,Absentee Ballots,Polling Place\\n\")\r\nfile_nat.close()\r\n\r\nfor state in states:\r\n    print state\r\n    try:\r\n        data = pd.read_csv(data_path_2012 + '/' + str(state) + '.csv', delimiter = \",\", header = 1, names = ['FIPS code','State','Jurisdiction','Division','Precincts','Total Registration','Make','Model','Equipment Type','VVPAT','Accessible Use','Early Voting','Absentee Ballots','Polling Place'], index_col = False)\r\n        county_fips = []\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.replace(\" County\",\"\")\r\n        data['Jurisdiction'] = data['Jurisdiction'].str.strip()\r\n        for i, row in data.iterrows():\r\n            county_fips.append(str(af.get_county_fips(str(row['Jurisdiction']), str(row['State']))))\r\n        data['county_fips'] = county_fips\r\n        with open(data_path_2012 + '/' + 'national_2012.csv', 'a') as f:\r\n            data.to_csv(f, header=False, index = False)\r\n    except:\r\n        print \"problem \" + str(state)\r\n        pass\r\n" }, { "alpha_fraction": 0.4831811189651489, "alphanum_fraction": 0.49431943893432617, "avg_line_length": 37.03478240966797, "blob_id": "5a8e7179de0f07819150ae815fede4b93c68a12e", "content_id": "c559fa62d3b0b95f3cf0be08096f435e502b26ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4489, "license_type": "no_license", "max_line_length": 295, "num_lines": 115, "path": "/scripts/covariates_scraper.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport urllib2\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nimport us\r\n\r\ndef scraper_economics(series, output_path, replace_files):\r\n\r\n    states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\",\"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n    main_folder = os.path.dirname(os.path.abspath('__file__'))\r\n\r\n    fips = pd.read_csv(main_folder + '/data/fips/' + 'fips.csv')\r\n\r\n    af = addfips.AddFIPS()\r\n\r\n    location_empwage_data_url = 'http://data.bls.gov/cew/data/api/2016/1/area/'\r\n    industry_empwage_data_url = 'http://data.bls.gov/cew/data/api/2016/1/industry/'\r\n    # unemp_data_url = 'http://www.bls.gov/lau/laucnty'\r\n    unemp_data_url = 'http://www.bls.gov/web/metro/laucntycur14.txt'\r\n    income_data_url = 'http://www.bea.gov/newsreleases/regional/lapi/2016/xls/lapi1116.xlsx'
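    # -----------------------------------------------------------------------
    # Editor's sketch (not part of the original script): how the QCEW 'area'
    # endpoint above is filled in -- one CSV per zero-padded 5-digit county
    # FIPS code. Autauga County, AL (FIPS 01001) is only an illustration.
    example_area_url = location_empwage_data_url + '01001.csv'
    # -> 'http://data.bls.gov/cew/data/api/2016/1/area/01001.csv'
    # -----------------------------------------------------------------------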
\r\n    data_location_empwage_path = main_folder + '/data/covariates/location/'\r\n    data_industry_empwage_path = main_folder + '/data/covariates/industry/'\r\n    data_unemp_path = main_folder + '/data/covariates/unemp/'\r\n    data_income_path = main_folder + '/data/covariates/income/'\r\n\r\n    if os.path.isdir(data_location_empwage_path) is False:\r\n        os.mkdir(data_location_empwage_path)\r\n    if os.path.isdir(data_industry_empwage_path) is False:\r\n        os.mkdir(data_industry_empwage_path)\r\n    if os.path.isdir(data_unemp_path) is False:\r\n        os.mkdir(data_unemp_path)\r\n    if os.path.isdir(data_income_path) is False:\r\n        os.mkdir(data_income_path)\r\n\r\n\r\n    crosswalk = us.states.mapping('name','abbr')\r\n\r\n    if series == \"area\":\r\n\r\n        for loc in fips['fips']:\r\n            loc = str(loc)\r\n            if len(loc) < 5:\r\n                st = str(us.states.lookup('0' + loc[0]))\r\n                st = crosswalk[st].lower()\r\n            else:\r\n                st = str(us.states.lookup(loc[:2]))\r\n                st = crosswalk[st].lower()\r\n            state_loc = data_location_empwage_path + '/' + st + '/'\r\n            if os.path.isdir(state_loc) is False:\r\n                os.mkdir(state_loc)\r\n            if len(loc) < 5:  # pad four-digit county codes to the five-digit form the endpoint expects (original had len(loc) < 4, which never padded)\r\n                urllib.urlretrieve(location_empwage_data_url + '0' + loc + '.csv', state_loc + str(st) + '.csv')\r\n            else:\r\n                urllib.urlretrieve(location_empwage_data_url + loc + '.csv', state_loc + str(st) + '.csv')\r\n\r\n    if series == \"industry\":\r\n        i = 10\r\n        urllib.urlretrieve(industry_empwage_data_url + str(i) + '.csv', data_industry_empwage_path + str(i) + '.csv')\r\n\r\n    # while i < 999:\r\n    #     try:\r\n    #         urllib.urlretrieve(industry_empwage_data_url + str(i) + '.csv', data_industry_empwage_path + str(i) + '.csv')\r\n    #         i = i + 1\r\n    #     except:\r\n    #         i = i + 1\r\n    #         pass\r\n\r\n    if series == \"unemp\":\r\n        urllib.urlretrieve(unemp_data_url, data_unemp_path + '/unemployment.txt')\r\n        # i = 15\r\n\r\n    # while i > 9:\r\n    #     try:\r\n    #         urllib.urlretrieve(unemp_data_url + str(i) + '.txt', data_unemp_path + str(i) + '.txt')\r\n    #         i = i - 1\r\n    #     except:\r\n    #         i = i - 1\r\n    #         pass\r\n\r\n    # while i >= 0:\r\n    #     try:\r\n    #         urllib.urlretrieve(unemp_data_url + '0' + str(i) + '.txt', data_unemp_path + str(i) + '.txt')\r\n    #         i = i - 1\r\n    #     except:\r\n    #         i = i - 1\r\n    #         pass\r\n\r\n    # i = 99\r\n\r\n    # while i >= 90:\r\n    #     try:\r\n    #         urllib.urlretrieve(unemp_data_url + str(i) + '.txt', data_unemp_path + str(i) + '.txt')\r\n    #         i = i - 1\r\n    #     except:\r\n    #         i = i - 1\r\n    #         pass\r\n\r\n    if series == \"income\":\r\n        urllib.urlretrieve(income_data_url, data_income_path + '/income_uncleaned.xlsx')\r\n" }, { "alpha_fraction": 0.551354169845581, "alphanum_fraction": 0.5639582872390747, "avg_line_length": 71.44021606445312, "blob_id": "dab5c3c9b1a5fa5eaf79a7845a38eef091a5d973", "content_id": "b408a628e0f8df5956d6d5c035bad90f22686e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13329, "license_type": "no_license", "max_line_length": 290, "num_lines": 184, "path": "/Model_Wrapper.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\n#Author: Ben Griffy\n#Institution: University of California, Santa Barbara\n#email: [email protected]\n#website: https://sites.google.com/site/bengriffy/home\n#Date:\n################################################################################################################\nfrom __future__ import division\nimport os\nfrom scripts.scraper_2016 import scraper_2016\nfrom scripts.scraper_2012 import scraper_2012\nfrom scripts.scraper_2004 import scraper_2004\nfrom scripts.covariates_scraper import scraper_economics\nfrom 
scripts.covariates_cleaner import clean_covariates\nfrom scripts.covariates_cleaner import clean_demographics\nfrom scripts.covariates_scraper import scraper_economics as scraper_demographics\n# from scripts import scraper_unemp\n# from scripts import scraper_demog\n# from scripts import scraper_ind\n# from scripts import scraper_extras\nfrom scripts.plot_maps import plot_maps\nfrom scripts.merge import merge_data\nimport platform\nimport zipfile as zf\nimport subprocess as subprocess\n\npath = os.path.dirname(os.path.abspath(__file__))\n\nclass ModelWrapper:\n\n def __init__(self, **kwargs):\n \n default_download_options = {'years': ['2004','2008','2012','2016'], 'replace': False,\n 'down_ballot': ['2004','2008','2012','2016'], 'covariates': ['income','unemployment','industry','demographics'],\n 'extra_datasets': True, 'merge_datasets': True,\n }\n default_graphics_options = {'graph_maps': True, 'ggplot': True, 'interactive': False,\n 'diff_plots': True\n }\n \n default_series_options = {'president':{'title':'Presidential Election (% Dem Votes), ','column':'Percentage','subset':'D','subset_column':'Party','match_column':'County_FIPS','low_color':\"#FF0000\",'high_color':\"#0000FF\"},\n 'senate':{'title':'Senate Elections (% Dem Votes), ','column':'Percentage','subset':'D','subset_column':'Party','match_column':'County_FIPS','low_color':\"#FF0000\",'high_color':\"#0000FF\"},\n 'house':{'title':'House Elections (% Dem Votes), ','column':'Percentage','subset':'D','subset_column':'Party','match_column':'County_FIPS','low_color':\"#FF0000\",'high_color':\"#0000FF\"},\n 'governor':{'title':'Gubernatorial Elections (% Dem Votes), ','column':'Percentage','subset':'D','subset_column':'Party','match_column':'County_FIPS','low_color':\"#FF0000\",'high_color':\"#0000FF\"},\n 'income':{'title':'Income Levels','column':'Inc_2015','subset':None,'subset_column':'Party','match_column':'County_FIPS','low_color':\"#00fa9a\",'high_color':\"#ffd700\"},\n 'unemp':{'title':'Unemployment Rate','column':'Unemp_Rate','subset':None,'subset_column':'Party','match_column':'County_FIPS','low_color':\"#00fa9a\",'high_color':\"#ffd700\"},\n 'education':{'title':'Percent with HS Degree or less','column':'pct_hs_less','subset':None,'subset_column':'Party','match_column':'County_FIPS','low_color':\"#00fa9a\",'high_color':\"#ffd700\"},\n 'industry':{'title':'Estabs % Change','column':'oty_qtrly_estabs_pct_chg','subset':10,'subset_column':'industry_code','match_column':'County_FIPS','low_color':\"#00fa9a\",'high_color':\"#ffd700\"},\n 'demographics':{'title':'Percent White','column':'pct_white','subset':None,'subset_column':'Party','match_column':'County_FIPS','low_color':\"#00fa9a\",'high_color':\"#ffd700\"},\n }\n\n default_merge_options = {'president':{'pivot_column':'Party','pivot_values':['Percentage'],},\n 'senate':{'pivot_column':'Party','pivot_values':['Percentage','Votes'],},\n 'house':{'pivot_column':'Party','pivot_values':['Percentage','Votes'],},\n 'governor':{'pivot_column':'Party','pivot_values':['Percentage','Votes'],},\n 'income':{'pivot_column':None,'pivot_values':['Percentage','Votes'],},\n 'unemp':{'pivot_column':None,'pivot_values':['Percentage','Votes'],},\n 'education':{'pivot_column':None,'pivot_values':['Percentage','Votes'],},\n 'industry':{'pivot_column':None,'pivot_values':['Percentage','Votes'],},\n 'demographics':{'pivot_column':None,'pivot_values':['Percentage','Votes'],},\n }\n \n self.opts = kwargs.get('opts')\n self.download_opts = self.opts.get('download_opts',default_download_options)\n 
self.graphics_opts = self.opts.get('graphics_opts',default_graphics_options)\n self.series_opts = self.opts.get('series_opts',default_series_options)\n self.merge_opts = self.opts.get('merge_opts',default_merge_options)\n self.data_path = self.opts.get('data_loc',path + '/data/')\n self.graphics_path = self.opts.get('graphics_loc',path + '/graphics/')\n \n def Run_Model(self):\n os.environ['PATH'] += str(';' + path)\n if self.download_opts['replace'] == True:\n system = platform.system()\n if system == \"Windows\":\n if os.path.isdir(path + '/scripts/phantomjs-windows/') is False:\n install_phantomjs = raw_input(\"To download all of the data, you need PhantomJS. Install? [y/n] \")\n if install_phantomjs == \"y\":\n for filename in os.listdir(path + '/scripts/'):\n if filename.endswith('-windows.zip'):\n with zf.ZipFile(path + \"/scripts/\" + filename, \"r\") as z:\n z.extractall(path + \"/scripts/phantomjs-windows/\")\n elif system == \"Darwin\":\n if os.path.isdir(path + '/scripts/phantomjs-mac/') is False:\n install_phantomjs = raw_input(\"To download all of the data, you need PhantomJS. Install? [y/n] \")\n if install_phantomjs == \"y\":\n for filename in os.listdir(path + '/scripts/'):\n if filename.endswith('-macosx.zip'):\n with zf.ZipFile(path + \"/scripts/\" + filename, \"r\") as z:\n z.extractall(path + \"/scripts/phantomjs-windows/\")\n elif system == \"Linux\":\n if os.path.isdir(path + '/scripts/phantomjs-linux/') is False:\n install_phantomjs = raw_input(\"To download all of the data, you need PhantomJS. Install? [y/n] \")\n if install_phantomjs == \"y\":\n for filename in os.listdir(path + '/scripts/'):\n if filename.endswith('-linux-x86_64.zip'):\n with zf.ZipFile(path + \"/scripts/\" + filename, \"r\") as z:\n z.extractall(path + \"/scripts/phantomjs-windows/\")\n else:\n system_type = raw_input(\"Python could not detect your system type. Please enter your system type, one of \\\"Windows\\\", \\\"Mac\\\", \\\"Linux\\\".\")\n system_type = system_type.lower()\n syst_dict = {'windows':'windows','mac':'macosx','linux':'linux-x86_64'}\n if os.path.isdir(path + '/scripts/phantomjs-' + system_type + '/') is False:\n install_phantomjs = raw_input(\"To download all of the data, you need PhantomJS. Install? 
[y/n] \")\n if install_phantomjs == \"y\":\n for filename in os.listdir(path + '/scripts/'):\n if filename.endswith('-' + syst_dict[system_type] + '.zip'):\n with zf.ZipFile(path + \"/scripts/\" + filename, \"r\") as z:\n z.extractall(path + \"/scripts/phantomjs-\" + system_type + \"/\")\n \n \n if self.download_opts['replace'] == True:\n for year in self.download_opts['years']:\n if self.download_opts['down_ballot'] == True:\n down_ballot_scrape = True\n else:\n down_ballot_scrape = False\n self.download_election_data(date = year, output_path = self.data_path + 'election-' + str(year) + '/', down_ballot_scrape = down_ballot_scrape, replace_files = self.download_opts['replace'])\n for covariate in self.download_opts['covariates']:\n self.download_covariate_data(covariate, self.data_path + '/covariates/' + str(covariate) + '/', self.download_opts['replace'])\n self.clean_covariate_data(covariate, self.data_path + '/covariates/' + str(covariate) + '/', self.download_opts['replace'])\n self.clean_covariate_data(covariate, self.data_path + '/covariates/' + str(covariate) + '/', self.download_opts['replace'])\n # if self.download_opts['extra_datasets'] == True:\n # self.download_extra_data(self.data_path + '/other/')\n if self.graphics_opts['graph_maps'] == True:\n for year in self.download_opts['years']:\n print year\n self.graph_maps(input_path = self.data_path + '/election-' + str(year) + '/president/national_' + str(year) + '.csv', series_name = 'president', output_path = self.graphics_path, year = year, series_options = self.series_opts)\n if self.download_opts['down_ballot'] == True:\n # for year in self.download_opts['down_ballot']:\n try:\n self.graph_maps(input_path = self.data_path + '/election-' + str(year) + '/senate/national_' + str(year) + '.csv', series_name = 'senate', output_path = self.graphics_path, year = year, series_options = self.series_opts)\n # self.graph_maps(input_path = self.data_path + '/election-' + str(year) + '/house/national_' + str(year) + '.csv', series_name = 'house', output_path = self.graphics_path, year = year, series_options = self.series_opts)\n self.graph_maps(input_path = self.data_path + '/election-' + str(year) + '/governor/national_' + str(year) + '.csv', series_name = 'governor', output_path = self.graphics_path, year = year, series_options = self.series_opts)\n except:\n pass\n for covariate in self.download_opts['covariates']:\n self.graph_maps(input_path = self.data_path + '/covariates/' + str(covariate) + '.csv', series_name = covariate, year = None, output_path = self.graphics_path, series_options = self.series_opts)\n\n if os.path.isfile(self.data_path + '/merged_data.csv'):\n os.remove(self.data_path + '/merged_data.csv')\n\n for year in self.download_opts['years']:\n self.merge_vars(series = self.data_path + 'election-' + str(year) + '/president/national_' + str(year) + '.csv', merge_opts = self.merge_opts.get('president'), output = self.data_path + '/merged_data.csv', temp = self.data_path + '/tmp/', year = year, merge_var = 'County_FIPS')\n\n for covariate in self.download_opts['covariates']:\n self.merge_vars(series = self.data_path + '/covariates/' + str(covariate) + '.csv', merge_opts = self.merge_opts.get(covariate), output = self.data_path + '/merged_data.csv', temp = self.data_path + '/tmp/', year = year, merge_var = 'County_FIPS')\n\n def download_election_data(self, date, output_path, down_ballot_scrape, replace_files):\n if date == '2016':\n scraper_2016(output_path, down_ballot_scrape, replace_files)\n if date == '2012':\n 
scraper_2012(output_path, down_ballot_scrape, replace_files)\n if date == '2008':\n subprocess.call(['python', path + '/scripts/scraper_2008.py'], shell = True)\n if down_ballot_scrape == True:\n subprocess.call(['python', path + '/scripts/scraper_2008_downballot.py'], shell = True)\n if date == '2004':\n scraper_2004(output_path, down_ballot_scrape, replace_files)\n\n def download_covariate_data(self, series, output_path, replace_files):\n if series != \"demographics\":\n scraper_economics(series, output_path, replace_files)\n if series == \"demographics\":\n pass\n # scraper_demographics(series, output_path, replace_files)\n\n def clean_covariate_data(self, series, output_path, replace_files):\n clean_covariates(series, output_path, replace_files)\n if series == 'demographics':\n clean_demographics(output_path)\n\n # def download_extra_data(output_path):\n # scraper_extras(output_path)\n\n def graph_maps(self, input_path, series_name, output_path, series_options, year = None):\n if year == None:\n output = output_path + '/' + series_name\n else:\n output = output_path + '/' + series_name + \"_\" + year\n plot_maps(input_path, output, series_options, year, series_name)\n\n def merge_vars(self, series, merge_opts, output, temp, merge_var, year = None):\n merge_data(series, merge_opts, output, temp, merge_var, year)\n" }, { "alpha_fraction": 0.4861329197883606, "alphanum_fraction": 0.4992150664329529, "avg_line_length": 39.543479919433594, "blob_id": "4d91c0430463ed29446a65519ca46124a094263b", "content_id": "d9a48fdff61deaf2177737570498f025c4072e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5733, "license_type": "no_license", "max_line_length": 135, "num_lines": 138, "path": "/scripts/plot_maps.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "### color_map.py\r\n \r\nimport csv\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom bs4 import BeautifulSoup, Tag, NavigableString\r\nfrom graph_helper import linear_gradient\r\nfrom wand.api import library\r\nimport wand.color\r\nimport wand.image\r\nimport os\r\n\r\ndef plot_maps(input_path, output, options, year = None, series = False):\r\n main_folder = os.path.dirname(os.path.abspath('__file__'))\r\n # Read in unemployment rates\r\n final_var_dict = {}\r\n opts = options.get(series)\r\n data = pd.read_csv(input_path, delimiter=\",\", header = 0, index_col = False)\r\n for i in data.iterrows():\r\n # try:\r\n if opts['subset'] == None:\r\n try:\r\n match = int(data[opts['match_column']].ix[i])\r\n except:\r\n match = 0\r\n if match > 0:\r\n try:\r\n temp_var = float( data[opts['column']].ix[i].replace(\"%\",\"\").strip() )\r\n except:\r\n temp_var = float( data[opts['column']].ix[i] )\r\n # print temp_var\r\n final_var_dict[match] = temp_var\r\n else:\r\n try:\r\n if data[opts['subset_column']].ix[i].replace(\"Democratic\",\"D\").replace(\"Dem\",\"D\").replace(\"GOP\",\"R\") == opts['subset']:\r\n match = data[opts['match_column']].ix[i].astype(int)\r\n if match > 0:\r\n try:\r\n temp_var = float( data[opts['column']].ix[i].replace(\"%\",\"\").strip() )\r\n except:\r\n temp_var = float( data[opts['column']].ix[i] )\r\n # print temp_var\r\n final_var_dict[match] = temp_var\r\n except:\r\n if data[opts['subset_column']].ix[i] == opts['subset']:\r\n match = pd.to_numeric(data[opts['match_column']].ix[i], errors = 'coerce')\r\n if match > 0:\r\n try:\r\n temp_var = float( data[opts['column']].ix[i].replace(\"%\",\"\").strip() )\r\n except:\r\n 
temp_var = float( data[opts['column']].ix[i] )\r\n # print temp_var\r\n final_var_dict[match] = temp_var\r\n # print county_fips\r\n # print final_var_dict[county_fips]\r\n # except:\r\n # pass\r\n\r\n min = np.min(final_var_dict.values())\r\n max = np.max(final_var_dict.values())\r\n if np.mean(final_var_dict.values()) < max/2:\r\n max = np.percentile(final_var_dict.values(),90)\r\n if np.mean(final_var_dict.values()) > min*2:\r\n min = np.percentile(final_var_dict.values(),10)\r\n\r\n \r\n # Load the SVG map\r\n svg = open(main_folder + '/data/maps/counties.svg', 'r').read()\r\n\r\n # Load into Beautiful Soup\r\n soup = BeautifulSoup(svg, 'xml')\r\n if year == None:\r\n soup.find('tspan', attrs={'fill':'black'}).string = opts['title']\r\n else:\r\n soup.find('tspan', attrs={'fill':'black'}).string = opts['title'] + year\r\n\r\n # Find counties\r\n paths = soup.findAll('path')\r\n\r\n # Map colors\r\n\r\n # colors = [\"#FF0000\", \"#FFFFFF\", \"#0000FF\"]\r\n\r\n colors = [opts['low_color'], \"#FFFFFF\", opts['high_color']]\r\n\r\n cmap = linear_gradient(start_hex = colors[0], finish_hex = colors[1], n = 51)\r\n\r\n cmap = cmap + linear_gradient(start_hex = colors[1], finish_hex = colors[2], n = 50)[1:]\r\n\r\n soup.find('stop', attrs={'id':'stop4246'})['style'] = \"stop-color:\" + colors[0] + \";stop-opacity:1\"\r\n soup.find('stop', attrs={'id':'stop4244'})['style'] = \"stop-color:\" + colors[1] + \";stop-opacity:1\"\r\n soup.find('stop', attrs={'id':'stop4242'})['style'] = \"stop-color:\" + colors[2] + \";stop-opacity:1\"\r\n\r\n # County style\r\n path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;stroke-width:0.1; \\\r\n stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel;fill:'\r\n\r\n # Color the counties based on unemployment rate\r\n for p in paths:\r\n\r\n if p['id'] not in [\"State_Lines\", \"separator\"]:\r\n # print p['id']\r\n try:\r\n pct_var = final_var_dict[int(p['id'])]\r\n name = p['inkscape:label']\r\n p['onmouseover']=\"displayName(\" +\" \\'\" + name + \"\\'\" + \");displayVar(\" +\" \\'\" + str(pct_var) + \"\\'\" + \")\"\r\n except:\r\n continue\r\n if np.int((pct_var - min)/(max - min)*99) < 99 and np.int((pct_var - min)/(max - min)*99) > 0:\r\n color_class = np.int((pct_var - min)/(max - min)*99)\r\n elif np.int((pct_var - min)/(max - min)*99) >= 99:\r\n color_class = 99\r\n elif np.int((pct_var - min)/(max - min)*99) <= 0:\r\n color_class = 0\r\n try:\r\n color = cmap[color_class]\r\n except:\r\n print \"there's a problem... 
\" + str(color_class)\r\n p['style'] = path_style + color\r\n\r\n soup.find('text', attrs={'id':'textLB'}).string = '< ' + str(int(min))\r\n soup.find('text', attrs={'id':'textUB'}).string = '> ' + str(int(max))\r\n\r\n html = soup.prettify(\"utf-8\")\r\n with open(output + '.svg', \"wb\") as file:\r\n file.write(html)\r\n\r\n svg_file = open(output + '.svg', \"r\")\r\n\r\n with wand.image.Image() as image:\r\n with wand.color.Color('white') as background_color:\r\n library.MagickSetBackgroundColor(image.wand, \r\n background_color.resource) \r\n image.read(blob=svg_file.read())\r\n png_image = image.make_blob(\"png32\")\r\n\r\n with open(output + '.png', \"wb\") as out:\r\n out.write(png_image)\r\n" }, { "alpha_fraction": 0.44460493326187134, "alphanum_fraction": 0.45476147532463074, "avg_line_length": 68.37377166748047, "blob_id": "d1b4d9a6a55aa6a840b5ff4d40a7b2ceb9f02e68", "content_id": "cfdc4466c515073e870ffedaa2c2327c0ffd70b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21464, "license_type": "no_license", "max_line_length": 296, "num_lines": 305, "path": "/scripts/scraper_2012.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport time\r\nmatplotlib.use('Agg')\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nimport us\r\nimport re\r\n# from selenium import webdriver\r\n# from contextlib import closing\r\n# from selenium.webdriver import Firefox # pip install selenium\r\n# from selenium.webdriver.support.ui import WebDriverWait\r\n# from selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\n\r\n# states = ['al']\r\n\r\ndef scraper_2012(output_path, scrape_all, replace):\r\n\r\n af = addfips.AddFIPS()\r\n\r\n states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\", \"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n website_president = 'http://www.politico.com/2012-election/results/president/'\r\n\r\n website_house = 'http://www.politico.com/2012-election/results/house/'\r\n\r\n website_senate = 'http://www.politico.com/2012-election/results/senate/'\r\n\r\n website_governor = 'http://www.politico.com/2012-election/results/governor/'\r\n\r\n data_pres_path = output_path + '/president/'\r\n data_senate_path = output_path + '/senate/'\r\n data_house_path = output_path + '/house/'\r\n data_governor_path = output_path + '/governor/'\r\n\r\n if os.path.isdir(output_path) is False:\r\n os.mkdir(output_path)\r\n\r\n if os.path.isdir(data_pres_path) is False:\r\n os.mkdir(data_pres_path)\r\n\r\n file_nat = open(data_pres_path + \"national_2012.csv\", \"w\")\r\n 
file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n st = us.states.lookup(state)\r\n st = str(st).replace(\" \",\"-\").lower()\r\n print st\r\n # try:\r\n url = website_president + st + '/'\r\n hdrs = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers = hdrs)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_pres_path + \"national_2012.csv\", \"a\")\r\n file_state = open(data_pres_path + state + \"_2012.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n # try:\r\n # except:\r\n if soup.find('tbody', id=re.compile('(county.*)[0-9]')) == False:\r\n for res_table in soup.find_all('tbody'):\r\n county_name = state\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n else:\r\n for res_table in soup.find_all('tbody', id=re.compile('(county.*)[0-9]')):\r\n county_name = res_table.find('th', class_='results-county').get_text()\r\n del_temp = res_table.find('span', class_='precincts-reporting').get_text()\r\n county_name = str(county_name).split(\" \" + str(del_temp))[0]\r\n if not \"County\" in county_name and not \"City\" in county_name:\r\n county_name = county_name + \" County\"\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + 
af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n\r\n if scrape_all == True:\r\n\r\n if os.path.isdir(data_senate_path) is False:\r\n os.mkdir(data_senate_path)\r\n\r\n if os.path.isdir(data_house_path) is False:\r\n os.mkdir(data_house_path)\r\n\r\n if os.path.isdir(data_governor_path) is False:\r\n os.mkdir(data_governor_path)\r\n\r\n file_nat = open(data_senate_path + \"national_2012.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n st = us.states.lookup(state)\r\n st = str(st).replace(\" \",\"-\").lower()\r\n # try:\r\n url = website_senate + st + '/'\r\n hdrs = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers = hdrs)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_senate_path + \"national_2012.csv\", \"a\")\r\n file_state = open(data_senate_path + state + \"_2012.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n # try:\r\n # except:\r\n if soup.find('tbody', id=re.compile('(county.*)[0-9]')) == False:\r\n for res_table in soup.find_all('tbody'):\r\n county_name = state\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n else:\r\n for res_table in soup.find_all('tbody', id=re.compile('(county.*)[0-9]')):\r\n county_name = res_table.find('th', class_='results-county').get_text()\r\n del_temp = res_table.find('span', class_='precincts-reporting').get_text()\r\n county_name = str(county_name).split(\" \" + str(del_temp))[0]\r\n if not \"County\" in county_name and not \"City\" in county_name:\r\n county_name = county_name + \" County\"\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" 
and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n\r\n file_nat = open(data_governor_path + \"national_2012.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n st = us.states.lookup(state)\r\n st = str(st).replace(\" \",\"-\").lower()\r\n # try:\r\n url = website_governor + st + '/'\r\n hdrs = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers = hdrs)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_senate_path + \"national_2012.csv\", \"a\")\r\n file_state = open(data_governor_path + state + \"_2012.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n # try:\r\n # except:\r\n if soup.find('tbody', id=re.compile('(county.*)[0-9]')) == False:\r\n for res_table in soup.find_all('tbody'):\r\n county_name = state\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n else:\r\n for res_table in soup.find_all('tbody', id=re.compile('(county.*)[0-9]')):\r\n county_name = res_table.find('th', class_='results-county').get_text()\r\n del_temp = res_table.find('span', 
class_='precincts-reporting').get_text()\r\n county_name = str(county_name).split(\" \" + str(del_temp))[0]\r\n if not \"County\" in county_name and not \"City\" in county_name:\r\n county_name = county_name + \" County\"\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n\r\n file_nat = open(data_house_path + \"national_2012.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n st = us.states.lookup(state)\r\n st = str(st).replace(\" \",\"-\").lower()\r\n # try:\r\n url = website_house + st + '/'\r\n hdrs = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers = hdrs)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_house_path + \"national_2012.csv\", \"a\")\r\n file_state = open(data_house_path + state + \"_2012.csv\", \"w\")\r\n file_state.write(\"District,Party,St_FIPS,Candidate,Votes,Percentage\\n\")\r\n if soup.find('tbody', id=re.compile('(district.*)[0-9]')) == False:\r\n for res_table in soup.find_all('tbody'):\r\n county_name = state\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + 
tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n else:\r\n for res_table in soup.find_all('tbody', id=re.compile('(district.*)[0-9]')):\r\n county_name = res_table.find('th', class_='results-county').get_text()\r\n del_temp = res_table.find('span', class_='precincts-reporting').get_text()\r\n county_name = str(county_name).split(\" \" + str(del_temp))[0]\r\n for tr in res_table.find_all('tr'):\r\n th_name = tr.find('th', class_='results-candidate').get_text()\r\n tds_party = tr.find('td', class_='results-party').get_text().replace(\"Dem\", \"D\").replace(\"GOP\", \"R\")\r\n if tds_party != \"R\" and tds_party != \"D\":\r\n tds_party = 'I'\r\n tds_pct = tr.find('td', class_='results-percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='results-popular').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + th_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n" }, { "alpha_fraction": 0.4569263756275177, "alphanum_fraction": 0.47357118129730225, "avg_line_length": 64.25296783447266, "blob_id": "835a6f8fbd3bb25116d593755ced8b9d47aed3ee", "content_id": "83f4975b1373d93d918cd83a7946ceab5f9c258d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16762, "license_type": "no_license", "max_line_length": 296, "num_lines": 253, "path": "/scripts/scraper_2016.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\n\r\ndef scraper_2016(output_path, scrape_all, replace):\r\n\r\n af = addfips.AddFIPS()\r\n\r\n states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\", \"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n data_pres_path = output_path + '/president/'\r\n 
data_senate_path = output_path + '/senate/'\r\n data_house_path = output_path + '/house/'\r\n data_governor_path = output_path + '/governor/'\r\n\r\n if os.path.isdir(output_path) is False:\r\n os.mkdir(output_path)\r\n \r\n if os.path.isdir(data_pres_path) is False:\r\n os.mkdir(data_pres_path)\r\n\r\n website_president = 'http://www.realclearpolitics.com/elections/live_results/2016_general/president/'\r\n website_president_2_part1 = 'http://uselectionatlas.org/RESULTS/datagraph.php?year=2016&fips='\r\n website_president_2_part2 = '&f=1&off=0&elect=0'\r\n website_senate = 'http://www.realclearpolitics.com/elections/live_results/2016_general/senate/'\r\n website_house = 'http://www.realclearpolitics.com/elections/live_results/2016_general/house/'\r\n website_governor = 'http://www.realclearpolitics.com/elections/live_results/2016_general/governor/'\r\n\r\n file_nat = open(data_pres_path + \"national_2016.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\n file_nat.close()\r\n for state in states:\r\n county_fips = 1\r\n while county_fips != None:\r\n try:\r\n url = website_president + state + '.html'\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_pres_path + \"national_2016.csv\", \"a\")\r\n file_state = open(data_pres_path + state + \"_2016.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n for res_table in soup.find_all('div', class_='county_section'):\r\n for head in res_table.find_all('div', class_='header'):\r\n county_name = head.find('span', class_='title').get_text()\r\n county_name = str(county_name).split(\" County\")[0] + \" County\"\r\n county_fips = af.get_county_fips(county_name, state)\r\n for tr in res_table.find_all('tr'):\r\n tds_name = tr.find('td', class_='name').get_text()\r\n tds_party = tr.find('span', class_='bubble').get_text().replace('GOP','R').replace('Dem','D').replace('Ind','I')\r\n tds_pct = tr.find('td', class_='percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='votes').get_text()\r\n if county_name != 'Final Results' and county_fips != None:\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n elif state == 'ak' and county_name != 'Final Results':\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + 
tds_tot.replace(\",\",\"\") + \"\\n\")\r\n county_fips = 1\r\n elif 'City' in str(county_name):\r\n county_fips = 1\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n break\r\n\r\n if county_fips == None:\r\n df = pd.read_csv(data_pres_path + \"national_2016.csv\")\r\n df = df[df['State'] != state]\r\n df.to_csv(data_pres_path + \"national_2016.csv\", index = False)\r\n if af.get_state_fips(state)[0] == '0':\r\n url = website_president_2_part1 + str(af.get_state_fips(state)[1]) + website_president_2_part2\r\n else:\r\n url = website_president_2_part1 + str(af.get_state_fips(state)) + website_president_2_part2\r\n hdrs = {\"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*\", \r\n # \"Accept-Encoding\": \"gzip, deflate, sdch, br\", \r\n \"Accept-Language\": \"en-US,en;q=0.8\", \"Upgrade-Insecure-Requests\": \"1\", 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers = hdrs)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_pres_path + \"national_2016.csv\", \"a\")\r\n file_state = open(data_pres_path + state + \"_2016.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n for res_table in soup.find_all('table'):\r\n # print res_table\r\n for head in res_table.find_all('td', rowspan='4'):\r\n county_name = head.find('b').get_text()\r\n county_fips = af.get_county_fips(county_name, state)\r\n for tr in res_table.find_all('tr'):\r\n try:\r\n try:\r\n tds_name = tr.find('td', class_='cnd').get_text()\r\n except:\r\n tds_name = tr.find_all('td')[0].get_text()\r\n if tds_name == 'Clinton':\r\n tds_party = 'D'\r\n elif tds_name == 'Trump':\r\n tds_party = 'R'\r\n elif tds_name == 'Stein':\r\n tds_party = 'I'\r\n elif tds_name == 'Johnson':\r\n tds_party = 'I'\r\n elif tds_name == 'McMullin':\r\n tds_party = 'I'\r\n else:\r\n tds_party = 'Unknown'\r\n tds_pct = tr.find('td', class_='per').get_text()\r\n tds_tot = tr.find('td', class_='dat').get_text()\r\n if county_name != 'Final Results':\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n pass\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n\r\n if scrape_all == True:\r\n\r\n if os.path.isdir(data_senate_path) is False:\r\n os.mkdir(data_senate_path)\r\n\r\n if os.path.isdir(data_house_path) is False:\r\n os.mkdir(data_house_path)\r\n\r\n if os.path.isdir(data_governor_path) is False:\r\n os.mkdir(data_governor_path)\r\n\r\n file_nat = open(data_senate_path + \"national_2016.csv\", \"w\")\r\n 
file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n url = website_senate + state + '.html'\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_senate_path + \"national_2016.csv\", \"a\")\r\n file_state = open(data_senate_path + state + \"_2016.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Votes,Percentage\\n\")\r\n for res_table in soup.find_all('div', class_='county_section'):\r\n for head in res_table.find_all('div', class_='header'):\r\n county_name = head.find('span', class_='title').get_text()\r\n for tr in res_table.find_all('tr'):\r\n tds_name = tr.find('td', class_='name').get_text()\r\n tds_party = tr.find('span', class_='bubble').get_text().replace('GOP','R').replace('Dem','D').replace('Ind','I')\r\n tds_pct = tr.find('td', class_='percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='votes').get_text()\r\n if county_name != 'Final Results':\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n\r\n file_nat = open(data_house_path + \"general_2016.csv\", \"w\")\r\n file_nat.write(\"State,District,Party,St_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n url = website_house + state + '.html'\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_house_path + \"national_2016.csv\", \"a\")\r\n file_state = open(data_house_path + state + \"_2016.csv\", \"w\")\r\n file_state.write(\"District,Party,St_FIPS,Candidate,Votes,Percentage\\n\")\r\n for res_table in soup.find_all('div', class_='county_section'):\r\n for head in res_table.find_all('div', class_='header'):\r\n county_name = head.find('span', class_='title').get_text()\r\n for tr in res_table.find_all('tr'):\r\n tds_name = tr.find('td', class_='name').get_text()\r\n tds_party = tr.find('span', class_='bubble').get_text().replace('GOP','R').replace('Dem','D').replace('Ind','I')\r\n tds_pct = tr.find('td', class_='percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='votes').get_text()\r\n if county_name != 'Final Results':\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + 
\",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n\r\n file_nat = open(data_governor_path + \"national_2016.csv\", \"w\")\r\n file_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Votes,Percentage\\n\")\r\n file_nat.close()\r\n\r\n for state in states:\r\n try:\r\n url = website_governor + state + '.html'\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}\r\n req = urllib2.Request(url, None, headers)\r\n page = urllib2.urlopen(req)\r\n soup = BeautifulSoup(page.read())\r\n file_nat = open(data_governor_path + \"national_2016.csv\", \"a\")\r\n file_state = open(data_governor_path + state + \"_2016.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Votes,Percentage\\n\")\r\n for res_table in soup.find_all('div', class_='county_section'):\r\n for head in res_table.find_all('div', class_='header'):\r\n county_name = head.find('span', class_='title').get_text()\r\n for tr in res_table.find_all('tr'):\r\n tds_name = tr.find('td', class_='name').get_text()\r\n tds_party = tr.find('span', class_='bubble').get_text().replace('GOP','R').replace('Dem','D').replace('Ind','I')\r\n tds_pct = tr.find('td', class_='percentage').get_text().replace(\"%\",\"\")\r\n tds_tot = tr.find('td', class_='votes').get_text()\r\n if county_name != 'Final Results':\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n" }, { "alpha_fraction": 0.5163694620132446, "alphanum_fraction": 0.5267465710639954, "avg_line_length": 42.718955993652344, "blob_id": "0eaf70aedfb78f2067b9b036dcd86f690cfded7b", "content_id": "19aa63dec4ff6877f141ae5eda76d5b0820b8d02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6842, "license_type": "no_license", "max_line_length": 292, "num_lines": 153, "path": "/scripts/scraper_2008.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport 
numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport time\r\nmatplotlib.use('Agg')\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport platform\r\nimport addfips\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport us\r\n# from pyvirtualdisplay import Display\r\n\r\n# display = Display(visible=0, size=(1024, 768))\r\n# display.start()\r\n\r\n# binary = FirefoxBinary(r'C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe')\r\n# driver = webdriver.Firefox(firefox_binary=binary)\r\n\r\npath = os.path.join(os.path.dirname(os.path.abspath('__file__')))\r\n\r\n# path = os.path.dirname(os.path.abspath('__file__'))\r\n\r\n# states = ['al']\r\n\r\naf = addfips.AddFIPS()\r\n\r\nstates = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\", \"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\nwebsite_president = 'http://www.cnn.com/ELECTION/2008/results/county/#'\r\n\r\nwebsite_house = 'http://www.cnn.com/ELECTION/2008/results/full/#val=Hp'\r\n\r\nwebsite_senate = 'http://www.cnn.com/ELECTION/2008/results/county/#'\r\n\r\nwebsite_governor = 'http://www.cnn.com/ELECTION/2008/results/county/#'\r\n\r\ndata_pres_path = path + '/data/election-2008/president/'\r\ndata_senate_path = path + '/data/election-2008/senate/'\r\ndata_house_path = path + '/data/election-2008/house/'\r\ndata_governor_path = path + '/data/election-2008/governor/'\r\n\r\nif os.path.isdir(data_pres_path) is False:\r\n    os.mkdir(data_pres_path)\r\n\r\nif os.path.isdir(data_senate_path) is False:\r\n    os.mkdir(data_senate_path)\r\n\r\nif os.path.isdir(data_house_path) is False:\r\n    os.mkdir(data_house_path)\r\n\r\nif os.path.isdir(data_governor_path) is False:\r\n    os.mkdir(data_governor_path)\r\n\r\nsystem = platform.system()\r\nif system == \"Windows\":\r\n    if os.path.isdir(path + '/phantomjs-windows/') is True:\r\n\r\n        print \"true\"\r\n        for dirName, subdirList, fileList in os.walk(path + '/phantomjs-windows/', topdown=False):\r\n            for fname in fileList:\r\n                print('\\t%s' % fname)\r\n                if fname.startswith('phantomjs.exe'):\r\n                    phantom_exe = dirName + \"/\" + fname\r\n                    phantom_loc = dirName\r\n            break\r\nelif system == \"Darwin\":\r\n    if os.path.isdir(path + '/phantomjs-mac/') is True:\r\n        for dirName, subdirList, fileList in os.walk(path + '/phantomjs-mac/', topdown=False):\r\n            for fname in fileList:\r\n                if fname.startswith('phantomjs.exe'):\r\n                    phantom_exe = dirName + \"/\" + fname\r\n                    phantom_loc = dirName\r\n            break\r\nelif system == \"Linux\":\r\n    if os.path.isdir(path + '/phantomjs-linux/') is True:\r\n        for dirName, subdirList, fileList in os.walk(path + '/phantomjs-linux/', topdown=False):\r\n            for fname in fileList:\r\n                if fname.startswith('phantomjs.exe'):\r\n                    phantom_exe = dirName + \"/\" + fname\r\n                    phantom_loc = dirName\r\n            break\r\nelse:\r\n    system_type = raw_input(\"Python could not detect your system type. 
Please enter your system type, one of \\\"Windows\\\", \\\"Mac\\\", \\\"Linux\\\".\")\r\n system_type = system_type.lower()\r\n if os.path.isdir(path + '/phantomjs-' + system_type + '/') is True:\r\n for dirName, subdirList, fileList in os.walk(path + '/phantomjs-' + system_type + '/', topdown=False):\r\n for fname in fileList:\r\n if fname.startswith('phantomjs.exe'):\r\n phantom_exe = dirName + \"/\" + fname\r\n phantom_loc = dirName\r\n break\r\n\r\nphantom_exe = str(phantom_exe.replace(\"/\", \"\\\\\"))\r\n\r\nfile_nat = open(data_pres_path + \"national_2008.csv\", \"w\")\r\nfile_nat.write(\"State,County,Party,St_FIPS,County_FIPS,Candidate,Percentage,Votes\\n\")\r\nfile_nat.close()\r\n\r\nfor state in states:\r\n try:\r\n i = 1\r\n file_nat = open(data_pres_path + \"national_2008.csv\", \"a\")\r\n file_state = open(data_pres_path + state + \"_2008.csv\", \"w\")\r\n file_state.write(\"County,County_FIPS,Party,Candidate,Percentage,Votes\\n\")\r\n while i < 100:\r\n driver = webdriver.PhantomJS(executable_path = phantom_exe)\r\n url = website_president + state.upper() + 'P00p' + str(i)\r\n driver.get(url)\r\n time.sleep(5)\r\n soup = BeautifulSoup(driver.page_source)\r\n for res_table in soup.find_all('div', class_='cnnElex_rBoxCTY'):\r\n county_name = res_table.find('div', class_='cnnElex_raceState').get_text()\r\n for tr in res_table.find_all('tr'):\r\n tds_name = tr.find('a', class_='nobio').get_text()\r\n if tr.find('div', class_='cnnR_R'):\r\n tds_party = \"R\"\r\n elif tr.find('div', class_='cnnR_D'):\r\n tds_party = 'D'\r\n elif tr.find('div', class_='cnnR_I'):\r\n tds_party = 'I'\r\n else:\r\n tds_party = 'Unknown'\r\n tds_pct = tr.find('b').get_text()\r\n tds_tot = tr.find('td', class_='vote_p').get_text()\r\n try:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + af.get_county_fips(county_name, state) + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n except:\r\n file_nat.write(state + \",\" + county_name + \",\" + tds_party + \",\" + af.get_state_fips(state) + \",\" + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n file_state.write(county_name + \",\" + \",\" + tds_party + \",\" + tds_name + \",\" + tds_pct + \",\" + tds_tot.replace(\",\",\"\") + \"\\n\")\r\n if soup.find('div', id='cnnElex_fResPagi'):\r\n i = i + 1\r\n else:\r\n i = 101\r\n driver.close()\r\n file_state.close()\r\n file_nat.close()\r\n time.sleep(np.random.uniform(5,17.6))\r\n except:\r\n pass\r\n" }, { "alpha_fraction": 0.5567545294761658, "alphanum_fraction": 0.5795656442642212, "avg_line_length": 46.16447448730469, "blob_id": "952e4d0c151a0c917f7998801154bfc2b069a162", "content_id": "a623531e66818870e433df5c1ba73470a91df61e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7321, "license_type": "no_license", "max_line_length": 295, "num_lines": 152, "path": "/scripts/covariates_cleaner.py", "repo_name": "econsnapshot/election-2016", "src_encoding": "UTF-8", "text": "################################################################################################################\r\n#Author: Ben Griffy\r\n#Institution: University of California, Santa Barbara\r\n#email: [email protected]\r\n#website: 
https://sites.google.com/site/bengriffy/home\r\n#Date:\r\n################################################################################################################\r\n\r\nfrom __future__ import division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport time\r\nmatplotlib.use('Agg')\r\nimport urllib2\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport addfips\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport us\r\nimport wget\r\n# from pyvirtualdisplay import Display\r\n\r\n# display = Display(visible=0, size=(1024, 768))\r\n# display.start()\r\n\r\n# binary = FirefoxBinary(r'C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe')\r\n# driver = webdriver.Firefox(firefox_binary=binary)\r\n\r\ndef clean_covariates(series, output_path, replace_files):\r\n\r\n states = [\"ak\",\"al\",\"ar\",\"az\",\"ca\",\"co\",\"ct\",\"dc\",\"de\",\"fl\",\"ga\",\"gu\",\"hi\",\"ia\",\"id\",\"il\",\"in\",\"ks\",\"ky\",\"la\",\"ma\",\"md\",\"me\",\"mh\",\"mi\",\"mn\",\"mo\",\"ms\",\"mt\",\"nc\",\"nd\",\"ne\",\"nh\",\"nj\",\"nm\",\"nv\",\"ny\", \"oh\",\"ok\",\"or\",\"pa\",\"pr\",\"pw\",\"ri\",\"sc\",\"sd\",\"tn\",\"tx\",\"ut\",\"va\",\"vi\",\"vt\",\"wa\",\"wi\",\"wv\",\"wy\"]\r\n\r\n main_folder = os.path.dirname(os.path.abspath('__file__'))\r\n\r\n fips = pd.read_csv(main_folder + '/data/fips/' + 'fips.csv')\r\n\r\n af = addfips.AddFIPS()\r\n\r\n data_location_path = main_folder + '/data/covariates/location/'\r\n data_industry_path = main_folder + '/data/covariates/industry/'\r\n data_unemp_path = main_folder + '/data/covariates/unemp/'\r\n data_income_path = main_folder + '/data/covariates/income/'\r\n\r\n if os.path.isdir(data_location_path) is False:\r\n os.mkdir(data_location_path)\r\n\r\n if os.path.isdir(data_industry_path) is False:\r\n os.mkdir(data_industry_path)\r\n\r\n if os.path.isdir(data_unemp_path) is False:\r\n os.mkdir(data_unemp_path)\r\n\r\n if os.path.isdir(data_income_path) is False:\r\n os.mkdir(data_income_path)\r\n\r\n\r\n crosswalk = us.states.mapping('name','abbr')\r\n\r\n if series == \"income\":\r\n inc_data = pd.read_excel(data_income_path + \"/income_uncleaned.xlsx\", header = None, names=['County','Inc_2013','Inc_2014','Inc_2015','Inc_Rank','Pct_Chg_2014','Pct_Chg_2015','Pct_Rank'], skiprows = 6)\r\n inc_data = inc_data.dropna()\r\n state_temp = []\r\n\r\n for i in inc_data.iterrows():\r\n try:\r\n state_temp.append(crosswalk[inc_data['County'].ix[i]])\r\n except:\r\n state_temp.append(\"\")\r\n\r\n inc_data['drop'] = state_temp\r\n\r\n for i in range(0,len(state_temp)):\r\n if i > 0:\r\n if state_temp[i] == \"\":\r\n state_temp[i] = state_temp[i-1]\r\n\r\n inc_data['State'] = state_temp\r\n\r\n county_fips = []\r\n for i in inc_data.iterrows():\r\n try:\r\n county_fips.append(str(af.get_county_fips(str(inc_data['County'].ix[i]), str(inc_data['State'].ix[i]))))\r\n except:\r\n county_fips.append(str(0))\r\n\r\n inc_data['County_FIPS'] = county_fips\r\n\r\n inc_data = inc_data[inc_data['drop'] == \"\"]\r\n\r\n inc_data = inc_data.drop('drop', axis = 1)\r\n\r\n\r\n inc_data.to_csv(main_folder + '/data/covariates/income.csv', index = False, sep = \",\", encoding='utf-8', mode = 'w')\r\n\r\n if series == \"industry\":\r\n ind_data = pd.read_csv(data_industry_path + \"/10.csv\")\r\n ind_data = ind_data.rename(columns={'area_fips':'County_FIPS'})\r\n ind_data = ind_data[ind_data['agglvl_code'] == 70]\r\n ind_data.to_csv(main_folder + 
'/data/covariates/industry.csv', index = False, mode = 'w')\r\n\r\n if series == \"unemp\":\r\n unemp_data = pd.read_table(data_unemp_path + \"/unemployment.txt\", sep = '|', skiprows = 6, header = None, names = ['LAUS','FIPS1','FIPS2','Name','Period','Labor_Force','Employed','Unemp_Level','Unemp_Rate'])\r\n unemp_data['County_FIPS'] = unemp_data['LAUS'].str.slice(start=3,stop=8)\r\n unemp_data = unemp_data[unemp_data['Period'].str.strip().isin(['Sep-16(p)'])]\r\n unemp_data = unemp_data[['Name','Labor_Force','Employed','Unemp_Level','Unemp_Rate','County_FIPS']]\r\n unemp_data = unemp_data[['Name','Labor_Force','Employed','Unemp_Level','Unemp_Rate','County_FIPS']]\r\n drop = []\r\n for i in unemp_data.iterrows():\r\n try:\r\n float(unemp_data['County_FIPS'].ix[i])\r\n drop.append(0)\r\n except:\r\n drop.append(1)\r\n unemp_data['drop'] = drop\r\n unemp_data = unemp_data[unemp_data['drop'] == 0]\r\n unemp_data = unemp_data.drop('drop',axis=1)\r\n # unemp_data['Unemp_Rate'] = unemp_data['Unemp_Rate'].str.replace(',','')\r\n # unemp_data['Unemp_Level'] = unemp_data['Unemp_Level'].str.replace(',','')\r\n # unemp_data['Employed'] = unemp_data['Employed'].str.replace(',','')\r\n # unemp_data['Labor_Force'] = unemp_data['Labor_Force'].str.replace(',','')\r\n unemp_data.to_csv(main_folder + '/data/covariates/unemp.csv', index = False, mode = 'w')\r\n\r\ndef clean_demographics(output_path):\r\n main_folder = os.path.dirname(os.path.abspath('__file__'))\r\n data_path = main_folder + '/data/covariates/demographics/'\r\n\r\n if os.path.isdir(data_path) is False:\r\n os.mkdir(data_path)\r\n\r\n pop_data_temp = pd.read_csv(data_path + \"/demographics.csv\", skiprows = 1, header = 0)\r\n pop_data_temp = pop_data_temp.rename(columns={'Id2':'County_FIPS'})\r\n pop_data = pop_data_temp[['County_FIPS','HC01_VC03','HC03_VC04','HC03_VC05','HC03_VC28','HC03_VC88','HC03_VC94','HC03_VC95']]\r\n pop_data = pop_data.rename(columns={'HC01_VC03':'total_pop','HC03_VC04':'pct_male','HC03_VC05':'pct_female','HC03_VC28':'pct_old','HC03_VC88':'pct_latino','HC03_VC94':'pct_white','HC03_VC95':'pct_black'})\r\n pop_data.to_csv(main_folder + '/data/covariates/demographics.csv', index = False, mode = 'w')\r\n\r\n ed_data = pd.read_csv(data_path + \"/education.csv\", header = 0)\r\n ed_data = ed_data.rename(columns={'HD01_VD01':'total_pop','HD01_VD02':'pct_no_hs','HD01_VD03':'pct_some_hs','HD01_VD04':'pct_hs','HD01_VD05':'pct_some_college','HD01_VD06':'pct_associates','HD01_VD07':'pct_bachelors','HD01_VD08':'pct_graduate'})\r\n ed_data['pct_no_hs'] = ed_data['pct_no_hs']/ed_data['total_pop']*100\r\n ed_data['pct_some_hs'] = ed_data['pct_some_hs']/ed_data['total_pop']*100\r\n ed_data['pct_hs'] = ed_data['pct_hs']/ed_data['total_pop']*100\r\n ed_data['pct_some_college'] = ed_data['pct_some_college']/ed_data['total_pop']*100\r\n ed_data['pct_associates'] = ed_data['pct_associates']/ed_data['total_pop']*100\r\n ed_data['pct_bachelors'] = ed_data['pct_bachelors']/ed_data['total_pop']*100\r\n ed_data['pct_graduate'] = ed_data['pct_graduate']/ed_data['total_pop']*100\r\n ed_data['pct_hs_less'] = ed_data['pct_hs'] + ed_data['pct_some_hs'] + ed_data['pct_no_hs']\r\n ed_data = ed_data[['County_FIPS','pct_no_hs','pct_some_hs','pct_hs','pct_some_college','pct_associates','pct_bachelors','pct_graduate','pct_hs_less']]\r\n ed_data.to_csv(main_folder + '/data/covariates/education.csv', index = False, mode = 'w')\r\n" } ]
14
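Note: every scraper in the election-2016 repo above uses the same two-part request etiquette — a browser-like User-Agent header plus a randomized pause between page fetches. A minimal, self-contained Python 3 sketch of that pattern (urllib.request stands in for the repo's Python 2-only urllib2; the URL argument is a placeholder):

    import time
    import urllib.request
    import numpy as np

    def fetch(url):
        # browser-like User-Agent, as the repo's scrapers send
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        html = urllib.request.urlopen(req).read()
        # randomized 5-17.6 s pause, mirroring time.sleep(np.random.uniform(5,17.6))
        time.sleep(np.random.uniform(5, 17.6))
        return html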
suthrink/webdev
https://github.com/suthrink/webdev
0574894eb63818367b592d79c94bd3702a7d69e6
c96341884a3bab0534f24992a01310059a9f08d7
90c2ef4405206c93ddcfcd948405d65636dff709
refs/heads/master
2023-03-31T14:34:35.221763
2021-03-14T16:39:49
2021-03-14T16:39:49
347,645,913
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6530612111091614, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 23.75, "blob_id": "6c92e3ac00dfc97d2147826e9aba207297b6629b", "content_id": "76d9f49d4c3a9dc0c6a14091274c8f8784939034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 98, "license_type": "no_license", "max_line_length": 72, "num_lines": 4, "path": "/Desktop/boot/includes/Common.php", "repo_name": "suthrink/webdev", "src_encoding": "UTF-8", "text": "<?php\n$con = mysqli_connect(\"localhost\",\"my_user\",\"my_password\",\"my_db\");\nsession_start();\n?>" }, { "alpha_fraction": 0.7831325531005859, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 40.5, "blob_id": "e4d6227e5dfc74ff16ebc7482f860a9c1557d9fb", "content_id": "984320bab9839499b5e2028bc112ce488af2a6a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "suthrink/webdev", "src_encoding": "UTF-8", "text": "# Lifestore e-commerce website\nOpen index.html in the boot file to see the output.\n" }, { "alpha_fraction": 0.5208333134651184, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 22, "blob_id": "c834351a7ee5e481c2e2ae3316dc9fcd3af216ee", "content_id": "26b610261ff0ad3ca8204a79ee5f2886a0a536f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/Desktop/boot/includes/find the number.py", "repo_name": "suthrink/webdev", "src_encoding": "UTF-8", "text": "import array\narr=array.array('i',[10,15,3,7])\n \n" }, { "alpha_fraction": 0.6113602519035339, "alphanum_fraction": 0.6278026700019836, "avg_line_length": 34.26315689086914, "blob_id": "8731939beff6af5621561bac82e7e6c74229629a", "content_id": "b5d36384daff3f7fac92d53a30b27e7d0723535f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 669, "license_type": "no_license", "max_line_length": 89, "num_lines": 19, "path": "/Desktop/boot/includes/login_submit.php", "repo_name": "suthrink/webdev", "src_encoding": "UTF-8", "text": "<?php\nrequire \"common.php\";\n$email = $_POST['email'];\n$regex_email = \"/^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,3})$/\";\nif (!preg_match($regex_email, $email)) {\n echo \"Incorrect email\";\n}\n$password = $_POST['password'];\necho md5($password);\n}\n$email = mysqli_real_escape_string($con, $email);\n$password = mysqli_real_escape_string($con, $password);\n$select_query = \"SELECT id, email, FROM user\";\n$select_query_result = mysqli_query($con, $select_query) or die(mysqli_error($con));\n$total_rows_fetched = mysqli_num_rows($select_query_result);\necho $total_rows_fetched;\n$_SESSION['email'] = $email;\n$_SESSION['id'] = mysqli_insert_id($con);\n?>" } ]
4
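Note: login_submit.php in the repo above builds its SQL by string concatenation plus mysqli_real_escape_string. For comparison, the same user lookup with parameterized placeholders — sketched in Python/sqlite3 because the repo has no Python database code; the table and column names come from the PHP, everything else is illustrative:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE user (id INTEGER PRIMARY KEY, email TEXT, password TEXT)')
    conn.execute('INSERT INTO user (email, password) VALUES (?, ?)',
                 ('user@example.com', 'hashed-password'))
    # the ? placeholders keep user input out of the SQL text entirely
    row = conn.execute('SELECT id, email FROM user WHERE email = ?',
                       ('user@example.com',)).fetchone()
    print(row)  # (1, 'user@example.com')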
juancho2908/Prueba
https://github.com/juancho2908/Prueba
56ad41837ec8e07fab61203a3befbc3d858b1f3a
bf881a5b1693ad2167993ee0e346a261eaba774e
66afd65d7d0a74ee35087cbc4c407fd4e4b7cfc6
refs/heads/master
2023-02-09T19:43:22.481501
2021-01-09T18:13:55
2021-01-09T18:13:55
325,640,135
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6912928819656372, "alphanum_fraction": 0.7018469572067261, "avg_line_length": 28.230770111083984, "blob_id": "2f8cd819ed815d31541c132cab826a7873ba5ba1", "content_id": "bfd82505ab3707b8ed794f4fa61a379ec2021d8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/webmessages.py", "repo_name": "juancho2908/Prueba", "src_encoding": "UTF-8", "text": "import pyautogui as pg\nimport time\nimport webbrowser as web\nphone_no=input(\"Ingrese el numero: \")\nparsedMessage=\" \"\nweb.open('https://web.whatsapp.com/send?phone='+phone_no+'&text='+parsedMessage)\ntime.sleep(8)\nfor i in range(10):\n pg.write('esto es una prueba')\n pg.press('enter')\n print('Mensaje #'+str(i+1)+' enviado')\npass\npg.alert('Bomba de mensajes finalizada')" }, { "alpha_fraction": 0.612800657749176, "alphanum_fraction": 0.6177698373794556, "avg_line_length": 24.414140701293945, "blob_id": "5bfcbf0c6cc906cc6a14f0140d93facc5ffff156", "content_id": "e953eb85d6e9a654da60f56411559b8482c34b09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5031, "license_type": "no_license", "max_line_length": 73, "num_lines": 198, "path": "/calc.js", "repo_name": "juancho2908/Prueba", "src_encoding": "UTF-8", "text": "var operandoa;\nvar operandob;\nvar operacion;\n\nfunction init() {\n //variables\n var resultado = document.getElementById('resultado');\n var reset = document.getElementById('reset');\n var suma = document.getElementById('suma');\n var resta = document.getElementById('resta');\n var multiplicacion = document.getElementById('multiplicacion');\n var division = document.getElementById('division');\n var igual = document.getElementById('igual');\n var uno = document.getElementById('uno');\n var dos = document.getElementById('dos');\n var tres = document.getElementById('tres');\n var cuatro = document.getElementById('cuatro');\n var cinco = document.getElementById('cinco');\n var seis = document.getElementById('seis');\n var siete = document.getElementById('siete');\n var ocho = document.getElementById('ocho');\n var nueve = document.getElementById('nueve');\n var cero = document.getElementById('cero');\n var fact = document.getElementById('fact');\n var pot = document.getElementById('pot');\n var sin = document.getElementById('sin');\n var cos = document.getElementById('cos');\n var tan = document.getElementById('tan');\n}\n//Eventos de click\nuno.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"1\";\n}\ndos.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"2\";\n}\ntres.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"3\";\n}\ncuatro.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"4\";\n}\ncinco.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"5\";\n}\nseis.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"6\";\n}\nsiete.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"7\";\n}\nocho.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"8\";\n}\nnueve.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"9\";\n}\ncero.onclick = function (e) {\n resultado.textContent = resultado.textContent + \"0\";\n}\nreset.onclick = function (e) {\n resetear();\n}\nsuma.onclick = function 
(e) {\n operandoa = resultado.textContent;\n operacion = \"+\";\n limpiar();\n}\nresta.onclick = function (e) {\n operandoa = resultado.textContent;\n operacion = \"-\";\n limpiar();\n}\nmultiplicacion.onclick = function (e) {\n operandoa = resultado.textContent;\n operacion = \"*\";\n limpiar();\n}\ndivision.onclick = function (e) {\n operandoa = resultado.textContent;\n operacion = \"/\";\n limpiar();\n}\nigual.onclick = function (e) {\n operandob = resultado.textContent;\n resolver();\n}\n\npot.onclick = function (e) {\n operandoa = resultado.textContent;\n operacion = \"p\"\n limpiar();\n}\nfact.onclick = function(e) {\n operandoa = resultado.textContent;\n operacion = \"f\";\n resolver();\n}\n\nsin.onclick = function(e){\n operandoa = resultado.textContent;\n operacion = 'sin';\n resolver();\n}\ncos.onclick = function(e){\n operandoa = resultado.textContent;\n operacion = 'cos';\n resolver();\n}\n\ntan.onclick = function(e){\n operandoa = resultado.textContent;\n operacion = 'tan';\n resolver();\n}\nbin.onclick = function(e){\n operandoa = resultado.textContent;\n operacion = 'bin';\n resolver();\n\n}\nfunction limpiar() {\n resultado.textContent = \"\";\n}\n\nfunction resetear() {\n resultado.textContent = \"\";\n operandoa = 0;\n operandob = 0;\n operacion = \"\";\n}\n\nfunction resolver() {\n var res = 0;\n switch (operacion) {\n case \"+\":\n res = parseFloat(operandoa) + parseFloat(operandob);\n break;\n case \"-\":\n res = parseFloat(operandoa) - parseFloat(operandob);\n break;\n case \"*\":\n res = parseFloat(operandoa) * parseFloat(operandob);\n break;\n case \"/\":\n res = parseFloat(operandoa) / parseFloat(operandob);\n break;\n case \"p\":\n res = Math.pow(parseFloat(operandoa), parseFloat(operandob));\n break;\n case \"f\":\n res = factorial(parseInt(operandoa));\n break;\n case \"sin\":\n res = seno(parseFloat(operandoa));\n break;\n case \"cos\":\n res = coseno(parseFloat(operandoa));\n break;\n case \"tan\":\n res = tangente(parseFloat(operandoa));\n break;\n case \"bin\":\n res = binario(parseInt(operandoa));\n\n }\n resetear();\n resultado.textContent = res;\n}\nfunction factorial (n) {\n\tvar total = 1; \n\tfor (i=1; i<=n; i++) {\n\t\ttotal = total * i; \n\t}\n\treturn total; \n}\n\nfunction seno (n){\n var n = (Math.PI/180)*n;\n var total = Math.sin(n);\n return total;\n}\n\nfunction coseno (n){\n var n = (Math.PI/180)*n;\n var total = Math.cos(n);\n return total;\n}\nfunction tangente (n){\n var n = (Math.PI/180)*n;\n var total = Math.tan(n);\n return total;\n}\n\nfunction binario (n){\n var total = (n.toString(2));\n return total; \n}" } ]
2
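Note: calc.js above converts degrees to radians with (Math.PI/180)*n before every trig call. The equivalent conversion in Python, for reference (the function name is illustrative):

    import math

    def sin_deg(deg):
        # degrees -> radians, then sine; mirrors calc.js's (Math.PI/180)*n
        return math.sin(math.radians(deg))

    assert abs(sin_deg(30) - 0.5) < 1e-12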
yanchen00/py-crawer
https://github.com/yanchen00/py-crawer
10da40f7fb5663ba58f37e35a3e27abee3c790de
5472f8d05e6df9e5a997cbb0cb6e37292c748316
e3b56f2cb4fd24d048303f29e672c87448a996c4
refs/heads/master
2023-02-11T19:55:20.293904
2020-07-23T02:19:54
2020-07-23T02:19:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5752122402191162, "alphanum_fraction": 0.5830220580101013, "avg_line_length": 27.009523391723633, "blob_id": "2458207b3aacf0ef02a4ecbad75793e9a6b549bd", "content_id": "3a7b92c864a1dbafe34a8bd88abc0255f801c3f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2953, "license_type": "no_license", "max_line_length": 114, "num_lines": 105, "path": "/台中市公共自行車系統資料_測試工作排程修正.py", "repo_name": "yanchen00/py-crawer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[55]:\n\n\nimport json\nfrom selenium import webdriver\ndriver = webdriver.Chrome()\napi_Availability = \"https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Taichung?$format=JSON\"\n\ndriver.get(api_Availability)\n\n\n# In[56]:\n\n\navailability = driver.page_source\nprint(availability)\n\n\n# In[57]:\n\n\nfrom bs4 import BeautifulSoup\nsoup_Availability = BeautifulSoup(driver.page_source, 'lxml')\navailability_text = soup_Availability.find('pre').text\nprint(availability_text)\n\n\n# In[58]:\n\n\nimport datetime\nnow = datetime.datetime.now() #現在時間\n\ndate_time = now.strftime(\"%Y%m%d%H%M%S\")\nprint(date_time)\n\n\n# In[59]:\n\n\nwith open('availability_data_{}.json'.format(int(date_time)), 'w') as f:\n json.dump(availability_text, f)\n\nwith open('availability_data_{}.json'.format(int(date_time)), 'r') as f:\n availability_text = json.load(f)\n\n\n# In[60]:\n\n\navailability_data = json.loads(availability_text)\nprint(availability_data)\n\n\n# In[61]:\n\n\nimport csv\n\nwith open('Taichung_Bike_Availibility_{}.csv'.format(int(date_time)), 'w', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n filewriter.writerow(['StationUID', \n 'ServiceAvailable', \n 'AvailableRentBikes', \n 'AvailableReturnBikes', \n 'SrcUpdateTime'])\n \nwith open('Taichung_Bike_Availibility_{}.csv'.format(int(date_time)), 'a', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n for i in range(len(availability_data)):\n filewriter.writerow([availability_data[i]['StationUID'], \n availability_data[i]['ServiceAvailable'], \n availability_data[i]['AvailableRentBikes'], \n availability_data[i]['AvailableReturnBikes'], \n availability_data[i]['SrcUpdateTime']])\n###################start\nwith open('Taichung_Bike_Availibility_All.csv', 'w', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n filewriter.writerow(['StationUID', \n 'ServiceAvailable', \n 'AvailableRentBikes', \n 'AvailableReturnBikes', \n 'SrcUpdateTime'])\n \nwith open('Taichung_Bike_Availibility_All.csv', 'a', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n for i in range(len(availability_data)):\n filewriter.writerow([availability_data[i]['StationUID'], \n availability_data[i]['ServiceAvailable'], \n availability_data[i]['AvailableRentBikes'], \n availability_data[i]['AvailableReturnBikes'], \n availability_data[i]['SrcUpdateTime']])\n####################end\n\n\n# In[62]:\n\n\ndriver.close()\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.5857937335968018, "alphanum_fraction": 0.6046212911605835, "avg_line_length": 38.50847625732422, "blob_id": "0c7ceb80be47f2e57fb18aa5cfb1b9013c183412", "content_id": "e8dd61aa64ad677d7eebf6538c1c8f9d7e2a1a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2435, "license_type": "no_license", "max_line_length": 137, "num_lines": 59, "path": 
"/weather-loop.py", "repo_name": "yanchen00/py-crawer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[55]:\nimport time\nimport json\n#for x in range (2):\nfrom selenium import webdriver\ndriver = webdriver.Chrome('D:/00課程/大二下學期/競賽/py-crawer/chromedriver.exe')\napi_weatherinfo = \"https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/O-A0002-001?Authorization=CWB-E9CFB481-091B-449F-9A6A-F0EA269D8DB6&downloadType=WEB&format=JSON\"\ndriver.get(api_weatherinfo)\ntime.sleep(10)\n# In[56]:\ndriver.close()\n#availability = driver.page_source\n#print(availability)\n# In[57]:\n'''\nfrom bs4 import BeautifulSoup\nsoup_Availability = BeautifulSoup(driver.page_source, 'lxml')\navailability_text = soup_Availability.find('pre').text\nprint(availability_text)\n'''\n# In[58]:\n'''\n import datetime\n now = datetime.datetime.now() #現在時間\n date_time = now.strftime(\"%Y%m%d%H%M%S\")\n print(date_time)\n # In[59]:\n with open('D:/00課程/大二下學期/競賽/bike-ava-output/availability_data_{}.json'.format(int(date_time)), 'w') as f:\n json.dump(availability_text, f)\n with open('D:/00課程/大二下學期/競賽/bike-ava-output/availability_data_{}.json'.format(int(date_time)), 'r') as f:\n availability_text = json.load(f)\n # In[60]:\n availability_data = json.loads(availability_text)\n print(availability_data)\n # In[61]:\n import csv\n with open('D:/00課程/大二下學期/競賽/bike-ava-output/weather_info_{}.csv'.format(int(date_time)), 'w', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n filewriter.writerow(['StationUID', \n 'ServiceAvailable', \n 'AvailableRentBikes', \n 'AvailableReturnBikes', \n 'SrcUpdateTime'])\n with open('D:/00課程/大二下學期/競賽/bike-ava-output/weather_info_{}.csv'.format(int(date_time)), 'a', encoding = 'utf-8', newline = '') as f:\n filewriter = csv.writer(f, delimiter = ',')\n for i in range(len(availability_data)):\n filewriter.writerow([availability_data[i]['StationUID'], \n availability_data[i]['ServiceAvailable'], \n availability_data[i]['AvailableRentBikes'], \n availability_data[i]['AvailableReturnBikes'], \n availability_data[i]['SrcUpdateTime']])\n # In[62]:\n driver.close()\n time.sleep(140)\n \n'''\n\n\n\n\n\n\n" } ]
2
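Note: both scripts in the py-crawer repo above launch a Chrome instance just to read a JSON endpoint. A stdlib-only sketch of the same fetch (the URL and field names appear verbatim in the repo; whether the endpoint still answers unauthenticated requests is an assumption):

    import json
    import urllib.request

    url = 'https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Taichung?$format=JSON'
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    # the same fields the repo writes to its CSVs
    print(data[0]['StationUID'], data[0]['AvailableRentBikes'])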
marcopetry/cch-seguranca
https://github.com/marcopetry/cch-seguranca
0a0af5ac9220977128e764f4cd00288179083b9f
623167ffbcb3d3caeff71a5d86e8ff767d2b78ae
3922c1fe8e0f50d915d19bc1bddc009da05c7b49
refs/heads/master
2023-06-19T02:01:27.306853
2021-07-20T01:25:18
2021-07-20T01:25:18
387,636,978
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6334068179130554, "alphanum_fraction": 0.65104740858078, "avg_line_length": 21.873416900634766, "blob_id": "1098b5426549f4ccd6905c7214f3c44d4f9b4008", "content_id": "cc3af3510bef8fbc7a27201a7b54739a35218448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1815, "license_type": "no_license", "max_line_length": 108, "num_lines": 79, "path": "/vernam/vernam.py", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "from sys import argv, stdin, stdout\nfrom unidecode import unidecode\nimport os\nfrom random import seed\nfrom random import randint\nfrom operator import xor\n\n\n# pip install unidecode\nALL_CHARACTERS = []\nOPERATION = 'ENCRIPTER' if argv[1] == '-c' else 'DECRIPTER'\n\nRELATIVE_PATH = os.getcwd()\n\nFILE_KEY = argv[2]\n\n# seed random number generator\nseed(1)\n# generate some integers\n\nfor i in range(0, 125): # percorre tabela ascii\n if (i > 47 and i < 58) or (i > 64 and i < 91) or (i > 96 and i < 123): # separa números e letras num array\n ALL_CHARACTERS.append(chr(i))\n\ndef getLinesFile(): \n return stdin.read()\n\ndef writeFile(text):\n stdout.write(text)\n\n\ndef generatorKey(text):\n keyEncript = ''\n for _ in range(len(text)):\n value = randint(0, 255)\n keyEncript += chr(value)\n keyEncript = ''.join(bin(ord(x))[2:].zfill(8) for x in keyEncript)\n FILE_TO_WRITE = open(RELATIVE_PATH + '/' + FILE_KEY, 'w')\n FILE_TO_WRITE.write(keyEncript)\n return keyEncript\n\ndef getKeyEncoded(text):\n keyEncript = ''\n for _ in text:\n value = randint(0, 255)\n keyEncript += chr(value) \n FILE_TO_WRITE = open(RELATIVE_PATH + '/' + FILE_KEY, 'w')\n FILE_TO_WRITE.write(keyEncript)\n return keyEncript\n\n\ndef encript():\n text = unidecode(getLinesFile())\n KEY_ENCRIPT = generatorKey(text)\n text_code = ''\n\n for i, c in enumerate(text, start=0):\t\n aux_xor = xor(ord(text[i]), ord(KEY_ENCRIPT[i]))\t\n text_code += chr(aux_xor)\t\n\n \n writeFile(text_code)\n \n\ndef decript():\n text = unidecode(getLinesFile())\n KEY_ENCRIPT = open(RELATIVE_PATH + '/' + FILE_KEY).read()\n \n text_code = ''\n for i, c in enumerate(text, start=0):\t\n aux_xor = xor(ord(text[i]), ord(KEY_ENCRIPT[i]))\t\n text_code += chr(aux_xor)\t\n\n writeFile(text_code)\n \nif OPERATION == 'DECRIPTER':\n decript()\nelse:\n encript()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6639999747276306, "alphanum_fraction": 0.6872727274894714, "avg_line_length": 25.980392456054688, "blob_id": "815005f4e03d502c1fac5357aaba603fa77c8d87", "content_id": "83640b0a53046bec35ce035fe00c8a6444ee486a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1377, "license_type": "no_license", "max_line_length": 178, "num_lines": 51, "path": "/decifra/decifra.py", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "from sys import stdin, stdout\nimport os\nimport collections\n\nALL_CHARACTERS = []\n\nRELATIVE_PATH = os.getcwd()\n\nCHARACTER_MORE_USED = ['a', 'e', 'o', 's', 'r']\n\nfor i in range(0, 125): # percorre tabela ascii\n if (i > 47 and i < 58) or (i > 64 and i < 91) or (i > 96 and i < 123): # separa números e letras num array\n ALL_CHARACTERS.append(chr(i))\n\nVALUE_CHARACTER_MORE_USED = []\n\nfor char in CHARACTER_MORE_USED:\n VALUE_CHARACTER_MORE_USED.append(ALL_CHARACTERS.index(char))\n \n\ndef getLinesFile(): \n return stdin.read()\n\ndef writeFile(text):\n stdout.write(text)\n\ntext_raw = getLinesFile()\n\ntext_cripted = text_raw.replace(' ', '', 
999999999999).lower()\n\n\nfrequencies = collections.Counter(text_cripted)\nfrequencies = sorted(frequencies.items(), key=lambda x: x[1], reverse=True)\nrepeated = {}\ndistanceForFrequences = []\nfor i, (key, value) in enumerate(frequencies[:5]):\n index = ALL_CHARACTERS.index(key)\n distanceForFrequences.append(index - VALUE_CHARACTER_MORE_USED[i])\n repeated[key] = value\n\ndef decript(): \n text_code = '' \n for letter in text_raw:\n try:\n index = ALL_CHARACTERS.index(letter)\n text_code += ALL_CHARACTERS[index - distanceForFrequences[0]] # vai funcionar se o texto tiver mais a, se tiver mais E funciona com o segundo e até o quinto mais utilizado.\n except:\n text_code += letter\n writeFile(text_code) \n\ndecript()" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 9.666666984558105, "blob_id": "10cb5cccd81bc3cfecaa54de38b0dec18f8d0723", "content_id": "c35ca78fe7f214e4c3503cba6841bf7465cbdbf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 31, "license_type": "no_license", "max_line_length": 18, "num_lines": 3, "path": "/decifra/decifra.sh", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 decifra.py" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 12, "blob_id": "5a4551fcf7a099f9d94724b95a5a65cdc97f0cda", "content_id": "0c0baab54fae4d224a51efc93278cd06b5ce6cf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 38, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/cesar/cesar.sh", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 cesar.py $1 $2 $3" }, { "alpha_fraction": 0.622089147567749, "alphanum_fraction": 0.6360611915588379, "avg_line_length": 24.3389835357666, "blob_id": "94822b85a1d7b6c727ca33c3879e5ccdbd7a9ebf", "content_id": "d6958f5f639921a088bdd18027cf2dec15d8e007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 108, "num_lines": 59, "path": "/cesar/cesar.py", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "\n#!/usr/bin/env python\n\n# This Python file uses the following encoding: utf-8\n\nfrom sys import argv, stdin, stdout\nfrom unidecode import unidecode\n\n# pip install unidecode\n\nALL_CHARACTERS = []\nOPERATION = 'ENCRIPTER' if argv[1] == '-c' else 'DECRIPTER'\nKEY_ENCRIPT = int(argv[3]) # quantas casas vai andar cada letra\n\nfor i in range(0, 125): # percorre tabela ascii\n if (i > 47 and i < 58) or (i > 64 and i < 91) or (i > 96 and i < 123): # separa números e letras num array\n ALL_CHARACTERS.append(chr(i)) \n\ndef getLinesFile(): \n return stdin.readlines()\n\ndef writeFile(text):\n stdout.write(text)\n\ndef encript():\n lines = getLinesFile()\n text_code = ''\n for textaux in lines:\n text = unidecode(textaux)\n for letter in text:\n try:\n index = ALL_CHARACTERS.index(letter)\n if index + KEY_ENCRIPT > len(ALL_CHARACTERS) - 1:\n text_code += ALL_CHARACTERS[index + KEY_ENCRIPT - len(ALL_CHARACTERS)]\n else:\n text_code += ALL_CHARACTERS[index + KEY_ENCRIPT]\n except:\n text_code += letter\n writeFile(text_code)\n return text_code\n\ndef decript():\n lines = getLinesFile()\n text_code = ''\n for textaux in lines:\n text = unidecode(textaux)\n for letter in text:\n try:\n index = 
ALL_CHARACTERS.index(letter)\n text_code += ALL_CHARACTERS[index - KEY_ENCRIPT] \n except:\n text_code += letter\n writeFile(text_code)\n return text_code\n\n\nif OPERATION == 'DECRIPTER':\n decript()\nelse:\n encript()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 12.333333015441895, "blob_id": "5278ff5316bce9d183ec49a223297b6f07bf1c2f", "content_id": "32fe4a1bae3010babc7c28cbd2d2d9c88cb93bbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 39, "license_type": "no_license", "max_line_length": 26, "num_lines": 3, "path": "/vernam/vernam.sh", "repo_name": "marcopetry/cch-seguranca", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 vernam.py $1 $2 $3" } ]
6
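Note: vernam.py above stores its pad as a string of '0'/'1' characters and XORs those characters against the plaintext; that round-trips, but it is not a byte-level one-time pad. A minimal correct byte-level sketch (not the repo's implementation):

    import os

    def vernam(data: bytes, key: bytes) -> bytes:
        # XOR is its own inverse, so one function both encrypts and decrypts
        assert len(key) >= len(data), 'pad must be at least as long as the message'
        return bytes(b ^ k for b, k in zip(data, key))

    key = os.urandom(14)                  # truly random, used once, message-length
    ct = vernam(b'attack at dawn', key)
    assert vernam(ct, key) == b'attack at dawn'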
yangyonggit/AFirstCourseinProbability9thHOMEWORK
https://github.com/yangyonggit/AFirstCourseinProbability9thHOMEWORK
140e12355c1a93ed101cde123616fb0a64b98301
a7912aa1dfef1655aeb607746a1fdd3b3500b6e4
23746886e0cfb977ed81edda2a5fbdd5d4ada4f6
refs/heads/master
2021-05-01T23:46:15.343472
2017-01-09T06:20:36
2017-01-09T06:20:36
77,893,284
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3902077078819275, "alphanum_fraction": 0.49060335755348206, "avg_line_length": 24.607595443725586, "blob_id": "0b87675ecff2c7b764de7db15ad3f997e91d1531", "content_id": "7361df1a9e62fe461253bc35976ca42964af3ba1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2024, "license_type": "no_license", "max_line_length": 142, "num_lines": 79, "path": "/chaper1.md", "repo_name": "yangyonggit/AFirstCourseinProbability9thHOMEWORK", "src_encoding": "UTF-8", "text": "# PROBLEMS\n## 1.29\na) $\\dfrac{10!}{3!4!2!}=12600$ \nb) $\\begin{pmatrix}3\\\\1\\end{pmatrix}$ $\\begin{pmatrix}3\\\\1\\end{pmatrix}$ \n$\\begin{pmatrix}3\\\\2\\end{pmatrix}$ 2! 7! / (3!4!2!) = 945 \n\nb) $\\dfrac{{3\\choose 1}{3\\choose 1}{3\\choose 2} 2!7!}{3!4!2!}=945$\n\n## 1.30\n${2!}{2!}\\Big({\\sum\\limits_{i=0}^6 {6\\choose i} {(i+1)!}{(7-i)!}} + {\\sum\\limits_{i=1}^6 {i!}{(8-i)!}}\\Big) = 564480$\n\n## 1.31*\nb) $H(n,r) = \\begin{cases} 1 & \\text{r = 1} \\\\n-1\\choose 1 & \\text{r = 2} \\\\\\sum\\limits_{i=1}^{n-2} {H(n-i)} & \\text{otherwise} \\end{cases}$ \n H(8,4) = 35\n\na) $\\sum\\limits_{i=1}^{4} {4\\choose i}{\\text{H(8,i)}} = 165$\n\n## 1.32* \n$H(n,r) = \\begin{cases} 1 & \\text{r = 1} \\\\n-1\\choose 1 & \\text{r = 2} \\\\\\sum\\limits_{i=1}^{n-2} {H(n-i)} & \\text{otherwise} \\end{cases}$ \n$zh(n,r) = \\sum\\limits_{i=1}^{r} {r\\choose i}{\\text{H(n,i)}}$ \na) $\\text{zh(8,6)} = 1287$ \nb) $\\text{zh(5,6)zh(3,6)} = 14112$\n\n## 1.33*\n$H(n,r) = \\begin{cases} 1 & \\text{r = 1} \\\\n-1\\choose 1 & \\text{r = 2} \\\\\\sum\\limits_{i=1}^{n-2} {H(n-i)} & \\text{otherwise} \\end{cases}$ \n$zh(n,r) = \\sum\\limits_{i=1}^{r} {r\\choose i}{\\text{H(n,i)}}$ \na) $\\text{zh(9,4) = 220}$ \nb) $\\text{zh(9,4) + zh(13,3) + zh(12,3) + zh(11,3) * 2 = 572}$\n\n## 1.34*\n\n\n\n\n# THEORETICAL EXERCISES\n## 3\n${n\\choose r} r!$\n\n## 4\n$\\dfrac{n!}{(n-r)!r!} = {n\\choose r}$\n\n## 5\n$n\\choose k$\n\n## 7\n${n-1\\choose r-1 } + {n-1\\choose r} = {\\dfrac{(n-1)!}{(n-r)!(r-1)!}} + {\\dfrac{(n-1)!}{(n-r-1)!r!}}$\n$= {\\dfrac{(n-1)!r}{(n-r)!r!}} + {\\dfrac{(n-1)!(n-r)}{(n-r)!r!}}$ \n$= {\\dfrac{(n-1)!n}{(n-r)!r!}}$ \n$= {\\dfrac{n!}{(n-r)!r!}} = {n\\choose r}$\n\n## 8 \n${n+m\\choose r}$\n\n\n\n# SELF-TEST PROBLEMS AND EXERCISES\n## 1.8 \n$10 * 9^{(n-1)}$ \n${10\\choose i} {9^{n-i}}$\n\n## 1.9\na) $3n\\choose 3$ \nb) ${3\\choose 1}{n\\choose 3}$ \nc) ${3\\choose 1}{n\\choose 2}{2\\choose 1}{n-2\\choose 1}$ \nd) ${n\\choose 1}^3$ \ne) a = b + c + d\n\n\n## 1.10\na) $9^5$ \n\n## 1.13\n${8\\choose 4}{9\\choose 4}{10\\choose 4}$\n\n## 1.14\nacording to Fermat’s com-binatorial identity $k\\choose n$\n\n## 1.15\n$\\sum\\limits_{k=0}^n {n\\choose k}k!$" }, { "alpha_fraction": 0.8103448152542114, "alphanum_fraction": 0.8103448152542114, "avg_line_length": 57, "blob_id": "f509f029ce251cd054339488e70fafce31194b7f", "content_id": "cdb6c88b71dd399fce5b9be46231621f88dc0f47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/README.md", "repo_name": "yangyonggit/AFirstCourseinProbability9thHOMEWORK", "src_encoding": "UTF-8", "text": "# homework of A First Course in Probability,Ninth Edition\n" }, { "alpha_fraction": 0.3781222999095917, "alphanum_fraction": 0.48191213607788086, "avg_line_length": 14.789115905761719, "blob_id": "68e4a3859d4ec33fe4ba018f674c843fdefa386c", "content_id": "d90ff084a0e46f6beda53b5a5bd248cba1d94391", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2322, "license_type": "no_license", "max_line_length": 80, "num_lines": 147, "path": "/calc.py", "repo_name": "yangyonggit/AFirstCourseinProbability9thHOMEWORK", "src_encoding": "UTF-8", "text": "import math\n\ndef C(n,r):\n return math.factorial(n) / (math.factorial(n-r) * math.factorial(r))\n\ndef P(n):\n return math.factorial(n)\n\n\n# 1.30\nsum = 0\nfor i in range(0,7): \n sum += C(6,i) * math.factorial(i + 1) * math.factorial(7-i) \n\nfor i in range(1,7):\n sum += C(6,i) * math.factorial(i) * math.factorial(8-i)\n\nprint( 4 * sum)\nprint('---------------------------')\n\n# 1.31*\n\ndef h(n,r):\n if (r == 1):\n return 1\n elif(r == 2):\n return C(n-1,1)\n else:\n s = 0\n for i in range(1,n-1):\n s += h(n-i,r-1)\n return s\n\nprint(h(8,4))\nsum = 0 \nfor i in range(1,5):\n sum += C(4,i) * h(8,i)\nprint(sum)\nprint('---------------------------')\n\n# 1.32*\ndef zh(n,r):\n s = 0\n for i in range(1,r+1):\n s += C(r,i) * h(n,i)\n return s\n\n\nprint zh(8,6)\n\nprint zh(5,6) * zh(3,6)\n\nprint('---------------------------')\n\n# 1.33*\nprint('1.33*')\nprint(zh(9,4))\nprint(zh(9,4) + zh(13,3) + zh(12,3) + 2 * zh(11,3))\n\nprint('---------------------------')\n\n\n# THEORETICAL EXERCISES\n# 6\ndef func6(n,r):\n if (r==1):\n return n\n else:\n s = 1\n for i in range(1,n-r+1):\n s += func6(n-i,r-1)\n return s\n \n\nprint(func6(6,3))\n\n\n\n# SELF-TEST PROBLEMS AND EXERCISES\nprint(\"SELF-TEST PROBLEMS AND EXERCISES\")\n# 1\nprint(\"PROBLEMS 1\")\nprint(2*P(5))\nprint(P(6) / 2)\nprint(P(6) / 6)\nprint(P(6) / 4)\nprint(P(4) * 2 * 2)\nprint(P(6) - P(5))\n\n\n#2\nprint(\"PROBLEMS 2\")\nprint(P(4)*P(3)*P(3)*P(3))\n\n#3\nprint(\"PROBLEMS 3\")\nprint(10 * 9 * 8)\nprint(8*7*6 + C(2,1)*C(3,1)*8*7)\nprint(8*7*6 + 8*P(3))\nprint(C(3,1) * 9 * 8)\nprint(9*8 + 9*8*7)\n\n#4\nprint(\"PROBLEMS 4\")\nprint(C(10,7))\nprint(C(5,3)*C(5,4) + C(5,4)*C(5,3) + C(5,5)*C(5,2))\n\n#5\nprint(\"PROBLEMS 5\")\nprint(P(7) / (P(3) * P(2) * P(2)))\n\n#6\nprint(\"PROBLEMS 6\")\nprint(pow(26,3) * pow(10,4))\n\n#10\nprint(\"PROBLEMS 10\")\nprint(pow(9,5))\nprint(9*8*7*6*5 + C(9,1)* C(5,2) * 8 * 7 *6 + C(9,2) * 7 * P(5) / (P(2) * P(2)))\n\n#11\nprint(\"PROBLEMS 11\")\ns = 0\nfor i in range(0,7): \n s+=C(10,i)*C(10 - i,6 - i)\nprint(s)\nprint(C(10,3) * C(7,3))\n\n#print(C(10,6) * pow(2,6))\n#print(P(10) / (P(3) * P(3) * P(4)))\n\n#12\nprint(\"PROBLEMS 12\")\nprint(C(7,2) * C(8,3))\n\n#16\nprint(\"PROBLEMS 16\")\ns = 0\nfor i in range(1,5):\n s+=C(5,i) + C(15,4-i)\nprint(s)\n\n\n#18\nprint(\"PROBLEMS 18\")\ns = 3+6+10+7*2*2+6*2*3\nprint(s)\n\n" } ]
3
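Note: calc.py in the homework repo above defines C(n,r) from factorials. On Python 3.8+ it can be cross-checked against math.comb; the value tested here, C(10,7) = 120, is one the homework itself prints:

    import math

    def C(n, r):
        # binomial coefficient via factorials, as in the repo (// keeps it integral)
        return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))

    assert C(10, 7) == math.comb(10, 7) == 120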
mb-bts/live-astronomical-data
https://github.com/mb-bts/live-astronomical-data
7aeb25b9e7228d4dde5842bae7418ca70867122e
83d0ad5ae57e883b7ca8c887331a5aab07f531c0
eb86f84cb7e7df8753f8c838df80c980788d87a2
refs/heads/master
2022-12-22T19:37:58.012757
2020-09-12T21:18:42
2020-09-12T21:18:42
295,024,327
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5723743438720703, "alphanum_fraction": 0.6192072033882141, "avg_line_length": 44.520912170410156, "blob_id": "2685abf0d94857f33271a2aae873f0d65654fbf9", "content_id": "69fb0863492935a2e994bd211ec099d11b9200d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12235, "license_type": "no_license", "max_line_length": 242, "num_lines": 263, "path": "/LiveAstroData.py", "repo_name": "mb-bts/live-astronomical-data", "src_encoding": "UTF-8", "text": "# Thank you to Paul Schlyter and his resource (http://www.stjarnhimlen.se/comp/ppcomp.html) for making this possible\r\n\r\nimport math\r\nimport numpy\r\nimport geocoder\r\nfrom datetime import datetime\r\nfrom typing import Tuple\r\n\r\nKM_IN_1_AU = 149597870.7\r\nMILES_IN_1_KM = 0.62137119223733\r\nEARTH_RADIUS_KM = 6371\r\nSECONDS_IN_DAY = 86400\r\nSPEED_OF_LIGHT_KMH = 1079252848.8\r\n\r\n# Finds Moon's position and returns it as (distance_earth_radii, right_ascension, declination)\r\ndef moon_position() -> Tuple[float, float, float]:\r\n day = datetime_since_2000_start()\r\n\r\n longitude_ascending_node = (125.1228 - 0.0529538083 * day) % 360\r\n ecliptic_inclination = 5.1454\r\n argument_of_periapsis = (318.0634 + 0.1643573223 * day) % 360\r\n mean_distance_sun = 60.2666\r\n eccentricity = 0.054900\r\n mean_anomaly = (115.3654 + 13.0649929509 * day) % 360\r\n\r\n sun_argument_of_periapsis = 282.9404 + (4.70935*10**-5) * day\r\n sun_mean_anomaly = (356.0470 + 0.9856002585 * day) % 360\r\n sun_mean_longitude = (sun_argument_of_periapsis + sun_mean_anomaly) % 360\r\n mean_longitude = (longitude_ascending_node + argument_of_periapsis + mean_anomaly) % 360\r\n mean_elongation = (mean_longitude - sun_mean_longitude) % 360\r\n latitude_argument = (mean_longitude - longitude_ascending_node) % 360\r\n\r\n longitude_perturbations = (\r\n -1.274 * deg_sin(mean_anomaly - mean_elongation*2),\r\n 0.658 * deg_sin(mean_elongation*2),\r\n -0.186 * deg_sin(sun_mean_anomaly),\r\n -0.059 * deg_sin(mean_anomaly*2 - mean_elongation*2),\r\n -0.057 * deg_sin(mean_anomaly - mean_elongation*2 + sun_mean_anomaly),\r\n 0.053 * deg_sin(mean_anomaly + mean_elongation*2),\r\n 0.046 * deg_sin(mean_elongation*2 - sun_mean_anomaly),\r\n 0.041 * deg_sin(mean_anomaly - sun_mean_anomaly),\r\n -0.035 * deg_sin(mean_elongation),\r\n -0.031 * deg_sin(mean_anomaly + sun_mean_anomaly),\r\n -0.015 * deg_sin(latitude_argument*2 - mean_elongation*2),\r\n 0.011 * deg_sin(mean_anomaly - mean_elongation*4)\r\n )\r\n\r\n latitude_perturbations = (\r\n -0.173 * deg_sin(latitude_argument - mean_elongation*2),\r\n -0.055 * deg_sin(mean_anomaly - latitude_argument - mean_elongation*2),\r\n -0.046 * deg_sin(mean_anomaly + latitude_argument - mean_elongation*2),\r\n 0.033 * deg_sin(latitude_argument + mean_elongation*2),\r\n 0.017 * deg_sin(mean_anomaly*2 + latitude_argument)\r\n )\r\n\r\n distance_perturbations = (\r\n -0.58 * deg_cos(mean_anomaly - mean_elongation*2),\r\n -0.46 * deg_cos(mean_elongation*2)\r\n )\r\n\r\n total_longitude_perturbations = sum(longitude_perturbations)\r\n total_latitude_perturbations = sum(latitude_perturbations)\r\n total_distance_perturbations = sum(distance_perturbations)\r\n\r\n eccentric_anomaly_first_approx = mean_anomaly + ((180/math.pi) * eccentricity * deg_sin(mean_anomaly)) * (1 + eccentricity * deg_cos(mean_anomaly))\r\n eccentric_anomaly = eccentric_anomaly_first_approx - (eccentric_anomaly_first_approx - ((180/math.pi) * eccentricity * deg_sin(eccentric_anomaly_first_approx)) 
- mean_anomaly) / (1 - eccentricity * deg_cos(eccentric_anomaly_first_approx))\r\n\r\n x_lunar_orbit_plane = mean_distance_sun * (deg_cos(eccentric_anomaly) - eccentricity)\r\n y_lunar_orbit_plane = mean_distance_sun * math.sqrt(1 - eccentricity**2) * deg_sin(eccentric_anomaly)\r\n\r\n distance_earth_radii = math.sqrt(x_lunar_orbit_plane**2 + y_lunar_orbit_plane**2)\r\n true_anomaly = numpy.arctan2(y_lunar_orbit_plane, x_lunar_orbit_plane) * (180/math.pi) % 360\r\n\r\n x_ecliptic = distance_earth_radii * (deg_cos(longitude_ascending_node) * deg_cos(true_anomaly+argument_of_periapsis) - deg_sin(longitude_ascending_node) * deg_sin(true_anomaly+argument_of_periapsis) * deg_cos(ecliptic_inclination))\r\n y_ecliptic = distance_earth_radii * (deg_sin(longitude_ascending_node) * deg_cos(true_anomaly+argument_of_periapsis) + deg_cos(longitude_ascending_node) * deg_sin(true_anomaly+argument_of_periapsis) * deg_cos(ecliptic_inclination))\r\n z_ecliptic = distance_earth_radii * deg_sin(true_anomaly+argument_of_periapsis) * deg_sin(ecliptic_inclination)\r\n\r\n longitude = math.atan2(y_ecliptic, x_ecliptic) * (180/math.pi) % 360 + total_longitude_perturbations\r\n latitude = math.atan2(z_ecliptic, math.sqrt(x_ecliptic**2 + y_ecliptic**2)) * (180/math.pi) + total_latitude_perturbations\r\n distance_earth_radii = math.sqrt(x_ecliptic**2 + y_ecliptic**2 + z_ecliptic**2) + total_distance_perturbations\r\n\r\n ecliptic_obliquity = 23.4393 - (3.563*10**-7) * day\r\n\r\n x_ecliptic = deg_cos(longitude) * deg_cos(latitude)\r\n y_ecliptic = deg_sin(longitude) * deg_cos(latitude)\r\n z_ecliptic = deg_sin(latitude)\r\n\r\n x_equatorial = x_ecliptic\r\n y_equatorial = y_ecliptic * deg_cos(ecliptic_obliquity) - z_ecliptic * deg_sin(ecliptic_obliquity)\r\n z_equatorial = y_ecliptic * deg_sin(ecliptic_obliquity) + z_ecliptic * deg_cos(ecliptic_obliquity)\r\n\r\n right_ascension = math.atan2(y_equatorial, x_equatorial) * (180/math.pi) % 360\r\n declination = math.atan2(z_equatorial, math.sqrt(x_equatorial**2 + y_equatorial**2)) * (180/math.pi)\r\n\r\n return distance_earth_radii, right_ascension, declination\r\n\r\n# Finds Sun's position and returns it as (distance_AU, right_ascension, declination)\r\ndef sun_position() -> Tuple[float, float, float]:\r\n day = datetime_since_2000_start()\r\n\r\n argument_of_periapsis = 282.9404 + (4.70935*10**-5) * day\r\n eccentricity = 0.016709 - (1.151*10**-9) * day\r\n mean_anomaly = (356.0470 + 0.9856002585 * day) % 360\r\n\r\n ecliptic_obliquity = 23.4393 - (3.563*10**-7) * day\r\n\r\n eccentric_anomaly = mean_anomaly + (180/math.pi) * eccentricity * deg_sin(mean_anomaly) * (1 + eccentricity * deg_cos(mean_anomaly))\r\n\r\n x_ecliptic_plane = deg_cos(eccentric_anomaly) - eccentricity\r\n y_ecliptic_plane = deg_sin(eccentric_anomaly) * math.sqrt(1 - (eccentricity**2))\r\n\r\n distance_AU = math.sqrt(x_ecliptic_plane**2 + y_ecliptic_plane**2)\r\n true_anomaly = numpy.arctan2(y_ecliptic_plane, x_ecliptic_plane) * (180/math.pi)\r\n\r\n longitude = (true_anomaly + argument_of_periapsis) % 360\r\n\r\n rectangular_x = distance_AU * deg_cos(longitude)\r\n rectangular_y = distance_AU * deg_sin(longitude)\r\n\r\n x_equatorial = rectangular_x\r\n y_equatorial = rectangular_y * deg_cos(ecliptic_obliquity)\r\n z_equatorial = rectangular_y * deg_sin(ecliptic_obliquity)\r\n\r\n right_ascension = math.atan2(y_equatorial, x_equatorial) * (180/math.pi)\r\n declination = math.atan2(z_equatorial, math.sqrt(x_equatorial**2 + y_equatorial**2)) * (180/math.pi)\r\n\r\n return distance_AU, 
right_ascension, declination\r\n\r\n# Finds Sun's local azimuth and altitude and returns it as (azimuth, altitude)\r\ndef sun_azimuth_altitude() -> Tuple[float, float]:\r\n day = datetime_since_2000_start()\r\n\r\n user = geocoder.ip(\"me\")\r\n longitude = user.lng\r\n latitude = user.lat\r\n\r\n hours_into_day = (day - int(day)) * 24\r\n\r\n argument_of_periapsis = 282.9404 + (4.70935*10**-5) * day\r\n mean_anomaly = (356.0470 + 0.9856002585 * day) % 360\r\n mean_longitude = (argument_of_periapsis + mean_anomaly) % 360\r\n\r\n greenwich_mean_sidereal_time_midnight = ((mean_longitude + 180) % 360) / 15\r\n sidereal_time = greenwich_mean_sidereal_time_midnight + hours_into_day + (longitude / 15)\r\n\r\n distance_AU, right_ascension, declination = sun_position()\r\n\r\n hour_angle = (sidereal_time - (right_ascension / 15)) * 15\r\n\r\n x = deg_cos(hour_angle) * deg_cos(declination)\r\n y = deg_sin(hour_angle) * deg_cos(declination)\r\n z = deg_sin(declination)\r\n\r\n x_horizontal = x * deg_sin(latitude) - z * deg_cos(latitude)\r\n y_horizontal = y\r\n z_horizontal = x * deg_cos(latitude) + z * deg_sin(latitude)\r\n\r\n azimuth = math.atan2(y_horizontal, x_horizontal) * (180/math.pi) + 180\r\n altitude = math.atan2(z_horizontal, math.sqrt(x_horizontal**2 + y_horizontal**2)) * (180/math.pi)\r\n return azimuth, altitude\r\n\r\n# Calculates time since start of 2000 in days as a decimal number\r\ndef datetime_since_2000_start() -> float:\r\n start_of_2000 = datetime(2000, 1, 1)\r\n current_datetime = datetime.utcnow()\r\n\r\n datetime_passed = current_datetime - start_of_2000\r\n seconds_passed = datetime_passed.total_seconds()\r\n\r\n datetime_since_2000_start = (seconds_passed / SECONDS_IN_DAY) + 1\r\n return datetime_since_2000_start\r\n\r\n# Determines time in minutes and seconds for light to reach Earth from Moon/Sun\r\ndef time_light_reach_earth(distance_from_earth: float) -> str:\r\n time_reach_earth = distance_from_earth / SPEED_OF_LIGHT_KMH\r\n minutes = time_reach_earth * 60\r\n seconds = (minutes - int(minutes)) * 60\r\n\r\n formatted_time = str(int(minutes)) + \" minutes \" + str(round(seconds, 1)) + \" seconds\"\r\n return formatted_time\r\n\r\n# Converts angle in degrees to hours/degrees, minutes, and seconds\r\ndef format_angle(degrees: float, type: str) -> str:\r\n if type == \"hours\":\r\n angle = (degrees * 24) / 360\r\n unit_1 = \"h \"\r\n unit_2 = \"m \"\r\n unit_3 = \"s\"\r\n elif type == \"degrees\":\r\n angle = degrees\r\n unit_1 = u\"\\N{DEGREE SIGN} \"\r\n unit_2 = \"' \"\r\n unit_3 = \"\\\"\"\r\n first_category = int(angle)\r\n minutes = int((angle - first_category) * 60)\r\n seconds = round((angle - first_category - (minutes / 60)) * 3600)\r\n formatted_angle = str(first_category).zfill(2) + unit_1 + str(abs(minutes)).zfill(2) + unit_2 + str(abs(seconds)).zfill(2) + unit_3\r\n return formatted_angle\r\n\r\n# Formats and prints data on Moon/Sun\r\ndef print_data(body_name: str):\r\n line = \"_\" * 55\r\n\r\n print(line)\r\n print()\r\n\r\n if body_name == \"moon\":\r\n distance_earth_radii, right_ascension, declination = moon_position()\r\n distance_in_km = distance_earth_radii * EARTH_RADIUS_KM\r\n elif body_name == \"sun\":\r\n distance_AU, right_ascension, declination = sun_position()\r\n distance_in_km = distance_AU * KM_IN_1_AU\r\n\r\n distance_in_miles = distance_in_km * MILES_IN_1_KM\r\n hms_str = format_angle(right_ascension, \"hours\")\r\n dms_str = format_angle(declination, \"degrees\")\r\n\r\n print(\"Distance: \" + f\"{round(distance_in_km):,}\" + \" km (\" 
+ f\"{round(distance_in_miles):,}\" + \" mi)\")\r\n print(\"Time for light to reach Earth: \" + str(time_light_reach_earth(distance_in_km)))\r\n print(\"Right ascension: \" + hms_str + \" (\" + str(round(right_ascension, 4)) + \" degrees)\")\r\n print(\"Declination: \" + dms_str + \" (\" + str(round(declination, 4)) + \" degrees)\")\r\n\r\n if body_name == \"sun\":\r\n azimuth, altitude = sun_azimuth_altitude()\r\n print(\"Local azimuth: \" + str(round(azimuth, 1)) + \" degrees\")\r\n print(\"Local altitude: \" + str(round(altitude, 1)) + \" degrees\")\r\n\r\n print(line)\r\n print()\r\n\r\n# Takes sine of number in degrees\r\ndef deg_sin(num_in_degrees: float) -> float:\r\n return math.sin(math.radians(num_in_degrees))\r\n\r\n# Takes cosine of number in degrees\r\ndef deg_cos(num_in_degrees: float) -> float:\r\n return math.cos(math.radians(num_in_degrees))\r\n\r\n# Thanks to the following website for the ASCII art text generation http://patorjk.com/software/taag/#p=display&f=Graffiti&t=Type%20Something%20\r\nprint(r\"\"\"\r\n _ _ _ _ _ _____ _\r\n| | (_) /\\ | | (_) | | | __ \\ | |\r\n| | ___ _____ / \\ ___| |_ _ __ ___ _ __ ___ _ __ ___ _ ___ __ _| | | | | | __ _| |_ __ _\r\n| | | \\ \\ / / _ \\ / /\\ \\ / __| __| '__/ _ \\| '_ \\ / _ \\| '_ ` _ \\| |/ __/ _` | | | | | |/ _` | __/ _` |\r\n| |____| |\\ V / __/ / ____ \\\\__ \\ |_| | | (_) | | | | (_) | | | | | | | (_| (_| | | | |__| | (_| | || (_| |\r\n|______|_| \\_/ \\___| /_/ \\_\\___/\\__|_| \\___/|_| |_|\\___/|_| |_| |_|_|\\___\\__,_|_| |_____/ \\__,_|\\__\\__,_|\r\n\r\n\"\"\")\r\n\r\n# Controls user interaction with command line\r\nwhile True:\r\n print(\"Type one of the following:\")\r\n print(\" moon (shows current moon data)\")\r\n print(\" sun (shows current sun data)\")\r\n\r\n user_input = input()\r\n\r\n if user_input == \"moon\":\r\n print_data(\"moon\")\r\n elif user_input == \"sun\":\r\n print_data(\"sun\")\r\n else:\r\n continue\r\n" }, { "alpha_fraction": 0.6265356540679932, "alphanum_fraction": 0.7395577430725098, "avg_line_length": 35.90909194946289, "blob_id": "40269325fad648d6b64cb7234c526fbd5c7df346", "content_id": "72de30e55051b2f8a81f295825bc5e8c17ca603f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 816, "license_type": "no_license", "max_line_length": 108, "num_lines": 22, "path": "/README.md", "repo_name": "mb-bts/live-astronomical-data", "src_encoding": "UTF-8", "text": "# live-astronomical-data\nThank you to Paul Schlyter and his resource (http://www.stjarnhimlen.se/comp/ppcomp.html) for making this possible.\n\nThis script calculates and displays live data on the Moon and Sun.\n\nIf you run the script with your command line you will be asked to type either \"moon\" or \"sun\".\n\nYou would then receive something like the following respectively:\n\nDistance: 382,308 km (237,555 mi) \nTime for light to reach Earth: 0 minutes 1.3 seconds \nRight ascension: 07h 26m 27s (111.6142 degrees) \nDeclination: 24° 02' 16\" (24.0379 degrees) \n\nor\n\nDistance: 150,527,944 km (93,533,728 mi) \nTime for light to reach Earth: 8 minutes 22.1 seconds \nRight ascension: 11h 25m 17s (171.3218 degrees) \nDeclination: 03° 44' 32\" (3.7423 degrees) \nLocal azimuth: 208.3 degrees \nLocal altitude: 39.4 degrees \n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 44, "blob_id": "60c7a1adbb35f128ce078f351435e2bf50a11874", "content_id": "f859f2ba96f2cc88ac73fc68d2c64d8d2efd9d11", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 90, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/dependencies.md", "repo_name": "mb-bts/live-astronomical-data", "src_encoding": "UTF-8", "text": "- numpy (https://pypi.org/project/numpy/)\n- geocoder (https://pypi.org/project/geocoder/)\n" } ]
3
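The moon_position routine above seeds Kepler's equation with a first approximation and then applies a single Newton-style correction. A minimal standalone sketch of that step, reusing the same degree-based trig helpers the script defines; the loop, tolerance value, and the sample mean anomaly are illustrative additions, not taken from the file:

```python
import math

def deg_sin(deg):
    return math.sin(math.radians(deg))

def deg_cos(deg):
    return math.cos(math.radians(deg))

def eccentric_anomaly(mean_anomaly, eccentricity, tolerance=0.005):
    """Solve Kepler's equation E - e*sin(E) = M iteratively, all angles in degrees."""
    e_deg = math.degrees(eccentricity)  # eccentricity rescaled for degree-based terms
    # First approximation, as in the script
    anomaly = mean_anomaly + e_deg * deg_sin(mean_anomaly) * (1 + eccentricity * deg_cos(mean_anomaly))
    while True:
        # Newton-style correction; the script applies exactly one of these
        correction = (anomaly - e_deg * deg_sin(anomaly) - mean_anomaly) / \
                     (1 - eccentricity * deg_cos(anomaly))
        anomaly -= correction
        if abs(correction) < tolerance:
            return anomaly

# Moon's eccentricity from the script; the mean anomaly value here is arbitrary
print(eccentric_anomaly(115.3654, 0.054900))
```

Iterating until the correction falls below a tolerance (rather than stopping after one pass) costs almost nothing and is the usual way this equation is solved for higher eccentricities.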
dborgesr/bcbb
https://github.com/dborgesr/bcbb
bf8761701942e65c2e0757a536d215bb3dcdfaa8
976627ee26ecb4d3f7a4100ce4745f3e968232ad
3de95486ee6b8c452746ce7e88e4cb1caaa662b0
refs/heads/master
2019-12-02T13:51:23.770704
2013-01-28T16:29:52
2013-01-28T16:29:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7434154748916626, "alphanum_fraction": 0.7497875690460205, "avg_line_length": 36.66400146484375, "blob_id": "7b6c84dde3e7f4cb40c90a51c1cfe2d557fe1ebd", "content_id": "5aa97d518a55c7441fc3788c2f0e4aecebf74c79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4708, "license_type": "no_license", "max_line_length": 75, "num_lines": 125, "path": "/nextgen/docs/contents/parallel.rst", "repo_name": "dborgesr/bcbb", "src_encoding": "UTF-8", "text": "Parallel execution\n------------------\n\nThe pipeline runs in parallel in two different ways:\n\n- multiple cores -- Analyses will run in parallel using multiple cores\n on a single machine. This requires only the ``mulitprocessing``\n Python library, included by default with most Python installations.\n\n- parallel messaging -- This allows scaling beyond the cores\n available on a single machine, and requires multiple machines\n with a shared filesystem like standard cluster environments.\n Machine to machine communication occurs via messaging, using the\n `IPython parallel`_ framework.\n\nMultiple cores\n~~~~~~~~~~~~~~\nRunning using multiple cores only requires setting the ``-n``\ncommand line flag::\n\n bcbio_nextgen.py bcbio_system.yaml bcbio_sample.yaml -t local -n 12\n\nIPython parallel\n~~~~~~~~~~~~~~~~\n\n`IPython parallel`_ provides a distributed framework for performing\nparallel computation in standard cluster environments. The\nbcbio-nextgen setup script installs both IPython and `pyzmq`_, which\nprovides Python bindings for the `ZeroMQ`_ messaging library.\n\nWe also provide a setup script that correctly configures IPython for\ndifferent cluster environments. Run this once on new machines::\n\n bcbio_nextgen_setup.py -s lsf -q your_queue_name\n\nThe ``-s`` flag specifies a type of scheduler to use ``(lsf, sge)``.\nThe ``-q`` flag specifies the queue to submit jobs to. The\nsetup script will create IPython configurations for parallel and\nmulticore jobs.\n\nWhen setup, run an analysis specifying ipython for parallel execution::\n\n bcbio_nextgen.py bcbio_system.yaml bcbio_sample.yaml -t ipython -n 12\n\nCelery and RabbitMQ\n~~~~~~~~~~~~~~~~~~~\n\nWe still support celery and RabbitMQ messaging, but please try IPython\nwhen setting up a new cluster. The IPython approach is under active\ndevelopment and supports additional cluster and parallel approaches.\n\nTo enable parallel messaging:\n\n1. Configure RabbitMQ as described below. Ensure all processing machines\n can talk to the RabbitMQ server on port 5672. Update\n ``universe_wsgi.ini`` to contain the server details.\n\n2. Edit your ``post_process.yaml`` file to set parameters in the\n ``distributed`` section corresponding to your environment: this\n includes the type of cluster management and arguments to start jobs.\n\n3. Run ``bcbio_nextgen.py`` with parameters for a distributed cluster\n environment. It takes care of starting worker nodes, running the\n processing, and then cleaning up after jobs::\n\n bcbio_nextgen.py post_process.yaml flowcell_dir run_info.yaml\n -t messaging -n 20\n\nRabbitMQ configuration\n**********************\n\nRabbitMQ messaging manages communication between the sequencing machine\nand the analysis machine. This allows complete separation between all of\nthe machines. 
The RabbitMQ server can run anywhere; an easy solution is\nto install it on the Galaxy and analysis server::\n\n (yum or apt-get) install rabbitmq-server\n\nSetup rabbitmq for passing Galaxy and processing messages::\n\n rabbitmqctl add_user <username> <password>\n rabbitmqctl add_vhost bionextgen\n rabbitmqctl set_permissions -p bionextgen <username> \".*\" \".*\" \".*\"\n\nThen adjust the ``[galaxy_amqp]`` section of your ``universe_wsgi.ini``\nGalaxy configuration file. An example configuration is available in the\nconfig directory; you'll need to specifically change these three values::\n\n [galaxy_amqp]\n host = <host you installed the RabbitMQ server on>\n userid = <username>\n password = <password>\n\nssh keys\n********\n\nThe sequencing, analysis and storage machines transfer files using\nsecure copy. This requires that you can securely copy files between\nmachines without passwords, using `ssh public key`_ authentication.\nYou want to enable password-less ssh for the following machine\ncombinations:\n\n- Analysis server to ``illumina_finished_msg`` machine\n- Storage server to ``illumina_finished_msg`` machine\n\nSequencing machines\n*******************\n\nThe sequencer automation has been fully tested using Illumina GAII and\nHiSeq sequencing machines. The framework is general and supports other\nplatforms; we welcome feedback from researchers with different machines\nat their institutions.\n\nIllumina machines produce run directories that include the date, machine\nidentifier, and flowcell ID::\n\n 110216_HWI-EAS264_00035_FC638NPAAXX\n\nA shortened name, with just date and flowcell ID, is used to uniquely\nidentify each flowcell during processing.\n\n.. _ssh public key: http://macnugget.org/projects/publickeys/\n.. _IPython parallel: http://ipython.org/ipython-doc/dev/index.html\n.. _pyzmq: https://github.com/zeromq/pyzmq\n.. 
_ZeroMQ: http://www.zeromq.org/\n" }, { "alpha_fraction": 0.5854613780975342, "alphanum_fraction": 0.5876744985580444, "avg_line_length": 34.817073822021484, "blob_id": "38ef46b717a52664f72fd17ff0f0221c31a48768", "content_id": "e9258e37e4e598d531047829e8b3e0e256a3e8ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5874, "license_type": "no_license", "max_line_length": 87, "num_lines": 164, "path": "/nextgen/bcbio/distributed/ipython.py", "repo_name": "dborgesr/bcbb", "src_encoding": "UTF-8", "text": "\"\"\"Distributed execution using an IPython cluster.\n\nUses IPython parallel to setup a cluster and manage execution:\n\nhttp://ipython.org/ipython-doc/stable/parallel/index.html\n\nBorrowed from Rory Kirchner's Bipy cluster implementation:\n\nhttps://github.com/roryk/bipy/blob/master/bipy/cluster/__init__.py\n\"\"\"\nimport os\nimport copy\nimport time\nimport uuid\nimport subprocess\nimport contextlib\n\nfrom bcbio import utils\nfrom bcbio.log import setup_logging, logger\n\nfrom IPython.parallel import Client\n\ndef _start(workers_needed, profile, cluster_id, delay):\n \"\"\"Starts cluster from commandline.\n \"\"\"\n subprocess.check_call([\"ipcluster\", \"start\",\n \"--daemonize=True\",\n \"--delay=%s\" % delay,\n \"--log-level=%s\" % \"WARN\",\n #\"--cluster-id=%s\" % cluster_id,\n \"--n=%s\" % workers_needed,\n \"--profile=%s\" % profile])\n\ndef _stop(profile, cluster_id):\n subprocess.check_call([\"ipcluster\", \"stop\", \"--profile=%s\" % profile,\n #\"--cluster-id=%s\" % cluster_id\n ])\n\ndef _is_up(profile, cluster_id, n):\n try:\n #client = Client(profile=profile, cluster_id=cluster_id)\n client = Client(profile=profile)\n up = len(client.ids)\n except IOError, msg:\n return False\n else:\n return up >= n\n\[email protected]\ndef cluster_view(parallel, config):\n \"\"\"Provide a view on an ipython cluster for processing.\n\n parallel is a dictionary with:\n - profile: The name of the ipython profile to use\n - cores: The number of cores to start for processing.\n - queue_type: Optionally, the type of parallel queue\n to start. 
Defaults to a standard parallel queue, can\n also specify 'multicore' for a multiple core machine\n and 'io' for an I/O intensive queue.\n \"\"\"\n delay = 5\n max_delay = 300\n max_tries = 10\n profile = parallel[\"profile\"]\n if parallel.get(\"queue_type\", None):\n profile = \"%s_%s\" % (profile, parallel[\"queue_type\"])\n cluster_id = str(uuid.uuid1())\n num_tries = 0\n while 1:\n try:\n _start(parallel[\"cores\"], profile, cluster_id, delay)\n break\n except subprocess.CalledProcessError:\n if num_tries > max_tries:\n raise\n num_tries += 1\n time.sleep(delay)\n try:\n slept = 0\n target_cores = 1 if parallel.get(\"queue_type\", None) == \"multicore\" \\\n else parallel[\"cores\"]\n while not _is_up(profile, cluster_id, target_cores):\n time.sleep(delay)\n slept += delay\n if slept > max_delay:\n raise IOError(\"Cluster startup timed out.\")\n #client = Client(profile=profile, cluster_id=cluster_id)\n client = Client(profile=profile)\n # push config to all engines and force them to set up logging\n client[:]['config'] = config\n client[:].execute('from bcbio.log import setup_logging')\n client[:].execute('setup_logging(config)')\n client[:].execute('from bcbio.log import logger')\n yield client.load_balanced_view()\n finally:\n _stop(profile, cluster_id)\n\ndef dictadd(orig, k, v):\n \"\"\"Imitates immutability by adding a key/value to a new dictionary.\n Works around not being able to deepcopy view objects; can remove this\n once we create views on demand.\n \"\"\"\n view = orig.pop(\"view\", None)\n new = copy.deepcopy(orig)\n new[k] = v\n if view:\n orig[\"view\"] = view\n new[\"view\"] = view\n return new\n\ndef _get_queue_type(fn):\n if hasattr(fn, \"metadata\"):\n return fn.metadata.get(\"queue_type\", None)\n else:\n return None\n\ndef runner(parallel, fn_name, items, work_dir, config):\n \"\"\"Run a task on an ipython parallel cluster, allowing alternative queue types.\n\n This will spawn clusters for parallel and custom queue types like multicore\n and high I/O tasks on demand.\n\n A checkpoint directory keeps track of finished tasks, avoiding spinning up clusters\n for sections that have been previous processed.\n \"\"\"\n setup_logging(config)\n out = []\n checkpoint_dir = utils.safe_makedir(os.path.join(work_dir, \"checkpoints_ipython\"))\n checkpoint_file = os.path.join(checkpoint_dir, \"%s.done\" % fn_name)\n fn = getattr(__import__(\"{base}.ipythontasks\".format(base=parallel[\"module\"]),\n fromlist=[\"ipythontasks\"]),\n fn_name)\n queue_type = _get_queue_type(fn)\n if queue_type:\n parallel = dictadd(parallel, \"queue_type\", queue_type)\n # already finished, run locally on current machine to collect details\n if os.path.exists(checkpoint_file):\n logger.info(\"ipython: %s -- local; checkpoint passed\" % fn_name)\n for args in items:\n if args:\n data = fn(args)\n if data:\n out.extend(data)\n # Run on a multicore queue with available cores on the same machine\n elif queue_type == \"multicore\":\n logger.info(\"ipython: %s -- multicore\" % fn_name)\n with cluster_view(parallel, config) as view:\n for args in items:\n if args:\n data = view.apply_sync(fn, args)\n if data:\n out.extend(data)\n # Run on a standard parallel queue\n else:\n logger.info(\"ipython: %s -- parallel\" % fn_name)\n with cluster_view(parallel, config) as view:\n xs = [x for x in items if x is not None]\n if len(xs) > 0:\n for data in view.map_sync(fn, xs):\n if data:\n out.extend(data)\n with open(checkpoint_file, \"w\") as out_handle:\n out_handle.write(\"done\\n\")\n return out\n" }, { 
"alpha_fraction": 0.5917848348617554, "alphanum_fraction": 0.5930570960044861, "avg_line_length": 31.36470603942871, "blob_id": "256e05f5882c5e7143aa5a677576b0e830b8a51c", "content_id": "7d7b1c60f0a8539784c9366e7f5bcb583d1843e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5502, "license_type": "no_license", "max_line_length": 91, "num_lines": 170, "path": "/nextgen/scripts/bcbio_nextgen_setup.py", "repo_name": "dborgesr/bcbb", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"Automate setup of bcbio_nextgen pipeline for common configurations.\n\nHandles:\n Setup of ipython queues for parallel and multicore jobs.\n\n TODO: IO intensive jobs\n\"\"\"\nimport os\nimport sys\nimport shutil\nimport pipes\nimport argparse\n\nimport sh\n\nfrom IPython.parallel.apps import launcher\n\ndef main(args):\n print \"Setting up profiles for ipython: %s\" % args.scheduler\n setup_ipython(args.scheduler, _prep_queues(args.queues))\n\ndef setup_ipython(scheduler, queues):\n for ptype, queue in zip([\"\", \"multicore\"], queues):\n if ptype:\n profile = \"%s_%s\" % (scheduler, ptype)\n else:\n profile = scheduler\n sh.ipython(\"profile\", \"create\", reset=True, parallel=True, profile=profile)\n config_dir = _find_ipython_dir(profile)\n update_ipcontroller_config(config_dir)\n update_ipcluster_config(config_dir, scheduler, ptype, queue)\n\ndef _prep_queues(queues):\n if len(queues) == 1:\n return queues * 3\n else:\n assert len(queues) >= 2, queues\n return queues\n\ndef _find_ipython_dir(profile):\n \"\"\"Find ipython directory associated with the given profile.\n \"\"\"\n to_test = [os.path.join(os.environ[\"HOME\"], \".ipython\"),\n os.path.join(os.environ[\"HOME\"], \".config\", \"ipython\")]\n for basedir in to_test:\n profile_dir = os.path.join(basedir, \"profile_%s\" % profile)\n if os.path.exists(profile_dir) and os.path.isdir(profile_dir):\n return profile_dir\n raise ValueError(\"ipython configuration directory for %s not found\" % profile)\n\ndef _update_config_file(new_config, profile_dir, fname):\n config_file = os.path.join(profile_dir, fname) \n orig_config_file = config_file + \".orig\"\n shutil.move(config_file, orig_config_file)\n with open(orig_config_file) as in_handle:\n with open(config_file, \"w\") as out_handle:\n for line in in_handle:\n out_handle.write(line)\n if line.startswith(\"c = get_config\"):\n out_handle.write(new_config + \"\\n\")\n os.remove(orig_config_file)\n\n# ## ipcontroller\n\ndef _ipcontroller_config():\n return \"\\n\".join(\n [\"# Added by bcbio_nextgen\",\n \"c.HubFactory.ip = '*'\"])\n\ndef update_ipcontroller_config(profile_dir):\n _update_config_file(_ipcontroller_config(),\n profile_dir, \"ipcontroller_config.py\")\n\n# ## ipcluster\n\ndef get_ipcluster_config(scheduler, parallel_type, queue):\n batch_configs = {\"lsf\": lsf_batch_config,\n \"sge\": sge_batch_config}\n return \"\\n\".join(\n [\"# Added by bcbio_nextgen\",\n \"c.IPClusterStart.controller_launcher_class = '%s'\" % scheduler.upper(),\n \"c.IPClusterStart.engine_launcher_class = '%s'\" % scheduler.upper()] +\n batch_configs[scheduler](parallel_type, queue))\n\ndef update_ipcluster_config(profile_dir, scheduler, parallel_type, queue):\n _update_config_file(get_ipcluster_config(scheduler, parallel_type, queue),\n profile_dir, \"ipcluster_config.py\")\n\n# ## LSF/bsub\n\ndef _get_std_cmd(cmd):\n launcher_cmds = {\"ipengine\": launcher.ipengine_cmd_argv,\n \"ipcontroller\": launcher.ipcontroller_cmd_argv}\n return '%s 
--log-to-file --profile-dir=\"{profile_dir}\" --cluster-id=\"{cluster_id}\"' % \\\n (\" \".join(map(pipes.quote, launcher_cmds[cmd])))\n\nLSF_BATCHES = {\n\"\": \"\"\"#!/bin/sh\n#BSUB -q {queue}\n#BSUB -J %s[1-{n}]\n#BSUB -oo %s.bsub.%%J\n%s\n\"\"\",\n\"multicore\": \"\"\"#!/bin/sh\n#BSUB -q {queue}\n#BSUB -J %s\n#BSUB -oo %s.bsub.%%J\n#BSUB -n {n}\n#BSUB -R \"span[hosts=1]\"\n%s\n\"\"\"}\n\ndef lsf_batch_config(parallel_type, queue):\n out = []\n for ltype, launcher in [(\"ipengine\", \"LSFEngineSetLauncher\"),\n (\"ipcontroller\", \"LSFControllerLauncher\")]:\n batch_str = LSF_BATCHES[parallel_type] % (ltype, ltype, _get_std_cmd(ltype))\n final_str = 'c.%s.batch_template = \"\"\"%s\\n\"\"\"' % (launcher, batch_str.rstrip())\n out.extend(final_str.split(\"\\n\"))\n out.append(\"c.LSFLauncher.queue = '%s'\" % queue)\n return out\n \n# ## SGE/qsub\n\nSGE_BATCHES = {\n\"\" : \"\"\"#$ -V\n#$ -cwd\n#$ -b y\n#$ -j y\n#$ -S /bin/sh\n#$ -q {queue}\n#$ -N %s\n#$ -t 1-{n}\n%s\n\"\"\",\n\"multicore\": \"\"\"#$ -V\n#$ -cwd\n#$ -b y\n#$ -j y\n#$ -S /bin/sh\n#$ -q {queue}\n#$ -N %s\n#$ -pe threaded {n}\n%s\n\"\"\"}\n\ndef sge_batch_config(parallel_type, queue):\n out = []\n for ltype, launcher in [(\"ipengine\", \"SGEEngineSetLauncher\"),\n (\"ipcontroller\", \"SGEControllerLauncher\")]:\n batch_str = SGE_BATCHES[parallel_type] % (ltype, _get_std_cmd(ltype))\n final_str = 'c.%s.batch_template = \"\"\"%s\\n\"\"\"' % (launcher, batch_str.rstrip())\n out.extend(final_str.split(\"\\n\"))\n out.append(\"c.SGELauncher.queue = '%s'\" % queue)\n return out\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--scheduler\", type=lambda x: x.lower(), default=\"lsf\",\n help=\"Type of cluster scheduler: lsf or sge\")\n parser.add_argument(\"-q\", \"--queues\", nargs=\"+\", required=True,\n help=\"Queues to place jobs on. With a single queue \"\\\n \"will use that queue for all types. With multiple \"\\\n \"queues, assign the first to parallel, second to \"\\\n \"multicore jobs, and third to IO intensive jobs.\")\n if len(sys.argv) == 1:\n parser.print_help()\n else:\n main(parser.parse_args())\n" } ]
3
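The runner in ipython.py above dispatches work through a load-balanced view: map_sync for standard parallel queues and apply_sync for single multicore tasks. A minimal sketch of that dispatch pattern, assuming an ipcluster is already running under the named profile; the profile name and the square function are placeholders, and the legacy IPython.parallel import matches this repo's era (in current releases the same API lives in the ipyparallel package):

```python
from IPython.parallel import Client  # current releases: from ipyparallel import Client

def square(x):
    return x * x

client = Client(profile="lsf")      # assumes an ipcluster already running under this profile
view = client.load_balanced_view()  # the scheduler assigns each task to a free engine

# map_sync blocks until every engine returns -- the standard parallel-queue branch
print(view.map_sync(square, [1, 2, 3, 4]))   # [1, 4, 9, 16]

# apply_sync runs one call on one engine -- the multicore branch
print(view.apply_sync(square, 7))            # 49
```

The load-balanced view (rather than a direct view) is what lets tasks of uneven duration keep all engines busy, which suits a pipeline whose steps vary widely in runtime.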
vlev02/pachong
https://github.com/vlev02/pachong
85e235288140ddf39b012f4ff9d07197c9c34857
aa5b4864f5a6fbc5801b8752eeca54c03c08c5fa
06a787188e6330099f44159283d17e5b4d95f99e
refs/heads/master
2020-07-09T21:55:14.071436
2020-03-23T14:10:34
2020-03-23T14:10:34
204,092,186
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.586367130279541, "alphanum_fraction": 0.5979860424995422, "avg_line_length": 29.046510696411133, "blob_id": "f32222bf2eabfe0643c05f70e1a301ed1282ee5d", "content_id": "4bccf38021bb4f4abcfba1a1773c9e7fa13bbc5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1357, "license_type": "no_license", "max_line_length": 75, "num_lines": 43, "path": "/exam_douban.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 20 19:57:16 2019\n\"\"\"\nimport urllib\nfrom bs4 import BeautifulSoup\n\n\ndef get_content(url , data = None):\n with urllib.request.urlopen(url) as response:\n html = response.read().decode(\"utf-8\")\n return html\n\n\ndef get_data(html_text):\n infos = []\n bs = BeautifulSoup(html_text, \"html.parser\") # 创建BeautifulSoup对象\n body = bs.body # 获取body部分\n block = body.find('ul', {'class': 'chart-dashed-list'}) # 找到排行表格\n items = block.find_all('li') # 获取所有的li\n\n for item in items: # 对每个li标签中的内容进行遍历\n rank = item.find(\"strong\", {\"class\": \"fleft green-num-box\"}).string\n name = item.find(\"a\", {\"class\": \"fleft\"}).string\n score = item.find(\"span\", {\"class\": \"font-small color-red fleft\"}\n ).string\n cover_url = item.find(\"img\").attrs[\"src\"]\n infos.append([rank, name, score, cover_url])\n return infos\n\n\n\n\nif __name__ == '__main__':\n url ='https://book.douban.com/chart?subcat=F'\n html_text = get_content(url)\n result = get_data(html_text)\n \n for item in result:\n rank, name, score, cover_url = item\n fig_name = f'{rank}_{name}_{score}.jpg'\n imgres = urllib.request.urlopen(cover_url).read()\n with open(fig_name, \"wb\") as f: f.write(imgres)" }, { "alpha_fraction": 0.5403707027435303, "alphanum_fraction": 0.5673892498016357, "avg_line_length": 32.0947380065918, "blob_id": "3a1c5343d12bc6d35df3aaaedd1e78a55e100df0", "content_id": "f7d24db988aa7a00eca13c0f520e598030de3ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3325, "license_type": "no_license", "max_line_length": 148, "num_lines": 95, "path": "/exam_4K.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 13 10:37:49 2020\n\n@author: Sean\n\"\"\"\nimport os\nimport urllib\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nFIG_SAVE_DIR = r\"C:\\Users\\Sean\\Pictures\\Saved Pictures\" # 图片存储路径\nHOME_URL = r\"https://uhdpixel.com/\"\n#HOME_URL = r\"https://uhdpixel.com/wall/tag/batgirl-dc/\"\nREQ_HEADER = {'User-Agent': r\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36\"}\nFIG_HEADER = {\"referer\": \"https://uhdpixel.com/wall/mountains-forest-minimalist-minimalism-4k-y7113/\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36\"}\n\ndef get_content(url, header=None):\n # 增加请求头\n if header is not None:\n page_h = urllib.request.Request(url, headers=header)\n page = urllib.request.urlopen(page_h, timeout=5)\n html=page.read()\n return html\n\n # 基本请求\n with urllib.request.urlopen(url) as response:\n html = response.read()\n return html\n\n\ndef home_page_extract(html_text):\n \"\"\"提取主页的所有图片链接\"\"\"\n bs = BeautifulSoup(html_text.decode(\"utf-8\"), \"html.parser\") # 创建BeautifulSoup对象\n body = bs.body # 获取body部分\n blocks = body.find_all('div', {'class': 'thumb_con'}) # 获取所有的li\n 
urls = [block.find('a').attrs[\"href\"] for block in blocks]\n return urls\n\ndef jpg_url_extract(fig_page):\n \"\"\"提取图片页面的图片资源链接\"\"\"\n bs = BeautifulSoup(fig_page.decode(\"utf-8\"), \"html.parser\") # 创建BeautifulSoup对象\n body = bs.body # 获取body部分\n block = body.find('div', {'class': 'wp_dl'}) # 获取div\n fig_source = block.find('a').attrs[\"href\"]\n return fig_source\n \n \n \ndef fig_download(fig_urls, header=None):\n \"\"\"下载指定链接的图片\"\"\"\n for i, fig_url in enumerate(fig_urls):\n print(f\"fig[{len(fig_urls)}-{i+1}]\")\n try_num = 3\n while try_num:\n try:\n fig_page = get_content(fig_url, header=header)\n fig_source = jpg_url_extract(fig_page)\n break\n except:\n print(f\"jpg_url_extract failed!\")\n try_num -= 1\n \n fig_name = os.path.join(FIG_SAVE_DIR, f\"{fig_source.split('/')[-1]}\")\n if os.path.isfile(fig_name):\n print(f\"[{fig_name}] exist!\")\n continue\n \n print(f\"downloading [{fig_name}]\")\n try_num = 3\n while try_num:\n try:\n # key!!!\n html = requests.get(fig_source, headers=FIG_HEADER)\n with open(fig_name, 'wb') as f:\n f.write(html.content)\n # key!!!\n break\n except:\n print(f\"download failed!\")\n try_num -= 1\n print(\"success!\")\n \ndef run(tag_url):\n html_text = get_content(tag_url, REQ_HEADER)\n fig_urls = home_page_extract(html_text)\n fig_download(fig_urls, header=REQ_HEADER)\n \nif __name__ ==\"__main__\":\n html_text = get_content(HOME_URL, REQ_HEADER)\n fig_urls = home_page_extract(html_text)\n fig_download(fig_urls, header=REQ_HEADER)\n pass\n \n \n \n \n \n \n \n " }, { "alpha_fraction": 0.5542594790458679, "alphanum_fraction": 0.565924346446991, "avg_line_length": 28.42708396911621, "blob_id": "d64a69dd2b89f9807b1526186555584f89d99b5a", "content_id": "d13d09747e2bfa55f7a0b9548b79444f1a8d5a18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2861, "license_type": "no_license", "max_line_length": 79, "num_lines": 96, "path": "/exam_movie.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n豆瓣电影排行榜\n\nCreated on Wed Aug 21 08:52:17 2019\n\"\"\"\nimport os\nimport urllib\nfrom bs4 import BeautifulSoup\n\n\nfile_dir, _ = os.path.split(os.path.realpath(__file__))\nMOVIE_URL ='https://movie.douban.com/top250'\nCSV_NAME = 'doubanmovie_top250.csv'\nPOSTER_DIR = os.path.join(file_dir, 'movie_posters')\n\n\ndef proxy_init():\n \"\"\"代理配置\"\"\"\n proxy_support = urllib.request.ProxyHandler(\n {'http': 'http://user:[email protected]:8080',\n 'https': 'http://user:[email protected]:8080'})\n opener = urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n \n\ndef html_get(url, decode=\"utf-8\"):\n with urllib.request.urlopen(url) as response:\n html = response.read()\n if decode:\n html = html.decode(decode)\n return html\n \ndef html_parse(html_text):\n infos = []\n html_bs = BeautifulSoup(html_text, \"html.parser\") # 创建BeautifulSoup对象\n block = html_bs.find('ol', {\"class\": \"grid_view\"})\n items = block.find_all('div', {'class': 'item'})\n for item in items:\n rank = item.find('em').string\n poster_url = item.find('img').attrs['src']\n titles = [t.string for t in item.find_all('span', {'class': 'title'})]\n details = [i.strip() for i in item.find('p').strings]\n star = [i.strip() for i in item.find('div', {\"class\": 'star'}).strings]\n star = [i for i in star if i]\n try:\n quote = item.find('span', {\"class\": 'inq'}).string\n except:\n quote = ''\n info = [rank, poster_url, quote] + titles + details + star\n 
infos.append(info)\n \n next_ = html_bs.find('span', {'class': 'next'})\n if next_:\n try:\n next_ = next_.find('link').attrs['href']\n except:\n next_ = None\n return infos, next_\n \ndef main():\n # proxy_init()\n \n info_lst = []\n url = MOVIE_URL\n while url:\n html_text = html_get(url)\n infos, next_ = html_parse(html_text)\n info_lst.extend(infos)\n if isinstance(next_,str):\n url = MOVIE_URL + next_\n else:\n url = None\n \n to_scv(info_lst)\n\n\ndef to_scv(info_lst):\n file_dir, _ = os.path.split(os.path.realpath(__file__))\n with open(os.path.join(file_dir, CSV_NAME), 'w', encoding='utf-8') as f:\n for info in info_lst:\n print(','.join(info), file=f)\n\n\ndef poster_saver(info_lst):\n if not os.path.isdir(POSTER_DIR):\n os.mkdir(POSTER_DIR)\n for info in info_lst:\n rank, poster_url, quote, title = info[:4]\n fig_name = f'{rank}_{title}.jpg'\n imgres = urllib.request.urlopen(poster_url).read()\n with open(os.path.join(POSTER_DIR, fig_name), \"wb\") as f: \n f.write(imgres)\n\nif __name__ == '__main__':\n main()\n " }, { "alpha_fraction": 0.6054100394248962, "alphanum_fraction": 0.6290253400802612, "avg_line_length": 35.390625, "blob_id": "fbc9ff587975e466e83398ba40713021a916a991", "content_id": "913cd34542325be2a1cdb82ed16bf1523783ff0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2683, "license_type": "no_license", "max_line_length": 140, "num_lines": 64, "path": "/exam_weather.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# coding : UTF-8\nimport csv\nimport re\nimport urllib\nfrom bs4 import BeautifulSoup\n\nheaders = r\"\"\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\nAccept-Encoding: gzip, deflate\nAccept-Language: zh-CN,zh;q=0.9\nHost: www.weather.com.cn\nProxy-Connection: keep-alive\nUpgrade-Insecure-Requests: 1\nUser-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\"\"\"\n\ndef get_content(url , data = None):\n header = dict()\n header_rec = re.compile(\"^(.*): (.*)$\")\n for h in headers.split('\\n'):\n k, v = header_rec.search(h).groups()\n header[k] = v\n with urllib.request.urlopen(url) as response:\n html = response.read().decode(\"utf-8\")\n return html\n\ndef get_data(html_text):\n final = []\n bs = BeautifulSoup(html_text, \"html.parser\") # 创建BeautifulSoup对象\n body = bs.body # 获取body部分\n data = body.find('div', {'id': '7d'}) # 找到id为7d的div\n ul = data.find('ul') # 获取ul部分\n li = ul.find_all('li') # 获取所有的li\n\n for day in li: # 对每个li标签中的内容进行遍历\n temp = []\n date = day.find('h1').string # 找到日期\n temp.append(date) # 添加到temp中\n inf = day.find_all('p') # 找到li中的所有p标签\n temp.append(inf[0].string,) # 第一个p标签中的内容(天气状况)加到temp中\n if inf[1].find('span') is None:\n temperature_highest = None # 天气预报可能没有当天的最高气温(到了傍晚,就是这样),需要加个判断语句,来输出最低气温\n else:\n temperature_highest = inf[1].find('span').string # 找到最高温\n temperature_highest = temperature_highest.replace('℃', '') # 到了晚上网站会变,最高温度后面也有个℃\n temperature_lowest = inf[1].find('i').string # 找到最低温\n temperature_lowest = temperature_lowest.replace('℃', '') # 最低温度后面有个℃,去掉这个符号\n temp.append(temperature_highest) # 将最高温添加到temp中\n temp.append(temperature_lowest) #将最低温添加到temp中\n final.append(temp) #将temp加到final中\n\n return final\n\n\ndef write_data(data, name):\n file_name = name\n with open(file_name, 'a', errors='ignore', newline='') as f:\n f_csv = csv.writer(f)\n f_csv.writerows(data)\n\n\nif __name__ == 
'__main__':\n url ='http://www.weather.com.cn/weather/101190401.shtml'\n html = get_content(url)\n result = get_data(html)\n write_data(result, 'weather.csv')\n" }, { "alpha_fraction": 0.5594771504402161, "alphanum_fraction": 0.5947712659835815, "avg_line_length": 26.814815521240234, "blob_id": "87152c47166982b02577df21deb523fab73ad439", "content_id": "903a730d8a04c53c720318d13baed798b2556f05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 81, "num_lines": 27, "path": "/exam_4K_plus.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 21:45:31 2020\n\n@author: Sean\n\"\"\"\nimport exam_4K as e4k\nfrom bs4 import BeautifulSoup\n\n# homepage\nhtml_text = e4k.get_content(e4k.HOME_URL, e4k.REQ_HEADER)\n\n# extract sub tags\nbs = BeautifulSoup(html_text.decode(\"utf-8\"), \"html.parser\") # 创建BeautifulSoup对象\nbody = bs.body # 获取body部分\nblocks = body.find('div', {'class': 'home_tag'}).find_all('a') # 获取所有的tags\nurls = [(block.attrs[\"title\"], block.attrs[\"href\"]) for block in blocks]\nfor i_tag, (title, tag_url) in enumerate(urls):\n print(\"=\" * 30)\n print(f\"{tag_url:-^30}\")\n print(f\"{len(urls)}-{i_tag+1}\")\n for i_try in range(3):\n try:\n e4k.run(tag_url)\n break\n except:\n print(\"tag failed!\")\n \n\n" }, { "alpha_fraction": 0.6555386781692505, "alphanum_fraction": 0.658573567867279, "avg_line_length": 26.5, "blob_id": "a1b1c0245a66432f35f711f94d2e3ef6e9af77fd", "content_id": "007dd05845f2da900cab7294fca0be455dfbe139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/biying.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "\"\"\"being 壁纸下载\"\"\"\nimport re\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nHOME_URL = r\"https://www.prohui.com/bing\" # 主页\nFIG_DIR = r\"C:\\Users\\Sean\\Pictures\\Saved Pictures\" # 图片存储路径\n\n\nresponse = requests.get(HOME_URL)\nbf_body = BeautifulSoup(response.content, \"html.parser\") # .body\nfig_url = bf_body.find(\"span\", {\"class\": \"y\"}).find(\"a\").attrs[\"href\"]\n\nfig_html = requests.get(fig_url)\nfig_name = os.path.join(FIG_DIR, re.search(\"id=(.+jpg)&\", fig_url).groups()[0])\nif os.path.isfile(fig_name):\n print(f\"{fig_name} exist!\")\nelse:\n with open(fig_name, 'wb') as f_wb:\n f_wb.write(fig_html.content)\n\n print(f\"{fig_name} success!\")" }, { "alpha_fraction": 0.40337297320365906, "alphanum_fraction": 0.41636714339256287, "avg_line_length": 33.66345977783203, "blob_id": "d145685a7871ee139a30cb71319ad6fec93de4bd", "content_id": "fa794afa8bbeef149578f9078159b73cae9227d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3693, "license_type": "no_license", "max_line_length": 79, "num_lines": 104, "path": "/test_requests.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"我在菊厂学爬虫——requests\"\"\"\nimport requests\n\n\n# =============================================================================\nresponse = requests.get(\"https://www.baidu.com\")\nprint(type(response))\nprint(response.status_code)\nprint(type(response.text))\nprint(response.text)\nprint(response.cookies)\nprint(response.content)\nprint(response.content.decode(\"utf-8\"))\n\n\n# 
=============================================================================\n# 各种请求\n# =============================================================================\nrequests.post(\"http://httpbin.org/post\")\nrequests.put(\"http://httpbin.org/put\")\nrequests.delete(\"http://httpbin.org/delete\")\nrequests.head(\"http://httpbin.org/get\")\nrequests.options(\"http://httpbin.org/get\")\n\n\n# =============================================================================\n# params\n# =============================================================================\ndata = {\n \"name\":\"zhaofan\",\n \"age\":22\n}\nresponse = requests.get(\"http://httpbin.org/get\",params=data)\nprint(response.url)\nprint(response.text)\n\n\n# =============================================================================\n# json\n# =============================================================================\nimport json\nresponse = requests.get(\"http://httpbin.org/get\")\nprint(type(response.text))\nprint(response.json())\nprint(json.loads(response.text))\nprint(type(response.json()))\n\n\n# =============================================================================\n# 添加headers\n# =============================================================================\n# 在谷歌浏览器里输入chrome://version\nagent = r\"\"\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\"\" \\\n r\"\"\"(KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36\"\"\"\nheaders = {\"User-Agent\":agent}\nresponse =requests.get(\"https://www.zhihu.com\",headers=headers)\nprint(response.text)\n\n\n# =============================================================================\n# 基本POST请求\n# =============================================================================\ndata = {\"name\":\"zhaofan\", \"age\":23}\nresponse = requests.post(\"http://httpbin.org/post\",data=data)\nprint(response.text)\n\n\n# =============================================================================\n# response\n# =============================================================================\nresponse = requests.get(\"http://www.baidu.com\")\nprint(type(response.status_code),response.status_code)\nprint(type(response.headers),response.headers)\nprint(type(response.cookies),response.cookies)\nprint(type(response.url),response.url)\nprint(type(response.history),response.history)\n\n\n# =============================================================================\n# 获取cookie\n# =============================================================================\nresponse = requests.get(\"http://www.baidu.com\")\nprint(response.cookies)\nfor key,value in response.cookies.items():\n print(key+\"=\"+value)\n\n\n# =============================================================================\n# 会话维持\n# =============================================================================\nwith requests.Session() as s:\n s.get(\"http://httpbin.org/cookies/set/number/123456\")\n response = s.get(\"http://httpbin.org/cookies\")\n print(response.text)\n\n\n# =============================================================================\n# 证书验证\n# =============================================================================\nfrom requests.packages import urllib3\nurllib3.disable_warnings()\nresponse = requests.get(\"https://www.12306.cn\",verify=False)\nprint(response.status_code)\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5807049870491028, "alphanum_fraction": 0.5924551486968994, "avg_line_length": 25.950000762939453, "blob_id": "f3b59d5a7d279f3f6d35cdbdbaaacee6509e351e", 
"content_id": "f44b25ce87a107d58d07f1047664a3b6d526b5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 76, "num_lines": 60, "path": "/test_urllib.py", "repo_name": "vlev02/pachong", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"我在菊厂学爬虫——urllib\"\"\"\nimport urllib\n\n\ndef proxy_init():\n \"\"\"代理配置\"\"\"\n proxy_support = urllib.request.ProxyHandler(\n {'http': 'http://user:[email protected]:8080',\n 'https': 'http://user:[email protected]:8080'})\n opener = urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n\n\ndef request1():\n \"\"\"urlopen 示例\"\"\"\n response = urllib.request.urlopen(r\"https://www.python.org\")\n print(response.read().decode(\"utf-8\"))\n response.close()\n\n\ndef request2():\n \"\"\"status,reason,getheaders 示例\"\"\"\n with urllib.request.urlopen(r\"https://www.python.org\") as f:\n print('Status:', f.status, f.reason)\n for k, v in sorted(f.getheaders()):\n print('%s: %s' % (k, v))\n\n\ndef request3():\n \"\"\"参数data 示例\"\"\"\n data = bytes(urllib.parse.urlencode({'word': 'hello'}), encoding='utf8')\n with urllib.request.urlopen(r\"http://httpbin.org/post\", data=data) as f:\n print('Status:', f.status, f.reason)\n for k, v in sorted(f.getheaders()):\n print(f'{k}:\\n\\t{v:}\\n')\n print(f.read().decode(\"utf-8\"))\n\n\ndef request4():\n \"\"\"urllib.error 处理异常\"\"\"\n try:\n urllib.request.urlopen('http://cuiqingcai.com/index.htm')\n except urllib.error.URLError as e:\n print(e.reason, e.code, e.headers, sep='\\n')\n except urllib.error.HTTPError as e:\n print(e.reason, e.code, e.headers, sep='\\n')\n else:\n print(\"Request Successfully\")\n finally:\n pass\n\n\ndef main():\n proxy_init()\n request4()\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
8
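The eight scraper scripts above share one skeleton: request a page with a browser-like User-Agent, parse the HTML with BeautifulSoup, pull attributes from matched tags, and stream any binary asset to disk in "wb" mode. A condensed sketch of that skeleton; the header value, function names, and the generic img selector are illustrative rather than copied from any one script:

```python
import requests
from bs4 import BeautifulSoup

# Placeholder header; the scripts paste in a full browser User-Agent string
HEADERS = {"User-Agent": "Mozilla/5.0"}

def scrape_image_links(page_url):
    """Fetch a page and return the src of every <img> tag on it."""
    response = requests.get(page_url, headers=HEADERS, timeout=5)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    return [img["src"] for img in soup.find_all("img") if img.has_attr("src")]

def download(url, filename):
    """Stream a binary asset to disk, as the poster and wallpaper savers do."""
    content = requests.get(url, headers=HEADERS, timeout=5).content
    with open(filename, "wb") as handle:
        handle.write(content)
```

Checking response.raise_for_status and passing a timeout, as here, avoids the bare retry loops several of the scripts fall back on when a request silently hangs or fails.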
dsolitaire/GWC
https://github.com/dsolitaire/GWC
e90dc7e9f968c61e999e64a2fe61b174fb75bf42
9398fec95d79936feecccbb4f85cef2d73ab03a4
acdd3a7949e77ad55295849bc518d1d2ffe97be0
refs/heads/master
2020-12-30T19:58:12.677871
2015-07-22T23:57:35
2015-07-22T23:57:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6318562030792236, "alphanum_fraction": 0.649519681930542, "avg_line_length": 20.6644287109375, "blob_id": "ab2b5b9b8c268ef474f1f7d54e662cb7dd934792", "content_id": "e20909c11dfd5e1d5632432072cf913a357c121e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3227, "license_type": "no_license", "max_line_length": 84, "num_lines": 149, "path": "/Ass5.java", "repo_name": "dsolitaire/GWC", "src_encoding": "UTF-8", "text": "import java.io.*;\nimport java.util.*;\nimport java.lang.*;\nimport java.text.*;\nimport javax.swing.*;\nimport java.util.Random;\nimport java.util.Arrays;\n/*\nAlgorithm\n1. create a for loop that randomly assigns int a with intergers that fills the array\n2. add those intergers to total\n3. set count to number of values in array\n4 Pass the array through mean method and print result\n5 pass the array through median and print result\n6 pass the array through mode and print result\n*/\n\npublic class Ass5\n{\n public static void main(String[] args)\n {\n\n ArrayList<Integer> list = new ArrayList<Integer>();\n Random rand= new Random();\n for (int a = 0; a < 20; a++)\n {\n\t int next= rand.nextInt(100);\n\t list.add(next);\n\t}\n\n int count = list.size();\n\n\t\tdouble Mean = mean( list , count);\n\n\t\tdouble Median = median (list, count);\n\t\tSystem.out.println(\"Mean is equal to \" + Mean);\n\t\tSystem.out.println(\"Median is equal to \" + Median);\n\t\tdouble Mode = mode (list);\n\t\tSystem.out.println(\"Mode is equal to \" + Mode);\n\t\tdouble SD = standD (list, count);\n\t\tSystem.out.println(\"Standard Deviation is equal to \" + SD);\n}\n/* Parameter: find the mean\n\nAlgorithm:\n1. take total sum of integers in array and divide by count of array\n*/\n\n\tpublic static double mean(ArrayList <Integer> table, int count)\n\t{\n\tdouble total =0;\n\t for (int a = 0; a < 20; a++)\n {\n\n\t total += table.get(a);\n\t}\n\n\t double avg = total / count;\n return avg;\n }\n\n/* Parameter: find median\nAlgorithm\n1 sort the array\n\ta compare the value of 0 with n\n\tb if n is smaller replace integers and repeat until the second to last value\n2 if the array length is even take the sum of length / 2 and length -1 / 2 and /2\n3 if the array length is odd take length-1 /2\n*/\n\n\tpublic static double median (ArrayList <Integer> table, int count)\n\t{\n\n\t Collections.sort(table);\n\n for(int a =0; a < table.size(); a++)\n {\n\t System.out.println(table.get(a));\n }\n\t double median = 0;\n\t if(count % 2 == 0)\n\t {\n\t\t\tmedian= table.get(((count - 1) / 2) + (count / 2) /2);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tmedian= table.get((count - 1) /2);\n\t\t}\n return median;\n\t}\n/*\nParameter: find mode\nAlgorithm:\n1 pass Possmode through list and set current count to 0\n2 pass element through list if possible mode equals element increase count\n3 compare current count with mode count\n4 if the mode count equals current count\n5 mode is the possible mode\n*/\n public static double mode(ArrayList <Integer> list)\n\t{\n\t\tint mCount = 0;\n\t\tint m = 0;\n\t\tint cCount = 0;\n\t\tint cElement;\n\n\t\tfor (int PossMode : list)\n\t\t{\n\t\t\tcCount = 0;\n\n\n\t\t\tfor (int element : list)\n\t\t\t{\n\t\t\t\tif (PossMode == element)\n\t\t\t\t{\n\t\t\t\t\tcCount++;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (cCount > mCount)\n\t\t\t{\n\t\t\t\tmCount = cCount;\n\t\t\t\tm = PossMode;\n\t\t\t\t}\n\t\t}\n return m;\n\t}\n/*\nparameter: find standard deviation\nAlgorithm:\n1 find the mean and set int i to zero\n2 square (values i to 
count - mean)\n3 divide this value by the count\n4 find the square root of this value\n*/\n\n public static double standD( ArrayList <Integer> list, int count)\n {\n\t double avg = mean (list, count);\n\t double p = 0;\n \tfor ( double i= 0; i <20; i++)\n \t{\n\t\tp += Math.pow(i - avg, 2);\n }\n \tdouble w= Math.sqrt(p / count);\n\t\tdouble v= Math.sqrt(w);\n return v;\n\n}\n}" }, { "alpha_fraction": 0.6154618263244629, "alphanum_fraction": 0.6746987700462341, "avg_line_length": 67.62068939208984, "blob_id": "1acf9c95d50a0ed216a3a7d709ee98e4e4776ef9", "content_id": "a26869cd1b035512ea080f0ec03a7e32fad398d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1992, "license_type": "no_license", "max_line_length": 442, "num_lines": 29, "path": "/ExamGen.py", "repo_name": "dsolitaire/GWC", "src_encoding": "UTF-8", "text": "import random\nPositions=[\"Assistant Archivist\",\"Project Associate\",\"Aviation Inspector\",\"Chemical Plant Operator\",\n\"Photographic Reproduction Technician\",\"Regional Quality of Life Director\",\"Executive Assistant\",\n\"Memory Care Life Enrichment Coordinator\", \"Inside Sales Representative\",\"Certified Montessori Teacher\",\n\"Chief Executive Officer\" ,\"Nigerian Prince\",\"Litigation Attorney\"]\nItems=[\"$100, 000\",\"$200, 000\" ,\"$5,000,000\", \"40 barrels of gold\",\"600 lbs of ancient tea\",\n\"20 original Salvador Dali sketches\",\"50 pieces ornate China\",\"12 1700th Century Musket Gun sets\"]\nAdj=[\"acceptable\",\"up to snuff\",\"tip-top\",\"deluxe\",\"worthy\",\"first-class\",\"crack\"\n,\"gnarly\",\"terminally chill\"]\nTransfer=[\"transmit\",\"surrender\",\"budge\",\"launch\" ,\"forward\",\"dislocate\",\"give up\",\"transplant\",\"drive\",\"donate\",\"drop\",\"contaminate\",\"broadcast\",\"infect\"]\nAniMin=[\"Horses\",\"Mices\",\"Cows\",\"Goats\",\"Dolphins\",\n\"Gold\",\"Topaz\",\"Quartz\",\"Diamonds\",\"Ruby\",\"Sapphire\",\"Silver\",\"Xbox 1\"]\nname =[\"John Smith\",\"Todd Rogers\",\"Jeremy Meyers\",\"Rachael Leigh\",\"Amanda Hugnkiss\",\"Yesha Alfor\",\n\"Carly Tier\",\"Lisa Carver\",\"Jessica Croft\",\"Kelly Carter\",\"Riley Washington\",\n\"Aftabe Weils\",\"Lewa Relies\",\"Obi Trollston\",\"Mongo Nooberson\"]\nAmount=[\"$2,000\",\"$5,000\",\"$7,000\",\"$1,200\",\"$10,000\",\"$13,000\",\"$600\",\"$7,500\",\"$15,000\",\n\"$20,200\",\"$1,337\",\"over $9,000\"]\nPercent=[\"10\",\"2.3\",\"45\",\"13\",\"6.3\",\".03\",\"34.5\",\"15.6\",\"9.34\",\"13.37\",\"7.9\",\"23\",\"8.23\"]\n\npos= random.choice(Positions)\nits= random.choice(Items)\nad= random.choice(Adj)\ntran= random.choice(Transfer)\nani= random.choice(AniMin)\nna= random.choice(name)\namo= random.choice(Amount)\nper= random.choice(Percent)\n\nprint(\"Hello,\\n I am \",na,\" and I am a \",pos,\". I'm very pleased to be emailing you today. Recently I have came across \",its,\" and I unfortunately can not access this. You seem like a \",ad,\" person and I wants to make a deal with you. If you can \",tran,amo,\" to my Paypal account or sends I the equivalency in \",ani,\" me will gave you\",per,\"% of \",its,\". 
Thanks you for you're time or considerations.\\n \\n Sincerely, \\n \\n\",na,\"\\n\",pos)\n\n\n" }, { "alpha_fraction": 0.520603597164154, "alphanum_fraction": 0.5333720445632935, "avg_line_length": 21.08974266052246, "blob_id": "df6defd6f13407ec9657679e383a8e6e902a1e22", "content_id": "ae189b321efc8e0af54d996d532437e664ea5718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 82, "num_lines": 78, "path": "/Ass6.java", "repo_name": "dsolitaire/GWC", "src_encoding": "UTF-8", "text": "import java.io.*;\nimport java.util.*;\n\n\npublic class Ass6\n{\n\tpublic static void main(String[] args)\n\t{\n\t try(\n\t \t //Open files\n\t FileReader reader = new FileReader(\"Numbers.txt\");\n\t Scanner in = new Scanner(reader);\n\t FileWriter writer = new FileWriter(\"Numbers.out\");\n\t PrintWriter out = new PrintWriter(writer)\n\t\t )\n\t {\n ArrayList<Integer> list = new ArrayList<Integer>();\n double total = 0;\n int count = 0;\n while(in.hasNextLine())\n {\n\t\t\tString next = in.nextLine();\n\t\t\tStringTokenizer st = new StringTokenizer(next);\n\t\t\twhile(st.hasMoreTokens())\n\t\t\t{\n\t\t\t\tString s = st.nextToken();\n\t\t\t\ttry\n\t\t\t\t{\n\t\t\t\t\tint n = Integer.parseInt(s);\n\t\t\t\t\tlist.add(n);\n\t\t\t\t\ttotal += n;\n\t\t\t\t\tcount++;\n\t\t\t\t}\n\t\t\t\tcatch(NumberFormatException e)\n\t\t\t\t{ out.println(\"Error in data file: \" + s);}\n\t\t\t}\n\t\t}\n\t\tif(count == 0)\n\t\t out.println(\"No Valid Data\");\n\t\telse\n\t\t{\n\t\t\tdouble average = total/count;\n\t\t\tout.printf(\"\\n\\n%d items were read\\ntheir average is %7.2f\\n\", count, average);\n\n\t\t\tCollections.sort(list);\n\t\t double median = 0;\n\t\t\t\t if(count % 2 == 0)\n\t\t\t\t {\n\t\t\t\t\t\tmedian= list.get(((count - 1) / 2) + (count / 2) /2);\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmedian= list.get((count - 1) /2);\n\t\t }\n\t\t\tout.printf(\"The median of the items is %h\\n\", median);\n\n\t\t\t\tdouble p = 0;\n\t\t\t \tfor ( double i= 0; i <20; i++)\n\t\t\t \t{\n\t\t\t\t\tp += Math.pow(i - average, 2);\n\t\t\t }\n\t\t\t \tdouble w= Math.sqrt(p / count);\n\t\t double v= Math.sqrt(w);\n\t\t\tout.printf(\"The standard deviation of the items is %.3f\\n\", v);\n\t }\n\t }\n\t catch(IOException e)\n\t {\n\t \tSystem.out.println(\"Error opening the files.\" + e);\n\t \tSystem.exit(1);\n\t }\n\n\n\t}\n\n\n\n}\n" } ]
3
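Ass5.java's comments state the intended statistics, but two methods drift from them: the even-count median folds both middle indices into one expression instead of averaging the two middle values, and standD iterates over the index range 0..19 rather than the stored values, then applies the square root twice. A corrected reference for those formulas, written in Python to match the other sketches in this collection (porting it back to Java would be mechanical):

```python
import math

def mean(values):
    return sum(values) / len(values)

def median(values):
    ordered = sorted(values)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2  # average the two middle values
    return ordered[mid]

def std_dev(values):
    avg = mean(values)
    variance = sum((v - avg) ** 2 for v in values) / len(values)  # population variance
    return math.sqrt(variance)  # one square root, not two

print(median([3, 1, 4, 1, 5, 9]))                    # 3.5
print(round(std_dev([2, 4, 4, 4, 5, 5, 7, 9]), 2))   # 2.0
```

The same fix applies to Ass6.java, which reuses the flawed median expression when writing its report file.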
jessecaj/mummy-game-python
https://github.com/jessecaj/mummy-game-python
d07f81757d5349a5a1a592b1a70ee99e86140027
709d131447b33700d2ef8a54b7a64e7683fe4299
0e0d7b7beff863b44f9b73ef121ae96348fb2335
refs/heads/master
2020-08-03T02:11:11.561469
2019-09-29T02:53:09
2019-09-29T02:53:09
211,593,252
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5327659845352173, "alphanum_fraction": 0.5353191494941711, "avg_line_length": 31.339448928833008, "blob_id": "5819dbc76efc53b98301941a995450fe5f8cbde1", "content_id": "95401847cb2354c834661120aafde7da9cbb411c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3525, "license_type": "no_license", "max_line_length": 73, "num_lines": 109, "path": "/mummy_game.py", "repo_name": "jessecaj/mummy-game-python", "src_encoding": "UTF-8", "text": "import time\n\nimport random\n\ntreasure_list = ['scarab', 'staff']\n\n\ndef print_pause(message_to_print):\n print(message_to_print)\n time.sleep(1)\n\n\ndef intro():\n print_pause(\"You are an archaeologist.\")\n print_pause(\"You have been sent to a dig at the Egyptian pyramids.\")\n print_pause(\"You enter one of the pyramids.\")\n\n\ndef choose_door(items):\n print_pause(\"To your left is a glittering gold door \"\n \"encrusted with jewels. \"\n \"To your right is a simpler-looking wooden door.\")\n door = input(\"Press 1 to open the left door. \"\n \"Press 2 to open the right door.\")\n if door == \"1\":\n mummy_room(items)\n if door == \"2\":\n treasure_room(items)\n else:\n print_pause(\"Sorry, I don't understand.\")\n choose_door(items)\n\n\ndef treasure_room(items):\n treasure_item = random_choice(treasure_list)\n items.append(treasure_item)\n if \"scarab\" in items:\n print_pause(\"You already got the silver scarab \"\n \"from the treasure chest. \"\n \"There is nothing more to do here.\")\n choose_door(items)\n elif \"staff\" in items:\n print_pause(\"You already got the golden staff \"\n \"from the treasure chest. \"\n \"There is nothing more to do here.\")\n else:\n print_pause(\"In this room is an ornate silver treasure chest. \"\n \"You open the chest and pick up a silver scarab.\"\n \"You leave the room.\")\n items.append(\"scarab\")\n choose_door(items)\n\n\ndef you_win():\n play_again = input(\"You won the game. Would you like to play again? \"\n \"Please enter Y or N.\")\n if play_again == \"Y\":\n play_game()\n elif play_again == \"N\":\n print_pause(\"Thanks for playing. Have a great day!\")\n exit()\n else:\n print_pause(\"Sorry, I don't understand.\")\n you_win(items)\n\n\ndef mummy_room(items):\n print_pause(\"In this room lies a mummy. \"\n \"You walk up to the mummy to examine it more closely. \"\n \"Suddenly, it sits up. The mummy is alive!\")\n fight = input(\"Press 1 to fight the mummy. \"\n \"Press 2 to run away.\")\n if fight == \"1\":\n if \"scarab\" in items:\n print_pause(\"Your silver scarab comes to life. \"\n \"The mummy now sees you as a friend. \"\n \"The mummy goes back to sleep. \"\n \"You can now safely search the room \"\n \"for more treasures!\")\n you_win(items)\n elif \"staff\" in items:\n print_pause(\"You hold up the golden staff you found \"\n \"in the treasure chest! \"\n \"The mummy now sees you as a friend. \"\n \"The mummy goes back to sleep. \"\n \"You can now safely search the room \"\n \"for more treasures!\")\n else:\n print_pause(\"The mummy swings right at your head! \"\n \"Fortunately, you're able to duck \"\n \"just in time and run out the door.\")\n choose_door(items)\n\n elif fight == \"2\":\n print_pause(\"You made it out of the mummy's room alive!\")\n choose_door(items)\n\n else:\n print_pause(\"I'm sorry, I don't understand.\")\n mummy_room(items)\n\n\ndef play_game():\n items = []\n intro()\n choose_door(items)\n\n\nplay_game()\n" } ]
1
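mummy_game.py (as fixed above) re-prompts on invalid input by calling the current function again, which works for a short game but deepens the call stack on every typo. A small iterative alternative for the same prompt pattern; the helper name, prompt text, and option set are illustrative:

```python
def ask(prompt, valid):
    """Re-prompt until the reply is one of the valid options, without recursion."""
    while True:
        reply = input(prompt)
        if reply in valid:
            return reply
        print("Sorry, I don't understand.")

# Usage matching the game's door prompt
door = ask("Press 1 for the left door. Press 2 for the right door. ", {"1", "2"})
```

Centralizing validation in one helper also removes the subtle double-prompt bug that separate if statements (rather than if/elif) caused in the original choose_door.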